repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-680k) |
---|---|---|---|---|
sht47/mmtracking | configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py | 5a25e418e9c598d1b576bce8702f5e156cbbefe7 | _base_ = ['./tracktor_faster-rcnn_r50_fpn_4e_mot17-public-half.py']
model = dict(
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth' # noqa: E501
))
data_root = 'data/MOT17/'
test_set = 'test'
data = dict(
train=dict(ann_file=data_root + 'annotations/train_cocoformat.json'),
val=dict(
ann_file=data_root + 'annotations/train_cocoformat.json',
detection_file=data_root + 'annotations/train_detections.pkl'),
test=dict(
ann_file=data_root + f'annotations/{test_set}_cocoformat.json',
img_prefix=data_root + test_set,
detection_file=data_root + f'annotations/{test_set}_detections.pkl'))
| [] |
Shaimyst/scrive_test | browserstack/first_sample_build.py | 38e3ea0192885d1776d24afdbea110d73adc4e8b | from threading import Thread
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# This array 'caps' defines the capabilities (browser, device and OS combinations) where the test will run
caps=[{
'os_version': '10',
'os': 'Windows',
'browser': 'ie',
'browser_version': '11.0',
'name': 'Parallel Test1', # test name
'build': 'browserstack-build-1' # Your tests will be organized within this build
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'chrome',
'browser_version': '95.0',
'name': 'Parallel Test2',
'build': 'browserstack-build-1'
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'firefox',
'browser_version': '93.0',
'name': 'Parallel Test3',
'build': 'browserstack-build-1'
}]
#run_session function searches for 'BrowserStack' on google.com
def run_session(desired_cap):
driver = webdriver.Remote(
command_executor='https://jessicasadler_RbBTVv:[email protected]/wd/hub',
desired_capabilities=desired_cap)
driver.get("https://www.google.com")
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
elem = driver.find_element_by_name("q")
elem.send_keys("BrowserStack")
elem.submit()
try:
WebDriverWait(driver, 5).until(EC.title_contains("BrowserStack"))
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"passed", "reason": "Title matched!"}}')
except TimeoutException:
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"failed", "reason": "Title not matched"}}')
print(driver.title)
driver.quit()
#The Thread function takes the run_session function and one set of capabilities from the caps array as arguments to run each session in parallel
for cap in caps:
Thread(target=run_session, args=(cap,)).start() | [((1243, 1407), 'selenium.webdriver.Remote', 'webdriver.Remote', ([], {'command_executor': '"""https://jessicasadler_RbBTVv:[email protected]/wd/hub"""', 'desired_capabilities': 'desired_cap'}), "(command_executor=\n 'https://jessicasadler_RbBTVv:[email protected]/wd/hub'\n , desired_capabilities=desired_cap)\n", (1259, 1407), False, 'from selenium import webdriver\n'), ((1673, 1706), 'selenium.webdriver.support.expected_conditions.title_contains', 'EC.title_contains', (['"""BrowserStack"""'], {}), "('BrowserStack')\n", (1690, 1706), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2230, 2269), 'threading.Thread', 'Thread', ([], {'target': 'run_session', 'args': '(cap,)'}), '(target=run_session, args=(cap,))\n', (2236, 2269), False, 'from threading import Thread\n'), ((1642, 1666), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(5)'], {}), '(driver, 5)\n', (1655, 1666), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')] |
suutari-ai/mvj | sanitizers/mvj.py | c39dbc692afcb3b26366783414c2d5a88a57b25a | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
from random import choice
from string import digits
from faker import Faker
fake = Faker("fi_FI")
def sanitize_address(value):
return fake.address()
def sanitize_address_if_exist(value):
if value:
return sanitize_address(value)
def sanitize_business_id(value):
return fake.pystr_format(string_format="#######-#", letters="0123456789")
def sanitize_business_id_if_exist(value):
if value:
return sanitize_business_id(value)
def sanitize_city(value):
return fake.city()
def sanitize_city_if_exist(value):
if value:
return sanitize_city(value)
def sanitize_company(value):
return fake.company()
def sanitize_company_if_exist(value):
if value:
return sanitize_company(value)
def sanitize_email(value):
return fake.email()
def sanitize_email_if_exist(value):
if value:
return sanitize_email(value)
def sanitize_first_name(value):
return fake.first_name()
def sanitize_first_name_if_exist(value):
if value:
return sanitize_first_name(value)
def sanitize_generate_random_numbers(value):
return "".join([choice(digits) for i in range(random.randint(0, 10))])
def sanitize_generate_random_numbers_if_exist(value):
if value:
return sanitize_generate_random_numbers(value)
def sanitize_last_name(value):
    return fake.last_name()
def sanitize_last_name_if_exist(value):
if value:
return sanitize_last_name(value)
def sanitize_national_identification_number(value):
return fake.pystr_format(string_format="######-####", letters="0123456789")
def sanitize_national_identification_number_if_exist(value):
if value:
return sanitize_national_identification_number(value)
def sanitize_name(value):
return fake.name()
def sanitize_paragraph(value):
return fake.paragraph()
def sanitize_paragraph_if_exist(value):
if value:
return sanitize_paragraph(value)
def sanitize_phone_number(value):
return fake.phone_number()
def sanitize_phone_number_if_exist(value):
if value:
return sanitize_phone_number(value)
def sanitize_postcode(value):
return fake.postcode()
def sanitize_postcode_if_exist(value):
if value:
return sanitize_postcode(value)
def sanitize_url(value):
return fake.url()
| [((164, 178), 'faker.Faker', 'Faker', (['"""fi_FI"""'], {}), "('fi_FI')\n", (169, 178), False, 'from faker import Faker\n'), ((1202, 1216), 'random.choice', 'choice', (['digits'], {}), '(digits)\n', (1208, 1216), False, 'from random import choice\n'), ((1232, 1253), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1246, 1253), False, 'import random\n')] |
rhmdnd/compliance-trestle-demos | ISM_catalog_profile/scripts/ISM/ISM.py | 1d92c91cca1d23cf707f82f035b2d58ec67c953a | #!/usr/bin/env python3
# # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create ISM catalogs.
This script is used to convert Australian Government Information Security Manual (ISM) into OSCAL formats.
The ISM is the equivalent of NIST 800-53 / FedRAMP / IL6 and similar documents in the USA. The goal is to produce a
similar set of OSCAL documents to what NIST and FedRAMP are currently publishing.
It does this via pulling the ISM xml doc and creating:
1 Catalog for all the controls
4 profiles (Official, protected, secret, TS)
Ideally this would be a cron job based script, however, as ACSC publish revisions
with specific names this would need to be discovered by crawling. This will be a potential future enhancement.
This script pulls down the controls in a 'dumb' way from the xml to get the actual controls. A full featured catalog
will need to parse appropriate word / xml documents to provide groups /guidance.
"""
import io
import json
import logging
import pathlib
import sys
import urllib.request
import zipfile
from datetime import datetime
from uuid import uuid4
from ilcli import Command
import trestle.oscal.catalog as catalog
import trestle.oscal.common as common
import trestle.oscal.profile as profile
import xmltodict
# Globally define logging behaviour.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
remarks_tuple = '\n'.join(
[
'This is not an official version of the Australian Government Information Security Manual.',
'',
'Find the official versions here: https://www.cyber.gov.au/acsc/view-all-content/ism',
'This content was generated using scrips/ISM/ISM.py'
]
)
class ISMManager():
"""ISMManager a class to manage conversion of ISM artifacts into OSCAL."""
def __init__(self):
"""Initialize ISM manager. No required parameters."""
self._profile_controls = {'OFFICIAL': [], 'PROTECTED': [], 'SECRET': [], 'TOP_SECRET': []}
self._profiles = {}
def fetch_ism(self, url):
"""Fetch an Australian government ISM and covert to a dict."""
logger.debug('Fetching ISM from: ' + url)
request_url = urllib.request.urlopen(url)
document = request_url.read()
zipfile_content = zipfile.ZipFile(io.BytesIO(document))
content_list = zipfile_content.namelist()
xml_files = [x for x in content_list if '.xml' in x]
assert len(xml_files) == 1
self.ism_xml = xmltodict.parse(zipfile_content.open(xml_files[0]).read())
def _populate_control_list(self, control, raw_id):
"""Populate control lists based on a dict from the xml version of the ISM."""
# TODO: Really not pythonic but anyway.
control_id = 'control-' + raw_id
for security_level in self._profile_controls.keys():
# Dealing with schema changes 'Yes' and 'true' appear to both be valid options.
if control[security_level].lower() == 'yes' or control[security_level].lower() == 'true':
self._profile_controls[security_level].append(control_id)
def _probe_for_keys(self, ism_control):
"""Probe for the appropriate keys for l2 groups based on whether or not section exists."""
l2_group_key = 'Section'
if l2_group_key not in ism_control.keys():
l2_group_key = 'Topic'
return l2_group_key
def _name_clean(self, name: str) -> str:
"""Normalize string to ncname format."""
return name.strip().lower().replace(' ', '_').replace('/', '-')
def create_ism_catalog(self, version: str) -> None:
"""Parse ISM object and create a catalog."""
m = common.Metadata(
**{
'title': 'Australian Government Information Security manual',
'last-modified': datetime.now().astimezone(),
'version': version,
'oscal-version': '1.0.0',
'remarks': remarks_tuple
}
)
ism_catalog = catalog.Catalog(metadata=m, uuid=str(uuid4()))
# Create basic metadata:
ism_controls = self.ism_xml['ISM']['Control']
l2_group_key = self._probe_for_keys(ism_controls[0])
"""
Approach:
- Two levels of groups - no sub controls.
- below this will be parts
"""
# Get list of top level controls
tl_group_titles = set(map(lambda x: x['Guideline'], ism_controls))
groups = []
for tl_group_name in tl_group_titles:
group = catalog.Group(id=self._name_clean(tl_group_name), title=tl_group_name)
# now add l2 groups
control_subset = list(filter(lambda x: x['Guideline'] == tl_group_name, ism_controls))
# get set l2 group names.
l2_group_titles = set(map(lambda x: x[l2_group_key], control_subset))
l2_groups = []
for l2_group_name in l2_group_titles:
clean_id = self._name_clean(l2_group_name)
l2_group = catalog.Group(id=clean_id, title=l2_group_name)
# Now identify and add the controls
oscal_controls = []
l2_control_subset = list(filter(lambda x: x[l2_group_key] == l2_group_name, control_subset))
# now we can create and add controls.
# TODO: Make more pythonic
for ism_control in l2_control_subset:
raw_id = ism_control['Identifier']
description = ism_control['Description']
topic = ism_control['Topic']
# make description the part statement
statement_part = common.Part(id='control-' + raw_id + '-stmt', name='statement', prose=description)
# this is very minimial
oscal_control = catalog.Control(id='control-' + raw_id, title=topic, parts=[statement_part])
self._populate_control_list(ism_control, raw_id)
oscal_controls.append(oscal_control)
l2_group.controls = oscal_controls
l2_groups.append(l2_group)
group.groups = l2_groups
groups.append(group)
ism_catalog.groups = groups
self._ism_catalog = ism_catalog
def create_ism_profiles(self, revision_date, uri='./ISM_catalog.yaml'):
"""Create profile for each ISM environment."""
for security_level in self._profile_controls.keys():
ism_profile = profile.Profile(
uuid=str(uuid4()),
metadata=common.Metadata(
**{
'title': 'Australian Government Information Security Manual profile for ' + security_level,
'version': revision_date,
'oscal-version': '1.0.0',
'last-modified': datetime.now().astimezone(),
'remarks': remarks_tuple
}
),
imports=[profile.Import(href=uri)]
)
controls_list = self._profile_controls[security_level]
ism_profile.imports[0].include_controls = self._populate_import_include(controls_list)
self._profiles[security_level] = ism_profile
def _populate_import_include(self, control_list):
include_controls = []
selector = profile.SelectControlById()
selector.with_ids = control_list
include_controls.append(selector)
return include_controls
def write_catalog(self, catalogs_path, ism_name):
"""Wrap and write oscal catalog object."""
ism_dir_path = catalogs_path / ism_name
ism_dir_path.mkdir(exist_ok=True)
ism_file_path = ism_dir_path / 'catalog.json'
self._ism_catalog.oscal_write(ism_file_path)
def write_profiles(self, profiles_dir, ism_name):
"""Write out all profiles."""
for security_level in self._profiles.keys():
profile_dir = profiles_dir / (ism_name + '_' + security_level)
profile_dir.mkdir(exist_ok=True)
profile_path = profile_dir / 'profile.json'
self._profiles[security_level].oscal_write(profile_path)
class ISM(Command):
"""
Convert the Australian government information security manual (in various versions) into catalogs and profiles.
This CLI has presumptions on resource structures that are returned.
Please note that this project currently makes assumptions about the project structure.
"""
def _init_arguments(self):
self.add_argument('-r', '--root-dir', help='Trestle project root.', default='./')
def _run(self, args):
# little test
root_dir = pathlib.Path(args.root_dir).resolve()
catalogs_dir = root_dir.joinpath('catalogs').resolve()
profiles_dir = root_dir.joinpath('profiles').resolve()
ism_json_file = root_dir.joinpath('scripts/ISM/ism_editions.json').resolve()
if not root_dir.exists():
logger.error('Root trestle project does not exist')
return 1
if not catalogs_dir.exists():
logger.error('Catalogs directory does not exist.')
return 1
if not profiles_dir.exists():
logger.error('Profiles directory does not exist.')
return 1
ism_versions = json.load(ism_json_file.open())
for ism_file in ism_versions['isms']:
# ISM file format: 'ISM - List of Security Controls (August 2019).xml'
logger.info(ism_file)
url = ism_file['version_url']
ism_manager = ISMManager()
ism_manager.fetch_ism(url)
revision_date = ism_file['version_name'].split()
revision_string = revision_date[0] + '_' + revision_date[1]
logger.info(f'Revision date: {revision_date}')
logger.info(f'Revision string: {revision_string}')
logger.info(revision_string)
ism_name = 'ISM_' + revision_string
ism_manager.create_ism_catalog(revision_string)
# This is presumed to be relative for now to the catalog repo based on this
ism_manager.write_catalog(catalogs_dir, ism_name)
ism_manager.create_ism_profiles(revision_string, 'trestle://' + ism_name + '/catalog.json')
ism_manager.write_profiles(profiles_dir, ism_name)
if __name__ == '__main__':
sys.exit(ISM().run())
| [((1811, 1838), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1828, 1838), False, 'import logging\n'), ((1888, 1921), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1909, 1921), False, 'import logging\n'), ((7947, 7974), 'trestle.oscal.profile.SelectControlById', 'profile.SelectControlById', ([], {}), '()\n', (7972, 7974), True, 'import trestle.oscal.profile as profile\n'), ((2831, 2851), 'io.BytesIO', 'io.BytesIO', (['document'], {}), '(document)\n', (2841, 2851), False, 'import io\n'), ((5579, 5626), 'trestle.oscal.catalog.Group', 'catalog.Group', ([], {'id': 'clean_id', 'title': 'l2_group_name'}), '(id=clean_id, title=l2_group_name)\n', (5592, 5626), True, 'import trestle.oscal.catalog as catalog\n'), ((9293, 9320), 'pathlib.Path', 'pathlib.Path', (['args.root_dir'], {}), '(args.root_dir)\n', (9305, 9320), False, 'import pathlib\n'), ((4598, 4605), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4603, 4605), False, 'from uuid import uuid4\n'), ((6236, 6323), 'trestle.oscal.common.Part', 'common.Part', ([], {'id': "('control-' + raw_id + '-stmt')", 'name': '"""statement"""', 'prose': 'description'}), "(id='control-' + raw_id + '-stmt', name='statement', prose=\n description)\n", (6247, 6323), True, 'import trestle.oscal.common as common\n'), ((6399, 6475), 'trestle.oscal.catalog.Control', 'catalog.Control', ([], {'id': "('control-' + raw_id)", 'title': 'topic', 'parts': '[statement_part]'}), "(id='control-' + raw_id, title=topic, parts=[statement_part])\n", (6414, 6475), True, 'import trestle.oscal.catalog as catalog\n'), ((7103, 7110), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (7108, 7110), False, 'from uuid import uuid4\n'), ((7580, 7604), 'trestle.oscal.profile.Import', 'profile.Import', ([], {'href': 'uri'}), '(href=uri)\n', (7594, 7604), True, 'import trestle.oscal.profile as profile\n'), ((4367, 4381), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4379, 4381), False, 'from datetime import datetime\n'), ((7436, 7450), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7448, 7450), False, 'from datetime import datetime\n')] |
geo2tag-logistics/main | logistics/permissions.py | 3b55185ea97481bbabe38497e4608abefbf1ece1 | from rest_framework import permissions
def is_owner(user):
return user.groups.filter(name='OWNER').exists()
def is_driver(user):
return user.groups.filter(name='DRIVER').exists()
class IsOwnerPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_owner(request.user)
class IsDriverPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_driver(request.user)
class IsOwnerOrDriverPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_driver(request.user) or is_owner(request.user)
| [] |
blancKaty/alignmentFralework_and_classif | src/python/reduce_fps_parallel.py | 192565a928dad0d98553e0602e91eed59c4a193d | import os
import shutil
import sys
import multiprocessing
import glob
def copy(source, dest):
shutil.copyfile(source, dest)
def main():
input_folder = sys.argv[1]
output_folder = sys.argv[2]
print 'input reduce fps : ' , sys.argv
fps = int(sys.argv[3]);
final_length=float(sys.argv[4]) ;
max_length=final_length * fps ;
print 'normalisation param : ' , fps , final_length , max_length
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
os.makedirs(output_folder)
pool = multiprocessing.Pool(multiprocessing.cpu_count())
print "Using a Pool of", multiprocessing.cpu_count(), "processes"
X = sorted(next(os.walk(input_folder))[1])
print X
for x in X:
folder = os.path.join(output_folder, x)
os.mkdir(folder)
#Y = os.listdir(os.path.join(input_folder, x))
#print input_folder , x
Y = glob.glob(input_folder+"/"+x+"/*.jpg")
Y.sort()
sizeV=len(Y)
#print sizeV
if (sizeV > max_length) :
Y=Y[int(sizeV/2)-int(max_length/2): int(sizeV/2)+int(max_length/2)]
for idx, i in enumerate(range(0, len(Y), fps)):
y = Y[i]
source = y
#print y , "image_{:05d}.jpg".format(idx + 1)
y = "image_{:05d}.jpg".format(idx + 1)
dest = os.path.join(folder, y)
#print source , dest
pool.apply_async(copy, (source, dest))
pool.close()
pool.join()
if __name__ == '__main__':
main()
| [] |
EnjoyLifeFund/macHighSierra-py36-pkgs | ansible/utils/module_docs_fragments/docker.py | 5668b5785296b314ea1321057420bcd077dba9ea | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = '''
options:
docker_host:
description:
- "The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, 'tcp://192.0.2.23:2376'. If TLS is used to encrypt the connection,
the module will automatically replace 'tcp' in the connection URL with 'https'."
required: false
default: "unix://var/run/docker.sock"
aliases:
- docker_url
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
default: localhost
required: false
api_version:
description:
- The version of the Docker API running on the Docker Host. Defaults to the latest version of the API
supported by docker-py.
required: false
default: default provided by docker-py
aliases:
- docker_api_version
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
required: false
default: 60
cacert_path:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
required: false
default: null
aliases:
- tls_ca_cert
cert_path:
description:
- Path to the client's TLS certificate file.
required: false
default: null
aliases:
- tls_client_cert
key_path:
description:
- Path to the client's TLS key file.
required: false
default: null
aliases:
- tls_client_key
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by docker-py, currently 1.0.
required: false
default: "1.0"
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server.
default: false
tls_verify:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
default: false
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define DOCKER_HOST, DOCKER_TLS_HOSTNAME, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_SSL_VERSION,
DOCKER_TLS, DOCKER_TLS_VERIFY and DOCKER_TIMEOUT. If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
https://docker-py.readthedocs.org/en/stable/machine/ for more details.
'''
| [] |
cyberjunky/python-garminconnect-aio | setup.py | fb913a15107edee5c5530f3bded7c553ec57923b | #!/usr/bin/env python
from setuptools import setup
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
author="Ron Klinkien",
author_email="[email protected]",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
description="Asynchronous Garmin Connect Python 3 API wrapper",
name="garminconnect_aio",
keywords=["garmin connect", "api", "client"],
license="MIT license",
install_requires=["aiohttp >= 3.6", "yarl", "brotlipy"],
long_description_content_type="text/markdown",
long_description=readme,
url="https://github.com/cyberjunky/python-garminconnect-aio",
packages=["garminconnect_aio"],
version="0.1.4",
)
| [((125, 764), 'setuptools.setup', 'setup', ([], {'author': '"""Ron Klinkien"""', 'author_email': '"""[email protected]"""', 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']", 'description': '"""Asynchronous Garmin Connect Python 3 API wrapper"""', 'name': '"""garminconnect_aio"""', 'keywords': "['garmin connect', 'api', 'client']", 'license': '"""MIT license"""', 'install_requires': "['aiohttp >= 3.6', 'yarl', 'brotlipy']", 'long_description_content_type': '"""text/markdown"""', 'long_description': 'readme', 'url': '"""https://github.com/cyberjunky/python-garminconnect-aio"""', 'packages': "['garminconnect_aio']", 'version': '"""0.1.4"""'}), "(author='Ron Klinkien', author_email='[email protected]', classifiers=\n ['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'], description=\n 'Asynchronous Garmin Connect Python 3 API wrapper', name=\n 'garminconnect_aio', keywords=['garmin connect', 'api', 'client'],\n license='MIT license', install_requires=['aiohttp >= 3.6', 'yarl',\n 'brotlipy'], long_description_content_type='text/markdown',\n long_description=readme, url=\n 'https://github.com/cyberjunky/python-garminconnect-aio', packages=[\n 'garminconnect_aio'], version='0.1.4')\n", (130, 764), False, 'from setuptools import setup\n')] |
ChameleonCloud/nova | nova/tests/unit/virt/libvirt/fake_imagebackend.py | 4bb9421b02b71f2b218278aa6f97abace871b111 | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import os
import fixtures
import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
class ImageBackendFixture(fixtures.Fixture):
def __init__(self, got_files=None, imported_files=None, exists=None):
"""This fixture mocks imagebackend.Backend.backend, which is the
only entry point to libvirt.imagebackend from libvirt.driver.
:param got_files: A list of {'filename': path, 'size': size} for every
file which was created.
:param imported_files: A list of (local_filename, remote_filename) for
every invocation of import_file().
:param exists: An optional lambda which takes the disk name as an
argument, and returns True if the disk exists,
False otherwise.
"""
self.got_files = got_files
self.imported_files = imported_files
self.disks = collections.defaultdict(self._mock_disk)
"""A dict of name -> Mock image object. This is a defaultdict,
so tests may access it directly before a disk has been created."""
self._exists = exists
def setUp(self):
super(ImageBackendFixture, self).setUp()
# Mock template functions passed to cache
self.mock_fetch_image = mock.create_autospec(libvirt_utils.fetch_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_image', self.mock_fetch_image))
self.mock_fetch_raw_image = \
mock.create_autospec(libvirt_utils.fetch_raw_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_raw_image',
self.mock_fetch_raw_image))
self.mock_create_ephemeral = \
mock.create_autospec(driver.LibvirtDriver._create_ephemeral)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral',
self.mock_create_ephemeral))
self.mock_create_swap = \
mock.create_autospec(driver.LibvirtDriver._create_swap)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_swap',
self.mock_create_swap))
# Backend.backend creates all Image objects
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.Backend.backend',
self._mock_backend))
@property
def created_disks(self):
"""disks, filtered to contain only disks which were actually created
by calling a relevant method.
"""
# A disk was created iff either cache() or import_file() was called.
return {name: disk for name, disk in self.disks.items()
if any([disk.cache.called, disk.import_file.called])}
def _mock_disk(self):
# This is the generator passed to the disks defaultdict. It returns
# a mocked Image object, but note that the returned object has not
# yet been 'constructed'. We don't know at this stage what arguments
# will be passed to the constructor, so we don't know, eg, its type
# or path.
#
# The reason for this 2 phase construction is to allow tests to
# manipulate mocks for disks before they have been created. eg a
# test can do the following before executing the method under test:
#
# disks['disk'].cache.side_effect = ImageNotFound...
#
# When the 'constructor' (image_init in _mock_backend) later runs,
# it will return the same object we created here, and when the
# caller calls cache() it will raise the requested exception.
disk = mock.create_autospec(imagebackend.Image)
# NOTE(mdbooth): fake_cache and fake_import_file are for compatibility
# with existing tests which test got_files and imported_files. They
# should be removed when they have no remaining users.
disk.cache.side_effect = self._fake_cache
disk.import_file.side_effect = self._fake_import_file
# NOTE(mdbooth): test_virt_drivers assumes libvirt_info has functional
# output
disk.libvirt_info.side_effect = \
functools.partial(self._fake_libvirt_info, disk)
return disk
def _mock_backend(self, backend_self, image_type=None):
# This method mocks Backend.backend, which returns a subclass of Image
# (it returns a class, not an instance). This mocked method doesn't
# return a class; it returns a function which returns a Mock. IOW,
# instead of the getting a QCow2, the caller gets image_init,
# so instead of:
#
# QCow2(instance, disk_name='disk')
#
# the caller effectively does:
#
# image_init(instance, disk_name='disk')
#
# Therefore image_init() must have the same signature as an Image
# subclass constructor, and return a mocked Image object.
#
# The returned mocked Image object has the following additional
# properties which are useful for testing:
#
# * Calls with the same disk_name return the same object from
# self.disks. This means tests can assert on multiple calls for
# the same disk without worrying about whether they were also on
# the same object.
#
# * Mocked objects have an additional image_type attribute set to
# the image_type originally passed to Backend.backend() during
# their construction. Tests can use this to assert that disks were
# created of the expected type.
def image_init(instance=None, disk_name=None, path=None):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
path = os.path.join(
libvirt_utils.get_instance_path(instance), disk_name)
else:
disk_name = os.path.basename(path)
disk = self.disks[disk_name]
# Used directly by callers. These would have been set if called
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
# Used by tests to manipulate which disks exist.
if self._exists is not None:
# We don't just cache the return value here because the
# caller may want, eg, a test where the disk initially does not
# exist and later exists.
disk.exists.side_effect = lambda: self._exists(disk_name)
else:
disk.exists.return_value = True
return disk
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
# Ditto for the 'is_shared_block_storage' function and
# 'is_file_in_instance_path'
def is_shared_block_storage():
return False
def is_file_in_instance_path():
return False
setattr(image_init, 'is_shared_block_storage', is_shared_block_storage)
setattr(image_init, 'is_file_in_instance_path',
is_file_in_instance_path)
return image_init
def _fake_cache(self, fetch_func, filename, size=None, *args, **kwargs):
# Execute the template function so we can test the arguments it was
# called with.
fetch_func(target=filename, *args, **kwargs)
# For legacy tests which use got_files
if self.got_files is not None:
self.got_files.append({'filename': filename, 'size': size})
def _fake_import_file(self, instance, local_filename, remote_filename):
# For legacy tests which use imported_files
if self.imported_files is not None:
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode,
extra_specs, hypervisor_version, disk_unit=None):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
info.source_device = disk_info['type']
info.target_bus = disk_info['bus']
info.target_dev = disk_info['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
return info
| [((1711, 1751), 'collections.defaultdict', 'collections.defaultdict', (['self._mock_disk'], {}), '(self._mock_disk)\n', (1734, 1751), False, 'import collections\n'), ((2083, 2130), 'mock.create_autospec', 'mock.create_autospec', (['libvirt_utils.fetch_image'], {}), '(libvirt_utils.fetch_image)\n', (2103, 2130), False, 'import mock\n'), ((2303, 2354), 'mock.create_autospec', 'mock.create_autospec', (['libvirt_utils.fetch_raw_image'], {}), '(libvirt_utils.fetch_raw_image)\n', (2323, 2354), False, 'import mock\n'), ((2548, 2608), 'mock.create_autospec', 'mock.create_autospec', (['driver.LibvirtDriver._create_ephemeral'], {}), '(driver.LibvirtDriver._create_ephemeral)\n', (2568, 2608), False, 'import mock\n'), ((2815, 2870), 'mock.create_autospec', 'mock.create_autospec', (['driver.LibvirtDriver._create_swap'], {}), '(driver.LibvirtDriver._create_swap)\n', (2835, 2870), False, 'import mock\n'), ((4492, 4532), 'mock.create_autospec', 'mock.create_autospec', (['imagebackend.Image'], {}), '(imagebackend.Image)\n', (4512, 4532), False, 'import mock\n'), ((5015, 5063), 'functools.partial', 'functools.partial', (['self._fake_libvirt_info', 'disk'], {}), '(self._fake_libvirt_info, disk)\n', (5032, 5063), False, 'import functools\n'), ((9178, 9209), 'nova.virt.libvirt.config.LibvirtConfigGuestDisk', 'config.LibvirtConfigGuestDisk', ([], {}), '()\n', (9207, 9209), False, 'from nova.virt.libvirt import config\n'), ((2155, 2242), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.utils.fetch_image"""', 'self.mock_fetch_image'], {}), "('nova.virt.libvirt.utils.fetch_image', self.\n mock_fetch_image)\n", (2175, 2242), False, 'import fixtures\n'), ((2379, 2474), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.utils.fetch_raw_image"""', 'self.mock_fetch_raw_image'], {}), "('nova.virt.libvirt.utils.fetch_raw_image', self.\n mock_fetch_raw_image)\n", (2399, 2474), False, 'import fixtures\n'), ((2633, 2746), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral"""', 'self.mock_create_ephemeral'], {}), "('nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral'\n , self.mock_create_ephemeral)\n", (2653, 2746), False, 'import fixtures\n'), ((2895, 2997), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.driver.LibvirtDriver._create_swap"""', 'self.mock_create_swap'], {}), "('nova.virt.libvirt.driver.LibvirtDriver._create_swap',\n self.mock_create_swap)\n", (2915, 2997), False, 'import fixtures\n'), ((3097, 3192), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.virt.libvirt.imagebackend.Backend.backend"""', 'self._mock_backend'], {}), "('nova.virt.libvirt.imagebackend.Backend.backend', self\n ._mock_backend)\n", (3117, 3192), False, 'import fixtures\n'), ((6835, 6857), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6851, 6857), False, 'import os\n'), ((6735, 6776), 'nova.virt.libvirt.utils.get_instance_path', 'libvirt_utils.get_instance_path', (['instance'], {}), '(instance)\n', (6766, 6776), True, 'from nova.virt.libvirt import utils as libvirt_utils\n')] |
coolboi567/dnstwister | tests/test_email_subscriptions.py | b809ca721a13efc6b59e11587c582f6ba4b11587 | """Tests of the email subscription mechanism."""
import binascii
import flask_webtest
import mock
import pytest
import webtest.app
import dnstwister
import dnstwister.tools
import patches
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_bad_domains_fail(webapp):
"""Test the email views check domain validity."""
with pytest.raises(webtest.app.AppError) as err:
webapp.get('/email/subscribe/3234jskdnfsdf7y34')
assert '400 BAD REQUEST' in err.value.message
with pytest.raises(webtest.app.AppError) as err:
webapp.post('/email/pending_verify/3234jskdnfsdf7y34')
assert '400 BAD REQUEST' in err.value.message
def test_bad_error_codes(webapp):
"""Test the email error codes being weird doesn't break the page."""
normal_html = webapp.get('/email/subscribe/7777772e6578616d706c652e636f6d').html
assert webapp.get(
'/email/subscribe/7777772e6578616d706c652e636f6d/9',
expect_errors=True
).html == normal_html
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_verification_with_bad_id(webapp):
"""Test that verifying with a dud subscription id just redirects to root.
"""
response = webapp.get('/email/verify/1234', expect_errors=True)
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost/'
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_with_no_subscriptions():
repository = dnstwister.repository
assert list(repository.isubscriptions()) == []
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_during_subscription():
repository = dnstwister.repository
domain = 'www.example.com'
email = '[email protected]'
sub_id = '1234'
repository.subscribe_email(sub_id, email, domain, False)
subs = list(repository.isubscriptions())
assert len(subs) == 1
assert sorted(subs[0][1].keys()) == [
'domain', 'email_address', 'hide_noisy'
]
assert subs[0][1]['domain'] == domain
assert subs[0][1]['email_address'] == email
assert subs[0][1]['hide_noisy'] == False
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_email_address_required():
app = flask_webtest.TestApp(dnstwister.app)
domain = 'a.com'
hexdomain = binascii.hexlify(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
subscribe_page = app.get(subscribe_path)
assert 'Email address is required' not in subscribe_page.body
subscribe_page.form['email_address'] = ' '
response = subscribe_page.form.submit()
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost/email/subscribe/{}/0?hide_noisy=False'.format(hexdomain)
assert 'Email address is required' in response.follow().body
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_email_address_validation_remembers_hide_noisy_flag():
app = flask_webtest.TestApp(dnstwister.app)
domain = 'a.com'
hexdomain = binascii.hexlify(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
subscribe_page = app.get(subscribe_path)
subscribe_page.form['email_address'] = ' '
subscribe_page.form['hide_noisy'] = 'true'
response = subscribe_page.form.submit()
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost/email/subscribe/{}/0?hide_noisy=True'.format(hexdomain)
assert 'Email address is required' in response.follow().body
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_link():
app = flask_webtest.TestApp(dnstwister.app)
emailer = dnstwister.views.www.email.emailer
repository = dnstwister.repository
assert emailer.sent_emails == []
domain = 'a.com'
hexdomain = dnstwister.tools.encode_domain(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
search_page = app.get('/search/{}'.format(hexdomain))
assert subscribe_path in search_page.body
subscribe_page = app.get(subscribe_path)
subscribe_page.form['email_address'] = '[email protected]'
subscribe_page.form.submit()
assert list(repository.isubscriptions()) == []
verify_code = repository.db.data.items()[0][0].split(
'email_sub_pending:'
)[1]
verify_path = '/email/verify/{}'.format(
verify_code
)
verify_url = 'http://localhost{}'.format(verify_path)
assert len(emailer.sent_emails) == 1
sent_email = emailer.sent_emails[0][:2]
assert sent_email == (
'[email protected]', 'Please verify your subscription'
)
assert verify_url in emailer.sent_emails[0][2]
subscribed_page = app.get(verify_path)
assert 'You are now subscribed' in subscribed_page.body
assert len(list(repository.isubscriptions())) == 1
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_unsubscribe():
"""Test can unsubscribe."""
app = flask_webtest.TestApp(dnstwister.app)
repository = dnstwister.repository
domain = 'www.example.com'
email = '[email protected]'
sub_id = '1234'
assert len(list(repository.isubscriptions())) == 0
repository.subscribe_email(sub_id, email, domain, False)
assert len(list(repository.isubscriptions())) == 1
app.get('/email/unsubscribe/{}'.format(sub_id))
assert len(list(repository.isubscriptions())) == 0
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_link_unicode():
app = flask_webtest.TestApp(dnstwister.app)
emailer = dnstwister.views.www.email.emailer
repository = dnstwister.repository
assert emailer.sent_emails == []
domain = u'\u0454a.com' # ea.com, but with a funny 'e'
hexdomain = dnstwister.tools.encode_domain(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
search_page = app.get('/search/{}'.format(hexdomain))
assert subscribe_path in search_page.body
subscribe_page = app.get(subscribe_path)
assert '\xd1\x94a.com (xn--a-9ub.com)' in subscribe_page.body
subscribe_page.form['email_address'] = '[email protected]'
pending_page = subscribe_page.form.submit()
assert pending_page.request.url.endswith('pending_verify/786e2d2d612d3975622e636f6d')
assert '\xd1\x94a.com (xn--a-9ub.com)' in pending_page.body
assert list(repository.isubscriptions()) == []
verify_code = repository.db.data.items()[0][0].split(
'email_sub_pending:'
)[1]
verify_path = '/email/verify/{}'.format(
verify_code
)
verify_url = 'http://localhost{}'.format(verify_path)
assert len(emailer.sent_emails) == 1
sent_email = emailer.sent_emails[0][:2]
assert sent_email == (
'[email protected]', 'Please verify your subscription'
)
assert verify_url in emailer.sent_emails[0][2]
subscribed_page = app.get(verify_path)
assert 'You are now subscribed' in subscribed_page.body
assert '\xd1\x94a.com (xn--a-9ub.com)' in subscribed_page.body
assert len(list(repository.isubscriptions())) == 1
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_unsubscribe_unicode():
"""Test can unsubscribe."""
app = flask_webtest.TestApp(dnstwister.app)
repository = dnstwister.repository
domain = u'www.\u0454xample.com'
email = '[email protected]'
sub_id = '1234'
assert len(list(repository.isubscriptions())) == 0
repository.subscribe_email(sub_id, email, domain, False)
assert len(list(repository.isubscriptions())) == 1
app.get('/email/unsubscribe/{}'.format(sub_id))
assert len(list(repository.isubscriptions())) == 0
| [((253, 272), 'patches.NoEmailer', 'patches.NoEmailer', ([], {}), '()\n', (270, 272), False, 'import patches\n'), ((315, 341), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (339, 341), False, 'import patches\n'), ((1155, 1181), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (1179, 1181), False, 'import patches\n'), ((1535, 1561), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (1559, 1561), False, 'import patches\n'), ((1750, 1776), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (1774, 1776), False, 'import patches\n'), ((2516, 2553), 'flask_webtest.TestApp', 'flask_webtest.TestApp', (['dnstwister.app'], {}), '(dnstwister.app)\n', (2537, 2553), False, 'import flask_webtest\n'), ((2595, 2619), 'binascii.hexlify', 'binascii.hexlify', (['domain'], {}), '(domain)\n', (2611, 2619), False, 'import binascii\n'), ((2379, 2398), 'patches.NoEmailer', 'patches.NoEmailer', ([], {}), '()\n', (2396, 2398), False, 'import patches\n'), ((2441, 2467), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (2465, 2467), False, 'import patches\n'), ((3343, 3380), 'flask_webtest.TestApp', 'flask_webtest.TestApp', (['dnstwister.app'], {}), '(dnstwister.app)\n', (3364, 3380), False, 'import flask_webtest\n'), ((3422, 3446), 'binascii.hexlify', 'binascii.hexlify', (['domain'], {}), '(domain)\n', (3438, 3446), False, 'import binascii\n'), ((3178, 3197), 'patches.NoEmailer', 'patches.NoEmailer', ([], {}), '()\n', (3195, 3197), False, 'import patches\n'), ((3240, 3266), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (3264, 3266), False, 'import patches\n'), ((4117, 4154), 'flask_webtest.TestApp', 'flask_webtest.TestApp', (['dnstwister.app'], {}), '(dnstwister.app)\n', (4138, 4154), False, 'import flask_webtest\n'), ((4326, 4364), 'dnstwister.tools.encode_domain', 'dnstwister.tools.encode_domain', (['domain'], {}), '(domain)\n', (4356, 4364), False, 'import dnstwister\n'), ((3983, 4002), 'patches.NoEmailer', 'patches.NoEmailer', ([], {}), '()\n', (4000, 4002), False, 'import patches\n'), ((4045, 4071), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (4069, 4071), False, 'import patches\n'), ((5580, 5617), 'flask_webtest.TestApp', 'flask_webtest.TestApp', (['dnstwister.app'], {}), '(dnstwister.app)\n', (5601, 5617), False, 'import flask_webtest\n'), ((5421, 5440), 'patches.NoEmailer', 'patches.NoEmailer', ([], {}), '()\n', (5438, 5440), False, 'import patches\n'), ((5483, 5509), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (5507, 5509), False, 'import patches\n'), ((6228, 6265), 'flask_webtest.TestApp', 'flask_webtest.TestApp', (['dnstwister.app'], {}), '(dnstwister.app)\n', (6249, 6265), False, 'import flask_webtest\n'), ((6476, 6514), 'dnstwister.tools.encode_domain', 'dnstwister.tools.encode_domain', (['domain'], {}), '(domain)\n', (6506, 6514), False, 'import dnstwister\n'), ((6086, 6105), 'patches.NoEmailer', 'patches.NoEmailer', ([], {}), '()\n', (6103, 6105), False, 'import patches\n'), ((6148, 6174), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (6172, 6174), False, 'import patches\n'), ((8048, 8085), 'flask_webtest.TestApp', 'flask_webtest.TestApp', (['dnstwister.app'], {}), '(dnstwister.app)\n', (8069, 8085), False, 'import flask_webtest\n'), ((7881, 7900), 'patches.NoEmailer', 'patches.NoEmailer', ([], {}), '()\n', (7898, 7900), False, 'import patches\n'), ((7943, 7969), 
'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (7967, 7969), False, 'import patches\n'), ((444, 479), 'pytest.raises', 'pytest.raises', (['webtest.app.AppError'], {}), '(webtest.app.AppError)\n', (457, 479), False, 'import pytest\n'), ((609, 644), 'pytest.raises', 'pytest.raises', (['webtest.app.AppError'], {}), '(webtest.app.AppError)\n', (622, 644), False, 'import pytest\n')] |
zlikun/python-crawler-lianjia | ershoufang/crawler_v2.py | 7e7bf0cbd333486ee62ac015e72b96d6003c8713 | """
Version 2: multi-process second-hand housing (ershoufang) crawler
1. The crawler is split into a download task and a parse task (it could be decomposed further, but that adds little here); each part runs in its own subprocess and the two communicate through a data pipe
2. Inside the download task, task management and communication use a task pipe rather than a queue (queues do not work well in a setup that mixes the main process, subprocesses and pools inside subprocesses)
3. The parse task receives data from the pipe shared with the download task, parses it and saves the result
Open question: once the target has been completely crawled, how do we make the crawler stop?
"""
import csv
import datetime
import logging
import multiprocessing as mp
import re
import time
from collections import OrderedDict
import requests
from pyquery import PyQuery
from requests import RequestException
base_url = r'https://sh.lianjia.com/ershoufang'
# There is no great way to represent the set of already-processed URLs; a plain set plus a lock is used here so it works across multiple processes
seen_urls = set()
lock = mp.Lock()
# Number of retries after a failed download
retries = 3
# Current date
today = datetime.date.today()
# URL regular expressions for list pages and detail pages
list_page_pattern = '^{}/(pg\d+/)?$'.format(base_url)
item_page_pattern = '^{}/\d+.html$'.format(base_url)
# Path where scraped data is stored
csv_file_path = r'../.data/ershoufang-{}.csv'.format(today)
# Logging configuration
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(process)05d - %(levelname)s - %(message)s')
def start_download_job(data_writer, init_tasks):
"""
Download task (job)
:param data_writer: data pipe (write end)
:param init_tasks: initial set of tasks
:return:
"""
# Build the process pool sized by CPU count: use 4 on machines with fewer than 4 cores, otherwise the CPU count
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
pool = mp.Pool(pool_size)
# Tasks are passed through a pipe rather than a queue (queues run into all kinds of problems in applications like this one that mix subprocesses and process pools)
(task_reader, task_writer) = mp.Pipe(duplex=False)
# To keep the code simple, the initial tasks are sent straight through the task pipe and then received again below
# This could also be done inside the loop: only receive from the task pipe once the initial task set is empty
task_writer.send(init_tasks)
# Loop, reading task data from the task pipe and processing it
while True:
# A task is a set of URLs
urls = task_reader.recv()
# Use the process pool to download each URL; the (content, url) tuple of each downloaded document is sent out through the pipe
for url in urls:
# Skip URLs that have already been processed
with lock:
if url in seen_urls:
continue
else:
seen_urls.add(url)
# Run the download task
pool.apply_async(download, (url, task_writer, data_writer))
pool.close()
pool.join()
def download(url, task_writer, data_writer):
"""
Download a page, retrying at most 3 times
:param url: url to download
:param task_writer: task pipe (write end)
:param data_writer: data pipe (write end)
:return:
"""
for _ in range(retries + 1):
try:
logging.info('download page {}'.format(url))
content = requests.get(url).text
if content is None:
continue
# Extract the links contained in a list page
if is_list_page(url):
links = parse_list_page(content, url)
# Send the list of detail-page links out through the task pipe
if links and len(links) > 0:
task_writer.send(links)
else:
data_writer.send((content, url))
return
except RequestException:
# Sleep for 2 seconds on error
time.sleep(2)
# Log an error message once the retry limit is exceeded
logging.error('Download still failing after {} retries: {}'.format(retries, url))
# Put the failed url back into the task queue
task_writer.send(set([url]))
def is_list_page(url):
"""
Check whether a URL is a list page
:param url:
:return:
"""
return re.match(list_page_pattern, url)
def parse_list_page(content, url):
"""
List-page parser
:param content:
:param url:
:return: set of detail-page links
"""
pq = PyQuery(content, url=url)
return set([li.attr('href') for li in pq('ul.sellListContent div.title > a').items()])
def parse_item_page(content, url):
"""
Detail-page parser
:param content:
:param url:
:return: the parsed detail data
"""
pq = PyQuery(content, url=url)
return OrderedDict({'title': pq('div.content > div.title > h1').text().strip(),
'sub_title': pq('div.content > div.title > div.sub').text().strip(),
'price': pq('div.price > span.total').text().strip(),
'unit_price': pq('div.unitPrice > span.unitPriceValue').text().replace('元/平米', '').strip(),
'down_payment_info': pq('div.tax > span.taxtext').text().strip(),
'area': re.search('(\d+\.?\d*)', pq('div.area > div.mainInfo').text()).group(1),
'year_info': pq('div.area > div.subInfo').text().strip(),
'house_type': pq('div.room > div.mainInfo').text().strip(),
'floor': pq('div.room > div.subInfo').text().strip(),
'towards': pq('div.type > div.mainInfo').text().strip(),
'housing_estate': pq('div.communityName > a:first').text().strip(),
'housing_estate_link': pq('div.communityName > a:first').attr('href'),
'location': tuple([i.text().strip() for i in pq('div.areaName > span > a').items()]),
'broker': pq('div.brokerName > a').text().strip(),
'broker_homepage': pq('div.brokerName > a').attr('href'),
'number': pq('div.houseRecord > span.info').text().replace('举报', '').strip()})
def start_parse_job(data_reader):
"""
Parse task (job)
:param data_reader: data pipe (read end)
:return:
"""
# Build the process pool sized by CPU count: use 4 on machines with fewer than 4 cores, otherwise the CPU count
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
# The parse job only uses half the size of the download pool (adjust as needed; parsing is currently much faster than downloading, and this also avoids spawning too many processes)
pool = mp.Pool(pool_size // 2)
while True:
args = data_reader.recv()
if args is not None:
pool.apply_async(parse, args, callback=process)
pool.close()
pool.join()
def parse(content, url):
"""
Parse a page
:param content:
:param url:
:return:
"""
if content is None or url is None:
return
try:
# Parse the detail page and return the data
return parse_item_page(content, url)
except Exception as e:
logging.error(e)
def process(data):
"""
Process the data
:param data:
:return:
"""
if data is None:
return
# Basic data cleanup
# Fix housing-estate links that are not fully qualified
if 'housing_estate_link' in data and not data['housing_estate_link'].startswith('https://'):
data['housing_estate_link'] = 'https://sh.lianjia.com' + data['housing_estate_link']
# Data conversion
# Extract the number of rooms from the house type
if 'house_type' in data:
data['house_type'] = (data['house_type'].split('室')[0], data['house_type'])
# Store the data (append to a CSV file, one file per date)
with open(csv_file_path,
'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(data.values())
if __name__ == '__main__':
# Initial set of tasks
init_tasks = set([base_url + '/'] + ['{}/pg{}/'.format(base_url, i) for i in range(2, 101)])
# Create the pipe used for communication between the tasks (processes)
(data_reader, data_writer) = mp.Pipe(duplex=False)
# Start the download job (write end)
mp.Process(target=start_download_job, args=(data_writer, init_tasks)).start()
# Start the parse job (read end)
mp.Process(target=start_parse_job, args=(data_reader,)).start()
logging.info('--running--')
| [((530, 539), 'multiprocessing.Lock', 'mp.Lock', ([], {}), '()\n', (537, 539), True, 'import multiprocessing as mp\n'), ((578, 599), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (597, 599), False, 'import datetime\n'), ((804, 916), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(process)05d - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(process)05d - %(levelname)s - %(message)s')\n", (823, 916), False, 'import logging\n'), ((1204, 1222), 'multiprocessing.Pool', 'mp.Pool', (['pool_size'], {}), '(pool_size)\n', (1211, 1222), True, 'import multiprocessing as mp\n'), ((1309, 1330), 'multiprocessing.Pipe', 'mp.Pipe', ([], {'duplex': '(False)'}), '(duplex=False)\n', (1316, 1330), True, 'import multiprocessing as mp\n'), ((2927, 2959), 're.match', 're.match', (['list_page_pattern', 'url'], {}), '(list_page_pattern, url)\n', (2935, 2959), False, 'import re\n'), ((3091, 3116), 'pyquery.PyQuery', 'PyQuery', (['content'], {'url': 'url'}), '(content, url=url)\n', (3098, 3116), False, 'from pyquery import PyQuery\n'), ((3337, 3362), 'pyquery.PyQuery', 'PyQuery', (['content'], {'url': 'url'}), '(content, url=url)\n', (3344, 3362), False, 'from pyquery import PyQuery\n'), ((5095, 5118), 'multiprocessing.Pool', 'mp.Pool', (['(pool_size // 2)'], {}), '(pool_size // 2)\n', (5102, 5118), True, 'import multiprocessing as mp\n'), ((6430, 6451), 'multiprocessing.Pipe', 'mp.Pipe', ([], {'duplex': '(False)'}), '(duplex=False)\n', (6437, 6451), True, 'import multiprocessing as mp\n'), ((6643, 6670), 'logging.info', 'logging.info', (['"""--running--"""'], {}), "('--running--')\n", (6655, 6670), False, 'import logging\n'), ((6181, 6194), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6191, 6194), False, 'import csv\n'), ((1173, 1187), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1185, 1187), True, 'import multiprocessing as mp\n'), ((5007, 5021), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5019, 5021), True, 'import multiprocessing as mp\n'), ((5557, 5573), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (5570, 5573), False, 'import logging\n'), ((6474, 6543), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'start_download_job', 'args': '(data_writer, init_tasks)'}), '(target=start_download_job, args=(data_writer, init_tasks))\n', (6484, 6543), True, 'import multiprocessing as mp\n'), ((6574, 6629), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'start_parse_job', 'args': '(data_reader,)'}), '(target=start_parse_job, args=(data_reader,))\n', (6584, 6629), True, 'import multiprocessing as mp\n'), ((1150, 1164), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1162, 1164), True, 'import multiprocessing as mp\n'), ((2220, 2237), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2232, 2237), False, 'import requests\n'), ((2690, 2703), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2700, 2703), False, 'import time\n'), ((4984, 4998), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4996, 4998), True, 'import multiprocessing as mp\n')] |
yetsun/hue | desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to offload the end to end flow of U2F signing."""
import base64
import hashlib
import json
import os
import struct
import subprocess
import sys
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import baseauthenticator
SK_SIGNING_PLUGIN_ENV_VAR = 'SK_SIGNING_PLUGIN'
U2F_SIGNATURE_TIMEOUT_SECONDS = 5
SK_SIGNING_PLUGIN_NO_ERROR = 0
SK_SIGNING_PLUGIN_TOUCH_REQUIRED = 0x6985
SK_SIGNING_PLUGIN_WRONG_DATA = 0x6A80
class CustomAuthenticator(baseauthenticator.BaseAuthenticator):
"""Offloads U2F signing to a pluggable command-line tool.
Offloads U2F signing to a signing plugin which takes the form of a
command-line tool. The command-line tool is configurable via the
SK_SIGNING_PLUGIN environment variable.
The signing plugin should implement the following interface:
Communication occurs over stdin/stdout, and messages are both sent and
received in the form:
[4 bytes - payload size (little-endian)][variable bytes - json payload]
Signing Request JSON
{
"type": "sign_helper_request",
"signData": [{
"keyHandle": <url-safe base64-encoded key handle>,
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"version": U2F protocol version (usually "U2F_V2")
},...],
"timeoutSeconds": <security key touch timeout>
}
Signing Response JSON
{
"type": "sign_helper_reply",
"code": <result code>.
"errorDetail": <text description of error>,
"responseData": {
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"keyHandle": <url-safe base64-encoded key handle>,
"version": <U2F protocol version>,
"signatureData": <url-safe base64-encoded signature>
}
}
Possible response error codes are:
NoError = 0
UnknownError = -127
TouchRequired = 0x6985
WrongData = 0x6a80
"""
def __init__(self, origin):
self.origin = origin
def Authenticate(self, app_id, challenge_data,
print_callback=sys.stderr.write):
"""See base class."""
# Ensure environment variable is present
plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
if plugin_cmd is None:
raise errors.PluginError('{} env var is not set'
.format(SK_SIGNING_PLUGIN_ENV_VAR))
# Prepare input to signer
client_data_map, signing_input = self._BuildPluginRequest(
app_id, challenge_data, self.origin)
# Call plugin
print_callback('Please insert and touch your security key\n')
response = self._CallPlugin([plugin_cmd], signing_input)
# Handle response
key_challenge_pair = (response['keyHandle'], response['challengeHash'])
client_data_json = client_data_map[key_challenge_pair]
client_data = client_data_json.encode()
return self._BuildAuthenticatorResponse(app_id, client_data, response)
def IsAvailable(self):
"""See base class."""
return os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR) is not None
def _BuildPluginRequest(self, app_id, challenge_data, origin):
"""Builds a JSON request in the form that the plugin expects."""
client_data_map = {}
encoded_challenges = []
app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
for challenge_item in challenge_data:
key = challenge_item['key']
key_handle_encoded = self._Base64Encode(key.key_handle)
raw_challenge = challenge_item['challenge']
client_data_json = model.ClientData(
model.ClientData.TYP_AUTHENTICATION,
raw_challenge,
origin).GetJson()
challenge_hash_encoded = self._Base64Encode(
self._SHA256(client_data_json))
# Populate challenges list
encoded_challenges.append({
'appIdHash': app_id_hash_encoded,
'challengeHash': challenge_hash_encoded,
'keyHandle': key_handle_encoded,
'version': key.version,
})
# Populate ClientData map
key_challenge_pair = (key_handle_encoded, challenge_hash_encoded)
client_data_map[key_challenge_pair] = client_data_json
signing_request = {
'type': 'sign_helper_request',
'signData': encoded_challenges,
'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
'localAlways': True
}
return client_data_map, json.dumps(signing_request)
def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response
def _CallPlugin(self, cmd, input_json):
"""Calls the plugin and validates the response."""
# Calculate length of input
input_length = len(input_json)
length_bytes_le = struct.pack('<I', input_length)
request = length_bytes_le + input_json.encode()
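    # e.g. a 300-byte JSON payload is framed as b'\x2c\x01\x00\x00' + payload:
    # a 4-byte little-endian length prefix followed by the JSON bytes, matching
    # the message format described in the class docstring.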
# Call plugin
sign_process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout = sign_process.communicate(request)[0]
exit_status = sign_process.wait()
# Parse and validate response size
response_len_le = stdout[:4]
response_len = struct.unpack('<I', response_len_le)[0]
response = stdout[4:]
if response_len != len(response):
raise errors.PluginError(
'Plugin response length {} does not match data {} (exit_status={})'
.format(response_len, len(response), exit_status))
# Ensure valid json
try:
json_response = json.loads(response.decode())
except ValueError:
raise errors.PluginError('Plugin returned invalid output (exit_status={})'
.format(exit_status))
# Ensure response type
if json_response.get('type') != 'sign_helper_reply':
raise errors.PluginError('Plugin returned invalid response type '
'(exit_status={})'
.format(exit_status))
# Parse response codes
result_code = json_response.get('code')
if result_code is None:
raise errors.PluginError('Plugin missing result code (exit_status={})'
.format(exit_status))
# Handle errors
if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
raise errors.U2FError(errors.U2FError.TIMEOUT)
elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
raise errors.PluginError(
'Plugin failed with error {} - {} (exit_status={})'
.format(result_code,
json_response.get('errorDetail'),
exit_status))
# Ensure response data is present
response_data = json_response.get('responseData')
if response_data is None:
      raise errors.PluginError(
'Plugin returned output with missing responseData (exit_status={})'
.format(exit_status))
return response_data
def _SHA256(self, string):
"""Helper method to perform SHA256."""
md = hashlib.sha256()
md.update(string.encode())
return md.digest()
def _Base64Encode(self, bytes_data):
"""Helper method to base64 encode, strip padding, and return str
result."""
return base64.urlsafe_b64encode(bytes_data).decode().rstrip('=')
| [((2908, 2949), 'os.environ.get', 'os.environ.get', (['SK_SIGNING_PLUGIN_ENV_VAR'], {}), '(SK_SIGNING_PLUGIN_ENV_VAR)\n', (2922, 2949), False, 'import os\n'), ((5811, 5842), 'struct.pack', 'struct.pack', (['"""<I"""', 'input_length'], {}), "('<I', input_length)\n", (5822, 5842), False, 'import struct\n'), ((5933, 6001), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), '(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n', (5949, 6001), False, 'import subprocess\n'), ((8145, 8161), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (8159, 8161), False, 'import hashlib\n'), ((3724, 3765), 'os.environ.get', 'os.environ.get', (['SK_SIGNING_PLUGIN_ENV_VAR'], {}), '(SK_SIGNING_PLUGIN_ENV_VAR)\n', (3738, 3765), False, 'import os\n'), ((5099, 5126), 'json.dumps', 'json.dumps', (['signing_request'], {}), '(signing_request)\n', (5109, 5126), False, 'import json\n'), ((6255, 6291), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'response_len_le'], {}), "('<I', response_len_le)\n", (6268, 6291), False, 'import struct\n'), ((7352, 7392), 'pyu2f.errors.U2FError', 'errors.U2FError', (['errors.U2FError.TIMEOUT'], {}), '(errors.U2FError.TIMEOUT)\n', (7367, 7392), False, 'from pyu2f import errors\n'), ((7459, 7509), 'pyu2f.errors.U2FError', 'errors.U2FError', (['errors.U2FError.DEVICE_INELIGIBLE'], {}), '(errors.U2FError.DEVICE_INELIGIBLE)\n', (7474, 7509), False, 'from pyu2f import errors\n'), ((4248, 4324), 'pyu2f.model.ClientData', 'model.ClientData', (['model.ClientData.TYP_AUTHENTICATION', 'raw_challenge', 'origin'], {}), '(model.ClientData.TYP_AUTHENTICATION, raw_challenge, origin)\n', (4264, 4324), False, 'from pyu2f import model\n'), ((8357, 8393), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', (['bytes_data'], {}), '(bytes_data)\n', (8381, 8393), False, 'import base64\n')] |
dmacmillan/Kive | kive/portal/management/commands/graph_kive.py | 76bc8f289f66fb133f78cb6d5689568b7d015915 | import itertools
import os
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Generates class diagrams.'
def handle(self, *args, **options):
if 'django_extensions' not in settings.INSTALLED_APPS:
exit('django_extensions not found, try using --setting kive.UML_settings')
docs_path = os.path.join(os.path.pardir, 'docs', 'models')
apps = [app for app in settings.INSTALLED_APPS
if not (app.startswith('django') or app == 'rest_framework')]
apps.sort()
for app in apps:
print(app)
exclude_models = ['User', 'Group']
if app != 'metadata':
exclude_models.append('AccessControl')
call_command("graph_models",
app,
pygraphviz=True,
group_models=True,
outputfile=os.path.join(docs_path, app+'.png'),
exclude_models=','.join(exclude_models))
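        # Rebuild the "### Models ###" section of docs/models/README.md below:
        # keep the existing header lines, then write one entry per app.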
readme_path = os.path.join(docs_path, 'README.md')
with open(readme_path, 'rU+') as f:
models_section = '### Models ###\n'
header = itertools.takewhile(lambda line: line != models_section,
f.readlines())
f.seek(0)
for line in header:
f.write(line)
f.write(models_section)
for app in apps:
                f.write('#### {} ####\n'.format(app))
                f.write('\n\n'.format(app, app))
| [((442, 488), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""docs"""', '"""models"""'], {}), "(os.path.pardir, 'docs', 'models')\n", (454, 488), False, 'import os\n'), ((1145, 1181), 'os.path.join', 'os.path.join', (['docs_path', '"""README.md"""'], {}), "(docs_path, 'README.md')\n", (1157, 1181), False, 'import os\n'), ((1019, 1056), 'os.path.join', 'os.path.join', (['docs_path', "(app + '.png')"], {}), "(docs_path, app + '.png')\n", (1031, 1056), False, 'import os\n')] |
rpls/openlane_summary | summary.py | 5057fab80a4acaf08e6503ced7abb932684145a5 | #!/usr/bin/env python3
import argparse
import os
import glob
import csv
import sys
import re
from shutil import which
import datetime
def is_tool(name):
return which(name) is not None
def check_path(path):
paths = glob.glob(path)
if len(paths) == 0:
exit("file not found: %s" % path)
if len(paths) > 1:
print("warning: glob pattern found too many files, using first one: %s" % paths[0])
return paths[0]
def openlane_date_sort(e):
datestamp = os.path.basename(e)
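    # OpenLANE run directories are typically named like 26-03_14-52 (day-month_hour-minute);
    # names that don't match fall through and are sorted as plain strings.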
if re.match(r'^\d+\-\d+\_\d+\-\d+$',datestamp):
timestamp = datetime.datetime.strptime(datestamp, '%d-%m_%H-%M')
return timestamp.timestamp()
return datestamp
def summary_report(summary_file):
# print short summary of the csv file
status = None
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
if "violation" in key or "error" in key:
print("%30s : %20s" % (key, value))
if "AREA" in key:
area = float(value)
if "flow_status" in key:
status = value
print("area %d um^2" % (1e6 * area))
if status is not None: # newer OpenLANE has status, older ones don't
print("flow status: %s" % status)
def full_summary_report(summary_file):
# print short summary of the csv file
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
print("%30s : %20s" % (key, value))
def drc_report(drc_file):
last_drc = None
drc_count = 0
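    # Each line containing '(' starts a new DRC rule header; the lines counted since the
    # previous header are divided by 4, assuming each violation occupies 4 lines of
    # magic's .drc output.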
with open(drc_file) as drc:
for line in drc.readlines():
drc_count += 1
if '(' in line:
if last_drc is not None:
print("* %s (%d)" % (last_drc, drc_count/4))
last_drc = line.strip()
drc_count = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="OpenLANE summary tool")
group = parser.add_mutually_exclusive_group(required=True)
# either choose the design and interation
group.add_argument('--design', help="only run checks on specific design", action='store')
# or show standard cells
group.add_argument('--show-sky130', help='show all standard cells', action='store_const', const=True)
# optionally choose different name for top module and which run to use (default latest)
parser.add_argument('--top', help="name of top module if not same as design", action='store')
parser.add_argument('--run', help="choose a specific run. If not given use latest. If not arg, show a menu", action='store', default=-1, nargs='?', type=int)
# what to show
parser.add_argument('--drc', help='show DRC report', action='store_const', const=True)
parser.add_argument('--summary', help='show violations, area & status from summary report', action='store_const', const=True)
parser.add_argument('--full-summary', help='show the full summary report csv file', action='store_const', const=True)
parser.add_argument('--synth', help='show post techmap synth', action='store_const', const=True)
parser.add_argument('--yosys-report', help='show cell usage after yosys synth', action='store_const', const=True)
# klayout for intermediate files
parser.add_argument('--floorplan', help='show floorplan', action='store_const', const=True)
parser.add_argument('--pdn', help='show PDN', action='store_const', const=True)
parser.add_argument('--global-placement', help='show global placement PDN', action='store_const', const=True)
parser.add_argument('--detailed-placement', help='show detailed placement', action='store_const', const=True)
parser.add_argument('--gds', help='show final GDS', action='store_const', const=True)
# GDS3D for 3d view
parser.add_argument('--gds-3d', help='show final GDS in 3D', action='store_const', const=True)
parser.add_argument('--caravel', help='use caravel directory structure instead of standard openlane', action='store_const', const=True)
args = parser.parse_args()
if not args.top:
args.top = args.design
if not 'OPENLANE_ROOT' in os.environ:
exit("pls set OPENLANE_ROOT to where your OpenLANE is installed")
klayout_def = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_def.xml')
klayout_gds = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_gds.xml')
gds3d_tech = os.path.join(os.path.dirname(sys.argv[0]), 'sky130.txt')
# if showing off the sky130 cells
if args.show_sky130:
        if 'PDK_ROOT' not in os.environ:
exit("pls set PDK_ROOT to where your PDK is installed")
path = check_path(os.path.join(os.environ['PDK_ROOT'], "sky130A", "libs.ref", "sky130_fd_sc_hd", "gds", "sky130_fd_sc_hd.gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
exit()
# otherwise need to know where openlane and the designs are
openlane_designs = ''
if args.caravel:
if os.path.exists('openlane'):
openlane_designs = 'openlane'
else:
openlane_designs = '.'
run_dir = os.path.join(openlane_designs, args.design, 'runs/*')
else:
openlane_designs = os.path.join(os.environ['OPENLANE_ROOT'], 'designs')
run_dir = os.path.join(openlane_designs, args.design, 'runs/*-*')
list_of_files = glob.glob(run_dir)
if len(list_of_files) == 0:
exit("couldn't find that design")
list_of_files.sort(key=openlane_date_sort)
# what run to show?
if args.run == -1:
# default is to use the latest
print("using latest run:")
run_path = max(list_of_files, key=os.path.getctime)
elif args.run is None:
# UI for asking for which run to use
for run_index, run in enumerate(list_of_files):
print("\n%2d: %s" % (run_index, os.path.basename(run)), end='')
print(" <default>\n")
n = input("which run? <enter for default>: ") or run_index
run_path = list_of_files[int(n)]
else:
# use the given run
print("using run %d:" % args.run)
run_path = list_of_files[args.run]
print(run_path)
if args.summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
summary_report(path)
if args.full_summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
full_summary_report(path)
if args.drc:
        path = os.path.join(run_path, 'logs', 'magic', 'magic.drc')  # don't use check_path() here: if DRC is clean the file may not exist
if os.path.exists(path):
drc_report(path)
else:
print("no DRC file, DRC clean?")
if args.synth:
path = check_path(os.path.join(run_path, "tmp", "synthesis", "post_techmap.dot")) # post_techmap is created by https://github.com/efabless/openlane/pull/282
os.system("xdot %s" % path)
if args.yosys_report:
filename = "*yosys_*.stat.rpt"
path = check_path(os.path.join(run_path, "reports", "synthesis", filename))
os.system("cat %s" % path)
if args.floorplan:
path = check_path(os.path.join(run_path, "results", "floorplan", args.top + ".floorplan.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.pdn:
filename = "*pdn.def"
path = check_path(os.path.join(run_path, "tmp", "floorplan", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.global_placement:
filename = "*replace.def"
path = check_path(os.path.join(run_path, "tmp", "placement", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.detailed_placement:
path = check_path(os.path.join(run_path, "results", "placement", args.top + ".placement.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.gds:
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
if args.gds_3d:
if not is_tool('GDS3D'):
exit("pls install GDS3D from https://github.com/trilomix/GDS3D")
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("GDS3D -p %s -i %s" % (gds3d_tech, path))
| [((225, 240), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (234, 240), False, 'import glob\n'), ((491, 510), 'os.path.basename', 'os.path.basename', (['e'], {}), '(e)\n', (507, 510), False, 'import os\n'), ((518, 568), 're.match', 're.match', (['"""^\\\\d+\\\\-\\\\d+\\\\_\\\\d+\\\\-\\\\d+$"""', 'datestamp'], {}), "('^\\\\d+\\\\-\\\\d+\\\\_\\\\d+\\\\-\\\\d+$', datestamp)\n", (526, 568), False, 'import re\n'), ((2066, 2126), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""OpenLANE summary tool"""'}), "(description='OpenLANE summary tool')\n", (2089, 2126), False, 'import argparse\n'), ((5526, 5544), 'glob.glob', 'glob.glob', (['run_dir'], {}), '(run_dir)\n', (5535, 5544), False, 'import glob\n'), ((166, 177), 'shutil.which', 'which', (['name'], {}), '(name)\n', (171, 177), False, 'from shutil import which\n'), ((583, 635), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['datestamp', '"""%d-%m_%H-%M"""'], {}), "(datestamp, '%d-%m_%H-%M')\n", (609, 635), False, 'import datetime\n'), ((842, 860), 'csv.DictReader', 'csv.DictReader', (['fh'], {}), '(fh)\n', (856, 860), False, 'import csv\n'), ((1498, 1516), 'csv.DictReader', 'csv.DictReader', (['fh'], {}), '(fh)\n', (1512, 1516), False, 'import csv\n'), ((4441, 4469), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (4456, 4469), False, 'import os\n'), ((4521, 4549), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (4536, 4549), False, 'import os\n'), ((4601, 4629), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (4616, 4629), False, 'import os\n'), ((4960, 5011), 'os.system', 'os.system', (["('klayout -l %s %s' % (klayout_gds, path))"], {}), "('klayout -l %s %s' % (klayout_gds, path))\n", (4969, 5011), False, 'import os\n'), ((5150, 5176), 'os.path.exists', 'os.path.exists', (['"""openlane"""'], {}), "('openlane')\n", (5164, 5176), False, 'import os\n'), ((5287, 5340), 'os.path.join', 'os.path.join', (['openlane_designs', 'args.design', '"""runs/*"""'], {}), "(openlane_designs, args.design, 'runs/*')\n", (5299, 5340), False, 'import os\n'), ((5378, 5430), 'os.path.join', 'os.path.join', (["os.environ['OPENLANE_ROOT']", '"""designs"""'], {}), "(os.environ['OPENLANE_ROOT'], 'designs')\n", (5390, 5430), False, 'import os\n'), ((5449, 5504), 'os.path.join', 'os.path.join', (['openlane_designs', 'args.design', '"""runs/*-*"""'], {}), "(openlane_designs, args.design, 'runs/*-*')\n", (5461, 5504), False, 'import os\n'), ((6668, 6720), 'os.path.join', 'os.path.join', (['run_path', '"""logs"""', '"""magic"""', '"""magic.drc"""'], {}), "(run_path, 'logs', 'magic', 'magic.drc')\n", (6680, 6720), False, 'import os\n'), ((6795, 6815), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6809, 6815), False, 'import os\n'), ((7098, 7125), 'os.system', 'os.system', (["('xdot %s' % path)"], {}), "('xdot %s' % path)\n", (7107, 7125), False, 'import os\n'), ((7284, 7310), 'os.system', 'os.system', (["('cat %s' % path)"], {}), "('cat %s' % path)\n", (7293, 7310), False, 'import os\n'), ((7446, 7497), 'os.system', 'os.system', (["('klayout -l %s %s' % (klayout_def, path))"], {}), "('klayout -l %s %s' % (klayout_def, path))\n", (7455, 7497), False, 'import os\n'), ((7634, 7685), 'os.system', 'os.system', (["('klayout -l %s %s' % (klayout_def, path))"], {}), "('klayout -l %s %s' % (klayout_def, path))\n", (7643, 7685), False, 'import os\n'), ((7839, 7890), 'os.system', 'os.system', (["('klayout -l %s %s' % 
(klayout_def, path))"], {}), "('klayout -l %s %s' % (klayout_def, path))\n", (7848, 7890), False, 'import os\n'), ((8035, 8086), 'os.system', 'os.system', (["('klayout -l %s %s' % (klayout_def, path))"], {}), "('klayout -l %s %s' % (klayout_def, path))\n", (8044, 8086), False, 'import os\n'), ((8202, 8253), 'os.system', 'os.system', (["('klayout -l %s %s' % (klayout_gds, path))"], {}), "('klayout -l %s %s' % (klayout_gds, path))\n", (8211, 8253), False, 'import os\n'), ((8482, 8533), 'os.system', 'os.system', (["('GDS3D -p %s -i %s' % (gds3d_tech, path))"], {}), "('GDS3D -p %s -i %s' % (gds3d_tech, path))\n", (8491, 8533), False, 'import os\n'), ((4842, 4954), 'os.path.join', 'os.path.join', (["os.environ['PDK_ROOT']", '"""sky130A"""', '"""libs.ref"""', '"""sky130_fd_sc_hd"""', '"""gds"""', '"""sky130_fd_sc_hd.gds"""'], {}), "(os.environ['PDK_ROOT'], 'sky130A', 'libs.ref',\n 'sky130_fd_sc_hd', 'gds', 'sky130_fd_sc_hd.gds')\n", (4854, 4954), False, 'import os\n'), ((6393, 6454), 'os.path.join', 'os.path.join', (['run_path', '"""reports"""', '"""final_summary_report.csv"""'], {}), "(run_path, 'reports', 'final_summary_report.csv')\n", (6405, 6454), False, 'import os\n'), ((6538, 6599), 'os.path.join', 'os.path.join', (['run_path', '"""reports"""', '"""final_summary_report.csv"""'], {}), "(run_path, 'reports', 'final_summary_report.csv')\n", (6550, 6599), False, 'import os\n'), ((6951, 7013), 'os.path.join', 'os.path.join', (['run_path', '"""tmp"""', '"""synthesis"""', '"""post_techmap.dot"""'], {}), "(run_path, 'tmp', 'synthesis', 'post_techmap.dot')\n", (6963, 7013), False, 'import os\n'), ((7218, 7274), 'os.path.join', 'os.path.join', (['run_path', '"""reports"""', '"""synthesis"""', 'filename'], {}), "(run_path, 'reports', 'synthesis', filename)\n", (7230, 7274), False, 'import os\n'), ((7361, 7436), 'os.path.join', 'os.path.join', (['run_path', '"""results"""', '"""floorplan"""', "(args.top + '.floorplan.def')"], {}), "(run_path, 'results', 'floorplan', args.top + '.floorplan.def')\n", (7373, 7436), False, 'import os\n'), ((7572, 7624), 'os.path.join', 'os.path.join', (['run_path', '"""tmp"""', '"""floorplan"""', 'filename'], {}), "(run_path, 'tmp', 'floorplan', filename)\n", (7584, 7624), False, 'import os\n'), ((7777, 7829), 'os.path.join', 'os.path.join', (['run_path', '"""tmp"""', '"""placement"""', 'filename'], {}), "(run_path, 'tmp', 'placement', filename)\n", (7789, 7829), False, 'import os\n'), ((7950, 8025), 'os.path.join', 'os.path.join', (['run_path', '"""results"""', '"""placement"""', "(args.top + '.placement.def')"], {}), "(run_path, 'results', 'placement', args.top + '.placement.def')\n", (7962, 8025), False, 'import os\n'), ((8131, 8192), 'os.path.join', 'os.path.join', (['run_path', '"""results"""', '"""magic"""', "(args.top + '.gds')"], {}), "(run_path, 'results', 'magic', args.top + '.gds')\n", (8143, 8192), False, 'import os\n'), ((8411, 8472), 'os.path.join', 'os.path.join', (['run_path', '"""results"""', '"""magic"""', "(args.top + '.gds')"], {}), "(run_path, 'results', 'magic', args.top + '.gds')\n", (8423, 8472), False, 'import os\n'), ((6021, 6042), 'os.path.basename', 'os.path.basename', (['run'], {}), '(run)\n', (6037, 6042), False, 'import os\n')] |
player1537-forks/spack | var/spack/repos/builtin/packages/py-cupy/package.py | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCupy(PythonPackage):
"""CuPy is an open-source array library accelerated with
NVIDIA CUDA. CuPy provides GPU accelerated computing with
Python. CuPy uses CUDA-related libraries including cuBLAS,
cuDNN, cuRand, cuSolver, cuSPARSE, cuFFT and NCCL to make
full use of the GPU architecture."""
homepage = "https://cupy.dev/"
pypi = "cupy/cupy-8.0.0.tar.gz"
version('8.0.0', sha256='d1dcba5070dfa754445d010cdc952ff6b646d5f9bdcd7a63e8246e2472c3ddb8')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('cuda')
depends_on('nccl')
depends_on('cudnn')
| [] |
cfytrok/python-simple-rest-client | simple_rest_client/decorators.py | 4896e8226ffe194625c63773ea6f49531293b308 | import logging
from functools import wraps
import status
from httpx import exceptions
from .exceptions import AuthError, ClientConnectionError, ClientError, NotFoundError, ServerError
logger = logging.getLogger(__name__)
def validate_response(response):
error_suffix = " response={!r}".format(response)
if response.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN):
raise AuthError("operation=auth_error," + error_suffix, response)
if response.status_code == status.HTTP_404_NOT_FOUND:
raise NotFoundError("operation=not_found_error," + error_suffix, response)
if status.is_client_error(code=response.status_code):
raise ClientError("operation=client_error," + error_suffix, response)
if status.is_server_error(code=response.status_code):
raise ServerError("operation=server_error," + error_suffix, response)
def handle_request_error(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
response = f(*args, **kwargs)
except (
exceptions.Timeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
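# Usage sketch (illustrative only, not part of this module): wrap any callable that
# returns an httpx response so that error status codes raise this library's exceptions:
#
#   @handle_request_error
#   def fetch(url):
#       return httpx.get(url)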
def handle_async_request_error(f):
async def wrapper(*args, **kwargs):
try:
response = await f(*args, **kwargs)
except (
            exceptions.ReadTimeout,
exceptions.WriteTimeout,
exceptions.PoolTimeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
| [((196, 223), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (213, 223), False, 'import logging\n'), ((624, 673), 'status.is_client_error', 'status.is_client_error', ([], {'code': 'response.status_code'}), '(code=response.status_code)\n', (646, 673), False, 'import status\n'), ((760, 809), 'status.is_server_error', 'status.is_server_error', ([], {'code': 'response.status_code'}), '(code=response.status_code)\n', (782, 809), False, 'import status\n'), ((925, 933), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (930, 933), False, 'from functools import wraps\n')] |
PhMueller/TrajectoryParser | HPOBenchExperimentUtils/resource_manager/__init__.py | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | from HPOBenchExperimentUtils.resource_manager.file_resource_manager import FileBasedResourceManager
| [] |
kkiningh/slime | tools/mirrors.bzl | 85853115e284bda35b3da10957823d23428b65d3 | DEFAULT_MIRRORS = {
"bitbucket": [
"https://bitbucket.org/{repository}/get/{commit}.tar.gz",
],
"buildifier": [
"https://github.com/bazelbuild/buildtools/releases/download/{version}/{filename}",
],
"github": [
"https://github.com/{repository}/archive/{commit}.tar.gz",
],
"pypi": [
"https://files.pythonhosted.org/packages/source/{p}/{package}/{package}-{version}.tar.gz",
],
}
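# The entries above are URL templates: placeholders such as {repository}, {commit},
# {version}, {filename}, {p} and {package} are presumably substituted by the repository
# rules that consume this table before downloading.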
| [] |
ScrippsPipkinLab/GenomeTracks | 201805_ChIP_ATAC/codes_old/read_txt.py | 89824daceba82f7a52cf8a31149845548fe1aa76 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 21:15:23 2017
@author: yolandatiao
"""
import csv
import glob
import os
from astropy.io import ascii # For using ascii table to open csv
from astropy.table import Table, Column # For using astropy table functions
os.chdir("/Volumes/Huitian/GSE88987/codes")
import fc_basic_astropy_subprocess as fc
os.chdir("/Volumes/Huitian/Genombrower/codes/txt")
flist=[]
for fname in glob.glob("*.txt"):
flist.append(fname)
nlist=[]
fnflist=[]
print len(flist)
for i in flist:
fnflist.append(i[:-4])
with open(i, "r") as fin:
rfin=csv.reader(fin, delimiter=",")
nlist.append(int(next(rfin)[0]))
#print nlist
outab=Table()
outab["filename_nf"]=fnflist
outab["bdgaccu"]=nlist
ascii.write(outab, "meta.csv", format="csv", overwrite=True)
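# Next: generate one normalization shell script per sample by substituting the sfactor and
# inputfile placeholders in the bdgnorm.sh template with values from meta_write_bash.csv.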
metab=ascii.read("meta_write_bash.csv")
metab=fc.setcolnames(metab)
with open("bdgnorm.sh","r") as fin:
rfin=csv.reader(fin, delimiter=",")
inrow=next(rfin)[0]
print inrow
for x in xrange(0, len(metab)):
xshname="%s.sh"%x
with open(xshname, "w") as fout:
wfout=csv.writer(fout, delimiter="\t")
wfout.writerow(["cd /gpfs/home/hdiao/Geombrowser"])
outrow=inrow
osfactor=str(metab["1000000000_scalingfactor"][x])
ofname=str(metab["filename_nf"][x])
outrow=outrow.replace("sfactor", osfactor)
outrow=outrow.replace("inputfile", ofname)
fout.writelines(outrow)
with open("qsub.sh", "w") as fout:
for x in xrange(0, 66):
fout.writelines("qsub %s.sh"%x)
fout.writelines("\n")
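# Finally, write rename.sh, which renames the bedGraph files according to rename_meta.csv.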
os.chdir("/Volumes/Huitian/Genombrower/codes/rename")
meta=ascii.read("rename_meta.csv")
with open("rename.sh", "w") as fout:
for x in xrange(0, len(meta)):
fout.writelines("mv ")
fout.writelines(meta["oldname"][x])
fout.writelines(" ")
fout.writelines(meta["newnamenf"][x])
fout.writelines(".bdg")
fout.writelines("\n")
| [] |
josetaas/vendcrawler | tests/test_vendcrawler.py | 5cb497d0741f6dbd29a6e41fa9f1cb3374e8f062 | import unittest
from vendcrawler.scripts.vendcrawler import VendCrawler
class TestVendCrawlerMethods(unittest.TestCase):
def test_get_links(self):
links = VendCrawler('a', 'b', 'c').get_links(2)
self.assertEqual(links,
['https://sarahserver.net/?module=vendor&p=1',
'https://sarahserver.net/?module=vendor&p=2'])
def test_get_page_count(self):
with open('test_vendcrawler.html', 'r') as f:
data = f.read()
page_count = VendCrawler('a', 'b', 'c').get_page_count(str(data))
self.assertEqual(int(page_count), 84)
if __name__ == '__main__':
unittest.main()
| [((658, 673), 'unittest.main', 'unittest.main', ([], {}), '()\n', (671, 673), False, 'import unittest\n'), ((170, 196), 'vendcrawler.scripts.vendcrawler.VendCrawler', 'VendCrawler', (['"""a"""', '"""b"""', '"""c"""'], {}), "('a', 'b', 'c')\n", (181, 196), False, 'from vendcrawler.scripts.vendcrawler import VendCrawler\n'), ((527, 553), 'vendcrawler.scripts.vendcrawler.VendCrawler', 'VendCrawler', (['"""a"""', '"""b"""', '"""c"""'], {}), "('a', 'b', 'c')\n", (538, 553), False, 'from vendcrawler.scripts.vendcrawler import VendCrawler\n')] |
thk4711/mediamanager | services/spotify-service.py | 8f6d21c220767aa9ee5d65635d2993dba07eceed | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import json
import os
import sys
import urllib.request
import socket
import argparse
import requests
import lib.common as common
base_url = 'http://localhost:24879/player/'
#------------------------------------------------------------------------------#
# do something on startup #
#------------------------------------------------------------------------------#
def init():
global port
check_port()
script_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_path)
parser = argparse.ArgumentParser(description='media manager spotify connect service')
parser.add_argument('-p', '--port', type=int, help='WEB server port', required=True)
args = parser.parse_args()
port = args.port
#------------------------------------------------------------------------------#
# check if librespot-java is running #
#------------------------------------------------------------------------------#
def check_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('localhost', 24879))
if result == 0:
sock.close()
return
print("Please check if SpoCon is configured correctly and running", file = sys.stderr )
sock.close()
exit(1)
#------------------------------------------------------------------------------#
# get metadata from spotify #
#------------------------------------------------------------------------------#
def get_metadata():
meta_data = {}
global current_cover
try:
current_track = get_player()
album = current_track['item']['album']
current_cover = album['images'][0]['url']
tmp_cover = current_cover
tmp_cover=tmp_cover.replace('https://i.scdn.co/image/','')
meta_data['track'] = current_track['item']['name']
meta_data['album'] = album['name']
meta_data['artist'] = album['artists'][0]['name']
meta_data['cover'] = 'external_' + tmp_cover
meta_data['playstatus'] = get_play_status()
if meta_data['playstatus'] == False:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
return(bytes(json.dumps(meta_data), 'utf-8'))
except:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
meta_data['playstatus'] = False
return(bytes(json.dumps(meta_data), 'utf-8'))
#------------------------------------------------------------------------------#
# get play status #
#------------------------------------------------------------------------------#
def get_play_status(mode=False):
playing = False
ret_val = False
ret_str = 'NO'
try:
current_track = get_player()
playing = current_track['is_playing']
except:
pass
if playing == True:
try:
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
data = ret.json()
if 'current' in data:
ret_str = 'YES'
ret_val = True
get_player()
except:
pass
if mode:
return(bytes(ret_str, 'utf-8'))
return(ret_val)
#------------------------------------------------------------------------------#
# get whats currently playing #
#------------------------------------------------------------------------------#
def get_current():
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
return ret.json()
#------------------------------------------------------------------------------#
# get player data from API #
#------------------------------------------------------------------------------#
def get_player():
path = 'http://localhost:24879/web-api/v1/me/player'
ret = requests.get(url = path)
return ret.json()
#------------------------------------------------------------------------------#
# read cover image fom spotify connect web #
#------------------------------------------------------------------------------#
def read_cover_image():
webURL = urllib.request.urlopen(current_cover)
data = webURL.read()
return(data)
#------------------------------------------------------------------------------#
# play next song #
#------------------------------------------------------------------------------#
def next():
requests.post(url = base_url + 'next')
#------------------------------------------------------------------------------#
# play previuous song #
#------------------------------------------------------------------------------#
def prev():
requests.post(url = base_url + 'prev')
#------------------------------------------------------------------------------#
# start playing #
#------------------------------------------------------------------------------#
def play():
requests.post(url = base_url + 'resume')
#------------------------------------------------------------------------------#
# stop playing #
#------------------------------------------------------------------------------#
def pause():
requests.post(url = base_url + 'pause')
#------------------------------------------------------------------------------#
# handle http get request #
#------------------------------------------------------------------------------#
def respond_to_get_request(data):
if 'action' not in data:
return(bytes('failed', 'utf-8'))
if data['action'] == 'play':
play()
elif data['action'] == 'pause':
pause()
elif data['action'] == 'prev':
get_metadata()
prev()
elif data['action'] == 'next':
get_metadata()
next()
elif data['action'] == 'metadata':
return(get_metadata())
elif data['action'] == 'coverimage':
return(read_cover_image())
elif data['action'] == 'getplaystatus':
return(get_play_status(True))
return(bytes('OK', 'utf-8'))
#------------------------------------------------------------------------------#
# main program #
#------------------------------------------------------------------------------#
init()
common.http_get_handler = respond_to_get_request
common.run_http(port)
while True:
time.sleep(2000)
| [((7049, 7070), 'lib.common.run_http', 'common.run_http', (['port'], {}), '(port)\n', (7064, 7070), True, 'import lib.common as common\n'), ((587, 608), 'os.chdir', 'os.chdir', (['script_path'], {}), '(script_path)\n', (595, 608), False, 'import os\n'), ((622, 698), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""media manager spotify connect service"""'}), "(description='media manager spotify connect service')\n", (645, 698), False, 'import argparse\n'), ((1113, 1162), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1126, 1162), False, 'import socket\n'), ((3909, 3932), 'requests.post', 'requests.post', ([], {'url': 'path'}), '(url=path)\n', (3922, 3932), False, 'import requests\n'), ((4286, 4308), 'requests.get', 'requests.get', ([], {'url': 'path'}), '(url=path)\n', (4298, 4308), False, 'import requests\n'), ((4614, 4651), 'urllib.request.urlopen', 'urllib.request.urlopen', (['current_cover'], {}), '(current_cover)\n', (4636, 4651), False, 'import urllib\n'), ((4954, 4990), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'next')"}), "(url=base_url + 'next')\n", (4967, 4990), False, 'import requests\n'), ((5253, 5289), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'prev')"}), "(url=base_url + 'prev')\n", (5266, 5289), False, 'import requests\n'), ((5552, 5590), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'resume')"}), "(url=base_url + 'resume')\n", (5565, 5590), False, 'import requests\n'), ((5854, 5891), 'requests.post', 'requests.post', ([], {'url': "(base_url + 'pause')"}), "(url=base_url + 'pause')\n", (5867, 5891), False, 'import requests\n'), ((7087, 7103), 'time.sleep', 'time.sleep', (['(2000)'], {}), '(2000)\n', (7097, 7103), False, 'import time\n'), ((556, 581), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (571, 581), False, 'import os\n'), ((2441, 2462), 'json.dumps', 'json.dumps', (['meta_data'], {}), '(meta_data)\n', (2451, 2462), False, 'import json\n'), ((3296, 3319), 'requests.post', 'requests.post', ([], {'url': 'path'}), '(url=path)\n', (3309, 3319), False, 'import requests\n'), ((2695, 2716), 'json.dumps', 'json.dumps', (['meta_data'], {}), '(meta_data)\n', (2705, 2716), False, 'import json\n')] |
YuHe0108/cvmodule | Segment/models/other/fcn.py | ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd | # from tensorflow.keras import Model, Input
# from tensorflow.keras.applications import vgg16, resnet50
# from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
# from tensorflow.keras import layers
# import tensorflow as tf
#
# """
# FCN-8 key points:
#     1. A fully convolutional (fully conv) network with no fully connected (fc) layers, so it accepts inputs of any size.
#     2. Deconvolution (deconv) layers that enlarge the feature maps, producing fine-grained outputs.
#     3. A skip structure combining results from layers at different depths, ensuring both robustness and accuracy.
#     4. The skip structure fuses the outputs of multiple (3) stages; shallow layers can predict more positional
#         information because their small receptive fields see small pixels. When upsampling lower-resolution layers,
#         if the upsampled map differs in size from the earlier map (e.g. because of padding), crop it; once the maps
#         are the same size and spatially aligned, fuse the two layers with a concat operation.
#
#     Differences and relations between FCN-8, FCN-16 and FCN-32: the factor used in the final upsampling step.
#     1. Difference: the FCN model outputs feature maps such as [b, 16, 16, filters]; upsampling 32x directly gives
#         [b, 16*32, 16*32, n_classes], which is called FCN-32. FCN-16 and FCN-8 instead fuse feature maps from
#         different stages and upsample by 16x and 8x at the end.
# """
#
#
# def fcn8_helper(input_shape, num_classes, backbone):
# assert input_shape[0] % 32 == 0
# assert input_shape[1] % 32 == 0
#
# inputs = Input(input_shape)
# if backbone == 'vgg16':
# base_model = vgg16.VGG16(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=100)
# elif backbone == 'resnet50':
# base_model = resnet50.ResNet50(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=1000)
# assert isinstance(base_model, Model)
#     base_model.trainable = False  # whether to freeze the feature extraction backbone
#
# out = Conv2D(
# filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
# kernel_initializer="he_normal", name="score_fr")(out)
#
# # [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
# out = Conv2DTranspose(
# filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
#
# fcn8 = Model(inputs=inputs, outputs=out)
# return fcn8
#
#
# def fcn8_model(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='vgg16')
#
#     # "block4_pool" shape: [B, 16, 16, 512]  skip connection to fuse low-level features:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, filters]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
# Summed2 = add(inputs=[skip_con2, x])
#
#     # Upsample by 8x: directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# def fcn8_model_resnet50(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='resnet50')
#
#     # "block4_pool" shape: [B, 16, 16, 1024]  skip connection to fuse low-level features:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("conv4_block6_out").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, 512]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("conv3_block4_out").output)
# Summed2 = add(inputs=[skip_con2, x])
#
#     # Upsample by 8x: directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# if __name__ == '__main__':
# # m = FCN8(15, 320, 320)
# # from keras.utils import plot_model
# #
# # plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# # print(len(m.layers))
# model_1 = fcn8_model_resnet50(input_shape=(256, 256, 3), num_classes=1)
# model_1.summary()
# # inputs = tf.keras.Input((256, 256, 3))
# # base_model = resnet50.ResNet50(input_tensor=inputs,
# # include_top=False,
# # weights='imagenet',
# # pooling=None,
# # classes=1000)
# # base_model.summary()
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
from tensorflow.keras.applications import vgg16, resnet50
from tensorflow.keras import Model, Input
from tensorflow.keras import layers
"""
FCN-8 key points:
    1. A fully convolutional (fully conv) network with no fully connected (fc) layers, so it accepts inputs of any size.
    2. Deconvolution (deconv) layers that enlarge the feature maps, producing fine-grained outputs.
    3. A skip structure combining results from layers at different depths, ensuring both robustness and accuracy.
    4. The skip structure fuses the outputs of multiple (3) stages; shallow layers can predict more positional
       information because their small receptive fields see small pixels. When upsampling lower-resolution layers,
       if the upsampled map differs in size from the earlier map (e.g. because of padding), crop it; once the maps
       are the same size and spatially aligned, fuse the two layers with a concat operation.
    Differences and relations between FCN-8, FCN-16 and FCN-32: the factor used in the final upsampling step.
    1. Difference: the FCN model outputs feature maps such as [b, 16, 16, filters]; upsampling 32x directly gives
       [b, 16*32, 16*32, n_classes], which is called FCN-32. FCN-16 and FCN-8 instead fuse feature maps from
       different stages and upsample by 16x and 8x at the end.
"""
def fcn8_helper(input_shape, num_classes, weight_name='imagenet'):
assert input_shape[0] % 32 == 0
assert input_shape[1] % 32 == 0
inputs = Input(input_shape)
base_model = vgg16.VGG16(input_tensor=inputs,
include_top=False,
weights=weight_name,
pooling=None,
classes=100)
assert isinstance(base_model, Model)
    # base_model.trainable = False  # whether to freeze the feature extraction backbone
out = Conv2D(
filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
kernel_initializer="he_normal", name="score_fr")(out)
# [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
out = Conv2DTranspose(
filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
fcn8 = Model(inputs=inputs, outputs=out)
return fcn8
def fcn8_model(input_shape, num_classes):
fcn8 = fcn8_helper(input_shape, num_classes)
# "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征:
skip_con1 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
Summed = add(inputs=[skip_con1, fcn8.output])
# [B, 32, 32, num_classes]
x = Conv2DTranspose(
num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
# block3_pool: [B, 32, 32, filters]
skip_con2 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
Summed2 = add(inputs=[skip_con2, x])
    # Upsample by 8x: directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
outputs = Conv2DTranspose(
num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
activation='sigmoid', name="upsample")(Summed2)
if num_classes == 1:
outputs = layers.Activation('sigmoid')(outputs)
else:
outputs = layers.Softmax()(outputs)
fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# for layer_ in fcn_model.layers[:]:
# layer_.trainable = True
return fcn_model
if __name__ == '__main__':
# m = FCN8(15, 320, 320)
# from keras.utils import plot_model
#
# plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# print(len(m.layers))
model_1 = fcn8_model(input_shape=(256, 256, 3), num_classes=1)
model_1.summary()
| [((7112, 7130), 'tensorflow.keras.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (7117, 7130), False, 'from tensorflow.keras import Model, Input\n'), ((7149, 7252), 'tensorflow.keras.applications.vgg16.VGG16', 'vgg16.VGG16', ([], {'input_tensor': 'inputs', 'include_top': '(False)', 'weights': 'weight_name', 'pooling': 'None', 'classes': '(100)'}), '(input_tensor=inputs, include_top=False, weights=weight_name,\n pooling=None, classes=100)\n', (7160, 7252), False, 'from tensorflow.keras.applications import vgg16, resnet50\n'), ((8154, 8187), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'out'}), '(inputs=inputs, outputs=out)\n', (8159, 8187), False, 'from tensorflow.keras import Model, Input\n'), ((8574, 8610), 'tensorflow.keras.layers.add', 'add', ([], {'inputs': '[skip_con1, fcn8.output]'}), '(inputs=[skip_con1, fcn8.output])\n', (8577, 8610), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9041, 9067), 'tensorflow.keras.layers.add', 'add', ([], {'inputs': '[skip_con2, x]'}), '(inputs=[skip_con2, x])\n', (9044, 9067), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9462, 9517), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'fcn8.input', 'outputs': 'outputs', 'name': '"""FCN8s"""'}), "(inputs=fcn8.input, outputs=outputs, name='FCN8s')\n", (9467, 9517), False, 'from tensorflow.keras import Model, Input\n'), ((7474, 7561), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1024)', 'kernel_size': '(7)', 'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""fc6"""'}), "(filters=1024, kernel_size=7, padding='same', activation='relu', name\n ='fc6')\n", (7480, 7561), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7597, 7614), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (7604, 7614), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7631, 7718), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1024)', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""fc7"""'}), "(filters=1024, kernel_size=1, padding='same', activation='relu', name\n ='fc7')\n", (7637, 7718), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7740, 7757), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (7747, 7757), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7774, 7910), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'num_classes', 'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'name': '"""score_fr"""'}), "(filters=num_classes, kernel_size=(1, 1), padding='same', activation=\n 'relu', kernel_initializer='he_normal', name='score_fr')\n", (7780, 7910), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8003, 8128), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': 'num_classes', 'kernel_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': 'None', 'name': '"""score2"""'}), 
"(filters=num_classes, kernel_size=(2, 2), strides=(2, 2),\n padding='valid', activation=None, name='score2')\n", (8018, 8128), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8378, 8506), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_classes'], {'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': 'None', 'kernel_initializer': '"""he_normal"""', 'name': '"""score_pool4"""'}), "(num_classes, kernel_size=(1, 1), padding='same', activation=None,\n kernel_initializer='he_normal', name='score_pool4')\n", (8384, 8506), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8654, 8772), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['num_classes'], {'kernel_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': 'None', 'name': '"""score4"""'}), "(num_classes, kernel_size=(2, 2), strides=(2, 2), padding=\n 'valid', activation=None, name='score4')\n", (8669, 8772), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8844, 8972), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_classes'], {'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': 'None', 'kernel_initializer': '"""he_normal"""', 'name': '"""score_pool3"""'}), "(num_classes, kernel_size=(1, 1), padding='same', activation=None,\n kernel_initializer='he_normal', name='score_pool3')\n", (8850, 8972), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9153, 9278), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['num_classes'], {'kernel_size': '(8, 8)', 'strides': '(8, 8)', 'padding': '"""valid"""', 'activation': '"""sigmoid"""', 'name': '"""upsample"""'}), "(num_classes, kernel_size=(8, 8), strides=(8, 8), padding=\n 'valid', activation='sigmoid', name='upsample')\n", (9168, 9278), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9349, 9377), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9366, 9377), False, 'from tensorflow.keras import layers\n'), ((9417, 9433), 'tensorflow.keras.layers.Softmax', 'layers.Softmax', ([], {}), '()\n', (9431, 9433), False, 'from tensorflow.keras import layers\n')] |
lopippo/IsoSpec | tests/Python/test_all_configs_output.py | dfc6d7dac213f174fb9c61a5ee018d3f6174febc | def binom(n, k):
"""Quickly adapted from https://stackoverflow.com/questions/26560726/python-binomial-coefficient"""
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
def max_confs_cnt(formula=""):
"""Get the maximal number of configurations for a given chemical formula."""
from IsoSpecPy import IsoParamsFromFormula
f = IsoParamsFromFormula(formula)
if f.atomCount:
N = 1
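        # Stars and bars: an element with n atoms and k isotopes allows binom(n + k - 1, n)
        # distinct isotope-count assignments, and independent elements multiply.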
for n, p in zip(f.atomCount, f.prob):
N *= binom(n+len(p)-1, n)
return N
else:
return 0
def test_max_confs_cnt():
assert max_confs_cnt("O100") == 5151
assert max_confs_cnt("O100N10S6") == 4759524
test_formulas = [ 'O100',
'O100N10S6',
'C100H202',
'S10H20' ]
def test_all_configs_output_cnt():
"""Test if IsoSpecPy output correctly all configurations."""
from IsoSpecPy import IsoThreshold
global test_formulas
for f in test_formulas:
I = IsoThreshold(formula=f, threshold=0.0, absolute=True)
assert len(I) == max_confs_cnt(f)
print("Seems OK!")
if __name__ == "__main__":
test_all_configs_output_cnt()
| [((501, 530), 'IsoSpecPy.IsoParamsFromFormula', 'IsoParamsFromFormula', (['formula'], {}), '(formula)\n', (521, 530), False, 'from IsoSpecPy import IsoParamsFromFormula\n'), ((1150, 1203), 'IsoSpecPy.IsoThreshold', 'IsoThreshold', ([], {'formula': 'f', 'threshold': '(0.0)', 'absolute': '(True)'}), '(formula=f, threshold=0.0, absolute=True)\n', (1162, 1203), False, 'from IsoSpecPy import IsoThreshold\n')] |
soichih/TractSeg | tractseg/models/UNet_Pytorch_Regression.py | f78d0c6dc998905e593cbf4346745467e30d1979 | # Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from os.path import join
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adamax
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
from tractseg.libs.PytorchUtils import PytorchUtils
from tractseg.libs.ExpUtils import ExpUtils
from tractseg.models.BaseModel import BaseModel
from tractseg.libs.MetricUtils import MetricUtils
from tractseg.libs.PytorchUtils import conv2d
from tractseg.libs.PytorchUtils import deconv2d
class UNet_Pytorch_Regression(torch.nn.Module):
def __init__(self, n_input_channels=3, n_classes=7, n_filt=64, batchnorm=False, dropout=False):
super(UNet_Pytorch_Regression, self).__init__()
self.in_channel = n_input_channels
self.n_classes = n_classes
self.contr_1_1 = conv2d(n_input_channels, n_filt)
self.contr_1_2 = conv2d(n_filt, n_filt)
self.pool_1 = nn.MaxPool2d((2, 2))
self.contr_2_1 = conv2d(n_filt, n_filt * 2)
self.contr_2_2 = conv2d(n_filt * 2, n_filt * 2)
self.pool_2 = nn.MaxPool2d((2, 2))
self.contr_3_1 = conv2d(n_filt * 2, n_filt * 4)
self.contr_3_2 = conv2d(n_filt * 4, n_filt * 4)
self.pool_3 = nn.MaxPool2d((2, 2))
self.contr_4_1 = conv2d(n_filt * 4, n_filt * 8)
self.contr_4_2 = conv2d(n_filt * 8, n_filt * 8)
self.pool_4 = nn.MaxPool2d((2, 2))
self.dropout = nn.Dropout(p=0.4)
self.encode_1 = conv2d(n_filt * 8, n_filt * 16)
self.encode_2 = conv2d(n_filt * 16, n_filt * 16)
self.deconv_1 = deconv2d(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)
# self.deconv_1 = nn.Upsample(scale_factor=2) #does only upscale width and height #Similar results to deconv2d
self.expand_1_1 = conv2d(n_filt * 8 + n_filt * 16, n_filt * 8)
self.expand_1_2 = conv2d(n_filt * 8, n_filt * 8)
self.deconv_2 = deconv2d(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)
# self.deconv_2 = nn.Upsample(scale_factor=2)
self.expand_2_1 = conv2d(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)
self.expand_2_2 = conv2d(n_filt * 4, n_filt * 4, stride=1)
self.deconv_3 = deconv2d(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)
# self.deconv_3 = nn.Upsample(scale_factor=2)
self.expand_3_1 = conv2d(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)
self.expand_3_2 = conv2d(n_filt * 2, n_filt * 2, stride=1)
self.deconv_4 = deconv2d(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)
# self.deconv_4 = nn.Upsample(scale_factor=2)
self.expand_4_1 = conv2d(n_filt + n_filt * 2, n_filt, stride=1)
self.expand_4_2 = conv2d(n_filt, n_filt, stride=1)
self.conv_5 = nn.Conv2d(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True) # no activation function, because is in LossFunction (...WithLogits)
def forward(self, inpt):
contr_1_1 = self.contr_1_1(inpt)
contr_1_2 = self.contr_1_2(contr_1_1)
pool_1 = self.pool_1(contr_1_2)
contr_2_1 = self.contr_2_1(pool_1)
contr_2_2 = self.contr_2_2(contr_2_1)
pool_2 = self.pool_2(contr_2_2)
contr_3_1 = self.contr_3_1(pool_2)
contr_3_2 = self.contr_3_2(contr_3_1)
pool_3 = self.pool_3(contr_3_2)
contr_4_1 = self.contr_4_1(pool_3)
contr_4_2 = self.contr_4_2(contr_4_1)
pool_4 = self.pool_4(contr_4_2)
pool_4 = self.dropout(pool_4)
encode_1 = self.encode_1(pool_4)
encode_2 = self.encode_2(encode_1)
deconv_1 = self.deconv_1(encode_2)
concat1 = torch.cat([deconv_1, contr_4_2], 1)
expand_1_1 = self.expand_1_1(concat1)
expand_1_2 = self.expand_1_2(expand_1_1)
deconv_2 = self.deconv_2(expand_1_2)
concat2 = torch.cat([deconv_2, contr_3_2], 1)
expand_2_1 = self.expand_2_1(concat2)
expand_2_2 = self.expand_2_2(expand_2_1)
deconv_3 = self.deconv_3(expand_2_2)
concat3 = torch.cat([deconv_3, contr_2_2], 1)
expand_3_1 = self.expand_3_1(concat3)
expand_3_2 = self.expand_3_2(expand_3_1)
deconv_4 = self.deconv_4(expand_3_2)
concat4 = torch.cat([deconv_4, contr_1_2], 1)
expand_4_1 = self.expand_4_1(concat4)
expand_4_2 = self.expand_4_2(expand_4_1)
conv_5 = self.conv_5(expand_4_2)
return conv_5, None
 | [((1485, 1517), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_input_channels', 'n_filt'], {}), '(n_input_channels, n_filt)\n', (1491, 1517), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1543, 1565), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_filt', 'n_filt'], {}), '(n_filt, n_filt)\n', (1549, 1565), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1588, 1608), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (1600, 1608), True, 'import torch.nn as nn\n'), ((1635, 1661), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_filt', '(n_filt * 2)'], {}), '(n_filt, n_filt * 2)\n', (1641, 1661), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1687, 1717), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2)', '(n_filt * 2)'], {}), '(n_filt * 2, n_filt * 2)\n', (1693, 1717), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1740, 1760), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (1752, 1760), True, 'import torch.nn as nn\n'), ((1787, 1817), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2)', '(n_filt * 4)'], {}), '(n_filt * 2, n_filt * 4)\n', (1793, 1817), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1843, 1873), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4)', '(n_filt * 4)'], {}), '(n_filt * 4, n_filt * 4)\n', (1849, 1873), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1896, 1916), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (1908, 1916), True, 'import torch.nn as nn\n'), ((1943, 1973), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4)', '(n_filt * 8)'], {}), '(n_filt * 4, n_filt * 8)\n', (1949, 1973), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1999, 2029), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8)', '(n_filt * 8)'], {}), '(n_filt * 8, n_filt * 8)\n', (2005, 2029), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2052, 2072), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (2064, 2072), True, 'import torch.nn as nn\n'), ((2097, 2114), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.4)'}), '(p=0.4)\n', (2107, 2114), True, 'import torch.nn as nn\n'), ((2140, 2171), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8)', '(n_filt * 16)'], {}), '(n_filt * 8, n_filt * 16)\n', (2146, 2171), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2196, 2228), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 16)', '(n_filt * 16)'], {}), '(n_filt * 16, n_filt * 16)\n', (2202, 2228), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2253, 2312), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 16)', '(n_filt * 16)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)\n', (2261, 2312), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((2464, 2508), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8 + n_filt * 16)', '(n_filt * 8)'], {}), '(n_filt * 8 + n_filt * 16, n_filt * 8)\n', (2470, 2508), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2535, 2565), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8)', '(n_filt * 8)'], {}), '(n_filt * 8, n_filt * 8)\n', (2541, 2565), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2590, 2647), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 8)', '(n_filt * 8)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)\n', (2598, 2647), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((2729, 2782), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4 + n_filt * 8)', '(n_filt * 4)'], {'stride': '(1)'}), '(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)\n', (2735, 2782), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2809, 2849), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4)', '(n_filt * 4)'], {'stride': '(1)'}), '(n_filt * 4, n_filt * 4, stride=1)\n', (2815, 2849), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2874, 2931), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 4)', '(n_filt * 4)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)\n', (2882, 2931), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((3013, 3066), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2 + n_filt * 4)', '(n_filt * 2)'], {'stride': '(1)'}), '(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)\n', (3019, 3066), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3093, 3133), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2)', '(n_filt * 2)'], {'stride': '(1)'}), '(n_filt * 2, n_filt * 2, stride=1)\n', (3099, 3133), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3158, 3215), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 2)', '(n_filt * 2)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)\n', (3166, 3215), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((3297, 3342), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt + n_filt * 2)', 'n_filt'], {'stride': '(1)'}), '(n_filt + n_filt * 2, n_filt, stride=1)\n', (3303, 3342), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3369, 3401), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_filt', 'n_filt'], {'stride': '(1)'}), '(n_filt, n_filt, stride=1)\n', (3375, 3401), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3425, 3500), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_filt', 'n_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (3434, 3500), True, 'import torch.nn as nn\n'), ((4304, 4339), 'torch.cat', 'torch.cat', (['[deconv_1, contr_4_2]', '(1)'], {}), '([deconv_1, contr_4_2], 1)\n', (4313, 4339), False, 'import torch\n'), ((4499, 4534), 'torch.cat', 'torch.cat', (['[deconv_2, contr_3_2]', '(1)'], {}), '([deconv_2, contr_3_2], 1)\n', (4508, 4534), False, 'import torch\n'), ((4694, 4729), 'torch.cat', 'torch.cat', (['[deconv_3, contr_2_2]', '(1)'], {}), '([deconv_3, contr_2_2], 1)\n', (4703, 4729), False, 'import torch\n'), ((4889, 4924), 'torch.cat', 'torch.cat', (['[deconv_4, contr_1_2]', '(1)'], {}), '([deconv_4, contr_1_2], 1)\n', (4898, 4924), False, 'import torch\n')] |
hackerwins/polyaxon | platform/core/polyaxon/sidecar/sidecar/__main__.py | ff56a098283ca872abfbaae6ba8abba479ffa394 | import argparse
import time
from kubernetes.client.rest import ApiException
from polyaxon_client.client import PolyaxonClient
from polyaxon_k8s.manager import K8SManager
from sidecar import settings
from sidecar.monitor import is_pod_running
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--app_label',
type=str
)
parser.add_argument(
'--container_id',
type=str
)
parser.add_argument(
'--sleep_interval',
default=2,
type=int
)
parser.add_argument(
'--max_restarts',
default=0,
type=int
)
args = parser.parse_args()
arguments = args.__dict__
container_id = arguments.pop('container_id')
app_label = arguments.pop('app_label')
sleep_interval = arguments.pop('sleep_interval')
max_restarts = arguments.pop('max_restarts')
k8s_manager = K8SManager(namespace=settings.K8S_NAMESPACE, in_cluster=True)
client = PolyaxonClient()
client.set_internal_health_check()
retry = 0
is_running = True
status = None
while is_running and retry < 3:
time.sleep(sleep_interval)
try:
is_running, status = is_pod_running(k8s_manager,
settings.POD_ID,
container_id,
max_restarts)
except ApiException:
retry += 1
            time.sleep(sleep_interval)  # We wait a bit more before the next try
if status:
client.reconcile(status=status)
| [((285, 310), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (308, 310), False, 'import argparse\n'), ((920, 981), 'polyaxon_k8s.manager.K8SManager', 'K8SManager', ([], {'namespace': 'settings.K8S_NAMESPACE', 'in_cluster': '(True)'}), '(namespace=settings.K8S_NAMESPACE, in_cluster=True)\n', (930, 981), False, 'from polyaxon_k8s.manager import K8SManager\n'), ((995, 1011), 'polyaxon_client.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (1009, 1011), False, 'from polyaxon_client.client import PolyaxonClient\n'), ((1149, 1175), 'time.sleep', 'time.sleep', (['sleep_interval'], {}), '(sleep_interval)\n', (1159, 1175), False, 'import time\n'), ((1222, 1294), 'sidecar.monitor.is_pod_running', 'is_pod_running', (['k8s_manager', 'settings.POD_ID', 'container_id', 'max_restarts'], {}), '(k8s_manager, settings.POD_ID, container_id, max_restarts)\n', (1236, 1294), False, 'from sidecar.monitor import is_pod_running\n'), ((1503, 1529), 'time.sleep', 'time.sleep', (['sleep_interval'], {}), '(sleep_interval)\n', (1513, 1529), False, 'import time\n')] |
plusangel/simple_robot | simple_robot_tests/src/test_odometry.py | d9ad5ed8cd592f4aee14df13465435279b4d60d7 | #! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
class OdomTopicReader(object):
def __init__(self, topic_name = '/odom'):
self._topic_name = topic_name
self._sub = rospy.Subscriber(self._topic_name, Odometry, self.topic_callback)
self._odomdata = Odometry()
def topic_callback(self, msg):
self._odomdata = msg
rospy.loginfo(self._odomdata)
if __name__ == "__main__":
rospy.init_node('odom_topic_subscriber')
odom_reader_object = OdomTopicReader()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
| [((444, 484), 'rospy.init_node', 'rospy.init_node', (['"""odom_topic_subscriber"""'], {}), "('odom_topic_subscriber')\n", (459, 484), False, 'import rospy\n'), ((544, 558), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (554, 558), False, 'import rospy\n'), ((207, 272), 'rospy.Subscriber', 'rospy.Subscriber', (['self._topic_name', 'Odometry', 'self.topic_callback'], {}), '(self._topic_name, Odometry, self.topic_callback)\n', (223, 272), False, 'import rospy\n'), ((298, 308), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (306, 308), False, 'from nav_msgs.msg import Odometry\n'), ((382, 411), 'rospy.loginfo', 'rospy.loginfo', (['self._odomdata'], {}), '(self._odomdata)\n', (395, 411), False, 'import rospy\n'), ((573, 592), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (590, 592), False, 'import rospy\n')] |
kevinintel/neural-compressor | test/test_random.py | b57645566aeff8d3c18dc49d2739a583c072f940 | """Tests for quantization"""
import numpy as np
import unittest
import os
import shutil
import yaml
import tensorflow as tf
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
accuracy_criterion:
relative: 0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml2():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
exit_policy:
max_trials: 5
accuracy_criterion:
relative: -0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml2.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_model():
try:
graph = tf.Graph()
graph_def = tf.GraphDef()
with tf.Session() as sess:
x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filter=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.global_variables_initializer())
constant_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
except:
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.Session() as sess:
x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filters=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.compat.v1.global_variables_initializer())
constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, [
'op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
return graph
class TestQuantization(unittest.TestCase):
@classmethod
def setUpClass(self):
self.constant_graph = build_fake_model()
build_fake_yaml()
build_fake_yaml2()
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
os.remove('fake_yaml2.yaml')
shutil.rmtree("saved", ignore_errors=True)
def test_ru_random_one_trial(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
def test_ru_random_max_trials(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml2.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
if __name__ == "__main__":
unittest.main()
 | [((619, 663), 'yaml.load', 'yaml.load', (['fake_yaml'], {'Loader': 'yaml.SafeLoader'}), '(fake_yaml, Loader=yaml.SafeLoader)\n', (628, 663), False, 'import yaml\n'), ((1296, 1340), 'yaml.load', 'yaml.load', (['fake_yaml'], {'Loader': 'yaml.SafeLoader'}), '(fake_yaml, Loader=yaml.SafeLoader)\n', (1305, 1340), False, 'import yaml\n'), ((4415, 4430), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4428, 4430), False, 'import unittest\n'), ((735, 750), 'yaml.dump', 'yaml.dump', (['y', 'f'], {}), '(y, f)\n', (744, 750), False, 'import yaml\n'), ((1413, 1428), 'yaml.dump', 'yaml.dump', (['y', 'f'], {}), '(y, f)\n', (1422, 1428), False, 'import yaml\n'), ((1500, 1510), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1508, 1510), True, 'import tensorflow as tf\n'), ((1532, 1545), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1543, 1545), True, 'import tensorflow as tf\n'), ((3374, 3401), 'os.remove', 'os.remove', (['"""fake_yaml.yaml"""'], {}), "('fake_yaml.yaml')\n", (3383, 3401), False, 'import os\n'), ((3411, 3439), 'os.remove', 'os.remove', (['"""fake_yaml2.yaml"""'], {}), "('fake_yaml2.yaml')\n", (3420, 3439), False, 'import os\n'), ((3451, 3493), 'shutil.rmtree', 'shutil.rmtree', (['"""saved"""'], {'ignore_errors': '(True)'}), "('saved', ignore_errors=True)\n", (3464, 3493), False, 'import shutil\n'), ((3633, 3663), 'neural_compressor.experimental.Quantization', 'Quantization', (['"""fake_yaml.yaml"""'], {}), "('fake_yaml.yaml')\n", (3645, 3663), False, 'from neural_compressor.experimental import Quantization, common\n'), ((3776, 3802), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (3793, 3802), False, 'from neural_compressor.experimental import Quantization, common\n'), ((3840, 3866), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (3857, 3866), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4075, 4106), 'neural_compressor.experimental.Quantization', 'Quantization', (['"""fake_yaml2.yaml"""'], {}), "('fake_yaml2.yaml')\n", (4087, 4106), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4219, 4245), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (4236, 4245), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4283, 4309), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (4300, 4309), False, 'from neural_compressor.experimental import Quantization, common\n'), ((1560, 1572), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1570, 1572), True, 'import tensorflow as tf\n'), ((1599, 1655), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '(1, 3, 3, 1)', 'name': '"""x"""'}), "(tf.float64, shape=(1, 3, 3, 1), name='x')\n", (1613, 1655), True, 'import tensorflow as tf\n'), ((1745, 1840), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filter': 'y', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""op_to_store"""'}), "(input=x, filter=y, strides=[1, 1, 1, 1], padding='VALID', name\n ='op_to_store')\n", (1757, 1840), True, 'import tensorflow as tf\n'), ((1957, 2045), 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', "['op_to_store']"], {}), "(sess, sess.graph_def, [\n 'op_to_store'])\n", (2001, 2045), True, 'import tensorflow as tf\n'), ((2179, 2218), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (2198, 2218), True, 'import tensorflow as tf\n'), ((2249, 2259), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2257, 2259), True, 'import tensorflow as tf\n'), ((2281, 2304), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (2302, 2304), True, 'import tensorflow as tf\n'), ((1685, 1715), 'numpy.random.random', 'np.random.random', (['(2, 2, 1, 1)'], {}), '((2, 2, 1, 1))\n', (1701, 1715), True, 'import numpy as np\n'), ((1892, 1925), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1923, 1925), True, 'import tensorflow as tf\n'), ((2319, 2341), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2339, 2341), True, 'import tensorflow as tf\n'), ((2368, 2434), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float64'], {'shape': '(1, 3, 3, 1)', 'name': '"""x"""'}), "(tf.float64, shape=(1, 3, 3, 1), name='x')\n", (2392, 2434), True, 'import tensorflow as tf\n'), ((2534, 2629), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filters': 'y', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""op_to_store"""'}), "(input=x, filters=y, strides=[1, 1, 1, 1], padding='VALID',\n name='op_to_store')\n", (2546, 2629), True, 'import tensorflow as tf\n'), ((2757, 2854), 'tensorflow.compat.v1.graph_util.convert_variables_to_constants', 'tf.compat.v1.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', "['op_to_store']"], {}), "(sess, sess.graph_def,\n ['op_to_store'])\n", (2811, 2854), True, 'import tensorflow as tf\n'), ((3057, 3096), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (3076, 3096), True, 'import tensorflow as tf\n'), ((2474, 2504), 'numpy.random.random', 'np.random.random', (['(2, 2, 1, 1)'], {}), '((2, 2, 1, 1))\n', (2490, 2504), True, 'import numpy as np\n'), ((2682, 2725), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2723, 2725), True, 'import tensorflow as tf\n')] |
lilies/Cirq | cirq/google/engine/engine_client_test.py | 519b8b70ba4d2d92d1c034c398161ebdbd23e2e7 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EngineClient."""
import datetime
from unittest import mock
import pytest
from google.api_core import exceptions
from google.protobuf.field_mask_pb2 import FieldMask
from google.protobuf.timestamp_pb2 import Timestamp
from cirq.google.engine.engine_client import EngineClient, EngineException
from cirq.google.engine.client import quantum
from cirq.google.engine.client.quantum_v1alpha1 import enums as qenums
from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes
def setup_mock_(client_constructor):
grpc_client = mock.Mock()
client_constructor.return_value = grpc_client
return grpc_client
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.create_quantum_program.return_value = result
code = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_program('proj', 'prog', code, 'A program',
labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program',
labels=labels), False)
assert client.create_program('proj', 'prog', code,
'A program') == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program'), False)
assert client.create_program('proj', 'prog', code,
labels=labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
labels=labels), False)
assert client.create_program('proj', 'prog', code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code), False)
assert client.create_program('proj', program_id=None,
code=code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj', qtypes.QuantumProgram(code=code), False)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.get_quantum_program.return_value = result
client = EngineClient()
assert client.get_program('proj', 'prog', False) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert client.get_program('proj', 'prog', True) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumProgram(name='projects/proj/programs/prog1'),
qtypes.QuantumProgram(name='projects/proj/programs/prog2')
]
grpc_client.list_quantum_programs.return_value = results
client = EngineClient()
assert client.list_programs(project_id='proj') == results
assert grpc_client.list_quantum_programs.call_args[0] == ('projects/proj',)
assert grpc_client.list_quantum_programs.call_args[1] == {
'filter_': '',
}
# yapf: disable
@pytest.mark.parametrize(
'expected_filter, created_after, created_before, labels',
[
('',
None,
None,
None),
('create_time >= 2020-09-01',
datetime.date(2020, 9, 1),
None,
None),
('create_time >= 1598918400',
datetime.datetime(2020, 9, 1, 0, 0, 0,
tzinfo=datetime.timezone.utc),
None,
None),
('create_time <= 2020-10-01',
None,
datetime.date(2020, 10, 1),
None),
('create_time >= 2020-09-01 AND create_time <= 1598918410',
datetime.date(2020, 9, 1),
datetime.datetime(2020, 9, 1, 0, 0, 10,
tzinfo=datetime.timezone.utc),
None),
('labels.color:red AND labels.shape:*',
None,
None,
{
'color': 'red',
'shape': '*'
},
),
('create_time >= 2020-08-01 AND '
'create_time <= 1598918400 AND '
'labels.color:red AND labels.shape:*',
datetime.date(2020, 8, 1),
datetime.datetime(2020, 9, 1, tzinfo=datetime.timezone.utc),
{
'color': 'red',
'shape': '*'
},
),
])
# yapf: enable
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program_filters(client_constructor, expected_filter,
created_before, created_after, labels):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
client.list_programs(project_id='proj',
created_before=created_before,
created_after=created_after,
has_labels=labels)
assert grpc_client.list_quantum_programs.call_args[1] == {
'filter_': expected_filter,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program_filters_invalid_type(client_constructor):
with pytest.raises(ValueError, match=""):
EngineClient().list_programs(project_id='proj',
created_before="Unsupported date/time")
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.set_program_description('proj', 'prog', 'A program') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
description='A program'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_program_description('proj', 'prog', '') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.return_value = qtypes.QuantumProgram(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_program_labels('proj', 'prog', labels) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_program_labels('proj', 'prog', {}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.add_program_labels('proj', 'prog',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.add_program_labels('proj', 'prog',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_program_labels('proj', 'prog', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.remove_program_labels('proj', 'prog', ['other']) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.remove_program_labels('proj', 'prog',
['hello', 'weather']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_program_labels('proj', 'prog',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_program('proj', 'prog')
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert not client.delete_program('proj', 'prog', delete_jobs=True)
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.create_quantum_job.return_value = result
run_context = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10, 'A job',
labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job',
labels=labels), False)
assert client.create_job(
'proj',
'prog',
'job0',
['processor0'],
run_context,
10,
'A job',
) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job'), False)
assert client.create_job('proj',
'prog',
'job0', ['processor0'],
run_context,
10,
labels=labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
labels=labels), False)
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
assert client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
with pytest.raises(ValueError, match='priority must be between 0 and 1000'):
client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=5000)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_job.return_value = result
client = EngineClient()
assert client.get_job('proj', 'prog', 'job0', False) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', False)
assert client.get_job('proj', 'prog', 'job0', True) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.set_job_description('proj', 'prog', 'job0', 'A job') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
description='A job'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_job_description('proj', 'prog', 'job0', '') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_job.return_value = qtypes.QuantumJob(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_job_labels('proj', 'prog', 'job0', labels) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_job_labels('proj', 'prog', 'job0', {}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.add_job_labels('proj', 'prog', 'job0',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_job.call_count == 0
assert client.add_job_labels('proj', 'prog', 'job0',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_job_labels('proj', 'prog', 'job0', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.remove_job_labels('proj', 'prog', 'job0',
['other']) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.remove_job_labels('proj', 'prog', 'job0',
['hello', 'weather']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_job_labels('proj', 'prog', 'job0',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_job('proj', 'prog', 'job0')
assert grpc_client.delete_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_cancel_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.cancel_job('proj', 'prog', 'job0')
assert grpc_client.cancel_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_job_results(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumResult(
parent='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_result.return_value = result
client = EngineClient()
assert client.get_job_results('proj', 'prog', 'job0') == result
assert grpc_client.get_quantum_result.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_jobs(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumJob(name='projects/proj/programs/prog1/jobs/job1'),
qtypes.QuantumJob(name='projects/proj/programs/prog1/jobs/job2')
]
grpc_client.list_quantum_jobs.return_value = results
client = EngineClient()
assert client.list_jobs(project_id='proj', program_id='prog1') == results
assert grpc_client.list_quantum_jobs.call_args[0] == (
'projects/proj/programs/prog1',)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': '',
}
assert client.list_jobs(project_id='proj') == results
assert grpc_client.list_quantum_jobs.call_args[0] == (
'projects/proj/programs/-',)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': '',
}
# yapf: disable
@pytest.mark.parametrize(
'expected_filter, '
'created_after, '
'created_before, '
'labels, '
'execution_states',
[
('',
None,
None,
None,
None),
('create_time >= 2020-09-01',
datetime.date(2020, 9, 1),
None,
None,
None),
('create_time >= 1598918400',
datetime.datetime(2020, 9, 1, 0, 0, 0,
tzinfo=datetime.timezone.utc),
None,
None,
None),
('create_time <= 2020-10-01',
None,
datetime.date(2020, 10, 1),
None,
None),
('create_time >= 2020-09-01 AND create_time <= 1598918410',
datetime.date(2020, 9, 1),
datetime.datetime(2020, 9, 1, 0, 0, 10,
tzinfo=datetime.timezone.utc),
None,
None),
('labels.color:red AND labels.shape:*',
None,
None,
{
'color': 'red',
'shape': '*'
},
None
),
('(execution_status.state = FAILURE OR '
'execution_status.state = CANCELLED)',
None,
None,
None,
[quantum.enums.ExecutionStatus.State.FAILURE,
quantum.enums.ExecutionStatus.State.CANCELLED,]
),
('create_time >= 2020-08-01 AND '
'create_time <= 1598918400 AND '
'labels.color:red AND labels.shape:* AND '
'(execution_status.state = SUCCESS)',
datetime.date(2020, 8, 1),
datetime.datetime(2020, 9, 1, tzinfo=datetime.timezone.utc),
{
'color': 'red',
'shape': '*'
},
[quantum.enums.ExecutionStatus.State.SUCCESS,],
),
])
# yapf: enable
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_jobs_filters(client_constructor, expected_filter, created_before,
created_after, labels, execution_states):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
client.list_jobs(project_id='proj',
program_id='prog',
created_before=created_before,
created_after=created_after,
has_labels=labels,
execution_states=execution_states)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': expected_filter,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_processors(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumProcessor(name='projects/proj/processor/processor0'),
qtypes.QuantumProcessor(name='projects/proj/processor/processor1')
]
grpc_client.list_quantum_processors.return_value = results
client = EngineClient()
assert client.list_processors('proj') == results
assert grpc_client.list_quantum_processors.call_args[0] == (
'projects/proj',)
assert grpc_client.list_quantum_processors.call_args[1] == {
'filter_': '',
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_processor(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProcessor(name='projects/proj/processors/processor0')
grpc_client.get_quantum_processor.return_value = result
client = EngineClient()
assert client.get_processor('proj', 'processor0') == result
assert grpc_client.get_quantum_processor.call_args[0] == (
'projects/proj/processors/processor0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_calibrations(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumCalibration(
name='projects/proj/processor/processor0/calibrations/123456'),
qtypes.QuantumCalibration(
name='projects/proj/processor/processor1/calibrations/224466')
]
grpc_client.list_quantum_calibrations.return_value = results
client = EngineClient()
assert client.list_calibrations('proj', 'processor0') == results
assert grpc_client.list_quantum_calibrations.call_args[0] == (
'projects/proj/processors/processor0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_calibration('proj', 'processor0', 123456) == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/123456',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_does_not_exist(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') is None
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_error(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.BadRequest(
'boom')
client = EngineClient()
with pytest.raises(EngineException, match='boom'):
client.get_current_calibration('proj', 'processor0')
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_doesnt_retry_not_found_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
with pytest.raises(EngineException, match='not found'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count == 1
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_retry_5xx_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.ServiceUnavailable(
'internal error')
client = EngineClient(max_retry_delay_seconds=1)
with pytest.raises(TimeoutError,
match='Reached max retry attempts.*internal error'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count > 1
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
start = datetime.datetime.fromtimestamp(1000000000)
end = datetime.datetime.fromtimestamp(1000003600)
users = ['[email protected]']
result = qtypes.QuantumReservation(
name='projects/proj/processors/processor0/reservations/papar-party-44',
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000003600),
whitelisted_users=users,
)
grpc_client.create_quantum_reservation.return_value = result
client = EngineClient()
assert client.create_reservation('proj', 'processor0', start, end,
users) == result
assert grpc_client.create_quantum_reservation.call_count == 1
kwargs = grpc_client.create_quantum_reservation.call_args[1]
# The outgoing argument will not have the resource name
result.name = ''
assert kwargs == {
'parent': 'projects/proj/processors/processor0',
'quantum_reservation': result
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_cancel_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['[email protected]'],
)
grpc_client.cancel_quantum_reservation.return_value = result
client = EngineClient()
assert (client.cancel_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.cancel_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['[email protected]'],
)
grpc_client.delete_quantum_reservation.return_value = result
client = EngineClient()
assert (client.delete_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.delete_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['[email protected]'],
)
grpc_client.get_quantum_reservation.return_value = result
client = EngineClient()
assert (client.get_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.get_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation_not_found(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
grpc_client.get_quantum_reservation.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
assert (client.get_reservation('proj', 'processor0',
'papar-party-44') == None)
kwargs = grpc_client.get_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation_exception(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_reservation.side_effect = exceptions.BadRequest(
'boom')
client = EngineClient()
with pytest.raises(EngineException, match='boom'):
client.get_reservation('proj', 'processor0', 'goog')
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
results = [
qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['[email protected]'],
),
qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1200000000),
end_time=Timestamp(seconds=1200002000),
whitelisted_users=['[email protected]'],
),
]
grpc_client.list_quantum_reservations.return_value = results
client = EngineClient()
assert (client.list_reservations('proj', 'processor0') == results)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_update_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000001000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['[email protected]'],
)
grpc_client.update_quantum_reservation.return_value = result
client = EngineClient()
assert (client.update_reservation(
'proj',
'processor0',
'papar-party-44',
start=datetime.datetime.fromtimestamp(1000001000),
end=datetime.datetime.fromtimestamp(1000002000),
whitelisted_users=['[email protected]'],
) == result)
kwargs = grpc_client.update_quantum_reservation.call_args[1]
assert kwargs == {
'name':
name,
'quantum_reservation':
result,
'update_mask':
FieldMask(paths=['start_time', 'end_time', 'whitelisted_users'])
}
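# Clearing the whitelist entirely should still produce an update mask containing
# whitelisted_users so the field is emptied rather than ignored.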
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_update_reservation_remove_all_users(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
whitelisted_users=[],
)
grpc_client.update_quantum_reservation.return_value = result
client = EngineClient()
assert (client.update_reservation(
'proj',
'processor0',
'papar-party-44',
whitelisted_users=[],
) == result)
kwargs = grpc_client.update_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
'quantum_reservation': result,
'update_mask': FieldMask(paths=['whitelisted_users'])
}
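# list_time_slots should return both maintenance and reservation slot types as
# reported by the stub.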
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_time_slots(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumTimeSlot(
processor_name='potofgold',
start_time=Timestamp(seconds=1000020000),
end_time=Timestamp(seconds=1000040000),
slot_type=qenums.QuantumTimeSlot.TimeSlotType.MAINTENANCE,
maintenance_config=qtypes.QuantumTimeSlot.MaintenanceConfig(
title='Testing',
description='Testing some new configuration.',
),
),
qtypes.QuantumTimeSlot(
processor_name='potofgold',
start_time=Timestamp(seconds=1000010000),
end_time=Timestamp(seconds=1000020000),
slot_type=qenums.QuantumTimeSlot.TimeSlotType.RESERVATION,
reservation_config=qtypes.QuantumTimeSlot.ReservationConfig(
project_id='super_secret_quantum'),
)
]
grpc_client.list_quantum_time_slots.return_value = results
client = EngineClient()
assert (client.list_time_slots('proj', 'processor0') == results)
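# Illustrative sketch (not part of the original suite): chains the get and delete
# calls exercised individually above against one mocked stub. It reuses the
# module's setup_mock_ helper and EngineClient wrapper; the test name below is
# hypothetical.
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_then_delete_reservation_sketch(client_constructor):
    grpc_client = setup_mock_(client_constructor)
    name = 'projects/proj/processors/processor0/reservations/papar-party-44'
    reservation = qtypes.QuantumReservation(name=name)
    # Both stub methods hand back the same reservation proto.
    grpc_client.get_quantum_reservation.return_value = reservation
    grpc_client.delete_quantum_reservation.return_value = reservation
    client = EngineClient()
    assert (client.get_reservation('proj', 'processor0',
                                   'papar-party-44') == reservation)
    assert (client.delete_reservation('proj', 'processor0',
                                      'papar-party-44') == reservation)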
| [((1222, 1293), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (1239, 1293), False, 'from unittest import mock\n'), ((3338, 3409), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (3355, 3409), False, 'from unittest import mock\n'), ((4003, 4074), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (4020, 4074), False, 'from unittest import mock\n'), ((6020, 6091), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (6037, 6091), False, 'from unittest import mock\n'), ((6613, 6684), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (6630, 6684), False, 'from unittest import mock\n'), ((6931, 7002), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (6948, 7002), False, 'from unittest import mock\n'), ((7949, 8020), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (7966, 8020), False, 'from unittest import mock\n'), ((9317, 9388), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (9334, 9388), False, 'from unittest import mock\n'), ((11437, 11508), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (11454, 11508), False, 'from unittest import mock\n'), ((13165, 13236), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (13182, 13236), False, 'from unittest import mock\n'), ((13708, 13779), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (13725, 13779), False, 'from unittest import mock\n'), ((17933, 18004), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (17950, 18004), False, 'from unittest import mock\n'), ((18616, 18687), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (18633, 18687), False, 'from unittest import mock\n'), ((19652, 19723), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (19669, 19723), False, 
'from unittest import mock\n'), ((21030, 21101), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (21047, 21101), False, 'from unittest import mock\n'), ((23104, 23175), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (23121, 23175), False, 'from unittest import mock\n'), ((24858, 24929), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (24875, 24929), False, 'from unittest import mock\n'), ((25220, 25291), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (25237, 25291), False, 'from unittest import mock\n'), ((25582, 25653), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (25599, 25653), False, 'from unittest import mock\n'), ((26105, 26176), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (26122, 26176), False, 'from unittest import mock\n'), ((28963, 29034), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (28980, 29034), False, 'from unittest import mock\n'), ((29644, 29715), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (29661, 29715), False, 'from unittest import mock\n'), ((30319, 30390), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (30336, 30390), False, 'from unittest import mock\n'), ((30834, 30905), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (30851, 30905), False, 'from unittest import mock\n'), ((31529, 31600), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (31546, 31600), False, 'from unittest import mock\n'), ((32111, 32182), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (32128, 32182), False, 'from unittest import mock\n'), ((32702, 32773), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (32719, 32773), False, 'from unittest import mock\n'), ((33228, 33299), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 
'QuantumEngineServiceClient', autospec=True)\n", (33245, 33299), False, 'from unittest import mock\n'), ((33652, 33723), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (33669, 33723), False, 'from unittest import mock\n'), ((34131, 34202), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (34148, 34202), False, 'from unittest import mock\n'), ((34689, 34760), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (34706, 34760), False, 'from unittest import mock\n'), ((35817, 35888), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (35834, 35888), False, 'from unittest import mock\n'), ((36615, 36686), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (36632, 36686), False, 'from unittest import mock\n'), ((37413, 37484), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (37430, 37484), False, 'from unittest import mock\n'), ((38196, 38267), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (38213, 38267), False, 'from unittest import mock\n'), ((38811, 38882), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (38828, 38882), False, 'from unittest import mock\n'), ((39230, 39301), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (39247, 39301), False, 'from unittest import mock\n'), ((40121, 40192), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (40138, 40192), False, 'from unittest import mock\n'), ((41226, 41297), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (41243, 41297), False, 'from unittest import mock\n'), ((42050, 42121), 'unittest.mock.patch.object', 'mock.patch.object', (['quantum', '"""QuantumEngineServiceClient"""'], {'autospec': '(True)'}), "(quantum, 'QuantumEngineServiceClient', autospec=True)\n", (42067, 42121), False, 'from unittest import mock\n'), ((1134, 1145), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1143, 1145), False, 'from unittest import mock\n'), ((1403, 1460), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""'}), "(name='projects/proj/programs/prog')\n", (1424, 1460), True, 'from 
cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((1534, 1554), 'cirq.google.engine.client.quantum_v1alpha1.types.any_pb2.Any', 'qtypes.any_pb2.Any', ([], {}), '()\n', (1552, 1554), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((1600, 1614), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (1612, 1614), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((3516, 3573), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""'}), "(name='projects/proj/programs/prog')\n", (3537, 3573), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((3646, 3660), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (3658, 3660), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((4401, 4415), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (4413, 4415), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((6292, 6306), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (6304, 6306), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((7121, 7178), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""'}), "(name='projects/proj/programs/prog')\n", (7142, 7178), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((7254, 7268), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (7266, 7268), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((8172, 8278), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'labels': "{'color': 'red', 'weather': 'sun', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(labels={'color': 'red', 'weather': 'sun', 'run': '1'},\n label_fingerprint='hash')\n", (8193, 8278), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((8351, 8408), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""'}), "(name='projects/proj/programs/prog')\n", (8372, 8408), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((8484, 8498), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (8496, 8498), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((9504, 9610), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'labels': "{'color': 'red', 'weather': 'sun', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(labels={'color': 'red', 'weather': 'sun', 'run': '1'},\n label_fingerprint='hash')\n", (9525, 9610), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((9747, 9804), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""'}), "(name='projects/proj/programs/prog')\n", (9768, 9804), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((9880, 9894), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', 
([], {}), '()\n', (9892, 9894), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((11627, 11733), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'labels': "{'color': 'red', 'weather': 'sun', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(labels={'color': 'red', 'weather': 'sun', 'run': '1'},\n label_fingerprint='hash')\n", (11648, 11733), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((11870, 11927), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""'}), "(name='projects/proj/programs/prog')\n", (11891, 11927), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((12003, 12017), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (12015, 12017), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((13346, 13360), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (13358, 13360), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((13885, 13948), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""'}), "(name='projects/proj/programs/prog/jobs/job0')\n", (13902, 13948), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((14025, 14045), 'cirq.google.engine.client.quantum_v1alpha1.types.any_pb2.Any', 'qtypes.any_pb2.Any', ([], {}), '()\n', (14043, 14045), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((14091, 14105), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (14103, 14105), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((18107, 18170), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""'}), "(name='projects/proj/programs/prog/jobs/job0')\n", (18124, 18170), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((18239, 18253), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (18251, 18253), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((18802, 18865), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""'}), "(name='projects/proj/programs/prog/jobs/job0')\n", (18819, 18865), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((18937, 18951), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (18949, 18951), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((19867, 19969), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'labels': "{'color': 'red', 'weather': 'sun', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(labels={'color': 'red', 'weather': 'sun', 'run': '1'},\n label_fingerprint='hash')\n", (19884, 19969), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((20042, 20105), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': 
'"""projects/proj/programs/prog/jobs/job0"""'}), "(name='projects/proj/programs/prog/jobs/job0')\n", (20059, 20105), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((20177, 20191), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (20189, 20191), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((21213, 21315), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'labels': "{'color': 'red', 'weather': 'sun', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(labels={'color': 'red', 'weather': 'sun', 'run': '1'},\n label_fingerprint='hash')\n", (21230, 21315), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((21444, 21507), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""'}), "(name='projects/proj/programs/prog/jobs/job0')\n", (21461, 21507), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((21579, 21593), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (21591, 21593), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((23290, 23392), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'labels': "{'color': 'red', 'weather': 'sun', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(labels={'color': 'red', 'weather': 'sun', 'run': '1'},\n label_fingerprint='hash')\n", (23307, 23392), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((23521, 23584), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""'}), "(name='projects/proj/programs/prog/jobs/job0')\n", (23538, 23584), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((23656, 23670), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (23668, 23670), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((25035, 25049), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (25047, 25049), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((25397, 25411), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (25409, 25411), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((25760, 25828), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumResult', 'qtypes.QuantumResult', ([], {'parent': '"""projects/proj/programs/prog/jobs/job0"""'}), "(parent='projects/proj/programs/prog/jobs/job0')\n", (25780, 25828), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((25909, 25923), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (25921, 25923), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((26508, 26522), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (26520, 26522), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((29247, 29261), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (29259, 29261), False, 'from cirq.google.engine.engine_client import EngineClient, 
EngineException\n'), ((30063, 30077), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (30075, 30077), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((30499, 30566), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProcessor', 'qtypes.QuantumProcessor', ([], {'name': '"""projects/proj/processors/processor0"""'}), "(name='projects/proj/processors/processor0')\n", (30522, 30566), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((30641, 30655), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (30653, 30655), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((31327, 31341), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (31339, 31341), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((31711, 31805), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumCalibration', 'qtypes.QuantumCalibration', ([], {'name': '"""projects/proj/processors/processor0/calibrations/123456"""'}), "(name=\n 'projects/proj/processors/processor0/calibrations/123456')\n", (31736, 31805), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((31886, 31900), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (31898, 31900), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((32301, 32395), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumCalibration', 'qtypes.QuantumCalibration', ([], {'name': '"""projects/proj/processors/processor0/calibrations/123456"""'}), "(name=\n 'projects/proj/processors/processor0/calibrations/123456')\n", (32326, 32395), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((32476, 32490), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (32488, 32490), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((32948, 32980), 'google.api_core.exceptions.NotFound', 'exceptions.NotFound', (['"""not found"""'], {}), "('not found')\n", (32967, 32980), False, 'from google.api_core import exceptions\n'), ((33004, 33018), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (33016, 33018), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((33465, 33494), 'google.api_core.exceptions.BadRequest', 'exceptions.BadRequest', (['"""boom"""'], {}), "('boom')\n", (33486, 33494), False, 'from google.api_core import exceptions\n'), ((33518, 33532), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (33530, 33532), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((33888, 33920), 'google.api_core.exceptions.NotFound', 'exceptions.NotFound', (['"""not found"""'], {}), "('not found')\n", (33907, 33920), False, 'from google.api_core import exceptions\n'), ((33944, 33958), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (33956, 33958), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((34354, 34401), 'google.api_core.exceptions.ServiceUnavailable', 'exceptions.ServiceUnavailable', (['"""internal error"""'], {}), "('internal error')\n", (34383, 34401), False, 'from google.api_core import exceptions\n'), ((34425, 34464), 
'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {'max_retry_delay_seconds': '(1)'}), '(max_retry_delay_seconds=1)\n', (34437, 34464), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((34872, 34915), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(1000000000)'], {}), '(1000000000)\n', (34903, 34915), False, 'import datetime\n'), ((34926, 34969), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(1000003600)'], {}), '(1000003600)\n', (34957, 34969), False, 'import datetime\n'), ((35338, 35352), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (35350, 35352), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((36354, 36368), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (36366, 36368), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((37152, 37166), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (37164, 37166), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((37944, 37958), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (37956, 37958), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((38505, 38537), 'google.api_core.exceptions.NotFound', 'exceptions.NotFound', (['"""not found"""'], {}), "('not found')\n", (38524, 38537), False, 'from google.api_core import exceptions\n'), ((38561, 38575), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (38573, 38575), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((39043, 39072), 'google.api_core.exceptions.BadRequest', 'exceptions.BadRequest', (['"""boom"""'], {}), "('boom')\n", (39064, 39072), False, 'from google.api_core import exceptions\n'), ((39096, 39110), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (39108, 39110), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((40032, 40046), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (40044, 40046), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((40658, 40672), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (40670, 40672), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((41504, 41562), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumReservation', 'qtypes.QuantumReservation', ([], {'name': 'name', 'whitelisted_users': '[]'}), '(name=name, whitelisted_users=[])\n', (41529, 41562), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((41665, 41679), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (41677, 41679), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((43145, 43159), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (43157, 43159), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((4193, 4251), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog1"""'}), "(name='projects/proj/programs/prog1')\n", (4214, 4251), True, 'from 
cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((4261, 4319), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog2"""'}), "(name='projects/proj/programs/prog2')\n", (4282, 4319), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((6758, 6793), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""""""'}), "(ValueError, match='')\n", (6771, 6793), False, 'import pytest\n'), ((17603, 17673), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""priority must be between 0 and 1000"""'}), "(ValueError, match='priority must be between 0 and 1000')\n", (17616, 17673), False, 'import pytest\n'), ((26292, 26356), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog1/jobs/job1"""'}), "(name='projects/proj/programs/prog1/jobs/job1')\n", (26309, 26356), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((26366, 26430), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog1/jobs/job2"""'}), "(name='projects/proj/programs/prog1/jobs/job2')\n", (26383, 26430), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((29837, 29903), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProcessor', 'qtypes.QuantumProcessor', ([], {'name': '"""projects/proj/processor/processor0"""'}), "(name='projects/proj/processor/processor0')\n", (29860, 29903), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((29913, 29979), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProcessor', 'qtypes.QuantumProcessor', ([], {'name': '"""projects/proj/processor/processor1"""'}), "(name='projects/proj/processor/processor1')\n", (29936, 29979), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((31029, 31122), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumCalibration', 'qtypes.QuantumCalibration', ([], {'name': '"""projects/proj/processor/processor0/calibrations/123456"""'}), "(name=\n 'projects/proj/processor/processor0/calibrations/123456')\n", (31054, 31122), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((31140, 31233), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumCalibration', 'qtypes.QuantumCalibration', ([], {'name': '"""projects/proj/processor/processor1/calibrations/224466"""'}), "(name=\n 'projects/proj/processor/processor1/calibrations/224466')\n", (31165, 31233), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((33542, 33586), 'pytest.raises', 'pytest.raises', (['EngineException'], {'match': '"""boom"""'}), "(EngineException, match='boom')\n", (33555, 33586), False, 'import pytest\n'), ((33968, 34017), 'pytest.raises', 'pytest.raises', (['EngineException'], {'match': '"""not found"""'}), "(EngineException, match='not found')\n", (33981, 34017), False, 'import pytest\n'), ((34474, 34553), 'pytest.raises', 'pytest.raises', (['TimeoutError'], {'match': '"""Reached max retry attempts.*internal error"""'}), "(TimeoutError, match='Reached max retry attempts.*internal error')\n", (34487, 34553), False, 'import pytest\n'), ((39120, 39164), 'pytest.raises', 'pytest.raises', (['EngineException'], {'match': '"""boom"""'}), "(EngineException, match='boom')\n", (39133, 39164), False, 
'import pytest\n'), ((1841, 1953), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'code': 'code', 'description': '"""A program"""', 'labels': 'labels'}), "(name='projects/proj/programs/prog', code=code,\n description='A program', labels=labels)\n", (1862, 1953), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((2267, 2364), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'code': 'code', 'description': '"""A program"""'}), "(name='projects/proj/programs/prog', code=code,\n description='A program')\n", (2288, 2364), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((2650, 2738), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'code': 'code', 'labels': 'labels'}), "(name='projects/proj/programs/prog', code=code, labels\n =labels)\n", (2671, 2738), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((2975, 3043), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'code': 'code'}), "(name='projects/proj/programs/prog', code=code)\n", (2996, 3043), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((3294, 3326), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'code': 'code'}), '(code=code)\n', (3315, 3326), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((4880, 4905), 'datetime.date', 'datetime.date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (4893, 4905), False, 'import datetime\n'), ((4994, 5062), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)', '(0)', '(0)', '(0)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)\n', (5011, 5062), False, 'import datetime\n'), ((5199, 5225), 'datetime.date', 'datetime.date', (['(2020)', '(10)', '(1)'], {}), '(2020, 10, 1)\n', (5212, 5225), False, 'import datetime\n'), ((5326, 5351), 'datetime.date', 'datetime.date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (5339, 5351), False, 'import datetime\n'), ((5365, 5434), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)', '(0)', '(0)', '(10)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, 0, 0, 10, tzinfo=datetime.timezone.utc)\n', (5382, 5434), False, 'import datetime\n'), ((5804, 5829), 'datetime.date', 'datetime.date', (['(2020)', '(8)', '(1)'], {}), '(2020, 8, 1)\n', (5817, 5829), False, 'import datetime\n'), ((5843, 5902), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, tzinfo=datetime.timezone.utc)\n', (5860, 5902), False, 'import datetime\n'), ((7461, 7548), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'description': '"""A program"""'}), "(name='projects/proj/programs/prog', description=\n 'A program')\n", (7482, 7548), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((7583, 7637), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': 
"['description']"}), "(paths=['description'])\n", (7614, 7637), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((7823, 7880), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""'}), "(name='projects/proj/programs/prog')\n", (7844, 7880), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((7890, 7944), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['description']"}), "(paths=['description'])\n", (7921, 7944), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((8742, 8844), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'labels': 'labels', 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog', labels=labels,\n label_fingerprint='hash')\n", (8763, 8844), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((8910, 8959), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (8941, 8959), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((9140, 9228), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog', label_fingerprint\n ='hash')\n", (9161, 9228), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((9263, 9312), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (9294, 9312), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((10309, 10473), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'labels': "{'color': 'red', 'weather': 'sun', 'run': '1', 'hello': 'world'}", 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog', labels={'color':\n 'red', 'weather': 'sun', 'run': '1', 'hello': 'world'},\n label_fingerprint='hash')\n", (10330, 10473), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((10703, 10752), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (10734, 10752), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((10988, 11153), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'labels': "{'color': 'blue', 'weather': 'sun', 'run': '1', 'hello': 'world'}", 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog', labels={'color':\n 'blue', 'weather': 'sun', 'run': '1', 'hello': 'world'},\n label_fingerprint='hash')\n", (11009, 11153), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((11383, 11432), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), 
"(paths=['labels'])\n", (11414, 11432), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((12399, 12523), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'labels': "{'color': 'red', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog', labels={'color':\n 'red', 'run': '1'}, label_fingerprint='hash')\n", (12420, 12523), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((12690, 12739), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (12721, 12739), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((12988, 13076), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram', 'qtypes.QuantumProgram', ([], {'name': '"""projects/proj/programs/prog"""', 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog', label_fingerprint\n ='hash')\n", (13009, 13076), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((13111, 13160), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (13142, 13160), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((19150, 19239), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""', 'description': '"""A job"""'}), "(name='projects/proj/programs/prog/jobs/job0', description\n ='A job')\n", (19167, 19239), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((19270, 19324), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['description']"}), "(paths=['description'])\n", (19301, 19324), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((19520, 19583), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""'}), "(name='projects/proj/programs/prog/jobs/job0')\n", (19537, 19583), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((19593, 19647), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['description']"}), "(paths=['description'])\n", (19624, 19647), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((20445, 20554), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""', 'labels': 'labels', 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog/jobs/job0', labels=\n labels, label_fingerprint='hash')\n", (20462, 20554), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((20611, 20660), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (20642, 20660), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((20851, 20944), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 
'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""', 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog/jobs/job0',\n label_fingerprint='hash')\n", (20868, 20944), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((20976, 21025), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (21007, 21025), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((22010, 22181), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""', 'labels': "{'color': 'red', 'weather': 'sun', 'run': '1', 'hello': 'world'}", 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog/jobs/job0', labels={\n 'color': 'red', 'weather': 'sun', 'run': '1', 'hello': 'world'},\n label_fingerprint='hash')\n", (22027, 22181), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((22382, 22431), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (22413, 22431), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((22677, 22849), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""', 'labels': "{'color': 'blue', 'weather': 'sun', 'run': '1', 'hello': 'world'}", 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog/jobs/job0', labels={\n 'color': 'blue', 'weather': 'sun', 'run': '1', 'hello': 'world'},\n label_fingerprint='hash')\n", (22694, 22849), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((23050, 23099), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (23081, 23099), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((24098, 24229), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""', 'labels': "{'color': 'red', 'run': '1'}", 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog/jobs/job0', labels={\n 'color': 'red', 'run': '1'}, label_fingerprint='hash')\n", (24115, 24229), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((24375, 24424), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (24406, 24424), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((24679, 24772), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob', 'qtypes.QuantumJob', ([], {'name': '"""projects/proj/programs/prog/jobs/job0"""', 'label_fingerprint': '"""hash"""'}), "(name='projects/proj/programs/prog/jobs/job0',\n label_fingerprint='hash')\n", (24696, 24772), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((24804, 24853), 'cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask', 'qtypes.field_mask_pb2.FieldMask', ([], {'paths': "['labels']"}), "(paths=['labels'])\n", (24835, 24853), True, 'from 
cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((27342, 27367), 'datetime.date', 'datetime.date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (27355, 27367), False, 'import datetime\n'), ((27474, 27542), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)', '(0)', '(0)', '(0)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)\n', (27491, 27542), False, 'import datetime\n'), ((27697, 27723), 'datetime.date', 'datetime.date', (['(2020)', '(10)', '(1)'], {}), '(2020, 10, 1)\n', (27710, 27723), False, 'import datetime\n'), ((27842, 27867), 'datetime.date', 'datetime.date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (27855, 27867), False, 'import datetime\n'), ((27881, 27950), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)', '(0)', '(0)', '(10)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, 0, 0, 10, tzinfo=datetime.timezone.utc)\n', (27898, 27950), False, 'import datetime\n'), ((28687, 28712), 'datetime.date', 'datetime.date', (['(2020)', '(8)', '(1)'], {}), '(2020, 8, 1)\n', (28700, 28712), False, 'import datetime\n'), ((28726, 28785), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, tzinfo=datetime.timezone.utc)\n', (28743, 28785), False, 'import datetime\n'), ((35141, 35170), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (35150, 35170), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((35189, 35218), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000003600)'}), '(seconds=1000003600)\n', (35198, 35218), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36143, 36172), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (36152, 36172), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36191, 36220), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (36200, 36220), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36941, 36970), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (36950, 36970), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36989, 37018), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (36998, 37018), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((37736, 37765), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (37745, 37765), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((37784, 37813), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (37793, 37813), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((40447, 40476), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000001000)'}), '(seconds=1000001000)\n', (40456, 40476), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((40495, 40524), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (40504, 40524), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((41152, 41216), 
'google.protobuf.field_mask_pb2.FieldMask', 'FieldMask', ([], {'paths': "['start_time', 'end_time', 'whitelisted_users']"}), "(paths=['start_time', 'end_time', 'whitelisted_users'])\n", (41161, 41216), False, 'from google.protobuf.field_mask_pb2 import FieldMask\n'), ((42002, 42040), 'google.protobuf.field_mask_pb2.FieldMask', 'FieldMask', ([], {'paths': "['whitelisted_users']"}), "(paths=['whitelisted_users'])\n", (42011, 42040), False, 'from google.protobuf.field_mask_pb2 import FieldMask\n'), ((6803, 6817), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (6815, 6817), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((39573, 39602), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (39582, 39602), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((39625, 39654), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (39634, 39654), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((39799, 39828), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1200000000)'}), '(seconds=1200000000)\n', (39808, 39828), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((39851, 39880), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1200002000)'}), '(seconds=1200002000)\n', (39860, 39880), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((40790, 40833), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(1000001000)'], {}), '(1000001000)\n', (40821, 40833), False, 'import datetime\n'), ((40847, 40890), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(1000002000)'], {}), '(1000002000)\n', (40878, 40890), False, 'import datetime\n'), ((42329, 42358), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000020000)'}), '(seconds=1000020000)\n', (42338, 42358), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42381, 42410), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000040000)'}), '(seconds=1000040000)\n', (42390, 42410), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42514, 42623), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumTimeSlot.MaintenanceConfig', 'qtypes.QuantumTimeSlot.MaintenanceConfig', ([], {'title': '"""Testing"""', 'description': '"""Testing some new configuration."""'}), "(title='Testing', description=\n 'Testing some new configuration.')\n", (42554, 42623), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((42773, 42802), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000010000)'}), '(seconds=1000010000)\n', (42782, 42802), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42825, 42854), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000020000)'}), '(seconds=1000020000)\n', (42834, 42854), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42958, 43033), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumTimeSlot.ReservationConfig', 'qtypes.QuantumTimeSlot.ReservationConfig', ([], {'project_id': '"""super_secret_quantum"""'}), "(project_id='super_secret_quantum')\n", (42998, 43033), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((14627, 14730), 
'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (14668, 14730), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((15336, 15439), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (15377, 15439), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((16106, 16209), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (16147, 16209), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((16739, 16842), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (16780, 16842), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((17453, 17556), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (17494, 17556), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n')] |
jwygoda/google-ads-python | google/ads/google_ads/v0/proto/services/media_file_service_pb2_grpc.py | 863892b533240cb45269d9c2cceec47e2c5a8b68 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v0.proto.resources import media_file_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2
from google.ads.google_ads.v0.proto.services import media_file_service_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2
class MediaFileServiceStub(object):
"""Service to manage media files.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetMediaFile = channel.unary_unary(
'/google.ads.googleads.v0.services.MediaFileService/GetMediaFile',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.FromString,
)
self.MutateMediaFiles = channel.unary_unary(
'/google.ads.googleads.v0.services.MediaFileService/MutateMediaFiles',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.FromString,
)
class MediaFileServiceServicer(object):
"""Service to manage media files.
"""
def GetMediaFile(self, request, context):
"""Returns the requested media file in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateMediaFiles(self, request, context):
"""Creates media files. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MediaFileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetMediaFile': grpc.unary_unary_rpc_method_handler(
servicer.GetMediaFile,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString,
),
'MutateMediaFiles': grpc.unary_unary_rpc_method_handler(
servicer.MutateMediaFiles,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v0.services.MediaFileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
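# Illustrative server-side wiring (an assumption for documentation purposes;
# not part of this auto-generated file, which should not be edited):
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_MediaFileServiceServicer_to_server(MediaFileServiceServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()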
| [((2991, 3106), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""google.ads.googleads.v0.services.MediaFileService"""', 'rpc_method_handlers'], {}), "(\n 'google.ads.googleads.v0.services.MediaFileService', rpc_method_handlers)\n", (3027, 3106), False, 'import grpc\n'), ((2180, 2528), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetMediaFile'], {'request_deserializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString', 'response_serializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString'}), '(servicer.GetMediaFile,\n request_deserializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2\n .GetMediaFileRequest.FromString, response_serializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2\n .MediaFile.SerializeToString)\n', (2215, 2528), False, 'import grpc\n'), ((2571, 2950), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.MutateMediaFiles'], {'request_deserializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString', 'response_serializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString'}), '(servicer.MutateMediaFiles,\n request_deserializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2\n .MutateMediaFilesRequest.FromString, response_serializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2\n .MutateMediaFilesResponse.SerializeToString)\n', (2606, 2950), False, 'import grpc\n')] |
KhaledSharif/kornia | docs/generate_example_images.py | 9bae28e032b092b065658117723a82816d09dbac | import importlib
import math
import os
from pathlib import Path
from typing import Optional, Tuple
import cv2
import numpy as np
import requests
import torch
import kornia as K
def read_img_from_url(url: str, resize_to: Optional[Tuple[int, int]] = None) -> torch.Tensor:
# perform request
response = requests.get(url).content
# convert to array of ints
nparr = np.frombuffer(response, np.uint8)
# convert to image array and resize
img: np.ndarray = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)[..., :3]
# convert the image to a tensor
img_t: torch.Tensor = K.utils.image_to_tensor(img, keepdim=False) # 1xCxHXW
img_t = img_t.float() / 255.0
if resize_to is None:
img_t = K.geometry.resize(img_t, 184)
else:
img_t = K.geometry.resize(img_t, resize_to)
return img_t
def main():
# load the images
BASE_IMAGE_URL1: str = "https://raw.githubusercontent.com/kornia/data/main/panda.jpg" # augmentation
BASE_IMAGE_URL2: str = "https://raw.githubusercontent.com/kornia/data/main/simba.png" # color
BASE_IMAGE_URL3: str = "https://raw.githubusercontent.com/kornia/data/main/girona.png" # enhance
BASE_IMAGE_URL4: str = "https://raw.githubusercontent.com/kornia/data/main/baby_giraffe.png" # morphology
BASE_IMAGE_URL5: str = "https://raw.githubusercontent.com/kornia/data/main/persistencia_memoria.jpg" # filters
BASE_IMAGE_URL6: str = "https://raw.githubusercontent.com/kornia/data/main/delorean.png" # geometry
OUTPUT_PATH = Path(__file__).absolute().parent / "source/_static/img"
os.makedirs(OUTPUT_PATH, exist_ok=True)
print(f"Pointing images to path {OUTPUT_PATH}.")
img1 = read_img_from_url(BASE_IMAGE_URL1)
img2 = read_img_from_url(BASE_IMAGE_URL2, img1.shape[-2:])
img3 = read_img_from_url(BASE_IMAGE_URL3, img1.shape[-2:])
img4 = read_img_from_url(BASE_IMAGE_URL4)
img5 = read_img_from_url(BASE_IMAGE_URL5, (234, 320))
img6 = read_img_from_url(BASE_IMAGE_URL6)
# TODO: make this more generic for modules out of kornia.augmentation
# Dictionary containing the transforms to generate the sample images:
# Key: Name of the transform class.
# Value: (parameters, num_samples, seed)
mod = importlib.import_module("kornia.augmentation")
augmentations_list: dict = {
"CenterCrop": ((184, 184), 1, 2018),
"ColorJitter": ((0.3, 0.3, 0.3, 0.3), 2, 2018),
"RandomAffine": (((-15.0, 20.0), (0.1, 0.1), (0.7, 1.3), 20), 2, 2019),
"RandomBoxBlur": (((7, 7),), 1, 2020),
"RandomCrop": ((img1.shape[-2:], (50, 50)), 2, 2020),
"RandomChannelShuffle": ((), 1, 2020),
"RandomElasticTransform": (((63, 63), (32, 32), (2.0, 2.0)), 2, 2018),
"RandomEqualize": ((), 1, 2020),
"RandomErasing": (((0.2, 0.4), (0.3, 1 / 0.3)), 2, 2017),
"RandomFisheye": ((torch.tensor([-0.3, 0.3]), torch.tensor([-0.3, 0.3]), torch.tensor([0.9, 1.0])), 2, 2020),
"RandomGaussianBlur": (((3, 3), (0.1, 2.0)), 1, 2020),
"RandomGaussianNoise": ((0.0, 0.05), 1, 2020),
"RandomGrayscale": ((), 1, 2020),
"RandomHorizontalFlip": ((), 1, 2020),
"RandomInvert": ((), 1, 2020),
"RandomMotionBlur": ((7, 35.0, 0.5), 2, 2020),
"RandomPerspective": ((0.2,), 2, 2020),
"RandomPlanckianJitter": ((), 2, 2022),
"RandomPosterize": (((1, 4),), 2, 2016),
"RandomResizedCrop": ((img1.shape[-2:], (1.0, 2.0), (1.0, 2.0)), 2, 2020),
"RandomRotation": ((45.0,), 2, 2019),
"RandomSharpness": ((16.0,), 1, 2019),
"RandomSolarize": ((0.2, 0.2), 2, 2019),
"RandomVerticalFlip": ((), 1, 2020),
"RandomThinPlateSpline": ((), 1, 2020),
}
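    # For reference, one entry above expands as sketched here (illustrative,
    # mirroring the loop below): "ColorJitter": ((0.3, 0.3, 0.3, 0.3), 2, 2018)
    # becomes K.augmentation.ColorJitter(0.3, 0.3, 0.3, 0.3, p=1.0), applied to
    # two copies of img1 after torch.manual_seed(2018).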
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in augmentations_list.items():
img_in = img1.repeat(num_samples, 1, 1, 1)
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
        # apply the augmentation to the image and concat
out = aug(img_in)
if aug_name == "CenterCrop":
h, w = img1.shape[-2:]
h_new, w_new = out.shape[-2:]
h_dif, w_dif = int(h - h_new), int(w - w_new)
out = torch.nn.functional.pad(out, (w_dif // 2, w_dif // 2, 0, h_dif))
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.augmentation")
mix_augmentations_list: dict = {
"RandomMixUp": (((0.3, 0.4),), 2, 20),
"RandomCutMix": ((img1.shape[-2], img1.shape[-1]), 2, 2019),
}
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in mix_augmentations_list.items():
img_in = torch.cat([img1, img2])
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
        # apply the augmentation to the image and concat
out, _ = aug(img_in, torch.tensor([0, 1]))
out = torch.cat([img_in[0], img_in[1], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.color")
color_transforms_list: dict = {
"grayscale_to_rgb": ((), 3),
"rgb_to_bgr": ((), 1),
"rgb_to_grayscale": ((), 1),
"rgb_to_hsv": ((), 1),
"rgb_to_hls": ((), 1),
"rgb_to_luv": ((), 1),
"rgb_to_lab": ((), 1),
# "rgb_to_rgba": ((1.,), 1),
"rgb_to_xyz": ((), 1),
"rgb_to_ycbcr": ((), 1),
"rgb_to_yuv": ((), 1),
"rgb_to_linear_rgb": ((), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in color_transforms_list.items():
# import function and apply
fn = getattr(mod, fn_name)
if fn_name == "grayscale_to_rgb":
out = fn(K.color.rgb_to_grayscale(img2), *args)
else:
out = fn(img2, *args)
# perform normalization to visualize
if fn_name == "rgb_to_lab":
out = out[:, :1] / 100.0
elif fn_name == "rgb_to_hsv":
out[:, :1] = out[:, :1] / 2 * math.pi
elif fn_name == "rgb_to_luv":
out = out[:, :1] / 116.0
# repeat channels for grayscale
if out.shape[1] != 3:
out = out.repeat(1, 3, 1, 1)
# save the output image
if fn_name == "grayscale_to_rgb":
out = torch.cat(
[K.color.rgb_to_grayscale(img2[0]).repeat(3, 1, 1), *(out[i] for i in range(out.size(0)))], dim=-1
)
else:
out = torch.cat([img2[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.enhance module
mod = importlib.import_module("kornia.enhance")
transforms: dict = {
"adjust_brightness": ((torch.tensor([0.25, 0.5]),), 2),
"adjust_contrast": ((torch.tensor([0.65, 0.5]),), 2),
"adjust_gamma": ((torch.tensor([0.85, 0.75]), 2.0), 2),
"adjust_hue": ((torch.tensor([-math.pi / 4, math.pi / 4]),), 2),
"adjust_saturation": ((torch.tensor([1.0, 2.0]),), 2),
"solarize": ((torch.tensor([0.8, 0.5]), torch.tensor([-0.25, 0.25])), 2),
"posterize": ((torch.tensor([4, 2]),), 2),
"sharpness": ((torch.tensor([1.0, 2.5]),), 2),
"equalize": ((), 1),
"invert": ((), 1),
"equalize_clahe": ((), 1),
"add_weighted": ((0.75, 0.25, 2.0), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img3.repeat(num_samples, 1, 1, 1)
if fn_name == "add_weighted":
args_in = (img_in, args[0], img2, args[1], args[2])
else:
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.morphology module
mod = importlib.import_module("kornia.morphology")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"dilation": ((kernel,), 1),
"erosion": ((kernel,), 1),
"opening": ((kernel,), 1),
"closing": ((kernel,), 1),
"gradient": ((kernel,), 1),
"top_hat": ((kernel,), 1),
"bottom_hat": ((kernel,), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img4.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
# import pdb;pdb.set_trace()
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.filters module
mod = importlib.import_module("kornia.filters")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"box_blur": (((5, 5),), 1),
"median_blur": (((5, 5),), 1),
"gaussian_blur2d": (((5, 5), (1.5, 1.5)), 1),
"motion_blur": ((5, 90.0, 1.0), 1),
"max_blur_pool2d": ((5,), 1),
"blur_pool2d": ((5,), 1),
"unsharp_mask": (((5, 5), (1.5, 1.5)), 1),
"laplacian": ((5,), 1),
"sobel": ((), 1),
"spatial_gradient": ((), 1),
"canny": ((), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img5.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("max_blur_pool2d", "blur_pool2d"):
out = K.geometry.resize(out, img_in.shape[-2:])
if fn_name == "canny":
out = out[1].repeat(1, 3, 1, 1)
if isinstance(out, torch.Tensor):
out = out.clamp(min=0.0, max=1.0)
if fn_name in ("laplacian", "sobel", "spatial_gradient", "canny"):
out = K.enhance.normalize_min_max(out)
if fn_name == "spatial_gradient":
out = out.permute(2, 1, 0, 3, 4).squeeze()
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.geometry.transform module
mod = importlib.import_module("kornia.geometry.transform")
h, w = img6.shape[-2:]
def _get_tps_args():
src = torch.tensor([[[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, -1.0], [0.0, 0.0]]]).repeat(2, 1, 1) # Bx5x2
dst = src + torch.distributions.Uniform(-0.2, 0.2).rsample((2, 5, 2))
kernel, affine = K.geometry.transform.get_tps_transform(dst, src)
return src, kernel, affine
transforms: dict = {
"warp_affine": (
(
K.geometry.transform.get_affine_matrix2d(
translations=torch.zeros(2, 2),
center=(torch.tensor([w, h]) / 2).repeat(2, 1),
scale=torch.distributions.Uniform(0.5, 1.5).rsample((2, 2)),
angle=torch.tensor([-25.0, 25.0]),
)[:, :2, :3],
(h, w),
),
2,
),
"remap": (
(
*(K.utils.create_meshgrid(h, w, normalized_coordinates=True) - 0.25).unbind(-1),
'bilinear',
'zeros',
True,
True,
),
1,
),
"warp_image_tps": ((_get_tps_args()), 2),
"rotate": ((torch.tensor([-15.0, 25.0]),), 2),
"translate": ((torch.tensor([[10.0, -15], [50.0, -25.0]]),), 2),
"scale": ((torch.tensor([[0.5, 1.25], [1.0, 1.5]]),), 2),
"shear": ((torch.tensor([[0.1, -0.2], [-0.2, 0.1]]),), 2),
"rot180": ((), 1),
"hflip": ((), 1),
"vflip": ((), 1),
"resize": (((120, 220),), 1),
"rescale": ((0.5,), 1),
"elastic_transform2d": ((torch.rand(1, 2, h, w) * 2 - 1, (63, 63), (32, 32), (4.0, 4.0)), 1),
"pyrdown": ((), 1),
"pyrup": ((), 1),
"build_pyramid": ((3,), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img6.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("resize", "rescale", "pyrdown", "pyrup"):
h_new, w_new = out.shape[-2:]
out = torch.nn.functional.pad(out, (0, (w - w_new), 0, (h - h_new)))
if fn_name == "build_pyramid":
_out = []
for pyr in out[1:]:
h_new, w_new = pyr.shape[-2:]
out_tmp = torch.nn.functional.pad(pyr, (0, (w - w_new), 0, (h - h_new)))
_out.append(out_tmp)
out = torch.cat(_out)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
if __name__ == "__main__":
main()
| [((381, 414), 'numpy.frombuffer', 'np.frombuffer', (['response', 'np.uint8'], {}), '(response, np.uint8)\n', (394, 414), True, 'import numpy as np\n'), ((590, 633), 'kornia.utils.image_to_tensor', 'K.utils.image_to_tensor', (['img'], {'keepdim': '(False)'}), '(img, keepdim=False)\n', (613, 633), True, 'import kornia as K\n'), ((1584, 1623), 'os.makedirs', 'os.makedirs', (['OUTPUT_PATH'], {'exist_ok': '(True)'}), '(OUTPUT_PATH, exist_ok=True)\n', (1595, 1623), False, 'import os\n'), ((2243, 2289), 'importlib.import_module', 'importlib.import_module', (['"""kornia.augmentation"""'], {}), "('kornia.augmentation')\n", (2266, 2289), False, 'import importlib\n'), ((4795, 4841), 'importlib.import_module', 'importlib.import_module', (['"""kornia.augmentation"""'], {}), "('kornia.augmentation')\n", (4818, 4841), False, 'import importlib\n'), ((5834, 5873), 'importlib.import_module', 'importlib.import_module', (['"""kornia.color"""'], {}), "('kornia.color')\n", (5857, 5873), False, 'import importlib\n'), ((7658, 7699), 'importlib.import_module', 'importlib.import_module', (['"""kornia.enhance"""'], {}), "('kornia.enhance')\n", (7681, 7699), False, 'import importlib\n'), ((9193, 9237), 'importlib.import_module', 'importlib.import_module', (['"""kornia.morphology"""'], {}), "('kornia.morphology')\n", (9216, 9237), False, 'import importlib\n'), ((9251, 9298), 'torch.tensor', 'torch.tensor', (['[[0, 1, 0], [1, 1, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 1, 1], [0, 1, 0]])\n', (9263, 9298), False, 'import torch\n'), ((10303, 10344), 'importlib.import_module', 'importlib.import_module', (['"""kornia.filters"""'], {}), "('kornia.filters')\n", (10326, 10344), False, 'import importlib\n'), ((10358, 10405), 'torch.tensor', 'torch.tensor', (['[[0, 1, 0], [1, 1, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 1, 1], [0, 1, 0]])\n', (10370, 10405), False, 'import torch\n'), ((12055, 12107), 'importlib.import_module', 'importlib.import_module', (['"""kornia.geometry.transform"""'], {}), "('kornia.geometry.transform')\n", (12078, 12107), False, 'import importlib\n'), ((312, 329), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (324, 329), False, 'import requests\n'), ((477, 518), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_UNCHANGED'], {}), '(nparr, cv2.IMREAD_UNCHANGED)\n', (489, 518), False, 'import cv2\n'), ((721, 750), 'kornia.geometry.resize', 'K.geometry.resize', (['img_t', '(184)'], {}), '(img_t, 184)\n', (738, 750), True, 'import kornia as K\n'), ((777, 812), 'kornia.geometry.resize', 'K.geometry.resize', (['img_t', 'resize_to'], {}), '(img_t, resize_to)\n', (794, 812), True, 'import kornia as K\n'), ((4039, 4062), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4056, 4062), False, 'import torch\n'), ((5131, 5154), 'torch.cat', 'torch.cat', (['[img1, img2]'], {}), '([img1, img2])\n', (5140, 5154), False, 'import torch\n'), ((5299, 5322), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (5316, 5322), False, 'import torch\n'), ((12385, 12433), 'kornia.geometry.transform.get_tps_transform', 'K.geometry.transform.get_tps_transform', (['dst', 'src'], {}), '(dst, src)\n', (12423, 12433), True, 'import kornia as K\n'), ((4337, 4401), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['out', '(w_dif // 2, w_dif // 2, 0, h_dif)'], {}), '(out, (w_dif // 2, w_dif // 2, 0, h_dif))\n', (4360, 4401), False, 'import torch\n'), ((5409, 5429), 'torch.tensor', 'torch.tensor', (['[0, 1]'], {}), '([0, 1])\n', (5421, 5429), False, 'import torch\n'), ((11207, 
11248), 'kornia.geometry.resize', 'K.geometry.resize', (['out', 'img_in.shape[-2:]'], {}), '(out, img_in.shape[-2:])\n', (11224, 11248), True, 'import kornia as K\n'), ((11505, 11537), 'kornia.enhance.normalize_min_max', 'K.enhance.normalize_min_max', (['out'], {}), '(out)\n', (11532, 11537), True, 'import kornia as K\n'), ((14271, 14329), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['out', '(0, w - w_new, 0, h - h_new)'], {}), '(out, (0, w - w_new, 0, h - h_new))\n', (14294, 14329), False, 'import torch\n'), ((14617, 14632), 'torch.cat', 'torch.cat', (['_out'], {}), '(_out)\n', (14626, 14632), False, 'import torch\n'), ((2873, 2898), 'torch.tensor', 'torch.tensor', (['[-0.3, 0.3]'], {}), '([-0.3, 0.3])\n', (2885, 2898), False, 'import torch\n'), ((2900, 2925), 'torch.tensor', 'torch.tensor', (['[-0.3, 0.3]'], {}), '([-0.3, 0.3])\n', (2912, 2925), False, 'import torch\n'), ((2927, 2951), 'torch.tensor', 'torch.tensor', (['[0.9, 1.0]'], {}), '([0.9, 1.0])\n', (2939, 2951), False, 'import torch\n'), ((6554, 6584), 'kornia.color.rgb_to_grayscale', 'K.color.rgb_to_grayscale', (['img2'], {}), '(img2)\n', (6578, 6584), True, 'import kornia as K\n'), ((7756, 7781), 'torch.tensor', 'torch.tensor', (['[0.25, 0.5]'], {}), '([0.25, 0.5])\n', (7768, 7781), False, 'import torch\n'), ((7818, 7843), 'torch.tensor', 'torch.tensor', (['[0.65, 0.5]'], {}), '([0.65, 0.5])\n', (7830, 7843), False, 'import torch\n'), ((7877, 7903), 'torch.tensor', 'torch.tensor', (['[0.85, 0.75]'], {}), '([0.85, 0.75])\n', (7889, 7903), False, 'import torch\n'), ((7939, 7980), 'torch.tensor', 'torch.tensor', (['[-math.pi / 4, math.pi / 4]'], {}), '([-math.pi / 4, math.pi / 4])\n', (7951, 7980), False, 'import torch\n'), ((8019, 8043), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (8031, 8043), False, 'import torch\n'), ((8073, 8097), 'torch.tensor', 'torch.tensor', (['[0.8, 0.5]'], {}), '([0.8, 0.5])\n', (8085, 8097), False, 'import torch\n'), ((8099, 8126), 'torch.tensor', 'torch.tensor', (['[-0.25, 0.25]'], {}), '([-0.25, 0.25])\n', (8111, 8126), False, 'import torch\n'), ((8156, 8176), 'torch.tensor', 'torch.tensor', (['[4, 2]'], {}), '([4, 2])\n', (8168, 8176), False, 'import torch\n'), ((8207, 8231), 'torch.tensor', 'torch.tensor', (['[1.0, 2.5]'], {}), '([1.0, 2.5])\n', (8219, 8231), False, 'import torch\n'), ((12175, 12261), 'torch.tensor', 'torch.tensor', (['[[[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, -1.0], [0.0, 0.0]]]'], {}), '([[[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, -1.0], [0.0, \n 0.0]]])\n', (12187, 12261), False, 'import torch\n'), ((13281, 13308), 'torch.tensor', 'torch.tensor', (['[-15.0, 25.0]'], {}), '([-15.0, 25.0])\n', (13293, 13308), False, 'import torch\n'), ((13339, 13381), 'torch.tensor', 'torch.tensor', (['[[10.0, -15], [50.0, -25.0]]'], {}), '([[10.0, -15], [50.0, -25.0]])\n', (13351, 13381), False, 'import torch\n'), ((13408, 13447), 'torch.tensor', 'torch.tensor', (['[[0.5, 1.25], [1.0, 1.5]]'], {}), '([[0.5, 1.25], [1.0, 1.5]])\n', (13420, 13447), False, 'import torch\n'), ((13474, 13514), 'torch.tensor', 'torch.tensor', (['[[0.1, -0.2], [-0.2, 0.1]]'], {}), '([[0.1, -0.2], [-0.2, 0.1]])\n', (13486, 13514), False, 'import torch\n'), ((14499, 14557), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['pyr', '(0, w - w_new, 0, h - h_new)'], {}), '(pyr, (0, w - w_new, 0, h - h_new))\n', (14522, 14557), False, 'import torch\n'), ((1523, 1537), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1527, 1537), False, 'from pathlib import 
Path\n'), ((12302, 12340), 'torch.distributions.Uniform', 'torch.distributions.Uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (12329, 12340), False, 'import torch\n'), ((13704, 13726), 'torch.rand', 'torch.rand', (['(1)', '(2)', 'h', 'w'], {}), '(1, 2, h, w)\n', (13714, 13726), False, 'import torch\n'), ((7153, 7186), 'kornia.color.rgb_to_grayscale', 'K.color.rgb_to_grayscale', (['img2[0]'], {}), '(img2[0])\n', (7177, 7186), True, 'import kornia as K\n'), ((12625, 12642), 'torch.zeros', 'torch.zeros', (['(2)', '(2)'], {}), '(2, 2)\n', (12636, 12642), False, 'import torch\n'), ((12819, 12846), 'torch.tensor', 'torch.tensor', (['[-25.0, 25.0]'], {}), '([-25.0, 25.0])\n', (12831, 12846), False, 'import torch\n'), ((12994, 13052), 'kornia.utils.create_meshgrid', 'K.utils.create_meshgrid', (['h', 'w'], {'normalized_coordinates': '(True)'}), '(h, w, normalized_coordinates=True)\n', (13017, 13052), True, 'import kornia as K\n'), ((12738, 12775), 'torch.distributions.Uniform', 'torch.distributions.Uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (12765, 12775), False, 'import torch\n'), ((12672, 12692), 'torch.tensor', 'torch.tensor', (['[w, h]'], {}), '([w, h])\n', (12684, 12692), False, 'import torch\n')] |
Pushkar-Bhuse/forte | forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py | b7402330cf0b2b26fe56234f0ae43c89b31c0082 | # Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Tuple
import numpy as np
from texar.torch.data import Vocab, Embedding
from ft.onto.base_ontology import Annotation
from forte.common.configuration import Config
from forte.processors.data_augment.algorithms.text_replacement_op import (
TextReplacementOp,
)
__all__ = [
"EmbeddingSimilarityReplacementOp",
]
class EmbeddingSimilarityReplacementOp(TextReplacementOp):
r"""
This class is a replacement op leveraging pre-trained word
embeddings, such as `word2vec` and `glove`, to replace the input
    word with another word that has a similar embedding.
By default, the replacement word is randomly chosen from the
top k words with the most similar embeddings.
Args:
configs:
The config should contain the following key-value pairs:
- vocab_path (str): The absolute path to the vocabulary file for
the pretrained embeddings
- embed_hparams (dict): The hparams to initialize the
texar.torch.data.Embedding object.
- top_k (int): the number of k most similar words to choose from
"""
def __init__(self, configs: Config):
super().__init__(configs)
self.vocab = Vocab(self.configs["vocab_path"])
embed_hparams = self.configs["embed_hparams"]
embedding = Embedding(self.vocab.token_to_id_map_py, embed_hparams)
self.normalized_vectors = (
embedding.word_vecs
/ np.sqrt((embedding.word_vecs**2).sum(axis=1))[:, np.newaxis]
)
def replace(self, input_anno: Annotation) -> Tuple[bool, str]:
r"""
        This function replaces a word with another word that has a similar
        pretrained embedding.
Args:
input_anno (Annotation): The input annotation.
Returns:
A tuple of two values, where the first element is a boolean value
indicating whether the replacement happens, and the second
element is the replaced word.
"""
word = input_anno.text
if word not in self.vocab.token_to_id_map_py:
return False, word
source_id = self.vocab.token_to_id_map_py[word]
source_vector = self.normalized_vectors[source_id]
scores = np.dot(self.normalized_vectors, source_vector)
target_ids = np.argpartition(-scores, self.configs["top_k"] + 1)[
: self.configs["top_k"] + 1
]
target_words = [
self.vocab.id_to_token_map_py[idx]
for idx in target_ids
if idx != source_id
and self.vocab.id_to_token_map_py[idx].lower() != word.lower()
]
return True, random.choice(target_words)
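# Illustrative configuration values for this op (keys follow the class
# docstring; the paths are placeholders, not part of the original module):
#   {
#       "vocab_path": "/path/to/embedding_vocab.txt",
#       "embed_hparams": {...},  # hparams for texar.torch.data.Embedding
#       "top_k": 5,
#   }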
| [((1846, 1879), 'texar.torch.data.Vocab', 'Vocab', (["self.configs['vocab_path']"], {}), "(self.configs['vocab_path'])\n", (1851, 1879), False, 'from texar.torch.data import Vocab, Embedding\n'), ((1954, 2009), 'texar.torch.data.Embedding', 'Embedding', (['self.vocab.token_to_id_map_py', 'embed_hparams'], {}), '(self.vocab.token_to_id_map_py, embed_hparams)\n', (1963, 2009), False, 'from texar.torch.data import Vocab, Embedding\n'), ((2875, 2921), 'numpy.dot', 'np.dot', (['self.normalized_vectors', 'source_vector'], {}), '(self.normalized_vectors, source_vector)\n', (2881, 2921), True, 'import numpy as np\n'), ((2943, 2994), 'numpy.argpartition', 'np.argpartition', (['(-scores)', "(self.configs['top_k'] + 1)"], {}), "(-scores, self.configs['top_k'] + 1)\n", (2958, 2994), True, 'import numpy as np\n'), ((3290, 3317), 'random.choice', 'random.choice', (['target_words'], {}), '(target_words)\n', (3303, 3317), False, 'import random\n')] |
FelixSchwarz/sentry | src/sentry/receivers/experiments.py | 7c92c4fa2b6b9f214764f48c82594acae1549e52 | from __future__ import print_function, absolute_import
from sentry import analytics
from sentry.signals import join_request_created, join_request_link_viewed
@join_request_created.connect(weak=False)
def record_join_request_created(member, **kwargs):
analytics.record(
"join_request.created", member_id=member.id, organization_id=member.organization_id
)
@join_request_link_viewed.connect(weak=False)
def record_join_request_link_viewed(organization, **kwargs):
analytics.record("join_request.link_viewed", organization_id=organization.id)
| [((162, 202), 'sentry.signals.join_request_created.connect', 'join_request_created.connect', ([], {'weak': '(False)'}), '(weak=False)\n', (190, 202), False, 'from sentry.signals import join_request_created, join_request_link_viewed\n'), ((377, 421), 'sentry.signals.join_request_link_viewed.connect', 'join_request_link_viewed.connect', ([], {'weak': '(False)'}), '(weak=False)\n', (409, 421), False, 'from sentry.signals import join_request_created, join_request_link_viewed\n'), ((258, 363), 'sentry.analytics.record', 'analytics.record', (['"""join_request.created"""'], {'member_id': 'member.id', 'organization_id': 'member.organization_id'}), "('join_request.created', member_id=member.id,\n organization_id=member.organization_id)\n", (274, 363), False, 'from sentry import analytics\n'), ((487, 564), 'sentry.analytics.record', 'analytics.record', (['"""join_request.link_viewed"""'], {'organization_id': 'organization.id'}), "('join_request.link_viewed', organization_id=organization.id)\n", (503, 564), False, 'from sentry import analytics\n')] |
arturtamborski/arturtamborskipl | arturtamborskipl/urls.py | 9b93be045f58d5802d9a61568d7ecfbb12042b59 | from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from django.contrib.sitemaps.views import sitemap
from django.conf import settings
from blog.sitemaps import ArticleSitemap
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': {'blog': ArticleSitemap}}, name='sitemap'),
url(r'^', include('blog.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| [((313, 344), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (316, 344), False, 'from django.conf.urls import url, include\n'), ((456, 547), 'django.conf.urls.url', 'url', (['"""^sitemap\\\\.xml$"""', 'sitemap', "{'sitemaps': {'blog': ArticleSitemap}}"], {'name': '"""sitemap"""'}), "('^sitemap\\\\.xml$', sitemap, {'sitemaps': {'blog': ArticleSitemap}},\n name='sitemap')\n", (459, 547), False, 'from django.conf.urls import url, include\n'), ((374, 449), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""robots.txt"""', 'content_type': '"""text/plain"""'}), "(template_name='robots.txt', content_type='text/plain')\n", (394, 449), False, 'from django.views.generic import TemplateView\n'), ((560, 580), 'django.conf.urls.include', 'include', (['"""blog.urls"""'], {}), "('blog.urls')\n", (567, 580), False, 'from django.conf.urls import url, include\n'), ((679, 706), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (686, 706), False, 'from django.conf.urls import url, include\n')] |
steinermg/ion-functions | ion_functions/qc/qc_functions.py | cea532ad9af51e86768572c8deb48547d99567c5 | #!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author Christopher Mueller
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
return (datlim.min() <= dat) & (dat <= datlim.max()).astype('int8')
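# Illustrative call (made-up values, not part of the original module):
#   dataqc_globalrangetest(np.array([17.3, 2.1, 19.8]), [10, 20])
# is expected to return [1, 0, 1], flagging only the out-of-range 2.1 as bad.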
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
    if is_none(datlimz) or np.all(np.atleast_1d(datlimz).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
raise ValueError('Len of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
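# Illustrative call (made-up values, not part of the original module): with
#   datlimz = np.array([0., 100.]) and datlim = np.array([[0., 10.], [5., 20.]])
# the valid range interpolated at z = 50 is [2.5, 15.0], so
#   dataqc_localrangetest(np.array([12., 18.]), np.array([50., 50.]), datlim, datlimz)
# is expected to return [1, 0].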
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
        The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
        L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
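# Illustrative call (made-up values, not part of the original module):
#   dataqc_spiketest(np.array([1., 1., 1., 9., 1., 1., 1.]), acc=0.1)
# is expected to flag only the 9.0 as a spike with the default N=5, L=5,
# i.e. return [1, 1, 1, 0, 1, 1, 1].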
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(ord_n):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
        of order ORD. Use ORD=1 to consider a linear drift.
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# insure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
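# Illustrative behaviour (an assumption based on the algorithm description):
#   t = np.arange(10.); dataqc_polytrendtest(0.5 * t, t)
# should detect the linear drift and return all zeros, whereas trend-free
# noise of comparable variance should return all ones.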
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
        values", i.e. repeated occurrences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
qcflag = =dataqc_stuckvaluetest(x, RESO, NUM);
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
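# Illustrative call (made-up values, not part of the original module):
#   dataqc_stuckvaluetest(np.array([4.83] * 12), reso=0.001, num=10)
# is expected to return all zeros, since more than `num` successive values
# lie within `reso` of each other.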
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
raise ValueError('\'dat\' and \'x\' must be of equal len')
if not all(np.diff(x) > 0):
raise ValueError('\'x\' must be montonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
    # No strict validation here, they are scalars and they must be validated
# before going into the C-layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
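# Illustrative call (values in the style of the DPS example; not part of the
# original module):
#   dataqc_gradienttest(np.array([3., 5., 98., 99., 4.]), np.arange(1., 6.),
#                       [-50, 50], np.nan, np.nan, 5)
# is expected to return [1, 1, 0, 0, 1]: the jump to 98 exceeds the allowed
# gradient, and data stay flagged bad until they return to within toldat of
# the last known good value.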
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: Christopher Wingard. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degress]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-100011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
    # size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
mn = np.zeros(ldt, dtype=np.int)
dd = np.zeros(ldt, dtype=np.int)
hh = np.zeros(ldt, dtype=np.int)
mm = np.zeros(ldt, dtype=np.int)
ss = np.zeros(ldt, dtype=np.int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
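# Example (illustrative sketch, not part of the original module): reproducing
# the docstring values with array inputs, since this implementation calls
# len() on its longitude/latitude/time arguments. The expected outputs are the
# approximate values quoted in the docstring above.
#
#     lon = np.array([120.0])
#     lat = np.array([30.0])
#     dt = np.array([1329177600])   # 2012-02-14 00:00:00 UTC
#     z, sorad = dataqc_solarelevation(lon, lat, dt)
#     # z ~ 15.16 degrees, sorad ~ 366.8 W m^-2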
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
This is a function that wraps dataqc_propagateflags for use in ION
It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data Product Specification for Combined QC Flags. Document
Control Number 1341-100012.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf)
"""
if strict_validation:
if not utils.islogical(inflags):
raise ValueError('\'inflags\' must be \'0\' or \'1\' '
'integer flag array')
array_size = inflags.shape
nrows = array_size[0]
if nrows < 2:
        raise ValueError('\'inflags\' must be at least a two-dimensional array')
outflag = np.all(inflags, 0)
return outflag.astype('int8')
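# Worked example (added for illustration; the result follows directly from
# np.all over the rows): three source QC flag vectors are combined so that a
# column is good only where every source flagged it good.
#
#     inflags = np.array([[1, 1, 0, 1],
#                         [1, 0, 1, 1],
#                         [1, 1, 1, 1]])
#     dataqc_propagateflags(inflags)   # -> array([1, 0, 0, 1], dtype=int8)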
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
"""
Description:
Implementation of the Sea-Bird conductivity compressibility correction,
scaling the input conductivity based on ratio of the original pressure
and the updated pressure.
Implemented by:
2013-04-07: Christopher Wingard. Initial python implementation.
Usage:
c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)
where
c_new = updated conductivity record [S/m]
p_orig = original pressure used to calculate original conductivity,
            this is typically the L1a PRESWAT [dbar]
p_new = updated pressure, typically L1b PRESWAT [dbar]
        c_orig = original conductivity record, typically L1a CONDWAT [S/m]
cpcor = pressure correction coefficient used to calculate original
conductivity, default is -9.57e-8
References:
OOI (2012). Data Product Specification for Conductivity Compressibility
Correction. Document Control Number 1341-10030.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
"""
c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
return c_new
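# Worked example (illustrative only, using the default cpcor of -9.57e-8):
# correcting the pressure record from 100 dbar to 120 dbar rescales a 3.5 S/m
# conductivity value only in the sixth decimal place.
#
#     dataqc_condcompress(p_orig=100.0, p_new=120.0, c_orig=3.5)
#     # -> 3.5 * (1 - 9.57e-6) / (1 - 1.1484e-5) ~ 3.5000067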
| [((2816, 2834), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (2829, 2834), True, 'import numpy as np\n'), ((2848, 2869), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (2861, 2869), True, 'import numpy as np\n'), ((9428, 9450), 'numpy.max', 'np.max', (['datlim[:, (1)]'], {}), '(datlim[:, (1)])\n', (9434, 9450), True, 'import numpy as np\n'), ((9464, 9486), 'numpy.min', 'np.min', (['datlim[:, (0)]'], {}), '(datlim[:, (0)])\n', (9470, 9486), True, 'import numpy as np\n'), ((12138, 12156), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (12151, 12156), True, 'import numpy as np\n'), ((12775, 12809), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (12788, 12809), True, 'import numpy as np\n'), ((12825, 12852), 'ion_functions.qc.qc_extensions.spikevalues', 'spikevalues', (['dat', 'L', 'N', 'acc'], {}), '(dat, L, N, acc)\n', (12836, 12852), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((15356, 15374), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (15369, 15374), True, 'import numpy as np\n'), ((15383, 15399), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (15396, 15399), True, 'import numpy as np\n'), ((16252, 16277), 'numpy.polyfit', 'np.polyfit', (['t', 'dat', 'ord_n'], {}), '(t, dat, ord_n)\n', (16262, 16277), True, 'import numpy as np\n'), ((16290, 16307), 'numpy.polyval', 'np.polyval', (['pp', 't'], {}), '(pp, t)\n', (16300, 16307), True, 'import numpy as np\n'), ((18293, 18309), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (18306, 18309), True, 'import numpy as np\n'), ((19033, 19044), 'numpy.abs', 'np.abs', (['num'], {}), '(num)\n', (19039, 19044), True, 'import numpy as np\n'), ((19055, 19089), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (19068, 19089), True, 'import numpy as np\n'), ((23110, 23125), 'numpy.isnan', 'np.isnan', (['mindx'], {}), '(mindx)\n', (23118, 23125), True, 'import numpy as np\n'), ((23175, 23193), 'numpy.isnan', 'np.isnan', (['startdat'], {}), '(startdat)\n', (23183, 23193), True, 'import numpy as np\n'), ((23855, 23922), 'ion_functions.qc.qc_extensions.gradientvalues', 'gradientvalues', (['dat', 'x', 'grad_min', 'grad_max', 'mindx', 'startdat', 'toldat'], {}), '(dat, x, grad_min, grad_max, mindx, startdat, toldat)\n', (23869, 23922), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((27237, 27264), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27245, 27264), True, 'import numpy as np\n'), ((27274, 27301), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27282, 27301), True, 'import numpy as np\n'), ((27311, 27338), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27319, 27338), True, 'import numpy as np\n'), ((27348, 27375), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27356, 27375), True, 'import numpy as np\n'), ((27385, 27412), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27393, 27412), True, 'import numpy as np\n'), ((27422, 27449), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27430, 27449), True, 'import numpy as np\n'), ((30523, 30535), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (30530, 30535), 
True, 'import numpy as np\n'), ((33440, 33458), 'numpy.all', 'np.all', (['inflags', '(0)'], {}), '(inflags, 0)\n', (33446, 33458), True, 'import numpy as np\n'), ((669, 703), 'logging.getLogger', 'logging.getLogger', (['"""ion-functions"""'], {}), "('ion-functions')\n", (686, 703), False, 'import logging\n'), ((1222, 1256), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (1230, 1256), True, 'import numpy as np\n'), ((3673, 3707), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3681, 3707), True, 'import numpy as np\n'), ((3840, 3874), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3848, 3874), True, 'import numpy as np\n'), ((3952, 3986), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3960, 3986), True, 'import numpy as np\n'), ((4073, 4107), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (4081, 4107), True, 'import numpy as np\n'), ((4570, 4588), 'numpy.column_stack', 'np.column_stack', (['z'], {}), '(z)\n', (4585, 4588), True, 'import numpy as np\n'), ((8632, 8696), 'numpy.interp', 'np.interp', (['z', 'datlimz', 'datlim[:, (0)]'], {'left': 'np.nan', 'right': 'np.nan'}), '(z, datlimz, datlim[:, (0)], left=np.nan, right=np.nan)\n', (8641, 8696), True, 'import numpy as np\n'), ((8774, 8838), 'numpy.interp', 'np.interp', (['z', 'datlimz', 'datlim[:, (1)]'], {'left': 'np.nan', 'right': 'np.nan'}), '(z, datlimz, datlim[:, (1)], left=np.nan, right=np.nan)\n', (8783, 8838), True, 'import numpy as np\n'), ((9378, 9392), 'numpy.isnan', 'np.isnan', (['lim1'], {}), '(lim1)\n', (9386, 9392), True, 'import numpy as np\n'), ((9397, 9411), 'numpy.isnan', 'np.isnan', (['lim2'], {}), '(lim2)\n', (9405, 9411), True, 'import numpy as np\n'), ((9768, 9802), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (9776, 9802), True, 'import numpy as np\n'), ((13040, 13074), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (13048, 13074), True, 'import numpy as np\n'), ((16770, 16796), 'numpy.empty', 'np.empty', (['x.shape', 'np.int8'], {}), '(x.shape, np.int8)\n', (16778, 16796), True, 'import numpy as np\n'), ((19200, 19232), 'numpy.zeros', 'np.zeros', (['dat.size'], {'dtype': '"""int8"""'}), "(dat.size, dtype='int8')\n", (19208, 19232), True, 'import numpy as np\n'), ((19257, 19284), 'ion_functions.qc.qc_extensions.stuckvalues', 'stuckvalues', (['dat', 'reso', 'num'], {}), '(dat, reso, num)\n', (19268, 19284), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((19574, 19608), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (19582, 19608), True, 'import numpy as np\n'), ((23375, 23396), 'ion_functions.utils.isscalar', 'utils.isscalar', (['mindx'], {}), '(mindx)\n', (23389, 23396), False, 'from ion_functions import utils\n'), ((23476, 23500), 'ion_functions.utils.isscalar', 'utils.isscalar', (['startdat'], {}), '(startdat)\n', (23490, 23500), False, 'from ion_functions import utils\n'), ((23646, 23666), 'numpy.abs', 'np.abs', (['(x[0] - x[-1])'], {}), '(x[0] - x[-1])\n', (23652, 23666), True, 'import numpy as np\n'), ((23690, 23707), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (23698, 23707), True, 'import numpy as np\n'), ((23736, 23773), 
'ooi.logging.log.warn', 'log.warn', (['"""Too few values to inspect"""'], {}), "('Too few values to inspect')\n", (23744, 23773), False, 'from ooi.logging import log\n'), ((26458, 26477), 'ion_functions.utils.isvector', 'utils.isvector', (['lon'], {}), '(lon)\n', (26472, 26477), False, 'from ion_functions import utils\n'), ((26482, 26501), 'ion_functions.utils.isvector', 'utils.isvector', (['lat'], {}), '(lat)\n', (26496, 26501), False, 'from ion_functions import utils\n'), ((26506, 26524), 'ion_functions.utils.isvector', 'utils.isvector', (['dt'], {}), '(dt)\n', (26520, 26524), False, 'from ion_functions import utils\n'), ((27526, 27544), 'time.gmtime', 'time.gmtime', (['dt[i]'], {}), '(dt[i])\n', (27537, 27544), False, 'import time\n'), ((30701, 30714), 'numpy.arcsin', 'np.arcsin', (['sz'], {}), '(sz)\n', (30710, 30714), True, 'import numpy as np\n'), ((30864, 30883), 'numpy.sin', 'np.sin', (['(deg2rad * z)'], {}), '(deg2rad * z)\n', (30870, 30883), True, 'import numpy as np\n'), ((31237, 31273), 'numpy.array', 'np.array', (['[i.shape[0] for i in args]'], {}), '([i.shape[0] for i in args])\n', (31245, 31273), True, 'import numpy as np\n'), ((31421, 31435), 'numpy.array', 'np.array', (['args'], {}), '(args)\n', (31429, 31435), True, 'import numpy as np\n'), ((734, 752), 'numpy.atleast_1d', 'np.atleast_1d', (['arr'], {}), '(arr)\n', (747, 752), True, 'import numpy as np\n'), ((4317, 4349), 'numpy.asanyarray', 'np.asanyarray', (['v'], {'dtype': 'np.float'}), '(v, dtype=np.float)\n', (4330, 4349), True, 'import numpy as np\n'), ((4366, 4381), 'ion_functions.qc.qc_extensions.ntp_to_month', 'ntp_to_month', (['v'], {}), '(v)\n', (4378, 4381), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((6275, 6294), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (6289, 6294), False, 'from ion_functions import utils\n'), ((6369, 6391), 'ion_functions.utils.ismatrix', 'utils.ismatrix', (['datlim'], {}), '(datlim)\n', (6383, 6391), False, 'from ion_functions import utils\n'), ((9877, 9895), 'numpy.atleast_1d', 'np.atleast_1d', (['acc'], {}), '(acc)\n', (9890, 9895), True, 'import numpy as np\n'), ((9901, 9917), 'numpy.atleast_1d', 'np.atleast_1d', (['N'], {}), '(N)\n', (9914, 9917), True, 'import numpy as np\n'), ((9923, 9939), 'numpy.atleast_1d', 'np.atleast_1d', (['L'], {}), '(L)\n', (9936, 9939), True, 'import numpy as np\n'), ((12393, 12412), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (12407, 12412), False, 'from ion_functions import utils\n'), ((13156, 13176), 'numpy.atleast_1d', 'np.atleast_1d', (['ord_n'], {}), '(ord_n)\n', (13169, 13176), True, 'import numpy as np\n'), ((13182, 13201), 'numpy.atleast_1d', 'np.atleast_1d', (['nstd'], {}), '(nstd)\n', (13195, 13201), True, 'import numpy as np\n'), ((16875, 16894), 'numpy.atleast_1d', 'np.atleast_1d', (['reso'], {}), '(reso)\n', (16888, 16894), True, 'import numpy as np\n'), ((16900, 16918), 'numpy.atleast_1d', 'np.atleast_1d', (['num'], {}), '(num)\n', (16913, 16918), True, 'import numpy as np\n'), ((18450, 18469), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (18464, 18469), False, 'from ion_functions import utils\n'), ((19747, 19767), 'numpy.atleast_1d', 'np.atleast_1d', (['mindx'], {}), '(mindx)\n', (19760, 19767), True, 'import numpy as np\n'), ((19773, 19796), 'numpy.atleast_1d', 'np.atleast_1d', (['startdat'], {}), '(startdat)\n', (19786, 19796), True, 'import numpy as np\n'), ((19802, 19823), 
'numpy.atleast_1d', 'np.atleast_1d', (['toldat'], {}), '(toldat)\n', (19815, 19823), True, 'import numpy as np\n'), ((23006, 23040), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (23019, 23040), True, 'import numpy as np\n'), ((23059, 23091), 'numpy.asanyarray', 'np.asanyarray', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (23072, 23091), True, 'import numpy as np\n'), ((29688, 29711), 'numpy.sin', 'np.sin', (['(ma_sun + ml_sun)'], {}), '(ma_sun + ml_sun)\n', (29694, 29711), True, 'import numpy as np\n'), ((29919, 29954), 'numpy.sin', 'np.sin', (['(2.0 * ma_sun - 2.0 * ma_ven)'], {}), '(2.0 * ma_sun - 2.0 * ma_ven)\n', (29925, 29954), True, 'import numpy as np\n'), ((30003, 30015), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (30010, 30015), True, 'import numpy as np\n'), ((30328, 30341), 'numpy.sin', 'np.sin', (['(4 * l)'], {}), '(4 * l)\n', (30334, 30341), True, 'import numpy as np\n'), ((30576, 30597), 'numpy.sin', 'np.sin', (['(deg2rad * lat)'], {}), '(deg2rad * lat)\n', (30582, 30597), True, 'import numpy as np\n'), ((30598, 30611), 'numpy.sin', 'np.sin', (['decln'], {}), '(decln)\n', (30604, 30611), True, 'import numpy as np\n'), ((30662, 30683), 'numpy.cos', 'np.cos', (['(deg2rad * lha)'], {}), '(deg2rad * lha)\n', (30668, 30683), True, 'import numpy as np\n'), ((33135, 33159), 'ion_functions.utils.islogical', 'utils.islogical', (['inflags'], {}), '(inflags)\n', (33150, 33159), False, 'from ion_functions import utils\n'), ((873, 891), 'numpy.atleast_1d', 'np.atleast_1d', (['arr'], {}), '(arr)\n', (886, 891), True, 'import numpy as np\n'), ((1338, 1360), 'numpy.atleast_1d', 'np.atleast_1d', (['dat_min'], {}), '(dat_min)\n', (1351, 1360), True, 'import numpy as np\n'), ((1366, 1388), 'numpy.atleast_1d', 'np.atleast_1d', (['dat_max'], {}), '(dat_max)\n', (1379, 1388), True, 'import numpy as np\n'), ((15818, 15837), 'ion_functions.utils.isvector', 'utils.isvector', (['arg'], {}), '(arg)\n', (15832, 15837), False, 'from ion_functions import utils\n'), ((15996, 16015), 'ion_functions.utils.isscalar', 'utils.isscalar', (['arg'], {}), '(arg)\n', (16010, 16015), False, 'from ion_functions import utils\n'), ((16547, 16565), 'numpy.ones', 'np.ones', (['dat.shape'], {}), '(dat.shape)\n', (16554, 16565), True, 'import numpy as np\n'), ((18818, 18837), 'ion_functions.utils.isscalar', 'utils.isscalar', (['arg'], {}), '(arg)\n', (18832, 18837), False, 'from ion_functions import utils\n'), ((19719, 19740), 'numpy.atleast_1d', 'np.atleast_1d', (['ddatdx'], {}), '(ddatdx)\n', (19732, 19740), True, 'import numpy as np\n'), ((22673, 22692), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (22687, 22692), False, 'from ion_functions import utils\n'), ((22700, 22717), 'ion_functions.utils.isvector', 'utils.isvector', (['x'], {}), '(x)\n', (22714, 22717), False, 'from ion_functions import utils\n'), ((28357, 28379), 'numpy.fix', 'np.fix', (['(ma_sun / 360.0)'], {}), '(ma_sun / 360.0)\n', (28363, 28379), True, 'import numpy as np\n'), ((28528, 28550), 'numpy.fix', 'np.fix', (['(ml_sun / 360.0)'], {}), '(ml_sun / 360.0)\n', (28534, 28550), True, 'import numpy as np\n'), ((28694, 28716), 'numpy.fix', 'np.fix', (['(ma_jup / 360.0)'], {}), '(ma_jup / 360.0)\n', (28700, 28716), True, 'import numpy as np\n'), ((29140, 29162), 'numpy.fix', 'np.fix', (['(ma_ven / 360.0)'], {}), '(ma_ven / 360.0)\n', (29146, 29162), True, 'import numpy as np\n'), ((29624, 29656), 'numpy.cos', 'np.cos', (['(ma_sun - ml_sun - ma_jup)'], 
{}), '(ma_sun - ml_sun - ma_jup)\n', (29630, 29656), True, 'import numpy as np\n'), ((29884, 29907), 'numpy.sin', 'np.sin', (['(ma_sun - ma_jup)'], {}), '(ma_sun - ma_jup)\n', (29890, 29907), True, 'import numpy as np\n'), ((30138, 30155), 'numpy.fix', 'np.fix', (['(l / 360.0)'], {}), '(l / 360.0)\n', (30144, 30155), True, 'import numpy as np\n'), ((30296, 30309), 'numpy.cos', 'np.cos', (['(3 * l)'], {}), '(3 * l)\n', (30302, 30309), True, 'import numpy as np\n'), ((30614, 30635), 'numpy.cos', 'np.cos', (['(deg2rad * lat)'], {}), '(deg2rad * lat)\n', (30620, 30635), True, 'import numpy as np\n'), ((30646, 30659), 'numpy.cos', 'np.cos', (['decln'], {}), '(decln)\n', (30652, 30659), True, 'import numpy as np\n'), ((2912, 2932), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (2927, 2932), False, 'from ion_functions import utils\n'), ((3012, 3029), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (3024, 3029), False, 'from ion_functions import utils\n'), ((3106, 3129), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['datlim'], {}), '(datlim)\n', (3121, 3129), False, 'from ion_functions import utils\n'), ((3212, 3232), 'ion_functions.utils.isreal', 'utils.isreal', (['datlim'], {}), '(datlim)\n', (3224, 3232), False, 'from ion_functions import utils\n'), ((12199, 12219), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (12214, 12219), False, 'from ion_functions import utils\n'), ((12299, 12316), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (12311, 12316), False, 'from ion_functions import utils\n'), ((16384, 16395), 'numpy.std', 'np.std', (['dat'], {}), '(dat)\n', (16390, 16395), True, 'import numpy as np\n'), ((18352, 18372), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (18367, 18372), False, 'from ion_functions import utils\n'), ((18542, 18559), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (18554, 18559), False, 'from ion_functions import utils\n'), ((19692, 19713), 'numpy.atleast_1d', 'np.atleast_1d', (['ddatdx'], {}), '(ddatdx)\n', (19705, 19713), True, 'import numpy as np\n'), ((22908, 22918), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (22915, 22918), True, 'import numpy as np\n'), ((28076, 28100), 'numpy.fix', 'np.fix', (['(275.0 * mn / 9.0)'], {}), '(275.0 * mn / 9.0)\n', (28082, 28100), True, 'import numpy as np\n'), ((28929, 28952), 'numpy.fix', 'np.fix', (['(an_moon / 360.0)'], {}), '(an_moon / 360.0)\n', (28935, 28952), True, 'import numpy as np\n'), ((29574, 29601), 'numpy.sin', 'np.sin', (['(2 * ma_sun - ml_sun)'], {}), '(2 * ma_sun - ml_sun)\n', (29580, 29601), True, 'import numpy as np\n'), ((29845, 29859), 'numpy.cos', 'np.cos', (['ma_sun'], {}), '(ma_sun)\n', (29851, 29859), True, 'import numpy as np\n'), ((30275, 30288), 'numpy.sin', 'np.sin', (['(3 * l)'], {}), '(3 * l)\n', (30281, 30288), True, 'import numpy as np\n'), ((3616, 3637), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (3629, 3637), True, 'import numpy as np\n'), ((3783, 3804), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (3796, 3804), True, 'import numpy as np\n'), ((6641, 6661), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (6656, 6661), False, 'from ion_functions import utils\n'), ((6759, 6776), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (6771, 6776), False, 'from ion_functions import utils\n'), ((12555, 12575), 
'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (12570, 12575), False, 'from ion_functions import utils\n'), ((12673, 12690), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (12685, 12690), False, 'from ion_functions import utils\n'), ((15532, 15552), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (15547, 15552), False, 'from ion_functions import utils\n'), ((15650, 15667), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (15662, 15667), False, 'from ion_functions import utils\n'), ((16354, 16373), 'numpy.std', 'np.std', (['(dat - datpp)'], {}), '(dat - datpp)\n', (16360, 16373), True, 'import numpy as np\n'), ((18700, 18720), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (18715, 18720), False, 'from ion_functions import utils\n'), ((18930, 18947), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (18942, 18947), False, 'from ion_functions import utils\n'), ((29526, 29549), 'numpy.sin', 'np.sin', (['(ma_sun - ml_sun)'], {}), '(ma_sun - ml_sun)\n', (29532, 29549), True, 'import numpy as np\n'), ((29795, 29813), 'numpy.cos', 'np.cos', (['(2 * ma_sun)'], {}), '(2 * ma_sun)\n', (29801, 29813), True, 'import numpy as np\n'), ((30255, 30268), 'numpy.cos', 'np.cos', (['(2 * l)'], {}), '(2 * l)\n', (30261, 30268), True, 'import numpy as np\n'), ((29468, 29492), 'numpy.sin', 'np.sin', (['(an_moon - ml_sun)'], {}), '(an_moon - ml_sun)\n', (29474, 29492), True, 'import numpy as np\n'), ((29767, 29781), 'numpy.cos', 'np.cos', (['ma_sun'], {}), '(ma_sun)\n', (29773, 29781), True, 'import numpy as np\n'), ((30224, 30237), 'numpy.sin', 'np.sin', (['(2 * l)'], {}), '(2 * l)\n', (30230, 30237), True, 'import numpy as np\n'), ((29440, 29454), 'numpy.cos', 'np.cos', (['ml_sun'], {}), '(ml_sun)\n', (29446, 29454), True, 'import numpy as np\n'), ((30184, 30193), 'numpy.sin', 'np.sin', (['l'], {}), '(l)\n', (30190, 30193), True, 'import numpy as np\n'), ((30204, 30213), 'numpy.cos', 'np.cos', (['l'], {}), '(l)\n', (30210, 30213), True, 'import numpy as np\n'), ((29390, 29417), 'numpy.sin', 'np.sin', (['(2 * ma_sun + ml_sun)'], {}), '(2 * ma_sun + ml_sun)\n', (29396, 29417), True, 'import numpy as np\n'), ((28037, 28060), 'numpy.fix', 'np.fix', (['((mn + 9) / 12.0)'], {}), '((mn + 9) / 12.0)\n', (28043, 28060), True, 'import numpy as np\n'), ((29362, 29376), 'numpy.sin', 'np.sin', (['ml_sun'], {}), '(ml_sun)\n', (29368, 29376), True, 'import numpy as np\n'), ((29305, 29328), 'numpy.sin', 'np.sin', (['(ma_sun + ml_sun)'], {}), '(ma_sun + ml_sun)\n', (29311, 29328), True, 'import numpy as np\n'), ((29229, 29243), 'numpy.sin', 'np.sin', (['ml_sun'], {}), '(ml_sun)\n', (29235, 29243), True, 'import numpy as np\n'), ((29257, 29280), 'numpy.sin', 'np.sin', (['(ma_sun - ml_sun)'], {}), '(ma_sun - ml_sun)\n', (29263, 29280), True, 'import numpy as np\n')] |
avshalomt2/datatest | datatest/__past__/api08.py | f622b0e990b53c73f56730a9009b39af7653df20 | """Backward compatibility for version 0.8 API."""
from __future__ import absolute_import
import inspect
import datatest
from datatest._compatibility import itertools
from datatest._compatibility.collections.abc import Sequence
from datatest._load.get_reader import get_reader
from datatest._load.load_csv import load_csv
from datatest._load.temptable import load_data
from datatest._load.temptable import new_table_name
from datatest._load.temptable import savepoint
from datatest._load.temptable import table_exists
from datatest._query.query import DEFAULT_CONNECTION
from datatest._query.query import BaseElement
from datatest._utils import file_types
from datatest._utils import string_types
from datatest._utils import iterpeek
from datatest.allowance import BaseAllowance
from datatest import Invalid
from datatest.difference import BaseDifference
from datatest.difference import NOTFOUND
datatest.DataResult = datatest.Result
class DataQuery(datatest.Query):
def __call__(self, *args, **kwds):
self.execute(*args, **kwds)
datatest.DataQuery = DataQuery
class DataSource(datatest.Selector):
def __init__(self, data, fieldnames=None):
first_value, iterator = iterpeek(data)
if isinstance(first_value, dict):
if not fieldnames:
fieldnames = list(first_value.keys())
super(DataSource, self).__init__(iterator, fieldnames)
else:
if fieldnames:
iterator = itertools.chain([fieldnames], iterator)
super(DataSource, self).__init__(iterator)
@classmethod
def from_csv(cls, file, encoding=None, **fmtparams):
if isinstance(file, string_types) or isinstance(file, file_types):
data_list = [file]
else:
data_list = file
new_cls = cls.__new__(cls)
new_cls._connection = DEFAULT_CONNECTION
cursor = new_cls._connection.cursor()
with savepoint(cursor):
table = new_table_name(cursor)
for obj in data_list:
load_csv(cursor, table, obj, encoding=encoding, **fmtparams)
new_cls._table = table if table_exists(cursor, table) else None
new_cls._data = file
new_cls._args = (encoding,)
new_cls._kwds = fmtparams
new_cls._update_list = []
return new_cls
@classmethod
def from_excel(cls, path, worksheet=0):
new_cls = cls.__new__(cls)
new_cls._connection = DEFAULT_CONNECTION
cursor = new_cls._connection.cursor()
with savepoint(cursor):
table = new_table_name(cursor)
reader = get_reader.from_excel(path, worksheet=0)
load_data(cursor, table, reader)
new_cls._table = table if table_exists(cursor, table) else None
new_cls._data = path
new_cls._args = tuple()
new_cls._kwds = dict()
if worksheet != 0:
new_cls._kwds['worksheet'] = worksheet
new_cls._update_list = []
return new_cls
def columns(self, type=list): # Removed in datatest 0.8.2
return type(self.fieldnames)
datatest.DataSource = DataSource
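# Usage sketch (illustrative; "example.csv" is a hypothetical file, not part
# of this shim): the 0.8-style DataSource accepts an iterable of dicts or a
# CSV file and exposes the old columns() helper.
#
#     source = DataSource([{'A': 'x', 'B': '1'}, {'A': 'y', 'B': '2'}])
#     source.columns()                                  # -> ['A', 'B']
#     source = DataSource.from_csv('example.csv', encoding='utf-8')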
class allowed_key(BaseAllowance):
"""The given *function* should accept a number of arguments
    equal to the given key elements. If key is a single value (string
or otherwise), *function* should accept one argument. If key
is a three-tuple, *function* should accept three arguments.
"""
def __init__(self, function, msg=None):
super(allowed_key, self).__init__(msg)
self.function = function
def __repr__(self):
cls_name = self.__class__.__name__
msg_part = ', msg={0!r}'.format(self.msg) if self.msg else ''
return '{0}({1!r}{2})'.format(cls_name, self.function, msg_part)
def call_predicate(self, item):
key = item[0]
if not isinstance(key, tuple) and isinstance(key, BaseElement):
return self.function(key)
return self.function(*key)
datatest.allowed_key = allowed_key
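# Usage sketch (illustrative): the predicate receives the unpacked mapping
# key, so a two-tuple key such as ('A', 'x') arrives as two positional
# arguments. Allowances of this style are used as context managers around a
# validation call.
#
#     with allowed_key(lambda col, cat: col == 'A'):
#         ...  # differences keyed under column 'A' are allowed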
class allowed_args(BaseAllowance):
"""The given *function* should accept a number of arguments equal
    to the given elements in the 'args' attribute. If args is a single
value (string or otherwise), *function* should accept one argument.
If args is a three-tuple, *function* should accept three arguments.
"""
def __init__(self, function, msg=None):
super(allowed_args, self).__init__(msg)
self.function = function
def __repr__(self):
cls_name = self.__class__.__name__
msg_part = ', msg={0!r}'.format(self.msg) if self.msg else ''
return '{0}({1!r}{2})'.format(cls_name, self.function, msg_part)
def call_predicate(self, item):
args = item[1].args
if not isinstance(args, tuple) and isinstance(args, BaseElement):
return self.function(args)
return self.function(*args)
datatest.allowed_args = allowed_args
def get_subject(self):
if hasattr(self, '_subject_data'):
return self._subject_data
return self._find_data_source('subject')
def set_subject(self, value):
self._subject_data = value
datatest.DataTestCase.subject = property(get_subject, set_subject)
def get_reference(self):
if hasattr(self, '_reference_data'):
return self._reference_data
return self._find_data_source('reference')
def set_reference(self, value):
self._reference_data = value
datatest.DataTestCase.reference = property(get_reference, set_reference)
def _find_data_source(name):
stack = inspect.stack()
stack.pop() # Skip record of current frame.
for record in stack: # Bubble-up stack looking for name.
frame = record[0]
if name in frame.f_globals:
return frame.f_globals[name] # <- EXIT!
raise NameError('cannot find {0!r}'.format(name))
datatest.DataTestCase._find_data_source = staticmethod(_find_data_source)
def allowedKey(self, function, msg=None):
"""Allows differences in a mapping where *function* returns True.
For each difference, function will receive the associated mapping
**key** unpacked into one or more arguments.
"""
return allowed_key(function, msg)
datatest.DataTestCase.allowedKey = allowedKey
def allowedArgs(self, function, msg=None):
"""Allows differences where *function* returns True. For the
'args' attribute of each difference (a tuple), *function* must
accept the number of arguments unpacked from 'args'.
"""
return allowed_args(function, msg)
datatest.DataTestCase.allowedArgs = allowedArgs
def _require_sequence(data, sequence): # New behavior in datatest 0.8.3
"""Compare *data* against a *sequence* of values. Stops at the
first difference found and returns an AssertionError. If no
differences are found, returns None.
"""
if isinstance(data, str):
raise ValueError("uncomparable types: 'str' and sequence type")
data_type = getattr(data, 'evaluation_type', data.__class__)
if not issubclass(data_type, Sequence):
type_name = data_type.__name__
msg = "expected sequence type, but got " + repr(type_name)
raise ValueError(msg)
message_prefix = None
previous_element = NOTFOUND
zipped = itertools.zip_longest(data, sequence, fillvalue=NOTFOUND)
for index, (actual, expected) in enumerate(zipped):
if actual == expected:
previous_element = actual
continue
if actual == NOTFOUND:
message_prefix = ('Data sequence is missing '
'elements starting with index {0}').format(index)
message_suffix = 'Expected {0!r}'.format(expected)
elif expected == NOTFOUND:
message_prefix = ('Data sequence contains extra '
'elements starting with index {0}').format(index)
message_suffix = 'Found {0!r}'.format(actual)
else:
message_prefix = \
'Data sequence differs starting at index {0}'.format(index)
message_suffix = \
'Found {0!r}, expected {1!r}'.format(actual, expected)
break
else: # <- NOBREAK!
return None # <- EXIT!
leading_elements = []
if index > 1:
leading_elements.append('...')
if previous_element != NOTFOUND:
leading_elements.append(repr(previous_element))
actual_repr = repr(actual) if actual != NOTFOUND else '?????'
caret_underline = '^' * len(actual_repr)
trailing_elements = []
next_tuple = next(zipped, NOTFOUND)
if next_tuple != NOTFOUND:
trailing_elements.append(repr(next_tuple[0]))
if next(zipped, NOTFOUND) != NOTFOUND:
trailing_elements.append('...')
if leading_elements:
leading_string = ', '.join(leading_elements) + ', '
else:
leading_string = ''
leading_whitespace = ' ' * len(leading_string)
if trailing_elements:
trailing_string = ', ' + ', '.join(trailing_elements)
else:
trailing_string = ''
sequence_string = leading_string + actual_repr + trailing_string
message = '{0}:\n\n {1}\n {2}{3}\n{4}'.format(message_prefix,
sequence_string,
leading_whitespace,
caret_underline,
message_suffix)
return AssertionError(message)
datatest.validation._require_sequence = _require_sequence
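# Behavior sketch (illustrative): the helper returns (rather than raises) an
# AssertionError describing the first difference, or None when the sequences
# match.
#
#     _require_sequence(['a', 'b', 'x'], ['a', 'b', 'c'])
#     # -> AssertionError('Data sequence differs starting at index 2: ...')
#     _require_sequence(['a', 'b'], ['a', 'b'])            # -> None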
def _require_callable(data, function):
if data is NOTFOUND:
return Invalid(None) # <- EXIT!
def wrapped(element):
try:
if isinstance(element, BaseElement):
returned_value = function(element)
else:
returned_value = function(*element)
except Exception:
returned_value = False # Raised errors count as False.
if returned_value == True:
return None # <- EXIT!
if returned_value == False:
return Invalid(element) # <- EXIT!
if isinstance(returned_value, BaseDifference):
return returned_value # <- EXIT!
callable_name = function.__name__
message = \
'{0!r} returned {1!r}, should return True, False or a difference instance'
raise TypeError(message.format(callable_name, returned_value))
if isinstance(data, BaseElement):
return wrapped(data) # <- EXIT!
results = (wrapped(elem) for elem in data)
diffs = (diff for diff in results if diff)
first_element, diffs = iterpeek(diffs)
if first_element: # If not empty, return diffs.
return diffs
return None
| [((5493, 5508), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (5506, 5508), False, 'import inspect\n'), ((7193, 7250), 'datatest._compatibility.itertools.zip_longest', 'itertools.zip_longest', (['data', 'sequence'], {'fillvalue': 'NOTFOUND'}), '(data, sequence, fillvalue=NOTFOUND)\n', (7214, 7250), False, 'from datatest._compatibility import itertools\n'), ((10596, 10611), 'datatest._utils.iterpeek', 'iterpeek', (['diffs'], {}), '(diffs)\n', (10604, 10611), False, 'from datatest._utils import iterpeek\n'), ((1147, 1161), 'datatest._utils.iterpeek', 'iterpeek', (['data'], {}), '(data)\n', (1155, 1161), False, 'from datatest._utils import iterpeek\n'), ((9584, 9597), 'datatest.Invalid', 'Invalid', (['None'], {}), '(None)\n', (9591, 9597), False, 'from datatest import Invalid\n'), ((1887, 1904), 'datatest._load.temptable.savepoint', 'savepoint', (['cursor'], {}), '(cursor)\n', (1896, 1904), False, 'from datatest._load.temptable import savepoint\n'), ((1926, 1948), 'datatest._load.temptable.new_table_name', 'new_table_name', (['cursor'], {}), '(cursor)\n', (1940, 1948), False, 'from datatest._load.temptable import new_table_name\n'), ((2094, 2121), 'datatest._load.temptable.table_exists', 'table_exists', (['cursor', 'table'], {}), '(cursor, table)\n', (2106, 2121), False, 'from datatest._load.temptable import table_exists\n'), ((2493, 2510), 'datatest._load.temptable.savepoint', 'savepoint', (['cursor'], {}), '(cursor)\n', (2502, 2510), False, 'from datatest._load.temptable import savepoint\n'), ((2532, 2554), 'datatest._load.temptable.new_table_name', 'new_table_name', (['cursor'], {}), '(cursor)\n', (2546, 2554), False, 'from datatest._load.temptable import new_table_name\n'), ((2576, 2616), 'datatest._load.get_reader.get_reader.from_excel', 'get_reader.from_excel', (['path'], {'worksheet': '(0)'}), '(path, worksheet=0)\n', (2597, 2616), False, 'from datatest._load.get_reader import get_reader\n'), ((2629, 2661), 'datatest._load.temptable.load_data', 'load_data', (['cursor', 'table', 'reader'], {}), '(cursor, table, reader)\n', (2638, 2661), False, 'from datatest._load.temptable import load_data\n'), ((2696, 2723), 'datatest._load.temptable.table_exists', 'table_exists', (['cursor', 'table'], {}), '(cursor, table)\n', (2708, 2723), False, 'from datatest._load.temptable import table_exists\n'), ((10042, 10058), 'datatest.Invalid', 'Invalid', (['element'], {}), '(element)\n', (10049, 10058), False, 'from datatest import Invalid\n'), ((1424, 1463), 'datatest._compatibility.itertools.chain', 'itertools.chain', (['[fieldnames]', 'iterator'], {}), '([fieldnames], iterator)\n', (1439, 1463), False, 'from datatest._compatibility import itertools\n'), ((1999, 2059), 'datatest._load.load_csv.load_csv', 'load_csv', (['cursor', 'table', 'obj'], {'encoding': 'encoding'}), '(cursor, table, obj, encoding=encoding, **fmtparams)\n', (2007, 2059), False, 'from datatest._load.load_csv import load_csv\n')] |
jonkuhn/reinteract-jk | lib/reinteract/editor.py | 319c8d930f142cf3c3b8693fbff1b84fd582387c | # Copyright 2008 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import gobject
import gtk
import pango
from application import application
from format_escaped import format_escaped
from notebook import NotebookFile
from shell_buffer import ShellBuffer
from shell_view import ShellView
from save_file import SaveFileBuilder
class Editor(gobject.GObject):
def __init__(self, notebook):
gobject.GObject.__init__(self)
self.notebook = notebook
self._unsaved_index = application.allocate_unsaved_index()
#######################################################
# Utility
#######################################################
def _clear_unsaved(self):
if self._unsaved_index != None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
def _update_filename(self, *args):
self.notify('filename')
self.notify('title')
def _update_modified(self, *args):
self.notify('modified')
self.notify('title')
def _update_state(self, *args):
self.notify('state')
def _update_file(self):
self.notify('file')
def __prompt_for_name(self, title, save_button_text, action, check_name=None):
builder = SaveFileBuilder(title, self._get_display_name(), save_button_text, check_name)
builder.dialog.set_transient_for(self.widget.get_toplevel())
if self._get_filename() != None:
builder.name_entry.set_text(os.path.basename(self._get_filename()))
while True:
response = builder.dialog.run()
if response != gtk.RESPONSE_OK:
break
raw_name = builder.name_entry.get_text()
error_message = None
try:
raw_name = application.validate_name(raw_name)
except ValueError, e:
error_message = e.message
if not error_message:
extension = "." + self._get_extension()
if not (raw_name.lower().endswith(extension)):
raw_name += extension
if not error_message:
fullname = os.path.join(self.notebook.folder, raw_name)
if os.path.exists(fullname):
error_message = "'%s' already exists" % raw_name
if error_message:
dialog = gtk.MessageDialog(parent=self.widget.get_toplevel(), buttons=gtk.BUTTONS_OK,
type=gtk.MESSAGE_ERROR)
dialog.set_markup("<big><b>Please choose a different name</b></big>")
dialog.format_secondary_text(error_message)
dialog.run()
dialog.destroy()
continue
action(fullname)
break
builder.dialog.destroy()
#######################################################
# Implemented by subclasses
#######################################################
def _get_display_name(self):
raise NotImplementedError()
def _get_modified(self):
raise NotImplementedError()
def _get_state(self):
return NotebookFile.NONE
def _get_filename(self):
return NotImplementedError()
def _get_file(self):
return NotImplementedError()
def _get_extension(self):
return NotImplementedError()
def _save(self, filename):
return NotImplementedError()
#######################################################
# Public API
#######################################################
def close(self):
if self._unsaved_index != None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
self.widget.destroy()
def confirm_discard(self, before_quit=False):
if not self.modified:
return True
if before_quit:
message_format = self.DISCARD_FORMAT_BEFORE_QUIT
continue_button_text = '_Quit without saving'
else:
message_format = self.DISCARD_FORMAT
continue_button_text = '_Discard'
if self._get_filename() == None:
save_button_text = gtk.STOCK_SAVE_AS
else:
save_button_text = gtk.STOCK_SAVE
message = format_escaped("<big><b>" + message_format + "</b></big>", self._get_display_name())
dialog = gtk.MessageDialog(parent=self.widget.get_toplevel(), buttons=gtk.BUTTONS_NONE,
type=gtk.MESSAGE_WARNING)
dialog.set_markup(message)
dialog.add_buttons(continue_button_text, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
save_button_text, 1)
dialog.set_default_response(1)
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
return True
elif response == 1:
self.save()
if self.modified:
return False
else:
return True
else:
return False
def load(self, filename):
raise NotImplementedError()
def save(self, filename=None):
if filename == None:
filename = self._get_filename()
if filename == None:
def action(fullname):
self._save(fullname)
self._clear_unsaved()
self.notebook.refresh()
self.__prompt_for_name(title="Save As...", save_button_text="_Save", action=action)
else:
self._save(filename)
def rename(self):
if self._get_filename() == None:
self.save()
return
old_name = os.path.basename(self._get_filename())
title = "Rename '%s'" % old_name
def check_name(name):
return name != "" and name != old_name
def action(fullname):
old_filename = self._get_filename()
self._save(fullname)
self._clear_unsaved()
os.remove(old_filename)
self.notebook.refresh()
self.__prompt_for_name(title=title, save_button_text="_Rename", action=action, check_name=check_name)
@property
def needs_calculate(self):
return (self.state != NotebookFile.EXECUTE_SUCCESS and
self.state != NotebookFile.NONE and
self.state != NotebookFile.EXECUTING)
def calculate(self):
pass
def undo(self):
pass
def redo(self):
pass
@gobject.property
def filename(self):
return self._get_filename()
@gobject.property
def file(self):
return self._get_file()
@gobject.property
def modified(self):
return self._get_modified()
@gobject.property
def state(self):
return self._get_state()
@gobject.property
def title(self):
if self.modified:
return "*" + self._get_display_name()
else:
return self._get_display_name()
| [] |
tvogels01/arthur-redshift-etl | python/scripts/compare_events.py | 477f822d16cd3a86b3bf95cfa28915cb7470a6e4 | """
This script compares events from two ETLs to highlight differences in elapsed times or row counts.
* Pre-requisites
You need to have a list of events for each ETL. Arthur can provide this using the
"query_events" command.
For example:
```
arthur.py query_events -p development 37ACEC7440AB4620 -q > 37ACEC7440AB4620.events
arthur.py query_events -p development 96BE11B234F84F39 -q > 96BE11B234F84F39.events
```
* Usage
Once you have the files, you use this script:
```
compare_events.py 37ACEC7440AB4620.events 96BE11B234F84F39.events
```
The order of those two files is: "older ETL" => "newer ETL".
"""
import csv
import re
import sys
from collections import defaultdict, namedtuple
from math import isclose
from tabulate import tabulate
def read_file(filename):
"""
Read output from query_events command.
The file is expected to be formatted such that there's a header line, a separator, then the
data. The column set must contain "elapsed" and "rowcount" for later processing.
Also Arthur prints a summary after the table, like "(100 rows)" which will be skipped if present.
"""
column_spacing_re = re.compile(r"\s+\|\s+")
row_count_re = re.compile(r"\(\d+\s*rows\)")
print(f"Reading events from {filename}...")
with open(filename) as f:
for i, line in enumerate(f.readlines()):
if i == 1 or row_count_re.match(line):
# Found the separator line or the final row tally.
continue
yield column_spacing_re.sub("|", line).strip()
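# Expected input shape (an illustrative assumption based on the parsing in
# read_file/parse_file): an Arthur "query_events" dump with '|'-separated
# columns, e.g.
#
#     step    | target        | elapsed | rowcount
#     --------+---------------+---------+---------
#     load    | www.orders    | 123.4   | 4567
#     (1 rows)
#
# The separator line and the trailing "(N rows)" tally are skipped, and each
# remaining line is normalized to 'step|target|elapsed|rowcount' form.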
def parse_file(filename):
"""Parse the input as '|'-delimited columns."""
lines = read_file(filename)
reader = csv.reader(lines, delimiter="|")
row_class = namedtuple("CsvRow", next(reader), rename=True)
for row in reader:
yield row_class._make(row)
def extract_values(filename):
"""Find elapsed time and rowcount for each target relation."""
# The "lambda: None" trick allows us to use 'd[]' instead of 'd.get()' later.
elapsed = defaultdict(lambda: None)
rowcount = defaultdict(lambda: None)
for row in parse_file(filename):
elapsed[row.step, row.target] = float(row.elapsed) if row.elapsed != "---" else None
rowcount[row.step, row.target] = int(row.rowcount) if row.rowcount != "---" else None
return elapsed, rowcount
def delta(a, b):
"""
Return change in percent (or None if undefined).
The delta in percent is rounded to one decimal.
"""
if a is None or b is None:
return None
if a == 0.0 and b == 0.0:
return 0.0
assert a != 0.0 and b != 0.0
return round((b - a) * 1000.0 / a) / 10.0
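# Worked example (the values follow from the formula above): going from 200
# rows to 230 rows is a +15.0% change, while going from 230 back to 200 is
# about -13.0%.
#
#     delta(200, 230)   # -> 15.0
#     delta(230, 200)   # -> -13.0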
def show_delta(previous_value, current_value, column):
"""
Return whether the change from previous event to current event is "significant".
If the values appear to be equal or almost equal, there's no need to report a delta.
Also, if the values are really small and any change is inflated, skip reporting the delta.
Note that for row count, a decrease in rows is always shown.
"""
if previous_value is None or current_value is None:
return False
if previous_value == current_value:
return False
if column == "elapsed":
# Decrease trigger-happiness for quick loads:
if previous_value < 10.0 and current_value < 10.0:
return False
if previous_value < 30.0 or current_value < 30.0:
return not isclose(previous_value, current_value, abs_tol=20.0)
if previous_value < 60.0 or current_value < 60.0:
return not isclose(previous_value, current_value, rel_tol=0.5)
if previous_value < 300.0 or current_value < 300.0:
return not isclose(previous_value, current_value, rel_tol=0.2)
if column == "rowcount":
# We expect to move forward with growing tables so smaller row counts are suspect.
if previous_value > current_value:
return True
# Increase trigger-happiness for small (dimensional) tables:
if previous_value < 1000 or current_value < 1000:
return not isclose(previous_value, current_value, abs_tol=10)
return not isclose(previous_value, current_value, rel_tol=0.1)
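# Worked examples (illustrative, following the thresholds above): quick loads
# under 10 s never trigger, short loads need more than a 20 s absolute change,
# and any decrease in row count is reported.
#
#     show_delta(5.0, 9.0, "elapsed")       # -> False (both under 10 s)
#     show_delta(20.0, 38.0, "elapsed")     # -> False (within abs_tol=20)
#     show_delta(20.0, 50.0, "elapsed")     # -> True
#     show_delta(1500, 1400, "rowcount")    # -> True (row count decreased)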
def print_comparison_table(previous_values, current_values, column):
"""Print differences between runs, sorted by relation."""
all_events = frozenset(previous_values).union(current_values)
has_large_diff = frozenset(
event
for event in all_events
if show_delta(previous_values[event], current_values[event], column)
)
table = sorted(
(
(
event[1], # target
event[0], # step
previous_values[event],
current_values[event],
delta(previous_values[event], current_values[event]),
)
for event in has_large_diff
),
key=lambda row: row[:2], # Avoid comparison with None values in the columns
)
print("Differences for '{}':\n".format(column))
print(
tabulate(
table,
headers=("target", "step", "prev. " + column, "cur. " + column, "delta %"),
tablefmt="presto",
)
)
def main():
if len(sys.argv) >= 2 and sys.argv[1] in ("-h", "--help"):
print(__doc__)
sys.exit(0)
if len(sys.argv) != 3:
print(
"Usage: {prog} previous_events current_events".format(prog=sys.argv[0]),
file=sys.stderr,
)
sys.exit(1)
previous_events_file, current_events_file = sys.argv[1:3]
previous_elapsed, previous_rowcount = extract_values(previous_events_file)
current_elapsed, current_rowcount = extract_values(current_events_file)
print_comparison_table(previous_elapsed, current_elapsed, "elapsed")
print()
print_comparison_table(previous_rowcount, current_rowcount, "rowcount")
if __name__ == "__main__":
main()
| [((1145, 1170), 're.compile', 're.compile', (['"""\\\\s+\\\\|\\\\s+"""'], {}), "('\\\\s+\\\\|\\\\s+')\n", (1155, 1170), False, 'import re\n'), ((1188, 1220), 're.compile', 're.compile', (['"""\\\\(\\\\d+\\\\s*rows\\\\)"""'], {}), "('\\\\(\\\\d+\\\\s*rows\\\\)')\n", (1198, 1220), False, 'import re\n'), ((1673, 1705), 'csv.reader', 'csv.reader', (['lines'], {'delimiter': '"""|"""'}), "(lines, delimiter='|')\n", (1683, 1705), False, 'import csv\n'), ((2023, 2049), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (2034, 2049), False, 'from collections import defaultdict, namedtuple\n'), ((2064, 2090), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (2075, 2090), False, 'from collections import defaultdict, namedtuple\n'), ((4183, 4234), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'rel_tol': '(0.1)'}), '(previous_value, current_value, rel_tol=0.1)\n', (4190, 4234), False, 'from math import isclose\n'), ((5085, 5199), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "('target', 'step', 'prev. ' + column, 'cur. ' + column, 'delta %')", 'tablefmt': '"""presto"""'}), "(table, headers=('target', 'step', 'prev. ' + column, 'cur. ' +\n column, 'delta %'), tablefmt='presto')\n", (5093, 5199), False, 'from tabulate import tabulate\n'), ((5357, 5368), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5365, 5368), False, 'import sys\n'), ((5543, 5554), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5551, 5554), False, 'import sys\n'), ((3457, 3509), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'abs_tol': '(20.0)'}), '(previous_value, current_value, abs_tol=20.0)\n', (3464, 3509), False, 'from math import isclose\n'), ((3591, 3642), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'rel_tol': '(0.5)'}), '(previous_value, current_value, rel_tol=0.5)\n', (3598, 3642), False, 'from math import isclose\n'), ((3726, 3777), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'rel_tol': '(0.2)'}), '(previous_value, current_value, rel_tol=0.2)\n', (3733, 3777), False, 'from math import isclose\n'), ((4116, 4166), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'abs_tol': '(10)'}), '(previous_value, current_value, abs_tol=10)\n', (4123, 4166), False, 'from math import isclose\n')] |
cmu-sei/augur-code | harness/drifter.py | d8c1e29ce3276037b26b65ea316d251752529449 | # Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact [email protected] for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 Jonathan E. Taylor, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
import shutil
from drift import drift_generator
from utils import arguments
from utils.config import Config
from utils import logging
from datasets import dataset
LOG_FILE_NAME = "drifter.log"
DEFAULT_CONFIG_FILENAME = "./drifter_config.json"
DRIFT_EXP_CONFIG_FOLDER = "../experiments/drifter"
def load_dataset(dataset_filename, dataset_class_name):
"""Load dataset to drift."""
dataset_class = dataset.load_dataset_class(dataset_class_name)
base_dataset = dataset_class()
base_dataset.load_from_file(dataset_filename)
return base_dataset
def main():
logging.setup_logging(LOG_FILE_NAME)
# Allow selecting configs for experiments, and load it.
args = arguments.get_parsed_arguments()
config_file = Config.get_config_file(args, DRIFT_EXP_CONFIG_FOLDER, DEFAULT_CONFIG_FILENAME)
config = Config()
config.load(config_file)
# Load scenario data.
drift_module, params = drift_generator.load_drift_config(config.get("drift_scenario"))
if args.test:
drift_generator.test_drift(config, drift_module, params, config.get("bins"))
else:
# Sort dataset into bins.
base_dataset = load_dataset(config.get("dataset"), config.get("dataset_class"))
bin_value = config.get("bin_value") if config.contains("bin_value") else "results"
bin_shuffle = config.get("bin_shuffle") if config.contains("bin_shuffle") else True
bins = drift_generator.load_bins(base_dataset, config.get("bins"), bin_value, bin_shuffle)
# Apply drift.
drifted_dataset = drift_generator.apply_drift(bins, drift_module, params)
drift_generator.add_timestamps(drifted_dataset, config.get("timestamps"))
# Save it to regular file, and timestamped file.
drifted_dataset.save_to_file(config.get("output"))
print("Copying output file to timestamped backup.")
shutil.copyfile(config.get("output"), drift_generator.get_drift_stamped_name(config.get("output")))
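# Sketch of the configuration keys that main() reads via config.get() and
# config.contains() (an illustrative assumption only; the real schema is
# defined by drifter_config.json and the experiment configs, not shown here):
#
#     {
#       "dataset": "<path to the input dataset>",
#       "dataset_class": "<fully qualified dataset class name>",
#       "output": "<path for the drifted dataset>",
#       "bins": "<bin specification>",
#       "bin_value": "results",        # optional, defaults to "results"
#       "bin_shuffle": true,           # optional, defaults to True
#       "timestamps": "<timestamp specification>",
#       "drift_scenario": "<drift module name and parameters>"
#     }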
if __name__ == '__main__':
main()
| [((2550, 2596), 'datasets.dataset.load_dataset_class', 'dataset.load_dataset_class', (['dataset_class_name'], {}), '(dataset_class_name)\n', (2576, 2596), False, 'from datasets import dataset\n'), ((2724, 2760), 'utils.logging.setup_logging', 'logging.setup_logging', (['LOG_FILE_NAME'], {}), '(LOG_FILE_NAME)\n', (2745, 2760), False, 'from utils import logging\n'), ((2833, 2865), 'utils.arguments.get_parsed_arguments', 'arguments.get_parsed_arguments', ([], {}), '()\n', (2863, 2865), False, 'from utils import arguments\n'), ((2884, 2962), 'utils.config.Config.get_config_file', 'Config.get_config_file', (['args', 'DRIFT_EXP_CONFIG_FOLDER', 'DEFAULT_CONFIG_FILENAME'], {}), '(args, DRIFT_EXP_CONFIG_FOLDER, DEFAULT_CONFIG_FILENAME)\n', (2906, 2962), False, 'from utils.config import Config\n'), ((2976, 2984), 'utils.config.Config', 'Config', ([], {}), '()\n', (2982, 2984), False, 'from utils.config import Config\n'), ((3700, 3755), 'drift.drift_generator.apply_drift', 'drift_generator.apply_drift', (['bins', 'drift_module', 'params'], {}), '(bins, drift_module, params)\n', (3727, 3755), False, 'from drift import drift_generator\n')] |
DSM-DMS/Project-DMS-Web | server/server-flask/app/docs/admin/survey/survey.py | 73a5d8fc2310bca90169414abf50f541ca0724c7 | SURVEY_POST = {
    'tags': ['Survey management'],
    'description': 'Register a survey',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'title',
            'description': 'Survey title',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'start_date',
            'description': 'Start date (YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'end_date',
            'description': 'End date (YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'target',
            'description': 'Target grades',
'in': 'formData',
'type': 'list',
'required': True
}
],
'responses': {
'201': {
            'description': 'Survey registered successfully'
},
'403': {
            'description': 'Permission denied'
}
}
}
QUESTION_POST = {
    'tags': ['Survey management'],
    'description': 'Add a question to a survey',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'id',
            'description': 'ID of the survey to add the question to',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'title',
            'description': 'Question title',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'is_objective',
            'description': 'Whether the question is multiple choice',
'in': 'formData',
'type': 'bool',
'required': True
},
{
'name': 'choice_paper',
            'description': 'Multiple-choice options',
'in': 'formData',
'type': 'list',
'required': False
}
],
'responses': {
'201': {
            'description': 'Question added successfully'
},
'403': {
            'description': 'Permission denied'
}
}
}
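# Illustrative usage sketch (assumptions: the project exposes these dicts through
# Flasgger's swag_from decorator on a Flask-RESTful resource; the class and method below
# are hypothetical and not part of this repository).
#
#   from flasgger import swag_from
#   from flask_restful import Resource
#
#   class Survey(Resource):
#       @swag_from(SURVEY_POST)
#       def post(self):
#           ...  # create the survey and return 201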
| [] |
xuyu0010/ARID_v1 | network/baselines_archive/resnet_3d101.py | b03d0975f41547e8aa78929b8e26a62248f8e18f | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import logging
import os
try:
from . import initializer
from .utils import load_state
except:
import initializer
from utils import load_state
__all__ = ['ResNeXt', 'resnet50', 'resnet101']
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def conv1x1x1(in_planes, out_planes, stride=1):
return nn.Conv3d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv3x3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv1x1x1(in_planes, planes)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = conv3x3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = conv1x1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
block_inplanes=[64, 128, 256, 512],
n_input_channels=3,
conv1_t_size=7,
conv1_t_stride=1,
no_max_pool=False,
shortcut_type='B',
widen_factor=1.0,
num_classes=400,
pretrained=True):
super().__init__()
block_inplanes = [int(x * widen_factor) for x in block_inplanes]
self.in_planes = block_inplanes[0]
self.no_max_pool = no_max_pool
self.conv1 = nn.Conv3d(n_input_channels,
self.in_planes,
kernel_size=(conv1_t_size, 7, 7),
stride=(conv1_t_stride, 2, 2),
padding=(conv1_t_size // 2, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(self.in_planes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
shortcut_type)
self.layer2 = self._make_layer(block,
block_inplanes[1],
layers[1],
shortcut_type,
stride=2)
self.layer3 = self._make_layer(block,
block_inplanes[2],
layers[2],
shortcut_type,
stride=2)
self.layer4 = self._make_layer(block,
block_inplanes[3],
layers[3],
shortcut_type,
stride=2)
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Initialization
initializer.xavier(net=self)
if pretrained:
pretrained_model=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained/resnet-101-kinetics.pth')
logging.info("Network:: graph initialized, loading pretrained model: `{}'".format(pretrained_model))
assert os.path.exists(pretrained_model), "cannot locate: `{}'".format(pretrained_model)
pretrained = torch.load(pretrained_model)
load_state(self, pretrained['state_dict'])
else:
logging.info("Network:: graph initialized, use random inilization!")
def _downsample_basic_block(self, x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
out.size(3), out.size(4))
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = torch.cat([out.data, zero_pads], dim=1)
return out
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(self._downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
conv1x1x1(self.in_planes, planes * block.expansion, stride),
nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(
block(in_planes=self.in_planes,
planes=planes,
stride=stride,
downsample=downsample))
self.in_planes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.in_planes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if not self.no_max_pool:
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
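# Illustrative only (not in the original file): the parameter groups above are meant to be
# handed straight to an optimizer, so that parameters outside the selected layers keep an
# explicit lr of 0.0 while the rest use the optimizer's default. The hyper-parameter
# values below are placeholders.
#
#   model = RESNET101(num_classes=11, pretrained=True)
#   params = get_fine_tuning_parameters(model, ft_begin_index=4)
#   optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9)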
def RESNET101(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
if __name__ == "__main__":
import torch
logging.getLogger().setLevel(logging.DEBUG)
# ---------
net1 = RESNET101(num_classes=11, pretrained=True)
data = torch.randn(1,3,16,224,224)
output1 = net1(data)
print (output1.shape)
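    # Expected to print torch.Size([1, 11]): batch of 1 from the random clip above and 11
    # class scores from num_classes=11.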
| [((451, 540), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (460, 540), True, 'import torch.nn as nn\n'), ((647, 721), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n', (656, 721), True, 'import torch.nn as nn\n'), ((8449, 8480), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(16)', '(224)', '(224)'], {}), '(1, 3, 16, 224, 224)\n', (8460, 8480), False, 'import torch\n'), ((1031, 1053), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1045, 1053), True, 'import torch.nn as nn\n'), ((1074, 1095), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1081, 1095), True, 'import torch.nn as nn\n'), ((1162, 1184), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1176, 1184), True, 'import torch.nn as nn\n'), ((1818, 1840), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1832, 1840), True, 'import torch.nn as nn\n'), ((1915, 1937), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1929, 1937), True, 'import torch.nn as nn\n'), ((2021, 2060), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(planes * self.expansion)'], {}), '(planes * self.expansion)\n', (2035, 2060), True, 'import torch.nn as nn\n'), ((2081, 2102), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2088, 2102), True, 'import torch.nn as nn\n'), ((3248, 3412), 'torch.nn.Conv3d', 'nn.Conv3d', (['n_input_channels', 'self.in_planes'], {'kernel_size': '(conv1_t_size, 7, 7)', 'stride': '(conv1_t_stride, 2, 2)', 'padding': '(conv1_t_size // 2, 3, 3)', 'bias': '(False)'}), '(n_input_channels, self.in_planes, kernel_size=(conv1_t_size, 7, 7\n ), stride=(conv1_t_stride, 2, 2), padding=(conv1_t_size // 2, 3, 3),\n bias=False)\n', (3257, 3412), True, 'import torch.nn as nn\n'), ((3578, 3608), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['self.in_planes'], {}), '(self.in_planes)\n', (3592, 3608), True, 'import torch.nn as nn\n'), ((3629, 3650), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3636, 3650), True, 'import torch.nn as nn\n'), ((3674, 3722), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (3686, 3722), True, 'import torch.nn as nn\n'), ((4648, 4679), 'torch.nn.AdaptiveAvgPool3d', 'nn.AdaptiveAvgPool3d', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (4668, 4679), True, 'import torch.nn as nn\n'), ((4698, 4757), 'torch.nn.Linear', 'nn.Linear', (['(block_inplanes[3] * block.expansion)', 'num_classes'], {}), '(block_inplanes[3] * block.expansion, num_classes)\n', (4707, 4757), True, 'import torch.nn as nn\n'), ((5176, 5204), 'initializer.xavier', 'initializer.xavier', ([], {'net': 'self'}), '(net=self)\n', (5194, 5204), False, 'import initializer\n'), ((5844, 5889), 'torch.nn.functional.avg_pool3d', 'F.avg_pool3d', (['x'], {'kernel_size': '(1)', 'stride': 'stride'}), '(x, kernel_size=1, stride=stride)\n', (5856, 5889), True, 'import torch.nn.functional as F\n'), ((6141, 6180), 'torch.cat', 'torch.cat', (['[out.data, zero_pads]'], {'dim': '(1)'}), '([out.data, zero_pads], dim=1)\n', (6150, 6180), False, 'import torch\n'), ((7158, 7180), 
'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (7171, 7180), True, 'import torch.nn as nn\n'), ((5486, 5518), 'os.path.exists', 'os.path.exists', (['pretrained_model'], {}), '(pretrained_model)\n', (5500, 5518), False, 'import os\n'), ((5592, 5620), 'torch.load', 'torch.load', (['pretrained_model'], {}), '(pretrained_model)\n', (5602, 5620), False, 'import torch\n'), ((5633, 5675), 'utils.load_state', 'load_state', (['self', "pretrained['state_dict']"], {}), "(self, pretrained['state_dict'])\n", (5643, 5675), False, 'from utils import load_state\n'), ((5702, 5770), 'logging.info', 'logging.info', (['"""Network:: graph initialized, use random inilization!"""'], {}), "('Network:: graph initialized, use random inilization!')\n", (5714, 5770), False, 'import logging\n'), ((8324, 8343), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8341, 8343), False, 'import logging\n'), ((4849, 4919), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (4872, 4919), True, 'import torch.nn as nn\n'), ((6439, 6528), 'functools.partial', 'partial', (['self._downsample_basic_block'], {'planes': '(planes * block.expansion)', 'stride': 'stride'}), '(self._downsample_basic_block, planes=planes * block.expansion,\n stride=stride)\n', (6446, 6528), False, 'from functools import partial\n'), ((5064, 5094), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (5081, 5094), True, 'import torch.nn as nn\n'), ((5111, 5139), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5128, 5139), True, 'import torch.nn as nn\n'), ((5287, 5313), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5303, 5313), False, 'import os\n'), ((6762, 6802), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (6776, 6802), True, 'import torch.nn as nn\n')] |
gecBurton/inference_logic | tests/ninety_nine_problems/test_miscellaneous_problems.py | 2531d8f8fb0154b3bd42ac86eccc44d7038f6ef6 | import pytest
from inference_logic import Rule, Variable, search
from inference_logic.data_structures import Assert, Assign
@pytest.mark.xfail
def test_90():
r"""
P90 (**) Eight queens problem
This is a classical problem in computer science. The objective is to
place eight queens on a chessboard so that no two queens are attacking
each other; i.e., no two queens are in the same row, the same column,
or on the same diagonal. We generalize this original problem by
allowing for an arbitrary dimension N of the chessboard.
We represent the positions of the queens as a list of numbers 1..N.
Example: [4,2,7,3,6,8,5,1] means that the queen in the first column
is in row 4, the queen in the second column is in row 2, etc.
By using the permutations of the numbers 1..N we guarantee that
no two queens are in the same row. The only test that remains
to be made is the diagonal test. A queen placed at column X and
row Y occupies two diagonals: one of them, with number C = X-Y, goes
from bottom-left to top-right, the other one, numbered D = X+Y, goes
from top-left to bottom-right. In the test predicate we keep track
of the already occupied diagonals in Cs and Ds.
% The first version is a simple generate-and-test solution.
% queens_1(N,Qs) :- Qs is a solution of the N-queens problem
queens_1(N,Qs) :- range(1,N,Rs), permu(Rs,Qs), test(Qs).
% range(A,B,L) :- L is the list of numbers A..B
range(A,A,[A]).
range(A,B,[A|L]) :- A < B, A1 is A+1, range(A1,B,L).
% permu(Xs,Zs) :- the list Zs is a permutation of the list Xs
permu([],[]).
permu(Qs,[Y|Ys]) :- del(Y,Qs,Rs), permu(Rs,Ys).
del(X,[X|Xs],Xs).
del(X,[Y|Ys],[Y|Zs]) :- del(X,Ys,Zs).
% test(Qs) :- the list Qs represents a non-attacking queens solution
test(Qs) :- test(Qs,1,[],[]).
% test(Qs,X,Cs,Ds) :- the queens in Qs, representing columns X to N,
% are not in conflict with the diagonals Cs and Ds
test([],_,_,_).
test([Y|Ys],X,Cs,Ds) :-
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X + 1,
test(Ys,X1,[C|Cs],[D|Ds]).
%--------------------------------------------------------------
% Now, in version 2, the tester is pushed completely inside the
% generator permu.
queens_2(N,Qs) :- range(1,N,Rs), permu_test(Rs,Qs,1,[],[]).
permu_test([],[],_,_,_).
permu_test(Qs,[Y|Ys],X,Cs,Ds) :-
del(Y,Qs,Rs),
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X+1,
permu_test(Rs,Ys,X1,[C|Cs],[D|Ds]).
"""
N, Qs, N, Rs, Qs, A, B, L, A1, Y, Ys, X, Xs, Zs = Variable.factory(
"N", "Qs", "N", "Rs", "Qs", "A", "B", "L", "A1", "Y", "Ys", "X", "Xs", "Zs"
)
_W1, _W2, _W3 = Variable.factory("_W1", "_W2", "_W3")
Cs, Ds, D, X1, C, Cs = Variable.factory("Cs", "Ds", "D", "X1", "C", "Cs")
db = [
Rule(
dict(queens_1=N, a=Qs),
dict(range=1, a=N, b=Rs),
dict(permu=Rs, a=Qs),
dict(test=Qs),
),
dict(range=A, a=A, b=[A]),
Rule(
dict(range=A, a=B, b=[A, *L]),
Assert(lambda A, B: A < B),
Assign(A1, lambda A: A + 1),
dict(range=A1, a=B, b=L),
),
dict(permu=[], a=[]),
Rule(
dict(permu=Qs, a=[Y, *Ys]), dict(delete=Y, a=Qs, b=Rs), dict(permu=Rs, a=Ys)
),
dict(delete=X, a=[X, *Xs], b=Xs),
Rule(dict(delete=X, a=[Y, *Ys], b=[Y, *Zs]), dict(delete=X, a=Ys, b=Zs)),
Rule(dict(test=Qs), dict(test=Qs, a=1, b=[], c=[])),
dict(test=[], a=_W1, b=_W2, c=_W3),
Rule(
dict(test=[Y, *Ys], a=X, b=Cs, c=Ds),
Assign(C, lambda X, Y: X - Y),
Assert(lambda C, Cs: C not in Cs),
Assign(D, lambda X, Y: X + Y),
Assert(lambda D, Ds: D not in Ds),
Assign(X1, lambda X: X + 1),
dict(test=Ys, a=X1, b=[C, *Cs], c=[D, *Ds]),
),
]
Q = Variable("Q")
query = dict(queens_1=8, a=Q)
assert list(search(db, query)) == []
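# Plain-Python sketch (not part of the original suite) of the diagonal test the docstring
# describes: a queen in column x and row y occupies diagonals x - y and x + y, so a row
# permutation is a solution exactly when both diagonal lists are free of repeats.
def _is_nonattacking(rows):
    cols = range(1, len(rows) + 1)
    diag1 = [x - y for x, y in zip(cols, rows)]
    diag2 = [x + y for x, y in zip(cols, rows)]
    return len(set(diag1)) == len(diag1) and len(set(diag2)) == len(diag2)


# The example placement from the docstring, [4,2,7,3,6,8,5,1], passes the check.
assert _is_nonattacking([4, 2, 7, 3, 6, 8, 5, 1])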
| [((2700, 2797), 'inference_logic.Variable.factory', 'Variable.factory', (['"""N"""', '"""Qs"""', '"""N"""', '"""Rs"""', '"""Qs"""', '"""A"""', '"""B"""', '"""L"""', '"""A1"""', '"""Y"""', '"""Ys"""', '"""X"""', '"""Xs"""', '"""Zs"""'], {}), "('N', 'Qs', 'N', 'Rs', 'Qs', 'A', 'B', 'L', 'A1', 'Y', 'Ys',\n 'X', 'Xs', 'Zs')\n", (2716, 2797), False, 'from inference_logic import Rule, Variable, search\n'), ((2828, 2865), 'inference_logic.Variable.factory', 'Variable.factory', (['"""_W1"""', '"""_W2"""', '"""_W3"""'], {}), "('_W1', '_W2', '_W3')\n", (2844, 2865), False, 'from inference_logic import Rule, Variable, search\n'), ((2893, 2943), 'inference_logic.Variable.factory', 'Variable.factory', (['"""Cs"""', '"""Ds"""', '"""D"""', '"""X1"""', '"""C"""', '"""Cs"""'], {}), "('Cs', 'Ds', 'D', 'X1', 'C', 'Cs')\n", (2909, 2943), False, 'from inference_logic import Rule, Variable, search\n'), ((4078, 4091), 'inference_logic.Variable', 'Variable', (['"""Q"""'], {}), "('Q')\n", (4086, 4091), False, 'from inference_logic import Rule, Variable, search\n'), ((3220, 3246), 'inference_logic.data_structures.Assert', 'Assert', (['(lambda A, B: A < B)'], {}), '(lambda A, B: A < B)\n', (3226, 3246), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3260, 3287), 'inference_logic.data_structures.Assign', 'Assign', (['A1', '(lambda A: A + 1)'], {}), '(A1, lambda A: A + 1)\n', (3266, 3287), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3787, 3816), 'inference_logic.data_structures.Assign', 'Assign', (['C', '(lambda X, Y: X - Y)'], {}), '(C, lambda X, Y: X - Y)\n', (3793, 3816), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3830, 3863), 'inference_logic.data_structures.Assert', 'Assert', (['(lambda C, Cs: C not in Cs)'], {}), '(lambda C, Cs: C not in Cs)\n', (3836, 3863), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3877, 3906), 'inference_logic.data_structures.Assign', 'Assign', (['D', '(lambda X, Y: X + Y)'], {}), '(D, lambda X, Y: X + Y)\n', (3883, 3906), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3920, 3953), 'inference_logic.data_structures.Assert', 'Assert', (['(lambda D, Ds: D not in Ds)'], {}), '(lambda D, Ds: D not in Ds)\n', (3926, 3953), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3967, 3994), 'inference_logic.data_structures.Assign', 'Assign', (['X1', '(lambda X: X + 1)'], {}), '(X1, lambda X: X + 1)\n', (3973, 3994), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((4142, 4159), 'inference_logic.search', 'search', (['db', 'query'], {}), '(db, query)\n', (4148, 4159), False, 'from inference_logic import Rule, Variable, search\n')] |
ipa320/airbus_coop | airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py | 974564807ba5d24096e237a9991311608a390da1 | #!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import sys
import threading
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from airbus_cobot_gui.res import R
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from airbus_pyqt_extend.QtAgiGui import QAgiPopup
from rqt_robot_monitor.status_item import StatusItem
import rqt_robot_monitor.util_robot_monitor as util
## @class DiagnosticsStatus
## @brief Class for difine different control status.
#OK = 0
#WARN = 1
#ERROR = 2
#STALE = 3
class DiagnosticsWidget(QPushButton):
DIAGNOSTICS_TOPLEVEL_TOPIC_NAME = rospy.get_param('diagnostics_toplevel_topic_name','/diagnostics_toplevel_state')
state = "status_stale"
msg = "No diagnostic messages received"
def __init__(self, context):
"""! The constructor."""
QPushButton.__init__(self)
self._context = context
# Diagnostics top level: update the color of the button depending on the current diagnostics toplevel message
self.connect(self, SIGNAL("stateChanged"), self.update_state)
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
self._diagnostics_toplevel_state_sub = rospy.Subscriber(self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME , DiagnosticStatus, self.toplevel_state_callback)
# Diagnostics: when button pressed open a new window with a detailed list of components and diagnostic messages
self.connect(self,SIGNAL('clicked(bool)'),self._trigger_button)
def update_state(self, state, msg):
self.setIcon(R.getIconById(state))
self.setIconSize(QSize(40,40))
self.setToolTip(msg)
def toplevel_state_callback(self, msg):
self.state = msg.level
if msg.level == 0:
self.state= "status_ok"
self.msg = "OK"
if msg.level == 1 :
self.state= "status_warning"
self.msg = "WARNING"
if msg.level == 2 :
self.state= "status_error"
self.msg = "ERROR"
if msg.level == 3 :
self.state= "status_stale"
self.msg = "STALE"
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
def _trigger_button(self, checked):
popup = DiagnosticsPopup(self, self._context)
popup.show_()
class DiagnosticsPopup(QAgiPopup):
def __init__(self, parent, context):
"""! The constructor."""
QAgiPopup.__init__(self, parent)
self._context = context
self._parent = parent
self.setRelativePosition(QAgiPopup.TopRight, QAgiPopup.BottomRight)
loadUi(R.layouts.diagnostics_popup, self)
self._inspectors = {}
self._current_msg = None
palette = self.tree_all_devices.palette()
self._original_base_color = palette.base().color()
self._original_alt_base_color = palette.alternateBase().color()
self._tree = StatusItem(self.tree_all_devices.invisibleRootItem())
self.adjustSize()
# Diagnostics subscriber
DIAGNOSTICS_TOPIC_NAME = rospy.get_param('diagnostics_topic_name','/diagnostics_agg')
self.connect(self,SIGNAL("UpdateDiagnostics"), self.update_diag)
self._diagnostics_agg_sub = rospy.Subscriber(DIAGNOSTICS_TOPIC_NAME, DiagnosticArray, self.message_cb)
def update_diag(self):
#update the tree
self._tree.prune()
self.tree_all_devices.resizeColumnToContents(0)
self.adjustSize()
def message_cb(self,msg):
""" DiagnosticArray message callback """
for status in msg.status:
path = status.name.split('/')
if path[0] == '':
path = path[1:]
tmp_tree = self._tree
for p in path:
tmp_tree = tmp_tree[p]
tmp_tree.update(status, util.get_resource_name(status.name))
self.emit(SIGNAL('UpdateDiagnostics'))
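# Illustrative only (not part of the original module): publishing a status by hand is a
# quick way to exercise the colour/tooltip update above. The topic name is the widget's
# default; the level constants come from diagnostic_msgs (OK=0, WARN=1, ERROR=2, STALE=3).
#
#   pub = rospy.Publisher('/diagnostics_toplevel_state', DiagnosticStatus, queue_size=1)
#   pub.publish(DiagnosticStatus(level=DiagnosticStatus.WARN, name='demo', message='degraded'))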
if __name__ == "__main__":
from airbus_cobot_gui.context import Context
app = QApplication(sys.argv)
main = QMainWindow()
    main.setCentralWidget(DiagnosticsWidget(Context(main)))
main.show()
app.exec_()
#End of file
| [((1353, 1438), 'rospy.get_param', 'rospy.get_param', (['"""diagnostics_toplevel_topic_name"""', '"""/diagnostics_toplevel_state"""'], {}), "('diagnostics_toplevel_topic_name',\n '/diagnostics_toplevel_state')\n", (1368, 1438), False, 'import rospy\n'), ((1939, 2045), 'rospy.Subscriber', 'rospy.Subscriber', (['self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME', 'DiagnosticStatus', 'self.toplevel_state_callback'], {}), '(self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME, DiagnosticStatus,\n self.toplevel_state_callback)\n', (1955, 2045), False, 'import rospy\n'), ((3137, 3169), 'airbus_pyqt_extend.QtAgiGui.QAgiPopup.__init__', 'QAgiPopup.__init__', (['self', 'parent'], {}), '(self, parent)\n', (3155, 3169), False, 'from airbus_pyqt_extend.QtAgiGui import QAgiPopup\n'), ((3317, 3358), 'python_qt_binding.loadUi', 'loadUi', (['R.layouts.diagnostics_popup', 'self'], {}), '(R.layouts.diagnostics_popup, self)\n', (3323, 3358), False, 'from python_qt_binding import loadUi\n'), ((3771, 3832), 'rospy.get_param', 'rospy.get_param', (['"""diagnostics_topic_name"""', '"""/diagnostics_agg"""'], {}), "('diagnostics_topic_name', '/diagnostics_agg')\n", (3786, 3832), False, 'import rospy\n'), ((3941, 4015), 'rospy.Subscriber', 'rospy.Subscriber', (['DIAGNOSTICS_TOPIC_NAME', 'DiagnosticArray', 'self.message_cb'], {}), '(DIAGNOSTICS_TOPIC_NAME, DiagnosticArray, self.message_cb)\n', (3957, 4015), False, 'import rospy\n'), ((2298, 2318), 'airbus_cobot_gui.res.R.getIconById', 'R.getIconById', (['state'], {}), '(state)\n', (2311, 2318), False, 'from airbus_cobot_gui.res import R\n'), ((4790, 4803), 'airbus_cobot_gui.context.Context', 'Context', (['main'], {}), '(main)\n', (4797, 4803), False, 'from airbus_cobot_gui.context import Context\n'), ((4532, 4567), 'rqt_robot_monitor.util_robot_monitor.get_resource_name', 'util.get_resource_name', (['status.name'], {}), '(status.name)\n', (4554, 4567), True, 'import rqt_robot_monitor.util_robot_monitor as util\n')] |
KEZKA/YL-WEB-PROJECT | sanansaattaja/website/forms/comment_form.py | dcefb490bdd6a1ae8449b3cbd5d6b36219506e8f | from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField
from wtforms.validators import DataRequired
class CommentForm(FlaskForm):
text = TextAreaField("Text", validators=[DataRequired()])
submit = SubmitField('Publish')
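# Illustrative usage sketch (the route, template name and persistence call below are
# assumptions, not code from this repository):
#
#   @app.route('/comment', methods=['GET', 'POST'])
#   def publish_comment():
#       form = CommentForm()
#       if form.validate_on_submit():
#           save_comment(text=form.text.data)  # hypothetical helper
#           return redirect(url_for('index'))
#       return render_template('comment.html', form=form)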
| [((230, 252), 'wtforms.SubmitField', 'SubmitField', (['"""Publish"""'], {}), "('Publish')\n", (241, 252), False, 'from wtforms import SubmitField, TextAreaField\n'), ((200, 214), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (212, 214), False, 'from wtforms.validators import DataRequired\n')] |
dongzizhu/GraphGallery | graphgallery/functional/dense/onehot.py | c65eab42daeb52de5019609fe7b368e30863b4ae | import numpy as np
from ..transform import DenseTransform
from ..decorators import multiple
from ..transform import Transform
__all__ = ['onehot', 'Onehot']
@Transform.register()
class Onehot(DenseTransform):
def __init__(self, depth=None):
super().__init__()
self.collect(locals())
def __call__(self, *x):
return onehot(*x, depth=self.depth)
@multiple()
def onehot(label, depth=None):
"""Get the one-hot like label of nodes."""
label = np.asarray(label, dtype=np.int32)
depth = depth or label.max() + 1
if label.ndim == 1:
return np.eye(depth, dtype=label.dtype)[label]
else:
raise ValueError(f"label must be a 1D array, but got {label.ndim}D array.")
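# Minimal usage sketch (not part of the original module). Assuming the @multiple
# decorator passes a single argument straight through, onehot([0, 2, 1]) picks rows of a
# 3x3 identity matrix (depth inferred as label.max() + 1 = 3):
#
#   >>> onehot([0, 2, 1])
#   array([[1, 0, 0],
#          [0, 0, 1],
#          [0, 1, 0]], dtype=int32)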
| [((508, 541), 'numpy.asarray', 'np.asarray', (['label'], {'dtype': 'np.int32'}), '(label, dtype=np.int32)\n', (518, 541), True, 'import numpy as np\n'), ((621, 653), 'numpy.eye', 'np.eye', (['depth'], {'dtype': 'label.dtype'}), '(depth, dtype=label.dtype)\n', (627, 653), True, 'import numpy as np\n')] |
Bileonaire/api-ridemyway | models.py | af5a669c811356998e1935ace555ba955de1e8d0 | """Handles data storage for Users, rides and requests
"""
# pylint: disable=E1101
import datetime
from flask import make_response, jsonify, current_app
from werkzeug.security import generate_password_hash
import psycopg2
import config
from databasesetup import db
class User():
"""Contains user columns and methods to add, update and delete a user"""
def __init__(self, username, email, password, admin):
self.username = username
self.email = email
self.password = generate_password_hash(password, method='sha256')
if admin == True:
self.admin = '1'
else:
self.admin = '0'
new_user = "INSERT INTO users (username, email, password, admin) VALUES " \
"('" + self.username + "', '" + self.email + "', '" + self.password + "', '" + self.admin + "')"
db_cursor = db.con()
db_cursor.execute(new_user)
db.commit()
@staticmethod
def update_user(user_id, username, email, password, admin):
"""Updates user information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE users SET username=%s, email=%s, password=%s, admin=%s WHERE user_id=%s",
(username, email, password, admin, user_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def delete_user(user_id):
"""Deletes a user"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM users WHERE user_id=%s", (user_id,))
db.commit()
return make_response(jsonify({"message" : "user has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_user(user_id):
"""Gets a particular user"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users WHERE user_id=%s", (user_id,))
user = db_cursor.fetchall()
if user != []:
user=user[0]
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
return make_response(jsonify({"profile" : info}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_all_users():
"""Gets all users"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users")
users = db_cursor.fetchall()
all_users = []
for user in users:
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
all_users.append(info)
return make_response(jsonify({"All users" : all_users}), 200)
class Ride():
"""Contains ride columns and methods to add, update and delete a ride"""
def __init__(self, ride, driver_id, departuretime, numberplate, maximum, status):
self.ride = ride
self.driver_id = driver_id
self.departuretime = departuretime
self.numberplate = numberplate
self.maximum = maximum
self.status = status
new_ride = "INSERT INTO rides (ride, driver_id, departuretime, numberplate, maximum, status) VALUES " \
"('" + self.ride + "', '" + self.driver_id + "', '" + self.departuretime + "', '" + self.numberplate + "','" + self.maximum + "','" + self.status + "' )"
db_cursor = db.con()
db_cursor.execute(new_ride)
db.commit()
@classmethod
def create_ride(cls, ride, driver_id, departuretime, numberplate, maximum, status="pending"):
"""Creates a new ride"""
cls(ride, driver_id, departuretime, numberplate, maximum, status)
return make_response(jsonify({"message" : "ride has been successfully created"}), 201)
@staticmethod
def update_ride(ride_id, ride, driver_id, departuretime, numberplate,
maximum):
"""Updates ride information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE rides SET ride=%s, driver_id=%s, departuretime=%s, numberplate=%s, maximum=%s WHERE ride_id=%s",
(ride, driver_id, departuretime, numberplate, maximum, ride_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def start_ride(ride_id, driver_id):
"""starts a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride = ride[0]
if int(ride[2]) == driver_id:
db_cursor.execute("UPDATE rides SET status=%s WHERE ride_id=%s", ("given", ride_id,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("taken", ride_id, True,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("rejected", ride_id, False,))
db.commit()
return {"message" : "ride has started"}
return {"message" : "The ride you want to start is not your ride."}
return {"message" : "ride does not exist"}
@staticmethod
def delete_ride(ride_id):
"""Deletes a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
for ride in rides:
if ride[0] == ride_id:
db_cursor.execute("DELETE FROM rides WHERE ride_id=%s", (ride_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_ride(ride_id):
"""Gets a particular ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride=ride[0]
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
return make_response(jsonify({"ride" : info}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_rides():
"""Gets all rides"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
all_rides = []
for ride in rides:
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
all_rides.append(info)
return make_response(jsonify({"All rides" : all_rides}), 200)
class Request:
"""Contains menu columns and methods to add, update and delete a request"""
def __init__(self, ride_id, user_id, accepted, status):
self.ride_id = str(ride_id)
self.user_id = str(user_id)
self.accepted = accepted
self.status = status
new_request = "INSERT INTO request (ride_id, user_id, accepted, status) VALUES " \
"('" + self.ride_id + "', '" + self.user_id + "', '" + '0' + "', '" + self.status + "')"
db_cursor = db.con()
db_cursor.execute(new_request)
db.commit()
@classmethod
def request_ride(cls, ride_id, user_id, accepted=False, status="pending"):
"""Creates a new request"""
db_cursor = db.con()
db_cursor.execute("SELECT status FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchone()
if ride[0] == "pending":
cls(ride_id, user_id, accepted, status)
return make_response(jsonify({"message" : "request has been successfully sent for approval"}), 201)
return make_response(jsonify({"message" : "ride is already given"}), 400)
@staticmethod
def delete_request(request_id):
"""Deletes a request"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM request WHERE request_id=%s", (request_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def accept_request(request_id):
"""Accepts request"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE request SET accepted=%s WHERE request_id=%s", (True, request_id))
db.commit()
return make_response(jsonify({"message" : "request has been successfully accepted"}), 200)
except KeyError:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def get_requests(request_id):
"""Gets a particular request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
if request != None:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
return make_response(jsonify({"request" : info}), 200)
return make_response(jsonify({"message" : "request does not exists"}), 404)
@staticmethod
def get_particular_riderequests(ride_id):
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE ride_id=%s", (ride_id,))
requests = db_cursor.fetchall()
if requests != []:
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_requests():
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request")
requests = db_cursor.fetchall()
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
class Relation:
"""Contains method to get driver_id and maximum from a requested ride"""
@staticmethod
def get_driver_id(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
ride_id = str(request[2])
db_cursor.execute("SELECT driver_id FROM rides WHERE ride_id=%s", (ride_id,))
driver_id = db_cursor.fetchone()
if driver_id == None:
return make_response(jsonify({"message" : "ride does not exists"}), 404)
driver_id = driver_id[0]
return int(driver_id)
@staticmethod
def get_maximum(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (str(request_id),))
request = db_cursor.fetchone()
db_cursor.execute("SELECT maximum FROM rides WHERE ride_id=%s", (request[2],))
maximum = db_cursor.fetchone()
maximum = maximum[0]
return maximum
| [((502, 551), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {'method': '"""sha256"""'}), "(password, method='sha256')\n", (524, 551), False, 'from werkzeug.security import generate_password_hash\n'), ((872, 880), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (878, 880), False, 'from databasesetup import db\n'), ((925, 936), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (934, 936), False, 'from databasesetup import db\n'), ((2063, 2071), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (2069, 2071), False, 'from databasesetup import db\n'), ((2629, 2637), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (2635, 2637), False, 'from databasesetup import db\n'), ((3722, 3730), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (3728, 3730), False, 'from databasesetup import db\n'), ((3775, 3786), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (3784, 3786), False, 'from databasesetup import db\n'), ((4879, 4887), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (4885, 4887), False, 'from databasesetup import db\n'), ((5768, 5776), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (5774, 5776), False, 'from databasesetup import db\n'), ((6326, 6334), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (6332, 6334), False, 'from databasesetup import db\n'), ((7058, 7066), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (7064, 7066), False, 'from databasesetup import db\n'), ((8135, 8143), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (8141, 8143), False, 'from databasesetup import db\n'), ((8191, 8202), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (8200, 8202), False, 'from databasesetup import db\n'), ((8356, 8364), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (8362, 8364), False, 'from databasesetup import db\n'), ((9854, 9862), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (9860, 9862), False, 'from databasesetup import db\n'), ((10500, 10508), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (10506, 10508), False, 'from databasesetup import db\n'), ((11280, 11288), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (11286, 11288), False, 'from databasesetup import db\n'), ((11992, 12000), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (11998, 12000), False, 'from databasesetup import db\n'), ((12574, 12582), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (12580, 12582), False, 'from databasesetup import db\n'), ((1111, 1119), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (1117, 1119), False, 'from databasesetup import db\n'), ((1321, 1332), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (1330, 1332), False, 'from databasesetup import db\n'), ((1647, 1655), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (1653, 1655), False, 'from databasesetup import db\n'), ((1748, 1759), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (1757, 1759), False, 'from databasesetup import db\n'), ((2484, 2528), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exists'}"], {}), "({'message': 'user does not exists'})\n", (2491, 2528), False, 'from flask import make_response, jsonify, current_app\n'), ((2992, 3025), 'flask.jsonify', 'jsonify', (["{'All users': all_users}"], {}), "({'All users': all_users})\n", (2999, 3025), False, 'from flask import make_response, jsonify, current_app\n'), ((4040, 4098), 'flask.jsonify', 'jsonify', (["{'message': 'ride has been successfully created'}"], {}), "({'message': 'ride has been successfully created'})\n", 
(4047, 4098), False, 'from flask import make_response, jsonify, current_app\n'), ((4305, 4313), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (4311, 4313), False, 'from databasesetup import db\n'), ((4560, 4571), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (4569, 4571), False, 'from databasesetup import db\n'), ((6171, 6215), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exists'}"], {}), "({'message': 'user does not exists'})\n", (6178, 6215), False, 'from flask import make_response, jsonify, current_app\n'), ((6905, 6949), 'flask.jsonify', 'jsonify', (["{'message': 'ride does not exists'}"], {}), "({'message': 'ride does not exists'})\n", (6912, 6949), False, 'from flask import make_response, jsonify, current_app\n'), ((7581, 7614), 'flask.jsonify', 'jsonify', (["{'All rides': all_rides}"], {}), "({'All rides': all_rides})\n", (7588, 7614), False, 'from flask import make_response, jsonify, current_app\n'), ((8710, 8755), 'flask.jsonify', 'jsonify', (["{'message': 'ride is already given'}"], {}), "({'message': 'ride is already given'})\n", (8717, 8755), False, 'from flask import make_response, jsonify, current_app\n'), ((8889, 8897), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (8895, 8897), False, 'from databasesetup import db\n'), ((8998, 9009), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (9007, 9009), False, 'from databasesetup import db\n'), ((9362, 9370), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (9368, 9370), False, 'from databasesetup import db\n'), ((9487, 9498), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (9496, 9498), False, 'from databasesetup import db\n'), ((10360, 10407), 'flask.jsonify', 'jsonify', (["{'message': 'request does not exists'}"], {}), "({'message': 'request does not exists'})\n", (10367, 10407), False, 'from flask import make_response, jsonify, current_app\n'), ((11130, 11174), 'flask.jsonify', 'jsonify', (["{'message': 'ride does not exists'}"], {}), "({'message': 'ride does not exists'})\n", (11137, 11174), False, 'from flask import make_response, jsonify, current_app\n'), ((11744, 11785), 'flask.jsonify', 'jsonify', (["{'ride_requests': ride_requests}"], {}), "({'ride_requests': ride_requests})\n", (11751, 11785), False, 'from flask import make_response, jsonify, current_app\n'), ((1366, 1424), 'flask.jsonify', 'jsonify', (["{'message': 'user has been successfully updated'}"], {}), "({'message': 'user has been successfully updated'})\n", (1373, 1424), False, 'from flask import make_response, jsonify, current_app\n'), ((1793, 1851), 'flask.jsonify', 'jsonify', (["{'message': 'user has been successfully deleted'}"], {}), "({'message': 'user has been successfully deleted'})\n", (1800, 1851), False, 'from flask import make_response, jsonify, current_app\n'), ((2421, 2447), 'flask.jsonify', 'jsonify', (["{'profile': info}"], {}), "({'profile': info})\n", (2428, 2447), False, 'from flask import make_response, jsonify, current_app\n'), ((4605, 4663), 'flask.jsonify', 'jsonify', (["{'message': 'user has been successfully updated'}"], {}), "({'message': 'user has been successfully updated'})\n", (4612, 4663), False, 'from flask import make_response, jsonify, current_app\n'), ((5468, 5479), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (5477, 5479), False, 'from databasesetup import db\n'), ((6026, 6037), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (6035, 6037), False, 'from databasesetup import db\n'), ((6845, 6868), 'flask.jsonify', 'jsonify', (["{'ride': info}"], 
{}), "({'ride': info})\n", (6852, 6868), False, 'from flask import make_response, jsonify, current_app\n'), ((8602, 8673), 'flask.jsonify', 'jsonify', (["{'message': 'request has been successfully sent for approval'}"], {}), "({'message': 'request has been successfully sent for approval'})\n", (8609, 8673), False, 'from flask import make_response, jsonify, current_app\n'), ((9044, 9102), 'flask.jsonify', 'jsonify', (["{'message': 'ride has been successfully deleted'}"], {}), "({'message': 'ride has been successfully deleted'})\n", (9051, 9102), False, 'from flask import make_response, jsonify, current_app\n'), ((9532, 9594), 'flask.jsonify', 'jsonify', (["{'message': 'request has been successfully accepted'}"], {}), "({'message': 'request has been successfully accepted'})\n", (9539, 9594), False, 'from flask import make_response, jsonify, current_app\n'), ((10297, 10323), 'flask.jsonify', 'jsonify', (["{'request': info}"], {}), "({'request': info})\n", (10304, 10323), False, 'from flask import make_response, jsonify, current_app\n'), ((11052, 11093), 'flask.jsonify', 'jsonify', (["{'ride_requests': ride_requests}"], {}), "({'ride_requests': ride_requests})\n", (11059, 11093), False, 'from flask import make_response, jsonify, current_app\n'), ((12351, 12395), 'flask.jsonify', 'jsonify', (["{'message': 'ride does not exists'}"], {}), "({'message': 'ride does not exists'})\n", (12358, 12395), False, 'from flask import make_response, jsonify, current_app\n'), ((1481, 1524), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exist'}"], {}), "({'message': 'user does not exist'})\n", (1488, 1524), False, 'from flask import make_response, jsonify, current_app\n'), ((1908, 1952), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exists'}"], {}), "({'message': 'user does not exists'})\n", (1915, 1952), False, 'from flask import make_response, jsonify, current_app\n'), ((4720, 4763), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exist'}"], {}), "({'message': 'user does not exist'})\n", (4727, 4763), False, 'from flask import make_response, jsonify, current_app\n'), ((6076, 6134), 'flask.jsonify', 'jsonify', (["{'message': 'ride has been successfully deleted'}"], {}), "({'message': 'ride has been successfully deleted'})\n", (6083, 6134), False, 'from flask import make_response, jsonify, current_app\n'), ((9159, 9231), 'flask.jsonify', 'jsonify', (["{'message': 'the specified request does not exist in requests'}"], {}), "({'message': 'the specified request does not exist in requests'})\n", (9166, 9231), False, 'from flask import make_response, jsonify, current_app\n'), ((9660, 9732), 'flask.jsonify', 'jsonify', (["{'message': 'the specified request does not exist in requests'}"], {}), "({'message': 'the specified request does not exist in requests'})\n", (9667, 9732), False, 'from flask import make_response, jsonify, current_app\n')] |
herrywen-nanj/51reboot | lesson06/liqi/test.py | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | import configparser
'''
config = configparser.ConfigParser()
config.read('db.ini')
print(config.sections())
print(dict(config['mysqld'])['symbolic-links'])
'''
def ReadConfig(filename, section, key=None):
print(filename)
config = configparser.ConfigParser()
config.read(filename)
print(config.sections())
if not config.sections():
return "config init is empty", False
if key:
if section in config.sections():
return dict(config[section])[key], True
else:
return '', False
else:
return dict(config[section]), True
result, ok = ReadConfig('db.ini', 'mysqld', 'socket')
print(ok)
print(result)
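# Hypothetical db.ini matching the calls above (the section and key names appear in this
# file; the socket path is a placeholder):
#
#   [mysqld]
#   socket = /var/lib/mysql/mysql.sock
#   symbolic-links = 0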
if __name__ == '__main__':
ReadConfig('db.ini','mysqld','socket') | [((242, 269), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (267, 269), False, 'import configparser\n')] |
xUndero/noc | core/forms.py | 9fb34627721149fcf7064860bd63887e38849131 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Forms wrapper
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from django import forms
from django.utils.encoding import force_unicode
from django.utils.html import escape
class NOCBoundField(forms.forms.BoundField):
"""
Bound field with django-admin like label-tag
"""
def __init__(self, *args, **kwargs):
super(NOCBoundField, self).__init__(*args, **kwargs)
self.is_checkbox = isinstance(self.field.widget, forms.CheckboxInput)
def label_tag(self, contents=None, attrs=None):
if not contents:
contents = force_unicode(
escape(self.field.label if self.field.label else self.name)
) + (":" if not self.is_checkbox else "")
classes = []
if self.is_checkbox:
classes += ["vCheckboxLabel"]
if self.field.required:
classes += ["required"]
if classes:
attrs = attrs.copy() if attrs else {}
attrs["class"] = " ".join(classes)
return super(NOCBoundField, self).label_tag(contents=contents, attrs=attrs)
class NOCForm(forms.Form):
"""
Form wrapper returning NOCBoundField items
"""
class Media(object):
css = {"all": ["/ui/pkg/django-media/admin/css/forms.css"]}
def __init__(self, *args, **kwargs):
super(NOCForm, self).__init__(*args, **kwargs)
self.disabled_fields = set()
def disable_field(self, name):
self.disabled_fields.add(name)
def __iter__(self):
for name, field in six.iteritems(self.fields):
if name not in self.disabled_fields:
yield NOCBoundField(self, field, name)
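# Illustrative usage sketch (the concrete form below is hypothetical, not part of NOC):
# subclasses pick up the admin CSS via Media and can hide fields per instance.
#
#   class ManagedObjectForm(NOCForm):
#       name = forms.CharField(label="Name", required=True)
#       description = forms.CharField(label="Description", required=False)
#
#   form = ManagedObjectForm()
#   form.disable_field("description")  # skipped when the form is iterated/rendered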
| [((1815, 1841), 'six.iteritems', 'six.iteritems', (['self.fields'], {}), '(self.fields)\n', (1828, 1841), False, 'import six\n'), ((893, 952), 'django.utils.html.escape', 'escape', (['(self.field.label if self.field.label else self.name)'], {}), '(self.field.label if self.field.label else self.name)\n', (899, 952), False, 'from django.utils.html import escape\n')] |
Prescrypto/ErsteOps | ersteops/unit/views.py | 0b744173fb4f500003c96c4dcb26fb67d6eaa5ec | import json
from django.shortcuts import get_object_or_404
from django.core import serializers
from django.http import HttpResponse
from .models import Unit
from .utils import UNIT_LIST_FIELD
BAD_REQUEST = HttpResponse(json.dumps({'error': 'Bad Request'}), status=400, content_type='application/json')
def unit_json_list(request):
''' List Json View for local available units '''
if request.is_ajax():
units = Unit.objects.available_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
_raw_data = json.loads(data)
for unit in _raw_data:
if unit['fields']['is_alliance']:
unit['fields'].update({'identifier': '{}{}'.format(unit['fields']['identifier'],' (Alianza)')})
else:
continue
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def detail_unit_json(request, id_unit):
''' Detail view of unit '''
if request.is_ajax():
unit = Unit.objects.filter(pk=id_unit)
if len(unit) == 0:
return HttpResponse(json.dumps({'error': 'Unidad no encontrada'}), status=404, content_type='application/json')
data = serializers.serialize('json', unit, fields=UNIT_LIST_FIELD)
# Add crew list
_raw_data = json.loads(data)
_raw_data[0]['fields'].update({
'crew_list' : unit.first().get_crew_list
})
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def alliance_unit_json_list(request):
''' List Json View for alliance available units '''
if request.is_ajax():
units = Unit.objects.available_alliance_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
return HttpResponse(data, content_type='application/json', status=200)
else:
return BAD_REQUEST
| [((220, 256), 'json.dumps', 'json.dumps', (["{'error': 'Bad Request'}"], {}), "({'error': 'Bad Request'})\n", (230, 256), False, 'import json\n'), ((561, 577), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (571, 577), False, 'import json\n'), ((1257, 1316), 'django.core.serializers.serialize', 'serializers.serialize', (['"""json"""', 'unit'], {'fields': 'UNIT_LIST_FIELD'}), "('json', unit, fields=UNIT_LIST_FIELD)\n", (1278, 1316), False, 'from django.core import serializers\n'), ((1361, 1377), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1371, 1377), False, 'import json\n'), ((1889, 1952), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""', 'status': '(200)'}), "(data, content_type='application/json', status=200)\n", (1901, 1952), False, 'from django.http import HttpResponse\n'), ((838, 859), 'json.dumps', 'json.dumps', (['_raw_data'], {}), '(_raw_data)\n', (848, 859), False, 'import json\n'), ((1510, 1531), 'json.dumps', 'json.dumps', (['_raw_data'], {}), '(_raw_data)\n', (1520, 1531), False, 'import json\n'), ((1149, 1194), 'json.dumps', 'json.dumps', (["{'error': 'Unidad no encontrada'}"], {}), "({'error': 'Unidad no encontrada'})\n", (1159, 1194), False, 'import json\n')] |
gabrielviticov/exercicios-python | olamundo.py/exercicios_refeitos/ex029.py | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | '''
ex029: Write a program that reads a car's speed. If it exceeds 80 km/h, show a message saying the driver has been fined. The fine is R$ 7.00 for each km over the limit.
'''
from colorise import set_color, reset_color
cor = {
'limpa':'\033[m',
'white':'\033[1;97m'
}
set_color(fg='green')
velocidade_carro = int(input('Enter the car speed in km/h: '))
if velocidade_carro > 80:
multa = (velocidade_carro - 80) * 7.00
    print('\nFINED! YOU EXCEEDED THE ALLOWED LIMIT. YOU WILL HAVE TO PAY ', end='')
reset_color()
print('{}R${:.2f}{}'.format(cor['white'], multa, cor['limpa']))
else:
set_color(fg='green')
    print('\nKEEP IT UP. DRIVE SAFELY!')
| [((306, 327), 'colorise.set_color', 'set_color', ([], {'fg': '"""green"""'}), "(fg='green')\n", (315, 327), False, 'from colorise import set_color, reset_color\n'), ((561, 574), 'colorise.reset_color', 'reset_color', ([], {}), '()\n', (572, 574), False, 'from colorise import set_color, reset_color\n'), ((653, 674), 'colorise.set_color', 'set_color', ([], {'fg': '"""green"""'}), "(fg='green')\n", (662, 674), False, 'from colorise import set_color, reset_color\n')] |
felko/fruit | fruit/mixin/drawable.py | 4768fd333ac3b7c0bd6d339304b23e20e312d2d1 | #!/usr/bin/env python3.4
# coding: utf-8
class Drawable:
"""
Base class for drawable objects.
"""
def draw(self):
"""
Returns a Surface object.
"""
raise NotImplementedError(
"Method `draw` is not implemented for {}".format(type(self)))
| [] |
uts-cic/ontask_b | src/action/tests/test_logic.py | b313e2352c77b40655f41dd5acba3a7635e6f3b3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
from django.conf import settings
from django.shortcuts import reverse
from django.core.management import call_command
import test
from dataops import pandas_db
from workflow.models import Workflow
class EmailActionTracking(test.OntaskTestCase):
fixtures = ['simple_email_action']
filename = os.path.join(
settings.BASE_DIR(),
'action',
'fixtures',
'simple_email_action_df.sql'
)
trck_tokens = [
"eyJhY3Rpb24iOjIsInRvIjoic3R1ZGVudDFAYm9ndXMuY29tIiwiY29sdW1uX2RzdCI6IkVtYWlsUmVhZF8xIiwic2VuZGVyIjoiaWRlc2lnbmVyMUBib2d1cy5jb20iLCJjb2x1bW5fdG8iOiJlbWFpbCJ9:1eBtw5:MwH1axNDQq9HpgcP6jRvp7cAFmI",
"eyJhY3Rpb24iOjIsInRvIjoic3R1ZGVudDJAYm9ndXMuY29tIiwiY29sdW1uX2RzdCI6IkVtYWlsUmVhZF8xIiwic2VuZGVyIjoiaWRlc2lnbmVyMUBib2d1cy5jb20iLCJjb2x1bW5fdG8iOiJlbWFpbCJ9:1eBtw5:FFS1EXjdgJjc37ZVOcW22aIegR4",
"eyJhY3Rpb24iOjIsInRvIjoic3R1ZGVudDNAYm9ndXMuY29tIiwiY29sdW1uX2RzdCI6IkVtYWlsUmVhZF8xIiwic2VuZGVyIjoiaWRlc2lnbmVyMUBib2d1cy5jb20iLCJjb2x1bW5fdG8iOiJlbWFpbCJ9:1eBtw5:V0KhNWbcY3YPTfJXRagPaeJae4M"
]
wflow_name = 'wflow1'
wflow_desc = 'description text for workflow 1'
wflow_empty = 'The workflow does not have data'
@classmethod
def setUpClass(cls):
super(EmailActionTracking, cls).setUpClass()
pandas_db.pg_restore_table(cls.filename)
def tearDown(self):
pandas_db.delete_all_tables()
super(EmailActionTracking, self).tearDown()
# Test that tracking hits are properly stored.
def test_tracking(self):
# Repeat the checks two times to test if they are accumulating
for idx in range(1, 3):
# Iterate over the tracking items
for trck in self.trck_tokens:
self.client.get(reverse('trck') + '?v=' + trck)
# Get the workflow and the data frame
workflow = Workflow.objects.get(name=self.wflow_name)
df = pandas_db.load_from_db(workflow.id)
# Check that the results have been updated in the DB (to 1)
for uemail in [x[1] for x in test.user_info
if x[1].startswith('student')]:
self.assertEqual(
int(df.loc[df['email'] == uemail, 'EmailRead_1'].values[0]),
idx
)
| [((416, 435), 'django.conf.settings.BASE_DIR', 'settings.BASE_DIR', ([], {}), '()\n', (433, 435), False, 'from django.conf import settings\n'), ((1387, 1427), 'dataops.pandas_db.pg_restore_table', 'pandas_db.pg_restore_table', (['cls.filename'], {}), '(cls.filename)\n', (1413, 1427), False, 'from dataops import pandas_db\n'), ((1461, 1490), 'dataops.pandas_db.delete_all_tables', 'pandas_db.delete_all_tables', ([], {}), '()\n', (1488, 1490), False, 'from dataops import pandas_db\n'), ((1953, 1995), 'workflow.models.Workflow.objects.get', 'Workflow.objects.get', ([], {'name': 'self.wflow_name'}), '(name=self.wflow_name)\n', (1973, 1995), False, 'from workflow.models import Workflow\n'), ((2013, 2048), 'dataops.pandas_db.load_from_db', 'pandas_db.load_from_db', (['workflow.id'], {}), '(workflow.id)\n', (2035, 2048), False, 'from dataops import pandas_db\n'), ((1847, 1862), 'django.shortcuts.reverse', 'reverse', (['"""trck"""'], {}), "('trck')\n", (1854, 1862), False, 'from django.shortcuts import reverse\n')] |
izm51/obniz-python-sdk | obniz/parts/Moving/StepperMotor/__init__.py | 40a738b5fe2c0a415cdc09f46d28c143982bfb07 | from attrdict import AttrDefault
import asyncio
class StepperMotor:
def __init__(self):
self.keys = ['a', 'b', 'aa', 'bb', 'common']
self.required_keys = ['a', 'b', 'aa', 'bb']
self._step_instructions = AttrDefault(bool,
{
'1': [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]],
'2': [[0, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0]],
'1-2': [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0], [0, 1, 1, 0]]
}
)
self.type = None
self.current_step = 0
self._step_type = '2'
self.frequency = 100
self.rotation_step_count = 100
self.milli_meter_step_count = 1
@staticmethod
def info():
return AttrDefault(bool, {'name': 'StepperMotor'})
def wired(self, obniz):
self.obniz = obniz
if obniz.is_valid_io(*[self.params.common]):
self.common = obniz.get_io(*[self.params.common])
self.common.output(*[True])
self.type = 'unipolar'
else:
self.type = 'bipolar'
self.ios = []
self.ios.append(*[obniz.get_io(*[self.params.a])])
self.ios.append(*[obniz.get_io(*[self.params.b])])
self.ios.append(*[obniz.get_io(*[self.params.aa])])
self.ios.append(*[obniz.get_io(*[self.params.bb])])
async def step_wait(self, step_count):
        if type(step_count) not in (int, float):
raise Exception('must provide number')
step_count = round(*[step_count])
if step_count == 0:
return
step_count_abs = abs(*[step_count])
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
array = []
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
if step_count > 0:
for i in range(0, len(instructions), 1):
current_phase += 1
if current_phase >= instruction_length:
current_phase = 0
array.append(*[instructions[current_phase]])
else:
for i in range(0, len(instructions), 1):
current_phase -= 1
if current_phase < 0:
current_phase = (instruction_length - 1)
array.append(*[instructions[current_phase]])
msec = 1000 / self.frequency
msec = int(*[msec])
if msec < 1:
msec = 1
def anonymous0(index):
instruction = array[index]
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instruction[i]])
state = anonymous0
states = []
for i in range(0, instruction_length, 1):
states.append(*[AttrDefault(bool, {'duration': msec, 'state': state})])
await self.obniz.io.repeat_wait(*[states, step_count_abs])
self.current_step += step_count
async def step_to_wait(self, destination):
mustmove = (destination - self.current_step)
await self.step_wait(*[mustmove])
async def hold_wait(self):
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instructions[current_phase][i]])
await self.obniz.ping_wait(*[])
async def free_wait(self):
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[True])
await self.obniz.ping_wait(*[])
def step_type(self, step_type):
new_type = self._step_instructions[step_type]
if not new_type:
raise Exception('unknown step type ' + str(step_type))
self._step_type = step_type
def speed(self, step_per_sec):
self.frequency = step_per_sec
def current_rotation(self):
return self.current_step / self.rotation_step_count * 360
def current_angle(self):
angle = int(*[self.current_rotation(*[]) * 1000]) % 360000 / 1000
if angle < 0:
angle = (360 - angle)
return angle
async def rotate_wait(self, rotation):
rotation /= 360
needed = rotation * self.rotation_step_count
await self.step_wait(*[needed])
async def rotate_to_wait(self, angle):
needed = (angle - self.current_angle(*[]))
if abs(*[needed]) > 180:
needed = (needed - 360) if needed > 0 else (360 + needed)
needed = needed / 360 * self.rotation_step_count
await self.step_wait(*[needed])
def current_distance(self):
return self.current_step / self.milli_meter_step_count
async def move_wait(self, distance):
needed = distance * self.milli_meter_step_count
await self.step_wait(*[needed])
async def move_to_wait(self, destination):
needed = (destination - self.current_distance(*[])) * self.milli_meter_step_count
await self.step_wait(*[needed])
def _get_step_instructions(self):
return self._step_instructions[self._step_type] | [((233, 512), 'attrdict.AttrDefault', 'AttrDefault', (['bool', "{'1': [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]], '2': [[0, 0,\n 1, 1], [1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0]], '1-2': [[0, 1, 1, 1],\n [0, 0, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 1], [1, 1, 0, 0], [\n 1, 1, 1, 0], [0, 1, 1, 0]]}"], {}), "(bool, {'1': [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1,\n 0]], '2': [[0, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0]],\n '1-2': [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1], [1, 1, \n 0, 1], [1, 1, 0, 0], [1, 1, 1, 0], [0, 1, 1, 0]]})\n", (244, 512), False, 'from attrdict import AttrDefault\n'), ((826, 869), 'attrdict.AttrDefault', 'AttrDefault', (['bool', "{'name': 'StepperMotor'}"], {}), "(bool, {'name': 'StepperMotor'})\n", (837, 869), False, 'from attrdict import AttrDefault\n'), ((2924, 2977), 'attrdict.AttrDefault', 'AttrDefault', (['bool', "{'duration': msec, 'state': state}"], {}), "(bool, {'duration': msec, 'state': state})\n", (2935, 2977), False, 'from attrdict import AttrDefault\n')] |
1212091/python-learning | basic_assignment/39.py | 30fad66460daf73fd3961cf667ee25b91dee923d | input_num = input()  # Python 3: input() replaces Python 2's raw_input()
print(str(eval(input_num)))
| [] |
SimonGreenhill/Language5 | website/website/apps/entry/admin.py | c59f502dda7be27fc338f0338cc3b03e63bad9c8 | from django.contrib import admin
from django.db.models import Count
from reversion.admin import VersionAdmin
from website.apps.lexicon.models import Lexicon
from website.apps.entry.models import Task, TaskLog, Wordlist, WordlistMember
from website.apps.core.admin import TrackedModelAdmin
class CheckpointListFilter(admin.SimpleListFilter):
title = 'Has Checkpoint'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'has_checkpoint'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('yes', 'Has Checkpoint'),
('no', 'No Checkpoint'),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'yes':
return queryset.filter(checkpoint__isnull=False).exclude(checkpoint__iexact='')
if self.value() == 'no':
return queryset.filter(checkpoint__isnull=True).filter(checkpoint__exact='')
class TaskAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_display = ('id', 'name', 'editor', 'records', 'completable', 'done')
list_filter = ('editor', 'done', 'completable', CheckpointListFilter, 'source', 'language', 'view')
ordering = ('-id',)
exclude = ('lexicon',)
list_select_related = True
class TaskLogAdmin(admin.ModelAdmin):
date_hierarchy = 'time'
list_display = ('person', 'task_id', 'time', 'page', 'message')
list_filter = ('person', 'page', )
ordering = ('-time',)
list_select_related = True
def task_id(self, instance):
return instance.task_id
class WordlistMembersInline(admin.TabularInline):
model = Wordlist.words.through
extra = 0 # don't add anything new unless explicitly told to.
class TaskWordlistAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_display = ('id', 'name', 'words_count')
ordering = ('name',)
filter_horizontal = ('words',)
inlines = [WordlistMembersInline,]
def get_queryset(self, request):
return Wordlist.objects.annotate(words_count=Count("words"))
def words_count(self, inst):
return inst.words_count
words_count.admin_order_field = 'words_count'
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskLog, TaskLogAdmin)
admin.site.register(Wordlist, TaskWordlistAdmin)
| [((2677, 2713), 'django.contrib.admin.site.register', 'admin.site.register', (['Task', 'TaskAdmin'], {}), '(Task, TaskAdmin)\n', (2696, 2713), False, 'from django.contrib import admin\n'), ((2714, 2756), 'django.contrib.admin.site.register', 'admin.site.register', (['TaskLog', 'TaskLogAdmin'], {}), '(TaskLog, TaskLogAdmin)\n', (2733, 2756), False, 'from django.contrib import admin\n'), ((2757, 2805), 'django.contrib.admin.site.register', 'admin.site.register', (['Wordlist', 'TaskWordlistAdmin'], {}), '(Wordlist, TaskWordlistAdmin)\n', (2776, 2805), False, 'from django.contrib import admin\n'), ((2535, 2549), 'django.db.models.Count', 'Count', (['"""words"""'], {}), "('words')\n", (2540, 2549), False, 'from django.db.models import Count\n')] |
fest2bash/fest2bash | src/modules/python.py | 008282f67d4d4415c27b3b9b6162daf54f8d6028 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
sys.dont_write_bytecode = True
from pprint import pprint
from base import BaseFest2Bash
class Fest2Bash(BaseFest2Bash):
def __init__(self, manifest):
super(Fest2Bash, self).__init__(manifest)
def generate(self, *args, **kwargs):
return self.manifest
| [] |
paulculmsee/opennem | opennem/utils/scrapyd.py | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | #!/usr/bin/env python
"""
Scrapyd control methods
"""
import logging
from typing import Any, Dict, List
from urllib.parse import urljoin
from opennem.settings import settings
from opennem.utils.http import http
from opennem.utils.scrapy import get_spiders
logger = logging.getLogger("scrapyd.client")
def get_jobs() -> Dict[str, Any]:
job_url = urljoin(
settings.scrapyd_url,
"listjobs.json?project={}".format(settings.scrapyd_project_name),
)
jobs = http.get(job_url).json()
return jobs
def job_cancel(id: str) -> bool:
cancel_job_url = urljoin(settings.scrapyd_url, "cancel.json")
r = http.post(cancel_job_url, data={"project": "opennem", "job": id})
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Cancelled job: {}".format(resp["jobid"]))
return True
def job_schedule(spider_name: str) -> bool:
schedule_url = urljoin(settings.scrapyd_url, "schedule.json")
try:
r = http.post(schedule_url, data={"project": "opennem", "spider": spider_name})
except Exception as e:
logger.error("Error getting {}: {}".format(schedule_url, e))
return False
if not r.ok:
logger.error("Error: {}".format(r.status_code))
return False
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Queued spider {} with task: {}".format(spider_name, resp["jobid"]))
return True
def job_cancel_state(state: str = "pending") -> bool:
jobs = get_jobs()
if state not in jobs:
logger.info("Invalid state or no jobs in state {}".format(state))
return False
pending_jobs = jobs[state]
for job in pending_jobs:
job_id = job["id"]
logger.info("Cancelling {}".format(job_id))
job_cancel(job_id)
return True
def job_schedule_all(matches: str = None) -> List[str]:
spiders = get_spiders()
spider_scheduled = []
for s in spiders:
if matches and matches != s:
continue
job_schedule(s)
spider_scheduled.append(s)
return spider_scheduled
| [((267, 302), 'logging.getLogger', 'logging.getLogger', (['"""scrapyd.client"""'], {}), "('scrapyd.client')\n", (284, 302), False, 'import logging\n'), ((582, 626), 'urllib.parse.urljoin', 'urljoin', (['settings.scrapyd_url', '"""cancel.json"""'], {}), "(settings.scrapyd_url, 'cancel.json')\n", (589, 626), False, 'from urllib.parse import urljoin\n'), ((636, 701), 'opennem.utils.http.http.post', 'http.post', (['cancel_job_url'], {'data': "{'project': 'opennem', 'job': id}"}), "(cancel_job_url, data={'project': 'opennem', 'job': id})\n", (645, 701), False, 'from opennem.utils.http import http\n'), ((979, 1025), 'urllib.parse.urljoin', 'urljoin', (['settings.scrapyd_url', '"""schedule.json"""'], {}), "(settings.scrapyd_url, 'schedule.json')\n", (986, 1025), False, 'from urllib.parse import urljoin\n'), ((2031, 2044), 'opennem.utils.scrapy.get_spiders', 'get_spiders', ([], {}), '()\n', (2042, 2044), False, 'from opennem.utils.scrapy import get_spiders\n'), ((1048, 1123), 'opennem.utils.http.http.post', 'http.post', (['schedule_url'], {'data': "{'project': 'opennem', 'spider': spider_name}"}), "(schedule_url, data={'project': 'opennem', 'spider': spider_name})\n", (1057, 1123), False, 'from opennem.utils.http import http\n'), ((484, 501), 'opennem.utils.http.http.get', 'http.get', (['job_url'], {}), '(job_url)\n', (492, 501), False, 'from opennem.utils.http import http\n')] |
Haiiliin/PyAbaqus | src/abaqus/Material/Elastic/Linear/Elastic.py | f20db6ebea19b73059fe875a53be370253381078 | from abaqusConstants import *
from .FailStrain import FailStrain
from .FailStress import FailStress
class Elastic:
"""The Elastic object specifies elastic material properties.
Notes
-----
This object can be accessed by:
.. code-block:: python
import material
mdb.models[name].materials[name].elastic
import odbMaterial
session.odbs[name].materials[name].elastic
The table data for this object are:
- If *type*=ISOTROPIC, the table data specify the following:
- The Young's modulus, E.
- The Poisson's ratio, v.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=SHEAR, the table data specify the following:
- The shear modulus,G.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENGINEERING_CONSTANTS, the table data specify the following:
- E1.
- E2.
- E3.
- v12.
- v13.
- v23.
- G12.
- G13.
- G23.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=LAMINA, the table data specify the following:
- E1.
- E2.
- v12.
- G12.
- G13. This shear modulus is needed to define transverse shear behavior in shells.
- G23. This shear modulus is needed to define transverse shear behavior in shells.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ORTHOTROPIC, the table data specify the following:
- D1111.
- D1122.
- D2222.
- D1133.
- D2233.
- D3333.
- D1212.
- D1313.
- D2323.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ANISOTROPIC, the table data specify the following:
- D1111.
- D1122.
- D2222.
- D1133.
- D2233.
- D3333.
- D1112.
- D2212.
- D3312.
- D1212.
- D1113.
- D2213.
- D3313.
- D1213.
- D1313.
- D1123.
- D2223.
- D3323.
- D1223.
- D1323.
- D2323.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=TRACTION, the table data specify the following:
- EE for warping elements; Enn for cohesive elements.
- G1 for warping elements; Ess for cohesive elements.
- G2 for warping elements; Ett for cohesive elements.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=BILAMINA, the table data specify the following:
- E1+.
- E2+.
- v12+.
- G12.
- E1-.
- E2-.
        - v12-.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=SHORT_FIBER, there is no table data.
The corresponding analysis keywords are:
- ELASTIC
"""
# A FailStress object.
failStress: FailStress = FailStress(((),))
# A FailStrain object.
failStrain: FailStrain = FailStrain(((),))
def __init__(self, table: tuple, type: SymbolicConstant = ISOTROPIC, noCompression: Boolean = OFF,
noTension: Boolean = OFF, temperatureDependency: Boolean = OFF, dependencies: int = 0,
moduli: SymbolicConstant = LONG_TERM):
"""This method creates an Elastic object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].materials[name].Elastic
session.odbs[name].materials[name].Elastic
Parameters
----------
table
A sequence of sequences of Floats specifying the items described below.
type
A SymbolicConstant specifying the type of elasticity data provided. Possible values are:
- ISOTROPIC
- ORTHOTROPIC
- ANISOTROPIC
- ENGINEERING_CONSTANTS
- LAMINA
- TRACTION
- COUPLED_TRACTION
- SHORT_FIBER
- SHEAR
- BILAMINA
The default value is ISOTROPIC.
noCompression
A Boolean specifying whether compressive stress is allowed. The default value is OFF.
noTension
A Boolean specifying whether tensile stress is allowed. The default value is OFF.
temperatureDependency
A Boolean specifying whether the data depend on temperature. The default value is OFF.
dependencies
An Int specifying the number of field variable dependencies. The default value is 0.
moduli
A SymbolicConstant specifying the time-dependence of the elastic material constants.
Possible values are INSTANTANEOUS and LONG_TERM. The default value is LONG_TERM.
Returns
-------
An Elastic object.
Raises
------
RangeError
"""
pass
def setValues(self):
"""This method modifies the Elastic object.
Raises
------
RangeError
"""
pass
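# Illustrative usage sketch (added; not part of the original stub). Following
# the ISOTROPIC table layout documented above, the first two entries are
# Young's modulus and Poisson's ratio; the model/material names and the
# steel-like constants below are placeholder assumptions.
#
#     mdb.models['Model-1'].materials['Steel'].Elastic(
#         type=ISOTROPIC, table=((210000.0, 0.3),))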
| [] |
UWSysLab/diamond | apps/pyscrabble/pyscrabble-hatchet/setup.py | 1beec323c084d9d477c770ca6b9625c8f5682a39 | # setup.py for pyscrabble
from distutils.core import setup
try:
import py2exe
HAS_PY2EXE = True
except ImportError:
HAS_PY2EXE = False
import glob
import os
import pkg_resources
import sys
from pyscrabble.constants import VERSION
from pyscrabble import util
from pyscrabble import dist
def fix_path(item):
if type(item) in (list, tuple):
if 'config' in item[0]:
return (item[0].replace('config', dist.get_app_data_dir()), item[1])
else:
return (item[0].replace('resources/', 'share/pyscrabble/'), item[1])
else:
return item
kwargs = {
'name': 'pyscrabble',
'version': VERSION,
'author': 'Kevin Conaway',
'author_email': '[email protected]',
'url': 'http://pyscrabble.sourceforge.net',
'data_files': dist.getDataFiles(),
'packages': ['pyscrabble', 'pyscrabble.command', 'pyscrabble.game', 'pyscrabble.gui', 'pyscrabble.net']
}
if HAS_PY2EXE and 'py2exe' in sys.argv:
#eggpacks = pkg_resources.require("nevow")
#for egg in eggpacks:
# if os.path.isdir(egg.location):
# sys.path.insert(0, egg.location)
try:
import modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com",p)
for extra in ["win32com.shell"]:
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
                modulefinder.AddPackagePath(extra, p)
except ImportError:
        print('import error')
kwargs['py_modules'] = ['pyscrabble-main', 'server_console', 'db_upgrade']
kwargs['options'] = {
"py2exe": {
"packages": "encodings, nevow",
"includes": "pango,atk,gobject,decimal,dumbdbm,dbhash,xml.sax.expatreader",
"dll_excludes": ["iconv.dll","intl.dll","libatk-1.0-0.dll",
"libgdk_pixbuf-2.0-0.dll","libgdk-win32-2.0-0.dll",
"libglib-2.0-0.dll","libgmodule-2.0-0.dll",
"libgobject-2.0-0.dll","libgthread-2.0-0.dll",
"libgtk-win32-2.0-0.dll","libpango-1.0-0.dll",
"libpangowin32-1.0-0.dll"],
}
}
kwargs['windows'] = [{
"script": "pyscrabble-main.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['console'] = [{
"script": "server_service.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}, {
"script": "server_console.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['service'] = ['server_service']
kwargs['data_files'] += [('.', ['CHANGELOG.txt'])]
kwargs['data_files'] += [('.', ['LICENSE.txt'])]
#for egg in eggpacks:
# kwargs['data_files'] += dist.getResourceDirs(egg.location, ensureLower=False, basePath=None, outdir='extra')
else:
kwargs['scripts'] = ['pyscrabble-main.py', 'server_console.py', 'db_upgrade.py']
kwargs['data_files'] = [fix_path(x) for x in kwargs['data_files']]
kwargs['cmdclass'] = {'install_lib': dist.InstallLib, 'install_scripts' : dist.InstallScripts}
setup(**kwargs) | [] |
liuxiaomiao123/NeuroMathAcademy | tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py | 16a7969604a300bf9fbb86f8a5b26050ebd14c65 | import numpy as np
import matplotlib.pyplot as plt

def integrate_exponential(a, x0, dt, T):
"""Compute solution of the differential equation xdot=a*x with
initial condition x0 for a duration T. Use time step dt for numerical
solution.
Args:
a (scalar): parameter of xdot (xdot=a*x)
x0 (scalar): initial condition (x at time 0)
dt (scalar): timestep of the simulation
T (scalar): total duration of the simulation
Returns:
ndarray, ndarray: `x` for all simulation steps and the time `t` at each step
"""
# Initialize variables
t = np.arange(0, T, dt)
x = np.zeros_like(t, dtype=complex)
x[0] = x0
# Step through system and integrate in time
for k in range(1, len(t)):
# for each point in time, compute xdot = a*x
xdot = (a*x[k-1])
# update x by adding xdot scaled by dt
x[k] = x[k-1] + xdot * dt
return x, t
# choose parameters
a = -0.5 # parameter in f(x)
T = 10 # total Time duration
dt = 0.001 # timestep of our simulation
x0 = 1. # initial condition of x at time 0
x, t = integrate_exponential(a, x0, dt, T)
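# Added sanity check (illustrative, not part of the original solution): for
# xdot = a*x the exact solution is x0 * exp(a*t), so the Euler estimate above
# should track it closely for a small dt such as 0.001.
x_exact = x0 * np.exp(a * t)
print("max abs error vs analytic solution:", np.max(np.abs(x.real - x_exact)))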
with plt.xkcd():
fig = plt.figure(figsize=(8, 6))
plt.plot(t, x.real)
plt.xlabel('Time (s)')
plt.ylabel('x') | [] |
SwaggerKhan/PatrolGis | PyTemp/gis/shapefile_to_geojson.py | 89b1a398ffd6171ac35ea9d023bce98a0fc7e930 | import json
import geojson
import geopandas as gpd
class SaveToGeoJSON:
__name_counter = 0
def file_name(self):
if self.__name_counter == 0:
self.__name_counter = 1
return "./out"+str(self.__name_counter)+".json"
elif self.__name_counter == 1:
self.__name_counter = 2
return "./out"+str(self.__name_counter)+".json"
else:
self.__name_counter = 0
print("Contact developer")
def save(self, name, file_save_name):
self.shape_file = gpd.read_file(name)
self.shape_file.to_file(file_save_name, driver="GeoJSON")
class MergeGeoJSON:
__files_merge_list = ['./out1.json', './out2.json']
__poly_geojson = list()
def save(self):
for i in self.__files_merge_list:
with open(i) as geojson_data:
self.__poly_geojson.append(json.load(geojson_data))
        merged = {'firstObj': self.__poly_geojson[1], 'secondObj': self.__poly_geojson[0]}
json.dumps(merged)
with open('Merged_out.json', 'w') as outfile:
json.dump(merged, outfile, indent=3)
outfile.close()
return True
| [((548, 567), 'geopandas.read_file', 'gpd.read_file', (['name'], {}), '(name)\n', (561, 567), True, 'import geopandas as gpd\n'), ((1015, 1033), 'json.dumps', 'json.dumps', (['merged'], {}), '(merged)\n', (1025, 1033), False, 'import json\n'), ((1101, 1137), 'json.dump', 'json.dump', (['merged', 'outfile'], {'indent': '(3)'}), '(merged, outfile, indent=3)\n', (1110, 1137), False, 'import json\n'), ((886, 909), 'json.load', 'json.load', (['geojson_data'], {}), '(geojson_data)\n', (895, 909), False, 'import json\n')] |
JoshuaMeyers/ssbio | ssbio/databases/pdbflex.py | 624618602437e2c2e4adf90962adcef3af2d5b40 | import requests
import ssbio.utils
import os.path as op
# #### PDB stats
# Request flexibility data about one particular PDB.
#
# http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A
#
# pdbID of structure you are interested in
# chainID of chain you are interested in
#
# [{"pdbID":"1a50",
# "chainID":"A",
# "parentClusterID":"4hn4A",
# "avgRMSD":"0.538",
# "maxRMSD":"2.616",
# "flexibilityLabel":"Low",
# "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...],
# "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}]
#
# Note: you can omit the chainID and PDBFlex will return information for all chains.
#
# #### RMSD profile
# Request RMSD array used for local flexibility plots
#
# http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# {"queryPDB":"1a50A",
# "clusterName":"4hn4A",
# "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"}
#
# #### PDB representatives
# Request representatives for a PDB's own cluster. Returns a list of chains that represent the most distinct structures in the cluster.
#
# http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# ["2trsA","3pr2A","1kfjA"]
def get_pdbflex_info(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_stats.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/PDBStats.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# TODO: will running with chain ID always return a single item list?
assert len(infolist) == 1
newdict = {}
for k, v in infolist[0].items():
if k == 'avgRMSD' and v:
newdict[k] = float(v)
elif k == 'maxRMSD' and v:
newdict[k] = float(v)
else:
newdict[k] = v
return newdict
def get_pdbflex_rmsd_profile(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_rmsdprofile.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/rmsdProfile.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infodict = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
infodict['profile'] = [float(x) for x in infodict['profile'].strip('[]').split(',')]
return infodict
def get_pdbflex_representatives(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_representatives.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/representatives.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# infolist = [str(x) for x in infolist.strip('[]').split(',')]
return infolist | [] |
manisharmagarg/qymatix | api/insights/insights/infrastructure/mysql/read/modify_notes.py | 0dc240970359429ae5105db79f9aebf1a99ba6fd | """
Modify Notes
"""
# pylint: disable=too-few-public-methods
from ...mysql.mysql_connection import MySqlConnection
from ...mysql.orm.autogen_entities import Task
class ModifyNotes(object):
"""
ModifyNotes responsible to update the record in db
"""
def __init__(self, db_name, notes_id, title=None, comment=None):
super(ModifyNotes, self).__init__()
self.data_db = 'data_{}'.format(db_name)
self.notes_id = notes_id
self.title = title
self.comment = comment
connection = MySqlConnection(self.data_db)
self.session = connection.session()
self.results = self.modify_notes()
def modify_notes(self):
"""
function: query to update the notes record
return: updated notes Id
"""
notes_obj = self.session.query(Task). \
filter_by(id=self.notes_id).first()
notes_obj.title = self.title
notes_obj.description = self.comment
self.session.add(notes_obj)
self.session.commit()
return notes_obj.id
| [] |
yetsun/hue | desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.type import namedval
class NamedValuesCaseBase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.e = namedval.NamedValues(('off', 0), ('on', 1))
def testDict(self):
assert set(self.e.items()) == set([('off', 0), ('on', 1)])
assert set(self.e.keys()) == set(['off', 'on'])
assert set(self.e) == set(['off', 'on'])
assert set(self.e.values()) == set([0, 1])
assert 'on' in self.e and 'off' in self.e and 'xxx' not in self.e
assert 0 in self.e and 1 in self.e and 2 not in self.e
def testInit(self):
assert namedval.NamedValues(off=0, on=1) == {'off': 0, 'on': 1}
assert namedval.NamedValues('off', 'on') == {'off': 0, 'on': 1}
assert namedval.NamedValues(('c', 0)) == {'c': 0}
assert namedval.NamedValues('a', 'b', ('c', 0), d=1) == {'c': 0, 'd': 1, 'a': 2, 'b': 3}
def testLen(self):
assert len(self.e) == 2
assert len(namedval.NamedValues()) == 0
def testAdd(self):
assert namedval.NamedValues(off=0) + namedval.NamedValues(on=1) == {'off': 0, 'on': 1}
def testClone(self):
assert namedval.NamedValues(off=0).clone(('on', 1)) == {'off': 0, 'on': 1}
assert namedval.NamedValues(off=0).clone(on=1) == {'off': 0, 'on': 1}
def testStrRepr(self):
assert str(self.e)
assert repr(self.e)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| [((391, 415), 'tests.base.BaseTestCase.setUp', 'BaseTestCase.setUp', (['self'], {}), '(self)\n', (409, 415), False, 'from tests.base import BaseTestCase\n'), ((433, 476), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', (["('off', 0)", "('on', 1)"], {}), "(('off', 0), ('on', 1))\n", (453, 476), False, 'from pyasn1.type import namedval\n'), ((1689, 1710), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1708, 1710), False, 'import unittest\n'), ((902, 935), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', ([], {'off': '(0)', 'on': '(1)'}), '(off=0, on=1)\n', (922, 935), False, 'from pyasn1.type import namedval\n'), ((974, 1007), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', (['"""off"""', '"""on"""'], {}), "('off', 'on')\n", (994, 1007), False, 'from pyasn1.type import namedval\n'), ((1046, 1076), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', (["('c', 0)"], {}), "(('c', 0))\n", (1066, 1076), False, 'from pyasn1.type import namedval\n'), ((1104, 1149), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', (['"""a"""', '"""b"""', "('c', 0)"], {'d': '(1)'}), "('a', 'b', ('c', 0), d=1)\n", (1124, 1149), False, 'from pyasn1.type import namedval\n'), ((1786, 1822), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1809, 1822), False, 'import unittest\n'), ((1261, 1283), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', ([], {}), '()\n', (1281, 1283), False, 'from pyasn1.type import namedval\n'), ((1329, 1356), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', ([], {'off': '(0)'}), '(off=0)\n', (1349, 1356), False, 'from pyasn1.type import namedval\n'), ((1359, 1385), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', ([], {'on': '(1)'}), '(on=1)\n', (1379, 1385), False, 'from pyasn1.type import namedval\n'), ((1450, 1477), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', ([], {'off': '(0)'}), '(off=0)\n', (1470, 1477), False, 'from pyasn1.type import namedval\n'), ((1533, 1560), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', ([], {'off': '(0)'}), '(off=0)\n', (1553, 1560), False, 'from pyasn1.type import namedval\n')] |
methane/pymemcache | setup.py | 0ff5430cdcef7ed52fb3edc2a90c1c7d208ad77f | #!/usr/bin/env python
from setuptools import setup, find_packages
from pymemcache import __version__
setup(
name = 'pymemcache',
version = __version__,
author = 'Charles Gordon',
author_email = '[email protected]',
packages = find_packages(),
tests_require = ['nose>=1.0'],
install_requires = ['six'],
description = 'A comprehensive, fast, pure Python memcached client',
long_description = open('README.md').read(),
license = 'Apache License 2.0',
url = 'https://github.com/Pinterest/pymemcache',
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Topic :: Database',
],
)
| [((253, 268), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (266, 268), False, 'from setuptools import setup, find_packages\n')] |
DL-85/pytorch_geometric | torch_geometric/read/ply.py | eb12a94a667e881c4a6bff26b0453428bcb72393 | import torch
from plyfile import PlyData
from torch_geometric.data import Data
def read_ply(path):
with open(path, 'rb') as f:
data = PlyData.read(f)
pos = ([torch.tensor(data['vertex'][axis]) for axis in ['x', 'y', 'z']])
pos = torch.stack(pos, dim=-1)
face = None
if 'face' in data:
faces = data['face']['vertex_indices']
faces = [torch.tensor(face, dtype=torch.long) for face in faces]
face = torch.stack(faces, dim=-1)
data = Data(pos=pos)
data.face = face
return data
| [((252, 276), 'torch.stack', 'torch.stack', (['pos'], {'dim': '(-1)'}), '(pos, dim=-1)\n', (263, 276), False, 'import torch\n'), ((491, 504), 'torch_geometric.data.Data', 'Data', ([], {'pos': 'pos'}), '(pos=pos)\n', (495, 504), False, 'from torch_geometric.data import Data\n'), ((148, 163), 'plyfile.PlyData.read', 'PlyData.read', (['f'], {}), '(f)\n', (160, 163), False, 'from plyfile import PlyData\n'), ((177, 211), 'torch.tensor', 'torch.tensor', (["data['vertex'][axis]"], {}), "(data['vertex'][axis])\n", (189, 211), False, 'import torch\n'), ((452, 478), 'torch.stack', 'torch.stack', (['faces'], {'dim': '(-1)'}), '(faces, dim=-1)\n', (463, 478), False, 'import torch\n'), ((381, 417), 'torch.tensor', 'torch.tensor', (['face'], {'dtype': 'torch.long'}), '(face, dtype=torch.long)\n', (393, 417), False, 'import torch\n')] |
ranguera/ml-agents | ml-agents/mlagents/trainers/brain_conversion_utils.py | 68779b407b32fce2ea14b16ef1bc26dea7d5e5a8 | from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution
from mlagents.envs.base_env import BatchedStepResult, AgentGroupSpec
from mlagents.envs.exception import UnityEnvironmentException
import numpy as np
from typing import List
def step_result_to_brain_info(
step_result: BatchedStepResult,
group_spec: AgentGroupSpec,
agent_id_prefix: int = None,
) -> BrainInfo:
n_agents = step_result.n_agents()
vis_obs_indices = []
vec_obs_indices = []
for index, observation in enumerate(step_result.obs):
if len(observation.shape) == 2:
vec_obs_indices.append(index)
elif len(observation.shape) == 4:
vis_obs_indices.append(index)
else:
raise UnityEnvironmentException(
"Invalid input received from the environment, the observation should "
"either be a vector of float or a PNG image"
)
if len(vec_obs_indices) == 0:
vec_obs = np.zeros((n_agents, 0), dtype=np.float32)
else:
vec_obs = np.concatenate([step_result.obs[i] for i in vec_obs_indices], axis=1)
vis_obs = [step_result.obs[i] for i in vis_obs_indices]
mask = np.ones((n_agents, np.sum(group_spec.action_size)), dtype=np.float32)
if group_spec.is_action_discrete():
mask = np.ones(
(n_agents, np.sum(group_spec.discrete_action_branches)), dtype=np.float32
)
if step_result.action_mask is not None:
mask = 1 - np.concatenate(step_result.action_mask, axis=1)
if agent_id_prefix is None:
agent_ids = [str(ag_id) for ag_id in list(step_result.agent_id)]
else:
agent_ids = [f"${agent_id_prefix}-{ag_id}" for ag_id in step_result.agent_id]
return BrainInfo(
vis_obs,
vec_obs,
list(step_result.reward),
agent_ids,
list(step_result.done),
list(step_result.max_step),
mask,
)
def group_spec_to_brain_parameters(
name: str, group_spec: AgentGroupSpec
) -> BrainParameters:
vec_size = np.sum(
[shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
)
vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
a_size: List[int] = []
if group_spec.is_action_discrete():
a_size += list(group_spec.discrete_action_branches)
vector_action_space_type = 0
else:
a_size += [group_spec.action_size]
vector_action_space_type = 1
return BrainParameters(
name, int(vec_size), cam_res, a_size, [], vector_action_space_type
)
| [((990, 1031), 'numpy.zeros', 'np.zeros', (['(n_agents, 0)'], {'dtype': 'np.float32'}), '((n_agents, 0), dtype=np.float32)\n', (998, 1031), True, 'import numpy as np\n'), ((1060, 1129), 'numpy.concatenate', 'np.concatenate', (['[step_result.obs[i] for i in vec_obs_indices]'], {'axis': '(1)'}), '([step_result.obs[i] for i in vec_obs_indices], axis=1)\n', (1074, 1129), True, 'import numpy as np\n'), ((2261, 2295), 'mlagents.trainers.brain.CameraResolution', 'CameraResolution', (['s[0]', 's[1]', 's[2]'], {}), '(s[0], s[1], s[2])\n', (2277, 2295), False, 'from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution\n'), ((1220, 1250), 'numpy.sum', 'np.sum', (['group_spec.action_size'], {}), '(group_spec.action_size)\n', (1226, 1250), True, 'import numpy as np\n'), ((749, 898), 'mlagents.envs.exception.UnityEnvironmentException', 'UnityEnvironmentException', (['"""Invalid input received from the environment, the observation should either be a vector of float or a PNG image"""'], {}), "(\n 'Invalid input received from the environment, the observation should either be a vector of float or a PNG image'\n )\n", (774, 898), False, 'from mlagents.envs.exception import UnityEnvironmentException\n'), ((1358, 1401), 'numpy.sum', 'np.sum', (['group_spec.discrete_action_branches'], {}), '(group_spec.discrete_action_branches)\n', (1364, 1401), True, 'import numpy as np\n'), ((1502, 1549), 'numpy.concatenate', 'np.concatenate', (['step_result.action_mask'], {'axis': '(1)'}), '(step_result.action_mask, axis=1)\n', (1516, 1549), True, 'import numpy as np\n')] |
SoonerRobotics/MRDC22 | mrdc_ws/src/mrdc_serial/setup.py | 00c1360138e468bf313eefc93fbde11f289ece82 | from setuptools import find_packages, setup
from glob import glob
import os
package_name = 'mrdc_serial'
setup(
name=package_name,
version='1.0.0',
packages=find_packages(),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'launch'),
glob(os.path.join('launch', '*.xml')))
],
install_requires=['setuptools'],
maintainer='Dylan Zemlin',
maintainer_email='[email protected]',
description='The MRDC Serial package that controls communication to the arduino',
license='MIT License',
entry_points={
'console_scripts': [
'remote = mrdc_serial.remote:main',
'serial = mrdc_serial.serial:main'
],
},
)
| [((171, 186), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (184, 186), False, 'from setuptools import find_packages, setup\n'), ((360, 405), 'os.path.join', 'os.path.join', (['"""share"""', 'package_name', '"""launch"""'], {}), "('share', package_name, 'launch')\n", (372, 405), False, 'import os\n'), ((421, 452), 'os.path.join', 'os.path.join', (['"""launch"""', '"""*.xml"""'], {}), "('launch', '*.xml')\n", (433, 452), False, 'import os\n')] |
DobromirZlatkov/anteya | orders/views.py | 9c66c64643350ad1710bcf60e2e38169e389a66b | from django.core.urlresolvers import reverse_lazy
from django.views import generic
from django.shortcuts import redirect, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from . import forms
from . import models
from custommixins import mixins
class OrderView(generic.View):
template_name = 'orders/order_create.html'
def get(self, request):
qs = models.Product.objects.none()
formset = forms.ProductFormSet(queryset=qs, prefix='formset')
order_form = forms.OrderForm(prefix='order_form')
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
def post(self, request):
formset = forms.ProductFormSet(request.POST, prefix='formset')
order_form = forms.OrderForm(request.POST, prefix='order_form')
        if formset.is_valid() and order_form.is_valid():
order = order_form.save()
for form in formset.forms:
product = form.save(commit=False)
order.products.add(product)
order.save()
return HttpResponseRedirect(reverse('order_details', args=(order.id,)))
else:
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
class OrderDetails(generic.DetailView):
model = models.Order
template_name_suffix = '_details'
class OrderList(mixins.LoginRequiredMixin, mixins.AdminRequiredMixin, generic.ListView):
model = models.Order
class OrderEdit(generic.View):
template_name = 'orders/order_edit.html'
def get(self, request, pk):
order = models.Order.objects.get(pk=pk)
formset = forms.ProductFormSet(queryset=order.products.all(), prefix='formset')
order_form = forms.OrderForm(prefix='order_form', instance=order)
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
def post(self, request, pk):
order = models.Order.objects.get(pk=pk)
formset = forms.ProductFormSet(request.POST, prefix='formset')
order_form = forms.OrderForm(request.POST, prefix='order_form')
        if formset.is_valid() and order_form.is_valid():
order = order_form.save()
for form in formset.forms:
product = form.save(commit=False)
order.products.add(product)
order.save()
return HttpResponseRedirect(reverse('order_details', args=(order.id,)))
else:
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
| [((588, 675), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'formset': formset, 'order_form': order_form}"], {}), "(request, self.template_name, {'formset': formset, 'order_form':\n order_form})\n", (594, 675), False, 'from django.shortcuts import redirect, render\n'), ((1836, 1923), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'formset': formset, 'order_form': order_form}"], {}), "(request, self.template_name, {'formset': formset, 'order_form':\n order_form})\n", (1842, 1923), False, 'from django.shortcuts import redirect, render\n'), ((1193, 1280), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'formset': formset, 'order_form': order_form}"], {}), "(request, self.template_name, {'formset': formset, 'order_form':\n order_form})\n", (1199, 1280), False, 'from django.shortcuts import redirect, render\n'), ((2493, 2580), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'formset': formset, 'order_form': order_form}"], {}), "(request, self.template_name, {'formset': formset, 'order_form':\n order_form})\n", (2499, 2580), False, 'from django.shortcuts import redirect, render\n'), ((1116, 1158), 'django.core.urlresolvers.reverse', 'reverse', (['"""order_details"""'], {'args': '(order.id,)'}), "('order_details', args=(order.id,))\n", (1123, 1158), False, 'from django.core.urlresolvers import reverse\n'), ((2416, 2458), 'django.core.urlresolvers.reverse', 'reverse', (['"""order_details"""'], {'args': '(order.id,)'}), "('order_details', args=(order.id,))\n", (2423, 2458), False, 'from django.core.urlresolvers import reverse\n')] |
oleksost/RoutingNetworks | PytorchRouting/Examples/run_experiments.py | 7e3e9219b7389d5af2a832a4882bc9fda0e7fd21 | """
This file defines some simple experiments to illustrate how Pytorch-Routing functions.
"""
import numpy as np
import tqdm
import torch
from PytorchRouting.DecisionLayers import REINFORCE, QLearning, SARSA, ActorCritic, GumbelSoftmax, PerTaskAssignment, \
WPL, AAC, AdvantageLearning, RELAX, EGreedyREINFORCE, EGreedyAAC
from PytorchRouting.Examples.Models import PerTask_all_fc, RoutedAllFC, PerTask_1_fc, PerDecisionSingleAgent, \
Dispatched
from PytorchRouting.Examples.Datasets import CIFAR100MTL
def compute_batch(model, batch):
samples, labels, tasks = batch
out, meta = model(samples, tasks=tasks)
correct_predictions = (out.max(dim=1)[1].squeeze() == labels.squeeze()).cpu().numpy()
accuracy = correct_predictions.sum()
oh_labels = one_hot(labels, out.size()[-1])
module_loss, decision_loss = model.loss(out, meta, oh_labels)
return module_loss, decision_loss, accuracy
def one_hot(indices, width):
indices = indices.squeeze().unsqueeze(1)
oh = torch.zeros(indices.size()[0], width).to(indices.device)
oh.scatter_(1, indices, 1)
return oh
def run_experiment(model, dataset, learning_rates, routing_module_learning_rate_ratio):
print('Loaded dataset and constructed model. Starting Training ...')
for epoch in range(50):
optimizers = []
parameters = []
if epoch in learning_rates:
try:
optimizers.append(torch.optim.SGD(model.routing_parameters(),
lr=routing_module_learning_rate_ratio*learning_rates[epoch]))
optimizers.append(torch.optim.SGD(model.module_parameters(),
lr=learning_rates[epoch]))
                parameters = model.routing_parameters() + model.module_parameters()
except AttributeError:
optimizers.append(torch.optim.SGD(model.parameters(), lr=learning_rates[epoch]))
parameters = model.parameters()
train_log, test_log = np.zeros((3,)), np.zeros((3,))
train_samples_seen, test_samples_seen = 0, 0
dataset.enter_train_mode()
model.train()
# while True:
pbar = tqdm.tqdm(unit=' samples')
while True:
try:
batch = dataset.get_batch()
except StopIteration:
break
train_samples_seen += len(batch[0])
pbar.update(len(batch[0]))
module_loss, decision_loss, accuracy = compute_batch(model, batch)
(module_loss + decision_loss).backward()
torch.nn.utils.clip_grad_norm_(parameters, 40., norm_type=2)
for opt in optimizers:
opt.step()
model.zero_grad()
train_log += np.array([module_loss.tolist(), decision_loss.tolist(), accuracy])
pbar.close()
dataset.enter_test_mode()
model.eval()
model.start_logging_selections()
while True:
try:
batch = dataset.get_batch()
except StopIteration:
break
test_samples_seen += len(batch[0])
module_loss, decision_loss, accuracy = compute_batch(model, batch)
test_log += np.array([module_loss.tolist(), decision_loss.tolist(), accuracy])
print('Epoch {} finished after {} train and {} test samples..\n'
' Training averages: Model loss: {}, Routing loss: {}, Accuracy: {}\n'
' Testing averages: Model loss: {}, Routing loss: {}, Accuracy: {}'.format(
epoch + 1, train_samples_seen, test_samples_seen,
*(train_log/train_samples_seen).round(3), *(test_log/test_samples_seen).round(3)))
model.stop_logging_selections_and_report()
if __name__ == '__main__':
# MNIST
# dataset = MNIST_MTL(64, data_files=['./Datasets/mnist.pkl.gz'])
# model = PerTask_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
# model = WPL_routed_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
cuda = False
# cuda = True
# CIFAR
dataset = CIFAR100MTL(10, data_files=['./Datasets/cifar-100-py/train', './Datasets/cifar-100-py/test'], cuda=cuda)
model = RoutedAllFC(WPL, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(RELAX, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(EGreedyREINFORCE, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = PerDecisionSingleAgent(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = Dispatched(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
learning_rates = {0: 3e-3, 5: 1e-3, 10: 3e-4}
routing_module_learning_rate_ratio = 0.3
if cuda:
model.cuda()
run_experiment(model, dataset, learning_rates, routing_module_learning_rate_ratio)
'''
WPL_routed_all_fc(3, 512, 5, dataset.num_tasks, dataset.num_tasks)
Training averages: Model loss: 0.427, Routing loss: 8.864, Accuracy: 0.711
Testing averages: Model loss: 0.459, Routing loss: 9.446, Accuracy: 0.674
'''
| [((4118, 4226), 'PytorchRouting.Examples.Datasets.CIFAR100MTL', 'CIFAR100MTL', (['(10)'], {'data_files': "['./Datasets/cifar-100-py/train', './Datasets/cifar-100-py/test']", 'cuda': 'cuda'}), "(10, data_files=['./Datasets/cifar-100-py/train',\n './Datasets/cifar-100-py/test'], cuda=cuda)\n", (4129, 4226), False, 'from PytorchRouting.Examples.Datasets import CIFAR100MTL\n'), ((4235, 4300), 'PytorchRouting.Examples.Models.RoutedAllFC', 'RoutedAllFC', (['WPL', '(3)', '(128)', '(5)', 'dataset.num_tasks', 'dataset.num_tasks'], {}), '(WPL, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)\n', (4246, 4300), False, 'from PytorchRouting.Examples.Models import PerTask_all_fc, RoutedAllFC, PerTask_1_fc, PerDecisionSingleAgent, Dispatched\n'), ((2212, 2238), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'unit': '""" samples"""'}), "(unit=' samples')\n", (2221, 2238), False, 'import tqdm\n'), ((2034, 2048), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2042, 2048), True, 'import numpy as np\n'), ((2050, 2064), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2058, 2064), True, 'import numpy as np\n'), ((2607, 2668), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['parameters', '(40.0)'], {'norm_type': '(2)'}), '(parameters, 40.0, norm_type=2)\n', (2637, 2668), False, 'import torch\n')] |
tefra/xsdata-w3c-tests | output/models/ms_data/regex/re_g22_xsd/__init__.py | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | from output.models.ms_data.regex.re_g22_xsd.re_g22 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| [] |
rgeirhos/object-recognition | code/image-manipulation.py | 4679f7c60665bd9fb274c6c4372fc0fa34b51485 | #!/usr/bin/env python
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from scipy.misc import toimage
import numpy as np
import wrapper as wr
###########################################################
# IMAGE IO
###########################################################
def imload_rgb(path):
"""Load and return an RGB image in the range [0, 1]."""
return imread(path) / 255.0
def save_img(image, imgname, use_JPEG=False):
"""Save image as either .jpeg or .png"""
if use_JPEG:
imsave(imgname+".JPEG", image)
else:
toimage(image,
cmin=0.0, cmax=1.0).save(imgname+".png")
###########################################################
# IMAGE MANIPULATION
###########################################################
def adjust_contrast(image, contrast_level):
"""Return the image scaled to a certain contrast level in [0, 1].
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
assert(contrast_level >= 0.0), "contrast_level too low."
assert(contrast_level <= 1.0), "contrast_level too high."
return (1-contrast_level)/2.0 + image.dot(contrast_level)
def grayscale_contrast(image, contrast_level):
"""Convert to grayscale. Adjust contrast.
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
return adjust_contrast(rgb2gray(image), contrast_level)
def uniform_noise(image, width, contrast_level, rng):
"""Convert to grayscale. Adjust contrast. Apply uniform noise.
parameters:
- image: a numpy.ndarray
- width: a scalar indicating width of additive uniform noise
-> then noise will be in range [-width, width]
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
image = grayscale_contrast(image, contrast_level)
return apply_uniform_noise(image, -width, width, rng)
###########################################################
# HELPER FUNCTIONS
###########################################################
def apply_uniform_noise(image, low, high, rng=None):
"""Apply uniform noise to an image, clip outside values to 0 and 1.
parameters:
- image: a numpy.ndarray
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
nrow = image.shape[0]
ncol = image.shape[1]
image = image + get_uniform_noise(low, high, nrow, ncol, rng)
#clip values
image = np.where(image < 0, 0, image)
image = np.where(image > 1, 1, image)
assert is_in_bounds(image, 0, 1), "values <0 or >1 occurred"
return image
def get_uniform_noise(low, high, nrow, ncol, rng=None):
"""Return uniform noise within [low, high) of size (nrow, ncol).
parameters:
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- nrow: number of rows of desired noise
- ncol: number of columns of desired noise
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
if rng is None:
return np.random.uniform(low=low, high=high,
size=(nrow, ncol))
else:
return rng.uniform(low=low, high=high,
size=(nrow, ncol))
def is_in_bounds(mat, low, high):
"""Return wether all values in 'mat' fall between low and high.
parameters:
- mat: a numpy.ndarray
- low: lower bound (inclusive)
- high: upper bound (inclusive)
"""
return np.all(np.logical_and(mat >= 0, mat <= 1))
def eidolon_partially_coherent_disarray(image, reach, coherence, grain):
"""Return parametrically distorted images (produced by Eidolon factory.
For more information on the effect of different distortions, please
have a look at the paper: Koenderink et al., JoV 2017,
Eidolons: Novel stimuli for vision research).
- image: a numpy.ndarray
- reach: float, controlling the strength of the manipulation
- coherence: a float within [0, 1] with 1 = full coherence
- grain: float, controlling how fine-grained the distortion is
"""
return wr.partially_coherent_disarray(wr.data_to_pic(image),
reach, coherence, grain)
###########################################################
# MAIN METHOD FOR TESTING & DEMONSTRATION PURPOSES
###########################################################
if __name__ == "__main__":
print("""This main method should generate manipulated
images in the directory where it was executed.""")
use_JPEG = False # either JPEG or PNG
img = imload_rgb("test_image.JPEG")
###################################################
# A) Example for color-experiment:
# - convert to grayscale
###################################################
img_grayscale = rgb2gray(img)
save_img(img_grayscale, "test_image_grayscale", use_JPEG)
###################################################
# B) Example for contrast-experiment:
# - convert to grayscale and
# - reduce contrast to nominal contrast of 10%
###################################################
contrast_level_1 = 0.1
img_low_contrast = grayscale_contrast(image=img,
contrast_level=contrast_level_1)
save_img(img_low_contrast, "test_image_low_contrast", use_JPEG)
###################################################
# C) Example for noise-experiment:
# - convert to graycale and
# - reduce contrast to 30% and
# - apply uniform noise with width 0.1
###################################################
noise_width = 0.1
contrast_level_2 = 0.3
rng = np.random.RandomState(seed=42)
img_noisy = uniform_noise(image=img, width=noise_width,
contrast_level=contrast_level_2,
rng=rng)
save_img(img_noisy, "test_image_noisy", use_JPEG)
###################################################
# D) Example for eidolon-experiment:
# - use partially_coherent_disarray
###################################################
grain = 10.0
coherence = 1.0
reach = 8.0
img_eidolon = eidolon_partially_coherent_disarray(img, reach,
coherence, grain)
save_img(img_eidolon, "test_image_eidolon", use_JPEG)
| [((2702, 2731), 'numpy.where', 'np.where', (['(image < 0)', '(0)', 'image'], {}), '(image < 0, 0, image)\n', (2710, 2731), True, 'import numpy as np\n'), ((2744, 2773), 'numpy.where', 'np.where', (['(image > 1)', '(1)', 'image'], {}), '(image > 1, 1, image)\n', (2752, 2773), True, 'import numpy as np\n'), ((5095, 5108), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (5103, 5108), False, 'from skimage.color import rgb2gray\n'), ((5972, 6002), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (5993, 6002), True, 'import numpy as np\n'), ((397, 409), 'skimage.io.imread', 'imread', (['path'], {}), '(path)\n', (403, 409), False, 'from skimage.io import imread, imsave\n'), ((537, 569), 'skimage.io.imsave', 'imsave', (["(imgname + '.JPEG')", 'image'], {}), "(imgname + '.JPEG', image)\n", (543, 569), False, 'from skimage.io import imread, imsave\n'), ((1471, 1486), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (1479, 1486), False, 'from skimage.color import rgb2gray\n'), ((3309, 3365), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high', 'size': '(nrow, ncol)'}), '(low=low, high=high, size=(nrow, ncol))\n', (3326, 3365), True, 'import numpy as np\n'), ((3749, 3783), 'numpy.logical_and', 'np.logical_and', (['(mat >= 0)', '(mat <= 1)'], {}), '(mat >= 0, mat <= 1)\n', (3763, 3783), True, 'import numpy as np\n'), ((4394, 4415), 'wrapper.data_to_pic', 'wr.data_to_pic', (['image'], {}), '(image)\n', (4408, 4415), True, 'import wrapper as wr\n'), ((587, 621), 'scipy.misc.toimage', 'toimage', (['image'], {'cmin': '(0.0)', 'cmax': '(1.0)'}), '(image, cmin=0.0, cmax=1.0)\n', (594, 621), False, 'from scipy.misc import toimage\n')] |
AlishKZ/ITMO_ICT_WebDevelopment_2020-2021 | students/K33402/Akhmetzhanov Alisher/lr2/main/forms.py | b3ce82e17392d26d815e64343f5103f1bd46cd81 | from django.db.models import fields
from main.models import RoomReservation, UserRoom
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate, login
from django.contrib.auth import get_user_model
class ReservateRoomForm(forms.Form):
begin_date = forms.DateField()
end_date = forms.DateField()
class AddCommentForm(forms.Form):
text = forms.CharField(max_length=410)
accommodation = forms.ModelChoiceField(queryset=UserRoom.objects.all())
class EditReservationForm(forms.ModelForm):
class Meta:
model = RoomReservation
fields = ['begin_date', 'end_date']
| [((317, 334), 'django.forms.DateField', 'forms.DateField', ([], {}), '()\n', (332, 334), False, 'from django import forms\n'), ((350, 367), 'django.forms.DateField', 'forms.DateField', ([], {}), '()\n', (365, 367), False, 'from django import forms\n'), ((414, 445), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(410)'}), '(max_length=410)\n', (429, 445), False, 'from django import forms\n'), ((498, 520), 'main.models.UserRoom.objects.all', 'UserRoom.objects.all', ([], {}), '()\n', (518, 520), False, 'from main.models import RoomReservation, UserRoom\n')] |
espottesmith/emmet | emmet-core/emmet/core/vasp/calc_types.py | bd28b91d240da9f0c996a2b2efb7e67da9176a09 | """ Module to define various calculation types as Enums for VASP """
import datetime
from itertools import groupby, product
from pathlib import Path
from typing import Dict, Iterator, List
import bson
import numpy as np
from monty.json import MSONable
from monty.serialization import loadfn
from pydantic import BaseModel
from pymatgen.analysis.structure_matcher import ElementComparator, StructureMatcher
from pymatgen.core.structure import Structure
from typing_extensions import Literal
from emmet.core import SETTINGS
from emmet.core.utils import ValueEnum
_RUN_TYPE_DATA = loadfn(str(Path(__file__).parent.joinpath("run_types.yaml").resolve()))
_TASK_TYPES = [
"NSCF Line",
"NSCF Uniform",
"Dielectric",
"DFPT",
"DFPT Dielectric",
"NMR Nuclear Shielding",
"NMR Electric Field Gradient",
"Static",
"Structure Optimization",
"Deformation",
]
_RUN_TYPES = (
[
rt
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ [
f"{rt}+U"
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ ["LDA", "LDA+U"]
)
RunType = ValueEnum( # type: ignore
"RunType", dict({"_".join(rt.split()).replace("+", "_"): rt for rt in _RUN_TYPES})
)
RunType.__doc__ = "VASP calculation run types"
TaskType = ValueEnum("TaskType", {"_".join(tt.split()): tt for tt in _TASK_TYPES}) # type: ignore
TaskType.__doc__ = "VASP calculation task types"
CalcType = ValueEnum( # type: ignore
"CalcType",
{
f"{'_'.join(rt.split()).replace('+','_')}_{'_'.join(tt.split())}": f"{rt} {tt}"
for rt, tt in product(_RUN_TYPES, _TASK_TYPES)
},
)
CalcType.__doc__ = "VASP calculation types"
def run_type(parameters: Dict) -> RunType:
"""
Determines the run_type from the VASP parameters dict
This is adapted from pymatgen to be far less unstable
Args:
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
if parameters.get("LDAU", False):
is_hubbard = "+U"
else:
is_hubbard = ""
def _variant_equal(v1, v2) -> bool:
"""
helper function to deal with strings
"""
if isinstance(v1, str) and isinstance(v2, str):
return v1.strip().upper() == v2.strip().upper()
else:
return v1 == v2
# This is to force an order of evaluation
for functional_class in ["HF", "VDW", "METAGGA", "GGA"]:
for special_type, params in _RUN_TYPE_DATA[functional_class].items():
if all(
[
_variant_equal(parameters.get(param, None), value)
for param, value in params.items()
]
):
return RunType(f"{special_type}{is_hubbard}")
return RunType(f"LDA{is_hubbard}")
def task_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict]
) -> TaskType:
"""
Determines the task type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
"""
calc_type = []
incar = inputs.get("incar", {})
if incar.get("ICHARG", 0) > 10:
try:
kpts = inputs.get("kpoints") or {}
kpt_labels = kpts.get("labels") or []
num_kpt_labels = len(list(filter(None.__ne__, kpt_labels)))
except Exception as e:
raise Exception(
"Couldn't identify total number of kpt labels: {}".format(e)
)
if num_kpt_labels > 0:
calc_type.append("NSCF Line")
else:
calc_type.append("NSCF Uniform")
elif incar.get("LEPSILON", False):
if incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
calc_type.append("Dielectric")
elif incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
elif incar.get("LCHIMAG", False):
calc_type.append("NMR Nuclear Shielding")
elif incar.get("LEFG", False):
calc_type.append("NMR Electric Field Gradient")
elif incar.get("NSW", 1) == 0:
calc_type.append("Static")
elif incar.get("ISIF", 2) == 3 and incar.get("IBRION", 0) > 0:
calc_type.append("Structure Optimization")
elif incar.get("ISIF", 3) == 2 and incar.get("IBRION", 0) > 0:
calc_type.append("Deformation")
return TaskType(" ".join(calc_type))
def calc_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict],
parameters: Dict,
) -> CalcType:
"""
Determines the calc type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
rt = run_type(parameters).value
tt = task_type(inputs).value
return CalcType(f"{rt} {tt}")
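# Hedged usage sketch (added; not part of the original module). The INCAR and
# parameter values below are illustrative assumptions, chosen only to show how
# run_type(), task_type() and calc_type() defined above fit together.
if __name__ == "__main__":
    example_inputs = {"incar": {"NSW": 0, "ISIF": 2, "IBRION": -1}}
    example_parameters = {"GGA": "PE", "LDAU": False}
    print(run_type(example_parameters))  # expected to resolve to the PBE run type
    print(task_type(example_inputs))  # expected to resolve to the Static task type
    print(calc_type(example_inputs, example_parameters))  # e.g. "PBE Static"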
| [((1674, 1706), 'itertools.product', 'product', (['_RUN_TYPES', '_TASK_TYPES'], {}), '(_RUN_TYPES, _TASK_TYPES)\n', (1681, 1706), False, 'from itertools import groupby, product\n'), ((592, 606), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (596, 606), False, 'from pathlib import Path\n')] |
dawnos/robotcar-to-rosbag | sensors/__init__.py | c51035d7fd7e08487629a9b06d84a86890f7cc03 |
from mono_left import MonoLeft
from mono_right import MonoRight
from mono_rear import MonoRear
from stereo_left import StereoLeft
from stereo_right import StereoRight
from stereo_centre import StereoCentre
| [] |
YiWang-Evonne/disaster_response | models/train_classifier.py | 824f646920ac85a01419101e17e92f592a505782 | import sys
import pandas as pd
from sqlalchemy import create_engine
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import pickle
from sklearn.model_selection import GridSearchCV
def load_data(database_filepath):
"""
load data from sql db
:param database_filepath: sql db path
    :return: message array, label array, and list of label column names
"""
engine = create_engine("sqlite:///"+database_filepath)
df = pd.read_sql_table('modeling_data', engine)
yvar = [item for item in list(df) if item not in ['message', 'original', 'genre', 'id']]
X = df['message']
Y = df[yvar]
return X.values, Y.values, list(Y)
def tokenize(text):
"""
processing the text input
    :param text: raw message text
    :return: list of normalized, lemmatized tokens
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
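def _demo_tokenize():
    """
    Added illustration (not in the original script): shows the effect of
    tokenize() on a message containing a URL. The sample message is an
    assumption used only for demonstration.
    """
    sample = "Water needed at http://example.com/shelter NOW!!"
    # the URL is swapped for 'urlplaceholder' before tokenization/lemmatization
    return tokenize(sample)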
def build_model():
"""
build model pipeline
:return: model pipeline
"""
model_pipeline = Pipeline([
('features', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('clf', RandomForestClassifier())
])
return model_pipeline
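def _demo_pipeline_param_names():
    """
    Added note (not in the original script): GridSearchCV parameter names are
    prefixed with the pipeline step names defined in build_model(), which is
    why main() below uses keys such as 'clf__n_estimators'.
    """
    model = build_model()
    # tunable names include 'clf__n_estimators', 'features__vect__max_df',
    # 'features__tfidf__use_idf', ...
    return sorted(model.get_params().keys())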
def model_gridsearch(model, parameters):
cv = GridSearchCV(model, param_grid=parameters, verbose=3)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
"""
evaluate model performances
:param model: model obj
:param X_test: test x
:param Y_test: test y
:param category_names: y names
:return:
"""
y_pred = model.predict(X_test)
print(classification_report(Y_test, y_pred, target_names=category_names))
def save_model(model, model_filepath):
"""
save model to local path
:param model: model obj
:param model_filepath: saving path
:return:
"""
with open(model_filepath, 'wb') as f:
pickle.dump(model, f)
def main():
"""
CLI to fit the model
:return:
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
# model.fit(X_train, Y_train)
parameters = {
'clf__n_estimators': [100, 400, 800],
# 'clf__criterion':["gini", "entropy"]
}
        cv = model_gridsearch(model, parameters)
        cv.fit(X_train, Y_train)  # fit the grid search before accessing best_estimator_
        best_model_pipeline = cv.best_estimator_
print('Evaluating model...')
evaluate_model(best_model_pipeline, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(best_model_pipeline, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | [((80, 145), 'nltk.download', 'nltk.download', (["['punkt', 'wordnet', 'averaged_perceptron_tagger']"], {}), "(['punkt', 'wordnet', 'averaged_perceptron_tagger'])\n", (93, 145), False, 'import nltk\n'), ((848, 895), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + database_filepath)"], {}), "('sqlite:///' + database_filepath)\n", (861, 895), False, 'from sqlalchemy import create_engine\n'), ((903, 945), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""modeling_data"""', 'engine'], {}), "('modeling_data', engine)\n", (920, 945), True, 'import pandas as pd\n'), ((1344, 1371), 're.findall', 're.findall', (['url_regex', 'text'], {}), '(url_regex, text)\n', (1354, 1371), False, 'import re\n'), ((1467, 1486), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (1480, 1486), False, 'from nltk.tokenize import word_tokenize\n'), ((1504, 1523), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1521, 1523), False, 'from nltk.stem import WordNetLemmatizer\n'), ((2090, 2143), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['model'], {'param_grid': 'parameters', 'verbose': '(3)'}), '(model, param_grid=parameters, verbose=3)\n', (2102, 2143), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2443, 2509), 'sklearn.metrics.classification_report', 'classification_report', (['Y_test', 'y_pred'], {'target_names': 'category_names'}), '(Y_test, y_pred, target_names=category_names)\n', (2464, 2509), False, 'from sklearn.metrics import classification_report\n'), ((2729, 2750), 'pickle.dump', 'pickle.dump', (['model', 'f'], {}), '(model, f)\n', (2740, 2750), False, 'import pickle\n'), ((3084, 3121), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)'}), '(X, Y, test_size=0.2)\n', (3100, 3121), False, 'from sklearn.model_selection import train_test_split\n'), ((1980, 2004), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (2002, 2004), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1871, 1906), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'tokenizer': 'tokenize'}), '(tokenizer=tokenize)\n', (1886, 1906), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((1931, 1949), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (1947, 1949), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n')] |
dymaxionlabs/platform | terra/terra/emails.py | 98fe893d4632d62fea3e2357f16d970014037cdf | import os
from datetime import date
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext as _
from mailchimp3 import MailChimp
class Email:
from_email = settings.DEFAULT_FROM_EMAIL
subject = None
template_name = 'basic'
preview_text = ''
templates_basedir = os.path.join(settings.BASE_DIR, 'templates')
def __init__(self, recipients, language_code='en'):
self.recipients = recipients
self.language_code = language_code
def send_mail(self):
send_mail(self.subject,
self.body,
self.from_email,
self.recipients,
html_message=self.html_body)
@property
def body(self):
return render_to_string(self.body_template, self.template_params)
@property
def html_body(self):
return self._reformat_mailchimp_template(
render_to_string(self.htmlbody_template, self.template_params))
@property
def body_template(self):
return os.path.join(
self.templates_basedir,
'{name}.{lc}.txt'.format(name=self.template_name,
lc=self.language_code))
@property
def htmlbody_template(self):
return os.path.join(
self.templates_basedir,
'{name}.{lc}.html'.format(name=self.template_name,
lc=self.language_code))
@property
def template_params(self):
return {}
def _reformat_mailchimp_template(self, html):
"""
        Replaces MailChimp variables with Django template variables, and does
        some post-processing.
"""
for var, newvar in self.mc_variables.items():
html = html.replace(str(var), str(newvar))
return html
@property
def mc_variables(self):
return {
'*|MC:SUBJECT|*': self.subject,
'*|MC_PREVIEW_TEXT|*': self.preview_text,
'*|CURRENT_YEAR|*': date.today().year,
'*|LIST:COMPANY|*': settings.COMPANY_NAME,
'*|HTML:LIST_ADDRESS_HTML|*': settings.LIST_ADDRESS_HTML,
'*|UNSUB|*': '%unsubscribe_url%',
# Unused variables (for now):
'*|IFNOT:ARCHIVE_PAGE|*': '',
'*|LIST:DESCRIPTION|*': '',
'*|END:IF|*': '',
}
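def _demo_send_welcome(user):
    """
    Added sketch (not part of the original module): intended calling pattern
    for the Email subclasses defined below. The recipient address is a
    placeholder; sending requires Django settings and the matching templates.
    """
    email = WelcomeEmail(user, ['[email protected]'], language_code='en')
    email.send_mail()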
class EarlyAccessBetaEmail(Email):
template_name = 'early_access_beta'
@property
def signup_url(self):
return '{base_url}/signup?beta=1&email={email}'.format(
base_url=settings.WEBCLIENT_URL, email= self.recipients[0])
@property
def subject(self):
with translation.override(self.language_code):
return _('validate your email')
@property
def template_params(self):
return {**super().template_params, 'signup_url': self.signup_url}
@property
def mc_variables(self):
return {**super().mc_variables, '*|SIGNUP_URL|*': self.signup_url}
class WelcomeEmail(Email):
template_name = 'welcome'
link = '{base_url}/login'.format(base_url=settings.WEBCLIENT_URL)
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
@property
def subject(self):
with translation.override(self.language_code):
return _('your account is ready') % {'name': self.first_name}
@property
def template_params(self):
return {
**super().template_params,
'first_name': self.first_name,
'link': self.link,
}
@property
def mc_variables(self):
return {
**super().mc_variables,
'*|FNAME|*': self.first_name,
'*|TEXT:LINK|*': self.link,
}
@property
def first_name(self):
return self.user.first_name or self.user.username
class TrainingCompletedEmail(Email):
template_name = 'training_completed'
def __init__(self, estimator, *args, **kwargs):
super().__init__(*args, **kwargs)
self.estimator = estimator
self.link = '{web_client_url}/models/new/od/select?id={uuid}'.format(
web_client_url = settings.WEBCLIENT_URL, uuid = estimator.uuid
)
@property
def subject(self):
with translation.override(self.language_code):
return _('training of your model completed')
@property
def template_params(self):
return {
**super().template_params,
'name': self.estimator_name,
'num_classes': self.num_classes,
'link': self.link,
}
@property
def mc_variables(self):
return {
**super().mc_variables,
'*|NAME|*': self.estimator_name,
'*|NUM_CLASSES|*': self.num_classes,
'*|LINK|*': self.link,
}
@property
def estimator_name(self):
return self.estimator.name
@property
def num_classes(self):
return len(self.estimator.classes)
class PredictionCompletedEmail(Email):
template_name = 'prediction_completed'
def __init__(self, estimator, *args, **kwargs):
super().__init__(*args, **kwargs)
self.estimator = estimator
@property
def subject(self):
with translation.override(self.language_code):
return _('prediction of your model completed')
@property
def template_params(self):
return {
**super().template_params,
'name': self.estimator_name,
'num_classes': self.num_classes,
}
@property
def mc_variables(self):
return {
**super().mc_variables,
'*|NAME|*': self.estimator_name,
'*|NUM_CLASSES|*': self.num_classes,
}
@property
def estimator_name(self):
return self.estimator.name
@property
def num_classes(self):
return len(self.estimator.classes)
def notify(subject, body='.'):
send_mail(subject, body, '[email protected]',
['[email protected]'])
| [((436, 480), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""templates"""'], {}), "(settings.BASE_DIR, 'templates')\n", (448, 480), False, 'import os\n'), ((6117, 6203), 'django.core.mail.send_mail', 'send_mail', (['subject', 'body', '"""[email protected]"""', "['[email protected]']"], {}), "(subject, body, '[email protected]', [\n '[email protected]'])\n", (6126, 6203), False, 'from django.core.mail import send_mail\n'), ((652, 753), 'django.core.mail.send_mail', 'send_mail', (['self.subject', 'self.body', 'self.from_email', 'self.recipients'], {'html_message': 'self.html_body'}), '(self.subject, self.body, self.from_email, self.recipients,\n html_message=self.html_body)\n', (661, 753), False, 'from django.core.mail import send_mail\n'), ((872, 930), 'django.template.loader.render_to_string', 'render_to_string', (['self.body_template', 'self.template_params'], {}), '(self.body_template, self.template_params)\n', (888, 930), False, 'from django.template.loader import render_to_string\n'), ((1033, 1095), 'django.template.loader.render_to_string', 'render_to_string', (['self.htmlbody_template', 'self.template_params'], {}), '(self.htmlbody_template, self.template_params)\n', (1049, 1095), False, 'from django.template.loader import render_to_string\n'), ((2790, 2830), 'django.utils.translation.override', 'translation.override', (['self.language_code'], {}), '(self.language_code)\n', (2810, 2830), False, 'from django.utils import translation\n'), ((2851, 2875), 'django.utils.translation.ugettext', '_', (['"""validate your email"""'], {}), "('validate your email')\n", (2852, 2875), True, 'from django.utils.translation import ugettext as _\n'), ((3410, 3450), 'django.utils.translation.override', 'translation.override', (['self.language_code'], {}), '(self.language_code)\n', (3430, 3450), False, 'from django.utils import translation\n'), ((4423, 4463), 'django.utils.translation.override', 'translation.override', (['self.language_code'], {}), '(self.language_code)\n', (4443, 4463), False, 'from django.utils import translation\n'), ((4484, 4521), 'django.utils.translation.ugettext', '_', (['"""training of your model completed"""'], {}), "('training of your model completed')\n", (4485, 4521), True, 'from django.utils.translation import ugettext as _\n'), ((5416, 5456), 'django.utils.translation.override', 'translation.override', (['self.language_code'], {}), '(self.language_code)\n', (5436, 5456), False, 'from django.utils import translation\n'), ((5477, 5516), 'django.utils.translation.ugettext', '_', (['"""prediction of your model completed"""'], {}), "('prediction of your model completed')\n", (5478, 5516), True, 'from django.utils.translation import ugettext as _\n'), ((2131, 2143), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2141, 2143), False, 'from datetime import date\n'), ((3471, 3497), 'django.utils.translation.ugettext', '_', (['"""your account is ready"""'], {}), "('your account is ready')\n", (3472, 3497), True, 'from django.utils.translation import ugettext as _\n')] |
miksu/edward2 | experimental/attentive_uncertainty/toy_regression/datasets.py | 973acdb23701f320ebaee8a56fc44d4414acfa4e | # coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses real and synthetic datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import collections
import tensorflow as tf
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("context_x", "context_y", "target_x", "target_y"))
class GPCurvesReader(object):
"""Generates curves using a Gaussian Process (GP).
Supports vector inputs (x) and vector outputs (y). Kernel is
mean-squared exponential, using the x-value l2 coordinate distance scaled by
some factor chosen randomly in a range. Outputs are independent gaussian
processes.
"""
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
l1_scale=0.6,
sigma_scale=1.0,
random_kernel_parameters=False,
testing=False):
"""Creates a regression dataset of functions sampled from a GP.
Args:
batch_size: An integer.
max_num_context: The max number of observations in the context.
x_size: Integer >= 1 for length of "x values" vector.
y_size: Integer >= 1 for length of "y values" vector.
l1_scale: Float; typical scale for kernel distance function.
sigma_scale: Float; typical scale for variance.
random_kernel_parameters: If `True`, the kernel parameters (l1 and sigma)
are sampled uniformly within [0.1, l1_scale] and [0.1, sigma_scale].
testing: Boolean that indicates whether we are testing. If so there are
more targets for visualization.
"""
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._l1_scale = l1_scale
self._sigma_scale = sigma_scale
self._random_kernel_parameters = random_kernel_parameters
self._testing = testing
def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):
"""Applies the Gaussian kernel to generate curve data.
Args:
xdata: Tensor of shape [B, num_total_points, x_size] with
the values of the x-axis data.
l1: Tensor of shape [B, y_size, x_size], the scale
parameter of the Gaussian kernel.
sigma_f: Tensor of shape [B, y_size], the magnitude
of the std.
sigma_noise: Float, std of the noise that we add for stability.
Returns:
The kernel, a float tensor of shape
[B, y_size, num_total_points, num_total_points].
"""
num_total_points = tf.shape(xdata)[1]
# Expand and take the difference
xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]
xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]
diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]
# [B, y_size, num_total_points, num_total_points, x_size]
norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])
norm = tf.reduce_sum(
norm, -1) # [B, data_size, num_total_points, num_total_points]
# [B, y_size, num_total_points, num_total_points]
kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)
# Add some noise to the diagonal to make the cholesky work.
kernel += (sigma_noise**2) * tf.eye(num_total_points)
return kernel
def generate_curves(self, num_context=None):
"""Builds the op delivering the data.
Generated functions are `float32` with x values between -2 and 2.
Args:
num_context: Number of context points. If None, chosen randomly.
Returns:
A `CNPRegressionDescription` namedtuple.
"""
if num_context is None:
num_context = tf.random_uniform(
shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32)
# If we are testing we want to have more targets and have them evenly
# distributed in order to plot the function.
if self._testing:
num_target = 400
num_total_points = num_target
x_values = tf.tile(
tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),
[self._batch_size, 1])
x_values = tf.expand_dims(x_values, axis=-1)
# During training the number of target points and their x-positions are
# selected at random
else:
num_target = tf.random_uniform(shape=(), minval=0,
maxval=self._max_num_context - num_context,
dtype=tf.int32)
num_total_points = num_context + num_target
x_values = tf.random_uniform(
[self._batch_size, num_total_points, self._x_size], -2, 2)
# Set kernel parameters
# Either choose a set of random parameters for the mini-batch
if self._random_kernel_parameters:
l1 = tf.random_uniform([self._batch_size, self._y_size,
self._x_size], 0.1, self._l1_scale)
sigma_f = tf.random_uniform([self._batch_size, self._y_size],
0.1, self._sigma_scale)
# Or use the same fixed parameters for all mini-batches
else:
l1 = tf.ones(shape=[self._batch_size, self._y_size,
self._x_size]) * self._l1_scale
sigma_f = tf.ones(shape=[self._batch_size,
self._y_size]) * self._sigma_scale
# Pass the x_values through the Gaussian kernel
# [batch_size, y_size, num_total_points, num_total_points]
kernel = self._gaussian_kernel(x_values, l1, sigma_f)
# Calculate Cholesky, using double precision for better stability:
cholesky = tf.cast(tf.cholesky(tf.cast(kernel, tf.float64)), tf.float32)
# Sample a curve
# [batch_size, y_size, num_total_points, 1]
y_values = tf.matmul(
cholesky,
tf.random_normal([self._batch_size, self._y_size, num_total_points, 1]))
# [batch_size, num_total_points, y_size]
y_values = tf.transpose(tf.squeeze(y_values, 3), [0, 2, 1])
if self._testing:
# Select the targets
target_x = x_values
target_y = y_values
# Select the observations
idx = tf.random_shuffle(tf.range(num_target))
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
else:
# Select the targets which will consist of the context points as well as
# some new target points
target_x = x_values[:, :num_target + num_context, :]
target_y = y_values[:, :num_target + num_context, :]
# Select the observations
context_x = x_values[:, :num_context, :]
context_y = y_values[:, :num_context, :]
return NPRegressionDescription(
context_x=context_x,
context_y=context_y,
target_x=target_x,
target_y=target_y)
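# Hedged usage sketch (added; not from the original file): draws one batch of
# GP curves using TF1-style graph execution, which is what the ops above assume.
# The batch size and max_num_context values are arbitrary choices.
if __name__ == "__main__":
  reader = GPCurvesReader(batch_size=2, max_num_context=10)
  batch = reader.generate_curves()
  with tf.Session() as sess:
    context_x, target_y = sess.run([batch.context_x, batch.target_y])
    print(context_x.shape, target_y.shape)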
| [((871, 976), 'collections.namedtuple', 'collections.namedtuple', (['"""NPRegressionDescription"""', "('context_x', 'context_y', 'target_x', 'target_y')"], {}), "('NPRegressionDescription', ('context_x', 'context_y',\n 'target_x', 'target_y'))\n", (893, 976), False, 'import collections\n'), ((3274, 3303), 'tensorflow.expand_dims', 'tf.expand_dims', (['xdata'], {'axis': '(1)'}), '(xdata, axis=1)\n', (3288, 3303), True, 'import tensorflow as tf\n'), ((3353, 3382), 'tensorflow.expand_dims', 'tf.expand_dims', (['xdata'], {'axis': '(2)'}), '(xdata, axis=2)\n', (3367, 3382), True, 'import tensorflow as tf\n'), ((3571, 3636), 'tensorflow.square', 'tf.square', (['(diff[:, (None), :, :, :] / l1[:, :, (None), (None), :])'], {}), '(diff[:, (None), :, :, :] / l1[:, :, (None), (None), :])\n', (3580, 3636), True, 'import tensorflow as tf\n'), ((3643, 3666), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['norm', '(-1)'], {}), '(norm, -1)\n', (3656, 3666), True, 'import tensorflow as tf\n'), ((3204, 3219), 'tensorflow.shape', 'tf.shape', (['xdata'], {}), '(xdata)\n', (3212, 3219), True, 'import tensorflow as tf\n'), ((3837, 3856), 'tensorflow.exp', 'tf.exp', (['(-0.5 * norm)'], {}), '(-0.5 * norm)\n', (3843, 3856), True, 'import tensorflow as tf\n'), ((3955, 3979), 'tensorflow.eye', 'tf.eye', (['num_total_points'], {}), '(num_total_points)\n', (3961, 3979), True, 'import tensorflow as tf\n'), ((4359, 4447), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(3)', 'maxval': 'self._max_num_context', 'dtype': 'tf.int32'}), '(shape=[], minval=3, maxval=self._max_num_context, dtype=\n tf.int32)\n', (4376, 4447), True, 'import tensorflow as tf\n'), ((4816, 4849), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_values'], {'axis': '(-1)'}), '(x_values, axis=-1)\n', (4830, 4849), True, 'import tensorflow as tf\n'), ((4980, 5081), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '()', 'minval': '(0)', 'maxval': '(self._max_num_context - num_context)', 'dtype': 'tf.int32'}), '(shape=(), minval=0, maxval=self._max_num_context -\n num_context, dtype=tf.int32)\n', (4997, 5081), True, 'import tensorflow as tf\n'), ((5219, 5295), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self._batch_size, num_total_points, self._x_size]', '(-2)', '(2)'], {}), '([self._batch_size, num_total_points, self._x_size], -2, 2)\n', (5236, 5295), True, 'import tensorflow as tf\n'), ((5452, 5543), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self._batch_size, self._y_size, self._x_size]', '(0.1)', 'self._l1_scale'], {}), '([self._batch_size, self._y_size, self._x_size], 0.1, self\n ._l1_scale)\n', (5469, 5543), True, 'import tensorflow as tf\n'), ((5585, 5660), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self._batch_size, self._y_size]', '(0.1)', 'self._sigma_scale'], {}), '([self._batch_size, self._y_size], 0.1, self._sigma_scale)\n', (5602, 5660), True, 'import tensorflow as tf\n'), ((6441, 6512), 'tensorflow.random_normal', 'tf.random_normal', (['[self._batch_size, self._y_size, num_total_points, 1]'], {}), '([self._batch_size, self._y_size, num_total_points, 1])\n', (6457, 6512), True, 'import tensorflow as tf\n'), ((6588, 6611), 'tensorflow.squeeze', 'tf.squeeze', (['y_values', '(3)'], {}), '(y_values, 3)\n', (6598, 6611), True, 'import tensorflow as tf\n'), ((6829, 6875), 'tensorflow.gather', 'tf.gather', (['x_values', 'idx[:num_context]'], {'axis': '(1)'}), '(x_values, idx[:num_context], axis=1)\n', (6838, 6875), True, 'import tensorflow as tf\n'), ((6894, 6940), 
'tensorflow.gather', 'tf.gather', (['y_values', 'idx[:num_context]'], {'axis': '(1)'}), '(y_values, idx[:num_context], axis=1)\n', (6903, 6940), True, 'import tensorflow as tf\n'), ((3798, 3816), 'tensorflow.square', 'tf.square', (['sigma_f'], {}), '(sigma_f)\n', (3807, 3816), True, 'import tensorflow as tf\n'), ((5776, 5837), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[self._batch_size, self._y_size, self._x_size]'}), '(shape=[self._batch_size, self._y_size, self._x_size])\n', (5783, 5837), True, 'import tensorflow as tf\n'), ((5897, 5944), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[self._batch_size, self._y_size]'}), '(shape=[self._batch_size, self._y_size])\n', (5904, 5944), True, 'import tensorflow as tf\n'), ((6277, 6304), 'tensorflow.cast', 'tf.cast', (['kernel', 'tf.float64'], {}), '(kernel, tf.float64)\n', (6284, 6304), True, 'import tensorflow as tf\n'), ((6789, 6809), 'tensorflow.range', 'tf.range', (['num_target'], {}), '(num_target)\n', (6797, 6809), True, 'import tensorflow as tf\n'), ((4710, 4758), 'tensorflow.range', 'tf.range', (['(-2.0)', '(2.0)', '(1.0 / 100)'], {'dtype': 'tf.float32'}), '(-2.0, 2.0, 1.0 / 100, dtype=tf.float32)\n', (4718, 4758), True, 'import tensorflow as tf\n')] |
shagun6/critiquebrainz | critiquebrainz/frontend/views/index.py | b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5 | from flask import Blueprint, render_template
from flask_babel import format_number
import critiquebrainz.db.users as db_users
import critiquebrainz.db.review as db_review
from bs4 import BeautifulSoup
from markdown import markdown
DEFAULT_CACHE_EXPIRATION = 10 * 60 # seconds
frontend_bp = Blueprint('frontend', __name__)
@frontend_bp.route('/')
def index():
# Popular reviews
popular_reviews = db_review.get_popular(6)
for review in popular_reviews:
# Preparing text for preview
preview = markdown(review['text'], safe_mode="escape")
review['preview'] = ''.join(BeautifulSoup(preview, "html.parser").findAll(text=True))
# Recent reviews
recent_reviews, _ = db_review.list_reviews(sort='created', limit=9)
# Statistics
review_count = format_number(db_review.get_count(is_draft=False))
user_count = format_number(db_users.total_count())
return render_template('index/index.html', popular_reviews=popular_reviews, recent_reviews=recent_reviews,
reviews_total=review_count, users_total=user_count)
@frontend_bp.route('/about')
def about():
return render_template('index/about.html')
@frontend_bp.route('/guidelines')
def guidelines():
return render_template('index/guidelines.html')
| [((293, 324), 'flask.Blueprint', 'Blueprint', (['"""frontend"""', '__name__'], {}), "('frontend', __name__)\n", (302, 324), False, 'from flask import Blueprint, render_template\n'), ((408, 432), 'critiquebrainz.db.review.get_popular', 'db_review.get_popular', (['(6)'], {}), '(6)\n', (429, 432), True, 'import critiquebrainz.db.review as db_review\n'), ((708, 755), 'critiquebrainz.db.review.list_reviews', 'db_review.list_reviews', ([], {'sort': '"""created"""', 'limit': '(9)'}), "(sort='created', limit=9)\n", (730, 755), True, 'import critiquebrainz.db.review as db_review\n'), ((911, 1071), 'flask.render_template', 'render_template', (['"""index/index.html"""'], {'popular_reviews': 'popular_reviews', 'recent_reviews': 'recent_reviews', 'reviews_total': 'review_count', 'users_total': 'user_count'}), "('index/index.html', popular_reviews=popular_reviews,\n recent_reviews=recent_reviews, reviews_total=review_count, users_total=\n user_count)\n", (926, 1071), False, 'from flask import Blueprint, render_template\n'), ((1145, 1180), 'flask.render_template', 'render_template', (['"""index/about.html"""'], {}), "('index/about.html')\n", (1160, 1180), False, 'from flask import Blueprint, render_template\n'), ((1246, 1286), 'flask.render_template', 'render_template', (['"""index/guidelines.html"""'], {}), "('index/guidelines.html')\n", (1261, 1286), False, 'from flask import Blueprint, render_template\n'), ((523, 567), 'markdown.markdown', 'markdown', (["review['text']"], {'safe_mode': '"""escape"""'}), "(review['text'], safe_mode='escape')\n", (531, 567), False, 'from markdown import markdown\n'), ((807, 842), 'critiquebrainz.db.review.get_count', 'db_review.get_count', ([], {'is_draft': '(False)'}), '(is_draft=False)\n', (826, 842), True, 'import critiquebrainz.db.review as db_review\n'), ((875, 897), 'critiquebrainz.db.users.total_count', 'db_users.total_count', ([], {}), '()\n', (895, 897), True, 'import critiquebrainz.db.users as db_users\n'), ((604, 641), 'bs4.BeautifulSoup', 'BeautifulSoup', (['preview', '"""html.parser"""'], {}), "(preview, 'html.parser')\n", (617, 641), False, 'from bs4 import BeautifulSoup\n')] |
Q-Alpha/Hackathon2020 | Enigma/Enigma-master/GBS/gbsHelper.py | c0ed45b4c1cc4f475f83786e641b859dad94f863 | import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
class GBS:
def __init__(self, samples =[], min_pho = 16, max_pho = 30, subgraph_size = 8, max_count = 2000):
self.samples = samples
self.min_pho = min_pho
self.max_pho = max_pho
self.subgraph_size = subgraph_size
self.max_count = max_count
def graphDensity(self, samples, min_pho, max_pho, subgraph_size, max_count):
dense = subgraph.search(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)
dense_freq = []
for k in range(subgraph_size, min_pho+1):
dense_freq.append([k,len(dense[k])])
return dense, dense_freq
def graphFreqScore(self, d_freqs, max_freq):
x,y = [], []
for i in range(len(d_freqs)):
for j in range(len(d_freqs[i])):
n,f = d_freqs[i][j][0],d_freqs[i][j][1]
x.append(n*f)
            N = len(d_freqs[i])
y.append((1/max_freq)*(np.sum(x)/N))
x = []
min_y = np.min(y)
y = [min_y/x for x in y]
return y, y.index(max(y))
def runJob(self, eng):
num_subsystem = 8
prog = sf.Program(num_subsystem, name="remote_job")
U = random_interferometer(4)
with prog.context as q:
# Initial squeezed states
# Allowed values are r=1.0 or r=0.0
ops.S2gate(1.0) | (q[0], q[4])
ops.S2gate(1.0) | (q[1], q[5])
ops.S2gate(1.0) | (q[3], q[7])
# Interferometer on the signal modes (0-3)
ops.Interferometer(U) | (q[0], q[1], q[2], q[3])
ops.BSgate(0.543, 0.123) | (q[2], q[0])
ops.Rgate(0.453) | q[1]
ops.MZgate(0.65, -0.54) | (q[2], q[3])
# *Same* interferometer on the idler modes (4-7)
ops.Interferometer(U) | (q[4], q[5], q[6], q[7])
ops.BSgate(0.543, 0.123) | (q[6], q[4])
ops.Rgate(0.453) | q[5]
ops.MZgate(0.65, -0.54) | (q[6], q[7])
ops.MeasureFock() | q
        results = eng.run(prog, shots=10)
# state = results.state
# measurements = results.samples
return results.samples
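# Hedged usage sketch (added; not part of the original class). "X8" as the
# remote backend name is an assumption; running it needs a configured Xanadu
# cloud account, so this only illustrates how runJob() is meant to be called.
if __name__ == "__main__":
    gbs = GBS(min_pho=16, max_pho=30, subgraph_size=8, max_count=2000)
    eng = sf.RemoteEngine("X8")
    gbs.samples = gbs.runJob(eng)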
| [((621, 700), 'strawberryfields.apps.subgraph.search', 'subgraph.search', (['samples', 'pl_graph', 'subgraph_size', 'min_pho'], {'max_count': 'max_count'}), '(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)\n', (636, 700), False, 'from strawberryfields.apps import data, sample, subgraph, plot\n'), ((1212, 1221), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1218, 1221), True, 'import numpy as np\n'), ((1359, 1403), 'strawberryfields.Program', 'sf.Program', (['num_subsystem'], {'name': '"""remote_job"""'}), "(num_subsystem, name='remote_job')\n", (1369, 1403), True, 'import strawberryfields as sf\n'), ((1416, 1440), 'strawberryfields.utils.random_interferometer', 'random_interferometer', (['(4)'], {}), '(4)\n', (1437, 1440), False, 'from strawberryfields.utils import random_interferometer\n'), ((1571, 1586), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1581, 1586), False, 'from strawberryfields import ops\n'), ((1614, 1629), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1624, 1629), False, 'from strawberryfields import ops\n'), ((1657, 1672), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1667, 1672), False, 'from strawberryfields import ops\n'), ((1756, 1777), 'strawberryfields.ops.Interferometer', 'ops.Interferometer', (['U'], {}), '(U)\n', (1774, 1777), False, 'from strawberryfields import ops\n'), ((1817, 1841), 'strawberryfields.ops.BSgate', 'ops.BSgate', (['(0.543)', '(0.123)'], {}), '(0.543, 0.123)\n', (1827, 1841), False, 'from strawberryfields import ops\n'), ((1869, 1885), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['(0.453)'], {}), '(0.453)\n', (1878, 1885), False, 'from strawberryfields import ops\n'), ((1905, 1928), 'strawberryfields.ops.MZgate', 'ops.MZgate', (['(0.65)', '(-0.54)'], {}), '(0.65, -0.54)\n', (1915, 1928), False, 'from strawberryfields import ops\n'), ((2018, 2039), 'strawberryfields.ops.Interferometer', 'ops.Interferometer', (['U'], {}), '(U)\n', (2036, 2039), False, 'from strawberryfields import ops\n'), ((2079, 2103), 'strawberryfields.ops.BSgate', 'ops.BSgate', (['(0.543)', '(0.123)'], {}), '(0.543, 0.123)\n', (2089, 2103), False, 'from strawberryfields import ops\n'), ((2131, 2147), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['(0.453)'], {}), '(0.453)\n', (2140, 2147), False, 'from strawberryfields import ops\n'), ((2167, 2190), 'strawberryfields.ops.MZgate', 'ops.MZgate', (['(0.65)', '(-0.54)'], {}), '(0.65, -0.54)\n', (2177, 2190), False, 'from strawberryfields import ops\n'), ((2219, 2236), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (2234, 2236), False, 'from strawberryfields import ops\n'), ((1163, 1172), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (1169, 1172), True, 'import numpy as np\n')] |
jenniexie/happy | happy/HappyNodeJoin.py | 6ba01586e20bb3e4f92e180fd8dce3752519f7c9 | #!/usr/bin/env python
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
#    Implements HappyNodeJoin class, through which a virtual node joins a network.
#
#    When a node joins a network, a TAP interface is created in the node and in
#    the network. Then TUN is set up on the node.
#
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyLink import HappyLink
from happy.HappyNetwork import HappyNetwork
from happy.HappyNode import HappyNode
import happy.HappyLinkAdd
import happy.HappyNodeAddress
import happy.HappyNodeRoute
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap"] = False
options["network_id"] = None
options["fix_hw_addr"] = None
options["customized_eui64"] = None
def option():
return options.copy()
class HappyNodeJoin(HappyLink, HappyNode, HappyNetwork):
"""
Assigns a virtual node to a specific network.
happy-node-join [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-n --network <NETWORK_NAME>] [-m --mac <HW_ADDR>]
[-c --customizedeui64 <CUST_EUI64>] [-p --tap]
-i --id Required. Node to be added to a network. Find using
happy-node-list or happy-state.
-n --network Required. Network to add the node to. Find using
happy-network-list or happy-state.
-m --mac The MAC hardware address for the node.
-c --customizedeui64 The EUI64 address for the node.
-p --tap Configure the link between the node and the network as an
L2 TAP device with a virtual bridge. Omit this parameter to
default to an L3 TUN configuration for normal IP routing.
Example:
$ happy-node-join ThreadNode HomeThread
Adds the ThreadNode node to the HomeThread network.
$ happy-node-join -i onhub -n HomeWiFi -m 5
Adds the onhub node to the HomeWiFi network with a MAC hardware address of
00:00:00:00:00:05.
$ happy-node-join -i onhub -n HomeWiFi -c 00:00:00:00:00:00:00:05
Adds the onhub node to the HomeWiFi network with an EUI64 address of
00:00:00:00:00:00:00:05.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNetwork.__init__(self)
HappyNode.__init__(self)
HappyLink.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tap = opts["tap"]
self.network_id = opts["network_id"]
self.fix_hw_addr = opts["fix_hw_addr"]
self.customized_eui64 = opts["customized_eui64"]
if not self.fix_hw_addr and opts["customized_eui64"]:
self.fix_hw_addr = self.customized_eui64[6:]
self.customized_eui64 = self.customized_eui64.replace(':', '-')
def __pre_check(self):
# Check if the name of the node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should join a network."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if the name of the network is given
if not self.network_id:
emsg = "Missing name of the virtual network that be joined by a virtual node."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if network exists
if not self._networkExists():
emsg = "virtual network %s does not exist." % (self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if node already joined that network
if self.network_id in self.getNodeNetworkIds():
emsg = "virtual node %s is already part of %s network." % (self.node_id, self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
self.fix_hw_addr = self.fixHwAddr(self.fix_hw_addr)
# Check if HW MAC address is valid
if self.fix_hw_addr is not None and self.fix_hw_addr.count(":") != 5:
emsg = "virtual node %s get invalid MAC HW address %s." % (self.node_id, self.fix_hw_addr)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __create_link(self):
options = happy.HappyLinkAdd.option()
options["quiet"] = self.quiet
options["type"] = self.getNetworkType()
options["tap"] = self.tap
link = happy.HappyLinkAdd.HappyLinkAdd(options)
ret = link.run()
self.link_id = ret.Data()
self.readState()
def __post_check_1(self):
# Ensure that the link is saved in the state
if self.link_id not in self.getLinkIds():
emsg = "Link %s does not exist." % (self.link_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __get_node_interface_info(self):
self.link_type = self.getLinkType(self.link_id)
self.link_network_end = self.getLinkNetworkEnd(self.link_id)
self.link_node_end = self.getLinkNodeEnd(self.link_id)
self.node_interface_name = self.getNodeInterfaceName(self.node_id, self.link_type)
def __connect_to_network(self):
self.moveInterfaceToNamespace(self.link_network_end, self.network_id)
# Attach to bridge
cmd = "brctl addif " + self.uniquePrefix(self.network_id) + " " + self.link_network_end
cmd = self.runAsRoot(cmd)
ret = self.CallAtNetwork(self.network_id, cmd)
def __connect_to_node(self):
if not self.isNodeLocal(self.node_id):
if self.getLinkTap(self.link_id):
self.moveLwipInterfaceToNamespace(self.link_id, self.node_id)
else:
self.moveInterfaceToNamespace(self.link_node_end, self.node_id)
cmd = "ip link set " + self.link_node_end
cmd += " name " + self.node_interface_name
if self.fix_hw_addr is not None:
cmd += " address " + self.fix_hw_addr
cmd = self.runAsRoot(cmd)
ret = self.CallAtNode(self.node_id, cmd)
def __nmconf(self):
if not self.isNodeLocal(self.node_id):
return
if not self.tap:
cmd = "nmcli dev disconnect iface " + self.node_interface_name
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
def __check_node_hw_addr(self):
hw_addr = self.getHwAddress(self.node_interface_name, self.node_id)
hw_addr_int = IP.mac48_string_to_int(hw_addr)
if (hw_addr_int & (1 << 41)):
hw_addr_int = hw_addr_int & ~(1 << 41)
new_hw_addr = IP.mac48_string_to_int(hw_addr_int)
cmd = "ip link set " + self.node_interface_name + " address " + str(new_hw_addr)
cmd = self.runAsRoot(cmd)
r = self.CallAtNode(self.node_id, cmd)
def __post_check_2(self):
return
def __bring_up_interface(self):
self.bringLinkUp(self.link_id, self.node_interface_name, self.node_id, self.network_id)
def __add_new_interface_state(self):
self.setLinkNetworkNodeHw(self.link_id, self.network_id, self.node_id, self.fix_hw_addr)
new_network_interface = {}
self.setNetworkLink(self.network_id, self.link_id, new_network_interface)
new_node_interface = {}
new_node_interface["link"] = self.link_id
new_node_interface["type"] = self.link_type
new_node_interface["ip"] = {}
if self.customized_eui64:
new_node_interface["customized_eui64"] = self.customized_eui64
self.setNodeInterface(self.node_id, self.node_interface_name, new_node_interface)
def __assign_network_addresses(self):
network_prefixes = self.getNetworkPrefixes(self.network_id)
for prefix in network_prefixes:
options = happy.HappyNodeAddress.option()
options["quiet"] = self.quiet
options["node_id"] = self.node_id
options["interface"] = self.node_interface_name
if IP.isIpv6(prefix):
nid = self.getInterfaceId(self.node_interface_name, self.node_id)
else:
nid = self.getNextNetworkIPv4Id(prefix, self.network_id)
options["address"] = self.getNodeAddressOnPrefix(prefix, nid)
options["add"] = True
addrctrl = happy.HappyNodeAddress.HappyNodeAddress(options)
ret = addrctrl.run()
def __load_network_routes(self):
routes = self.getNetworkRoutes(self.network_id)
for route_to in routes.keys():
route_record = self.getNetworkRoute(route_to, self.network_id)
options = happy.HappyNodeRoute.option()
options["quiet"] = self.quiet
options["add"] = True
options["node_id"] = self.node_id
options["to"] = route_to
options["via"] = route_record["via"]
options["prefix"] = route_record["prefix"]
noder = happy.HappyNodeRoute.HappyNodeRoute(options)
ret = noder.run()
def run(self):
with self.getStateLockManager():
self.__pre_check()
self.__create_link()
self.__post_check_1()
self.__get_node_interface_info()
self.__connect_to_network()
self.__connect_to_node()
self.__nmconf()
self.__check_node_hw_addr()
self.__bring_up_interface()
self.__post_check_2()
self.__add_new_interface_state()
self.writeState()
self.__assign_network_addresses()
self.__load_network_routes()
return ReturnMsg(0)
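# Hedged usage sketch (added; not part of the original module): programmatic
# equivalent of the happy-node-join shell command shown in the class docstring,
# using the node and network names from that example.
if __name__ == "__main__":
    opts = option()
    opts["node_id"] = "ThreadNode"
    opts["network_id"] = "HomeThread"
    ret = HappyNodeJoin(opts).run()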
| [((3020, 3047), 'happy.HappyNetwork.HappyNetwork.__init__', 'HappyNetwork.__init__', (['self'], {}), '(self)\n', (3041, 3047), False, 'from happy.HappyNetwork import HappyNetwork\n'), ((3056, 3080), 'happy.HappyNode.HappyNode.__init__', 'HappyNode.__init__', (['self'], {}), '(self)\n', (3074, 3080), False, 'from happy.HappyNode import HappyNode\n'), ((3089, 3113), 'happy.HappyLink.HappyLink.__init__', 'HappyLink.__init__', (['self'], {}), '(self)\n', (3107, 3113), False, 'from happy.HappyLink import HappyLink\n'), ((7586, 7617), 'happy.utils.IP.IP.mac48_string_to_int', 'IP.mac48_string_to_int', (['hw_addr'], {}), '(hw_addr)\n', (7608, 7617), False, 'from happy.utils.IP import IP\n'), ((10768, 10780), 'happy.ReturnMsg.ReturnMsg', 'ReturnMsg', (['(0)'], {}), '(0)\n', (10777, 10780), False, 'from happy.ReturnMsg import ReturnMsg\n'), ((7734, 7769), 'happy.utils.IP.IP.mac48_string_to_int', 'IP.mac48_string_to_int', (['hw_addr_int'], {}), '(hw_addr_int)\n', (7756, 7769), False, 'from happy.utils.IP import IP\n'), ((9131, 9148), 'happy.utils.IP.IP.isIpv6', 'IP.isIpv6', (['prefix'], {}), '(prefix)\n', (9140, 9148), False, 'from happy.utils.IP import IP\n')] |
SDRAST/Data_Reduction | __init__.py | f007d716b5c28c086910a81206cffaf37ff6368c | # -*- coding: utf-8 -*-
"""
Modules to support data reduction in Python.
The main purpose of the base module ``Data_Reduction`` is to provide a
superclass with a good set of attributes and methods to cover all common needs.
The base module is also able to read data from a text file as a ``numpy``
structured array. This is done with a class called ``DataGetterMixin`` which
must be invoked after the base class has been initiated.
The module function ``examine_text_data_file()`` reveals the structure of the
file(s) that provide the data.
Examples
========
Here we initiate a base class after mixing in the data getter. The first line of
the file has column names but the first three columns are all under one
name ``UTC`` so we specify column widths to consider the first three columns
to be one column. We use the names from the first line of the file, which
could have been done with an ``open()``, ``readline()``, and ``close()``::
mixIn(Observation, DataGetterMixin)
obs = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs.open_datafile('t12127.10',
delimiter=[17,16,3,11,7,9,8,2,6],
skip_header=1,
names="UTC Epoch Chan Tsys Int Az El Diode Level".split())
Now the data getter is already mixed in to Observation so we don't need to do
it again. In this case we specify the names of the columns, changing ``Int`` to
``Integr``::
obs2 = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs2.open_datafile('t12127.10', skip_header=1,
names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split())
The class Map inherits from DataGetterMixin, so no explicit mixin required::
obsmap = Map(dss=84, date="2020/163", project="SolarPatrol")
obsmap.initialize('sim-venus.dat', source="Venus")
Let's examine ``obsmap``. We have only one signal column::
In [3]: obsmap.channel.keys()
Out[3]: dict_keys(['xl'])
In [4]: obsmap.channel['xl'].keys()
Out[4]: dict_keys(['freq', 'bw', 'pol', 'ifmode', 'atten', 'power'])
"""
# standard Python modules
import datetime
import glob
import h5py
import logging
import math
import matplotlib.dates as MPLd
import numpy as NP
import os
import re
import readline
import scipy.interpolate
import scipy.fftpack
import Astronomy as A
import Astronomy.DSN_coordinates as coords
import Astronomy.Ephem as AE
import DatesTimes as DT
import local_dirs
import Math.clusters as VQ # vector quantization
import support
# enable raw_input Tab completion
readline.parse_and_bind("tab: complete")
logger = logging.getLogger(__name__) # module logger
class Observation(object):
"""
superclass for a data structure and methods
Attributes
==========
aliases - (dict) data keys to replace those in original data
channel - (dict) signal paths, e.g., different freqs and pols
data - (dict) original data, e.g., read from file or database
DOY - (int) day of year of observation
end - (float) UNIX time at the end
latitude - (float) from obs
logger - (logging.Logger)
longitude - (float) from obs
name - (str) user assigned, defaults to YEAR/DOY
numdata - (int) number of data samples
obs - (AE.DSS) observatory
session - (Session) set of observations, parent to Observation
session_path - (str) directory for session files
start - (float) UNIX time at the beginning
year - (int) year of observation
**Reserved Column Names**
These column names are recognized. They are also the keys for attribute
``data``.
These quantities must be present in some form::
unixtime (float) UNIX time in sec
chan_name (str) channel name
integr (float) integration (exposure) in sec
azel (float,float) azimuth and elevation in decimal deg
power (float) power level if only a single channel
Optional::
diode (float) 0 or power in K (integers OK)
level (float) (unidentified -- in ``tlog`` table)
cryotemp (float) cryostat temp in K
windspeed (float) km/hr
winddir (float) deg
ambtemp (float) deg C
pressure (float) mbar
Columns to be computed::
mpldatenum (float) matplotlib ``datenum``
Alternative for ``power``::
tsys (float) system temperature (calibrated power)
top (float) alternative for ``tsys`` (used in DSN)
vfc_counts (int) VFC counts (rate times ``integr``)
Any column with a name which is not a reserved name is assumed to be
power-like data from the channel with that name, unless that name is in a
list provided to the argument ``ignore`` in the method ``get_data_channels``
of the class ``DataGetterMixin``.
Alternative for ``unixtime``::
year (int) year of observation
doy (int) day of year
utc (str) HH:MM:SS
timestr (str) something like 2020/06/14/14:22:21.00
Alternative for ``chan_name``::
chan (int) index in receiver channel names
Alternative for ``azel``::
radec (float,float) precessed right ascension in decimal hours and
precessed declination in decimal deg
radec1950 (float,float) mean right ascension in decimal hours and
mean declination in decimal deg at epoch
radec2000 (float,float) mean right ascension in decimal hours and
mean declination at epoch in decimal deg
az (float) azimuth in decimal deg
el (float) elevation in decimal deg
ra (float) precessed right ascension in decimal hours
dec (float) precessed declination in decimal deg
ra1950 (float) mean right ascension in decimal hours at epoch
dec1950 (float) mean declination in decimal deg at epoch
ra2000 (float) mean right ascension in decimal hours at epoch
dec2000 (float) mean declination in decimal deg at epoch
Notes
=====
* The ``data`` structure is a dict.
* The value of a ``data`` item is either a numpy array or a object
like ``float``, ``int``, or ``str``.
* The keys have reserved words defined above and will be lowercase.
* Items with other keys may be added, typically by a child class.
  * Coordinates shall be in pairs, e.g. ``azel``, ``radec``. (This way you
never get one without the other.)
"""
reserved = ['unixtime','chan_name','integr','az','el','year','doy','utc',
'timestr','chan','tsys','top','diode','level','cryotemp',
'windspeed','winddir','ambtemp','pressure',
'ra','dec','ra1950','dec1950','ra2000','dec2000']
power_keys = ['tsys', 'top', 'vfc_counts', 'power']
def __init__(self, parent=None, name=None, dss=None,
date=None, project=None):
"""
Create a base Observation object.
This is not meant to be initialized by itself. A subclass generally
determines how data are read in. However, method ``initialize()``
provides a basic data read capability using ``numpy.genfromtxt()``
and creates the object's data structure.
Args:
parent (Session): session to which this observation belongs
name (str): an identifier; default is station ID + "obs"
dss (int): station number
date (str): "YEAR/DOY"
project (str): directory under /usr/local/projects
"""
self.logger = logging.getLogger(logger.name+".Observation")
self.session = parent
# observatory must be specified
if dss:
self.obs = coords.DSS(dss)
self.longitude = self.obs.long*180/math.pi # deg
self.latitude = self.obs.lat*180/math.pi # deg
else:
self.logger.error("__init__: requires observatory location")
raise Exception("Where were the data taken?")
# give the object a name
if name:
self.name = name
else:
self.name = "DSS"+str(dss)+"obs"
self.logger = logging.getLogger(logger.name+".Observation")
# the observation was part of some project
if project:
self.project = project
else:
self.logger.error("__init__: requires a project")
raise Exception("Where are the session's working files?")
# the observation was done on some date
if date:
y,d = date.split('/')
self.year = int(y);
self.DOY = int(d)
projdatapath, self.sessionpath, rawdatapath = \
get_obs_dirs(project, dss, self.year, self.DOY,
datafmt=None)
self.logger.debug("__init__: session path: %s", self.sessionpath)
else:
self.logger.error("__init__: requires a date")
      raise Exception("When were the data taken?")
    # accommodate subclass arguments
self.aliases = {}
# what I really want to do here is see if this was called by a subclass,
# in which case I do not try to get the channel info until this
# initialization has finished.
#
#if hasattr(self, "get_data_channels"):
    #  channels = self.get_data_channels()
# self.make_channels(channels)
#else:
# self.logger.info("__init__: initialize() may now be called")
def splitkey(self, longlat):
"""
Checks for presence of coordinates in pairs or singles
@param longlat : "azel", or "radec", or "radecEPOC"
@type longlat : str
"""
longitude = longlat[:2] # 'az' or 'ra'
if len(longlat) > 5: # has epoch
epoch = longlat[-4:]
longitude += epoch
latitude = longlat[2:-4]+epoch
else: # date of observation
latitude = longlat[2:]
epoch = None
return longitude, latitude, epoch
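  # Example sketch (worked out from the slicing above):
  #   self.splitkey("azel")      -> ("az", "el", None)
  #   self.splitkey("radec2000") -> ("ra2000", "dec2000", "2000")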
def check_for(self, data, longlat):
"""
Checks for separate coordinates and splits if coord pairs
Args:
data (dict): attribute ``data``
longlat (str): "azel", or "radec", or "radecEPOC"
"""
longitude, latitude, epoch = self.splitkey(longlat)
if longitude in data.dtype.names and \
latitude in data.dtype.names:
self.logger.debug("check_for: data has %s and %s", longitude, latitude)
self.data[longitude] = data[longitude]
self.data[latitude] = data[latitude]
return True
elif longlat in data.dtype.names:
self.logger.debug("check_for: data has %s", longlat)
      # split the (longitude, latitude) pairs into two arrays
      self.data[longitude], self.data[latitude] = map(NP.array,
                                                       zip(*data[longlat]))
self.logger.debug("check_for: added %s and %s to data",
longitude, latitude)
return True
else:
# coords need to be computed from other coords
return False
def unpack_to_complex(self, rawdata):
"""
Converts a sequence of alternating real/imag samples to complex
@param rawdata : alternating real and imaginary bytes
@type rawdata : numpy array of signed int8
@return: numpy array of complex
"""
datalen = len(rawdata)
real = rawdata[0:datalen:2]
imag = rawdata[1:datalen:2]
data = real + 1j*imag
return data
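  # Example sketch: interleaved real/imaginary int8 samples become complex --
  #   self.unpack_to_complex(NP.array([1, 2, 3, 4], dtype=NP.int8))
  #   -> array([1.+2.j, 3.+4.j])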
def sideband_separate(self, data):
"""
Converts a complex spectrum array and returns two reals with USB and LSB
This applies a Hilbert transform to the complex data.
"""
usb = (data.real + scipy.fftpack.hilbert(data).imag)
lsb = (scipy.fftpack.hilbert(data).real + data.imag)
return lsb,usb
class Channel(support.PropertiedClass):
"""
Class for a signal path
"""
def __init__(self, parent, name, freq=None, bw=None, pol=None, IFtype=None,
atten=None):
"""
Notes
=====
The properties can be accessed as if the class were a dict.
Arguments
=========
freq:float or int: center frequency in MHz
bw:float or int: bandwidth in MHz
pol:str: polarization code
"""
support.PropertiedClass.__init__(self)
self.parent = parent
self.logger = logging.getLogger(self.parent.name+".Channel")
self.logger.debug("__init__: created %s", self.logger.name)
self.logger.debug("__init__: parent is %s", self.parent)
self.name = name
self.data['freq'] = freq
self.data['bw'] = bw
self.data['pol'] = pol
self.data['ifmode'] = IFtype
self.data['atten'] = atten
class DataGetterMixin(object):
"""
Class for getting data from a CSV file.
"""
def initialize(self, filename, delimiter=" ", names=True, skip_header=0,
source=None):
"""
Get the data and make a data structure for the observations.
This is not included by default in ``__init__()`` to keep it simple for
subclasses.
Args:
filename (str): name only, required; the path is provided
delimiter (str): what separates the columns
names (bool): the first line has column names
skip_header (int) : number of rows to skip
"""
# get the data
data = self.open_datafile(filename, delimiter=delimiter, names=names,
skip_header=skip_header)
# get the signal columns and names
metadata, signals = self.get_data_channels(data)
# create Channel objects for the signal properties
self.make_channels(signals)
# create the data structure
self.make_data_struct(data, metadata, signals)
# compute the offsets from the source center for each data point
if source:
self.get_offsets(source=source)
else:
self.logger.warning("initialize: no source specified; no offsets")
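  # Usage sketch (file name, session and source are hypothetical): a subclass
  # mixing in DataGetterMixin would typically call
  #   obs.initialize("t12127.10.csv", source="Sun")
  # which reads <sessionpath>/t12127.10.csv, creates obs.channel entries for
  # the signal columns, fills obs.data, and computes offsets from the Sun.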
def open_datafile(self, filename, delimiter=" ", names=True, skip_header=0):
"""
Opens and reads a data file
    This is used by ``Malargue`` (one data file) and ``GAVRT`` (one data file
for each signal).
Args:
filename (str): text data file name
delimiter (str): separator between columns (default: whitespace)
names (bool): file row has column names (default: True)
skip_header (int): number of rows to skip at beginning of file
Returns:
ndarray:
"""
data = NP.genfromtxt(self.sessionpath+filename,
delimiter=delimiter,
dtype=None,
names=names,
case_sensitive='lower',
skip_header=skip_header,
encoding=None)
return data
def get_data_channels(self, data, ignore=None):
"""
Gets or sets the names of the signal columns
Column names are separated into metadata and signals. Names in
    ``ignore`` are ignored. Names in ``aliases`` are replaced.
Args:
data (ndarray): data read from text file
ignore (list of str): columns to ignore; default None
Returns:
(list of str, list of str): metadata, signals
"""
names = data.dtype.names
metadata = []
signals = []
for name in names:
if ignore:
if name in ignore:
          continue
if name.casefold() in map(str.casefold, self.aliases):
key = self.aliases[name].lower() # we use only lower case names
else:
key = name.lower()
self.logger.debug("get_data_channels: doing %s for %s", key, name)
if key in map(str.casefold, Observation.reserved):
if key.casefold() in ['top', 'tsys']:
signals.append(key)
else:
metadata.append(key)
else:
signals.append(key)
self.logger.debug("get_data_channels: signals: %s", signals)
self.logger.debug("get_data_channels: metadata: %s", metadata)
return metadata, signals
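  # Example sketch (hypothetical columns): a table with columns
  #   unixtime  az  el  tsys  XL  XR
  # would be split into
  #   metadata = ['unixtime', 'az', 'el'],  signals = ['tsys', 'XL', 'XR']
  # because 'tsys' is reserved but handled as a signal above, while 'XL' and
  # 'XR' are not reserved at all.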
def make_data_struct(self, data, metadata, signals):
"""
    Takes the numpy ``ndarray`` made from the text table and builds the
    ``data`` attribute, so that a column can be extracted using ``data[label]``.
Args
====
data: (ndarray) the data from the text file
metadata: (list of str) the column names for metadata
signals: (list of str) the column names for power-like data
"""
# get the known columns:
self.data = {}
self.numdata = len(data)
#self.logger.debug("make_data_struct: using aliases: %s", self.aliases)
# get columns that are not metadata; each has power for a channel
for signal in signals:
#self.logger.debug("make_data_struct: for signal: %s", signal)
#if signal in self.aliases.items():
# get the key in 'data' which matches 'value' in 'aliases'
# power = data[next(key for key, value in self.aliases.items()
# if value == signal)][idx]
#else:
# power = data[signal]
#self.channel[signal]['power'] = power
self.channel[signal]['power'] = data[signal]
# get UNIX time
if 'unixtime' in metadata:
if 'unixtime' in data.dtype.names:
self.data['unixtime'] = data['unixtime']
else:
# look up the equivalent of UNIX time in the data table
self.data['unixtime'] = data[next(key
for key, value in self.aliases.items()
if value == 'unixtime')]
# compute other convenient forms of time
self.data['datetime'] = [] # Python datetime.date
self.data['date_num'] = [] # matplotlib.dates date number
for idx in list(range(self.numdata)):
if 'unixtime' in data.dtype.names:
tm = data['unixtime'][idx]
else:
tm = data[next(key for key, value in self.aliases.items()
if value == 'unixtime')][idx]
dt = datetime.datetime.utcfromtimestamp(tm)
self.data['datetime'].append(dt)
self.data['date_num'].append(MPLd.date2num(dt))
self.start = self.data['unixtime'][0]
self.end = self.data['unixtime'][-1]
else:
# figure out how to process the time data columns
pass
# compute alternate coordinates
if self.check_for(data, 'azel'):
# azel exists; compute radec if needed; then radec2000 if needed
if self.check_for(data, 'radec'):
pass
else:
self.radec_from_azel()
if self.check_for(data, 'radec2000'):
# ra2000 and dec2000 already exist
pass
else:
self.radec2000_from_radec()
elif self.check_for(data, 'radec2000'):
# coordinates exist; compute back to azimuth and elevation
if self.check_for(data, 'radec'):
pass
else:
# compute observed RA and dec
self.radec_from_radec2000()
if self.check_for(data, 'azel'):
pass
else:
self.azel_from_radec()
# in here check for 'radec'
else:
self.logger.error("no coordinates found in data")
raise Exception("check INFO logging for columns found")
self.start = self.data['unixtime'].min()
self.end = self.data['unixtime'].max()
def make_channels(self, signals, props=None):
"""
Assign properties to the channels.
The prop keys are "freq", "pol", and "IFtype".
Args:
props (dict of dicts): signal channel properties.
"""
self.channel = {}
for ch in signals:
chindex = signals.index(ch)
if props:
self.channel[ch] = self.Channel(self, ch,
freq =props[ch]['freq'],
bw =props[ch]['bw'],
pol =props[ch]['pol'],
IFtype=props[ch]['IFtype'],
atten =props[ch]['atten'])
else:
self.channel[ch] = self.Channel(self, ch)
class GriddingMixin(object):
"""
Class for all the data and methods associated with a raster scan map
It is expected that the parent class is a subclass of ``Observation`` already
by virtue of it being a superclass of subclass which inherits these methods.
Attrs:
cfg (dict):
data (numpy array): from ``Observation``
logger (logging.Logger): replaces ``Observation`` logger
name (str): replaces ``Observation`` name
session (Session):
source (str):
step (float): map step size
"""
def get_grid_stepsize(self, xy=None):
"""
Determine the stepsize of gridded data
This assumes xdec and dec data increase incrementally by 'stepsize'.
The sequences may repeat in a sawtooth-like series. The number of
'xdec' and 'dec' points is multiple times the gridsize.
Arguments:
xy (tuple or list) - X-array and Y-array (default Map.data)
"""
# get the absolute value of coordinate intervals
if xy:
dxdecs = abs(xy[0][1:] - xy[0][:-1])
ddecs = abs(xy[1][1:] - xy[1][:-1])
else:
dxdecs = abs(self.data['xdec_offset'][1:]-self.data['xdec_offset'][:-1])
ddecs = abs(self.data['dec_offset'][1:] -self.data['dec_offset'][:-1])
# form array of X,Y pairs
coords = NP.array(list(zip(dxdecs,ddecs)))
# expect two clusters (default)
cluster_pos = VQ.find_clusters(coords).round(4) # tenths of mdeg
# return the non-zero intervals
return cluster_pos[0].max(), cluster_pos[1].max()
def regrid(self, width=1.0, height=1.0, step=None, power_key=None):
"""
converts a map from observed coordinates to map coordinates
If ``step`` is not given then the step size will be the average step size
in X and the average step in Y. In this case, the effect is to make a
regular grid if the original positions were not exact, i.e., pointing error.
@param width : map width in deg
@type width : float
@param height : map height in deg
@type height : float
@param step : map step size in X and Y in deg
@type step : (float, float)
@param power_key : dict key of Z-value
@type power_key : str
"""
# what is the power-like quantity?
if power_key:
pass
else:
# take the first that matches
for key in Observation.power_keys:
if key in self.data:
power_key = key
self.logger.info("regrid: using '%s'", power_key)
break
else:
continue
if power_key:
pass
else:
self.logger.error("regrid: no power data key found")
return None
    if step is None:
# use the original stepsize
self.xstep, self.ystep = self.get_grid_stepsize()
else:
self.xstep, self.ystep = step
self.data['grid_x'] = NP.arange(
-width/2, width/2+self.xstep/2, self.xstep/2)
self.data['grid_y'] = NP.arange(
-height/2,height/2+self.ystep/2, self.ystep/2)
self.logger.debug("regrid: grid shape is %dx%d", len(self.data['grid_x']),
len(self.data['grid_y']))
self.data['grid_z'] = {}
for chnl in self.channel:
self.logger.debug("regrid: processing %s", chnl)
points = list(zip(self.data['xdec_offset'],self.data['dec_offset']))
self.logger.debug("regrid: %d positions", len(points))
values = self.data[power_key][chnl]
self.logger.debug("regrid: %d values", len(values))
xi, yi = NP.meshgrid(self.data['grid_x'], self.data['grid_y'])
try:
self.data['grid_z'][chnl] = scipy.interpolate.griddata(points, values,
(xi, yi), method='nearest')
except ValueError as details:
self.logger.error("regrid: gridding failed: %s", str(details))
self.logger.debug("regrid: channel %s length of points is %d",
chnl, len(points))
self.logger.debug("regrid: channel %s length of values is %d", chnl,
len(values))
continue
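  # Usage sketch (hypothetical map): once the xdec/dec offsets exist, a
  # 1 deg x 1 deg map can be put onto a regular grid with, e.g.,
  #   obsmap.regrid(width=1., height=1.)
  # which fills obsmap.data['grid_x'], ['grid_y'] and ['grid_z'][channel].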
def radec_from_azel(self):
"""
compute RA and dec from az and el
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
dt = self.data['datetime'][idx]
# format time as (YEAR, DOY.fff)
time_tuple = (dt.year,
DT.day_of_year(dt.year,dt.month,dt.day)
+ ( dt.hour
+ dt.minute/60.
+ dt.second/3600.
+ dt.microsecond/3600./1e6)/24.)
azimuth = self.data['az'][idx]
elevation = self.data['el'][idx]
# compute
ra,dec = A.AzEl_to_RaDec(azimuth, elevation,
self.latitude,
-self.longitude,
time_tuple)
RA.append(ra)
decs.append(dec)
      RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def radec2000_from_radec(self):
"""
compute RA2000 and dec2000 from observed RA and dec
"""
RA2000 = []; decs2000 = []; RAdec2000 = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
      ra = self.data['ra'][idx]
      dec = self.data['dec'][idx]
# compute
ra2000,dec2000 = A.apparent_to_J2000(MJD,UT,
ra, dec,
self.longitude, self.latitude)
RA2000.append(ra2000)
decs2000.append(dec2000)
RAdec2000.append((ra2000,dec2000))
self.data['ra2000'] = RA2000
    self.data['dec2000'] = decs2000
self.data['radec2000'] = RAdec2000
def radec_from_radec2000(self):
"""
compute apparent RA and dec. from J2000 RA and dec
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra2000 = self.data['ra2000'][idx]
dec2000 = self.data['dec2000'][idx]
# compute
ra, dec = A.J2000_to_apparent(MJD, UT,
ra2000*math.pi/12, dec2000*math.pi/180)
RA.append(ra)
decs.append(dec)
RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def azel_from_radec(self):
"""
compute azimuth and elevation from apparent right ascension and declination
"""
azs = []; els = []; azels = []
for idx in list(range(self.numdata)):
# setup
ra = self.data['ra'][idx]
dec = self.data['dec'][idx]
timetuple = self.data['datetime'][idx].timetuple()
year = timetuple.tm_year
doy = timetuple.tm_yday + (timetuple.tm_hour
+(timetuple.tm_min+timetuple.tm_sec/60)/60)/24
# compute
az, el = A.RaDec_to_AzEl(ra, dec,
self.latitude, self.longitude, (year,doy))
azs.append(az)
els.append(el)
azels.append((az,el))
self.data['az'] = azs
self.data['el'] = els
self.data['azel'] = azels
def get_offsets(self, source="Sun", xdec_ofst=0., dec_ofst=0.):
"""
Generates a map in coordinates relative to a source
If the source is the default, the position of the Sun will be computed for
the time of each sample. IT SEEMS LIKE A GOOD IDEA TO DO THIS FOR PLANETS
ALSO.
This adds elements with keys ``xdec_offset`` and ``dec_offset`` to the
attribute ``data``.
@param source : source at map center
@type source : ephem source instance
@param xdec_ofst : relative X-dec position of sample
@type xdec_ofst : float
@param dec_ofst : relative dec position of sample
@type dec_ofst : float
@return: (dxdecs,ddecs) in degrees
"""
if source.lower() == "sun":
src = AE.ephem.Sun()
else:
src = AE.calibrator(source)
self.data['dec_offset'] = []
self.data['xdec_offset'] = []
for count in range(len(self.data['unixtime'])):
dt = datetime.datetime.utcfromtimestamp(
self.data['unixtime'][count])
if type(src) == AE.Quasar:
pass
else:
src.compute(dt)
ra_center = src.ra*12/math.pi # hours
dec_center = src.dec*180/math.pi # degrees
decrad = src.dec
# right ascension increases to the left, cross-dec to the right
self.data['xdec_offset'].append(xdec_ofst -
(self.data['ra'][count] - ra_center)*15*math.cos(decrad) )
self.data['dec_offset'].append( dec_ofst +
self.data['dec'][count] - dec_center)
# change list to NP.array
self.data['xdec_offset'] = NP.array(self.data['xdec_offset'])
self.data['dec_offset'] = NP.array(self.data['dec_offset'])
class Map(Observation, GriddingMixin):
"""
Map class without special features for GAVRT and Malargue
Most of the methods are mixed in to avoid conflicting with subclasses
"""
def __init__(self, parent=None, name=None, dss=None, date=None, project=None):
"""
Create a Map object
Args:
parent (Session): an observing session to which this belongs
name (str): an identifier, like a scan number
dss (int): station where the data were taken
date (str): date of observation as "YEAR/DOY"
project (str): project for which this observation was made
"""
Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
class Recording(h5py.File):
"""
Class for raw data
This is typically the contents of a data file transcribed into a standard
format. It may be the data of one Observation object, or data for multiple
Observation objects, or contain part of the data for an Observation object.
  If the data being curated are not in a standard project, and they are not
  in a standard place, then a ``path`` and file ``name`` must be provided
  instead of a session.
"""
def __init__(self, session=None, path=None, date=None, dss=None, name=None):
"""
Initialize a metadata container and data directory
Args
====
      session (Session): required, unless ``path`` and ``name`` are given
      path (str) : location of raw data files
      date (str) : observation date as "YEAR/DOY"
      dss (int) : station number
      name (str) : name for the metadata (HDF5) file
"""
self.logger = logging.getLogger(logger.name+".Recording")
if session:
self.session = session
if not name:
name = session.project + "-" + str(session.year) + "-" + \
('%03d' % session.doy) + "-dss" + str(session.dss)+".info"
self.year = session.year
self.doy = session.doy
self.dss = session.dss
self.project = session.project
self.session_dir = session.session_dir
elif path and name:
self.session = Session() # for its methods and attributes
self.session_dir = path
self.name = name
else:
raise RuntimeError("either a session or a path and filename required")
h5py.File.__init__(self, name, 'w')
self.attrs['project'] = self.project
self.attrs['dss'] = self.dss
self.attrs['year'] = self.year
self.attrs['doy'] = self.doy
class Session(object):
"""
Base class for an observing session on a given year and DOY
Public Attributes::
    doy (int) - day of year for session
    logger (logging.Logger) - logging.Logger object
    parent (object) - a data reduction session (mult. observ. sessions)
    year (int) - year of session
    project (str) - project name
    session_dir (str) - path to results from this session
A session usually refers to a telescope, date and project. This will
normally define a path to the session directory.
"""
def __init__(self, parent=None, date=None, project=None, dss=None,
path=None):
"""
initialize data reduction for one observing session
Args
====
parent: (object) optional class for a data reduction tool
date: (str) required, format YEAR/DOY
project: (str) required
dss (int) required
path (str) optional
    If `path` is given for a non-standard observing file location, and it does
not exist, it will be created. Then the Recording and Observation instances
must be directed to where the files are.
"""
self.logger = logging.getLogger(logger.name+".Session")
if parent:
self.session = parent
if date and project and dss:
y,d = date.split('/')
self.year = int(y);
self.doy = int(d)
self.project = project
self.dss = dss
self.name = "'%s %4d/%03d'" % (self.project, self.year, self.doy)
else:
self.logger.error("__init__: missing DSS or year or DOY or project")
raise Exception("Where and when and for what project were the data taken?")
self.find_session_dir(path=path)
def find_session_dir(self, path=None):
"""
find or make the sessions directory
Args:
path (str) - explicit path to files
"""
self.logger.debug("find_session_dir: entered for path=%s", path)
if path:
self.session_dir = path
else:
obs_dir = local_dirs.projects_dir + self.project \
+"/Observations/dss"+str(self.dss)+"/"
self.session_dir = obs_dir+ "%4d" % self.year +"/"+ "%03d" % self.doy +"/"
if not os.path.exists(self.session_dir):
os.makedirs(self.session_dir, mode=0o775)
def select_data_files(self, datapath=None, name_pattern="", auto=True,
load_hdf=False):
"""
Provide the user with menu to select data files.
Finding the right data store is complicated as there are many kinds of data
files
* If datapath is ...RA_data/HDF5/... then the files could be .h5 (Ashish)
or .hdf5 (Dean).
* If datapath is ...RA_data/FITS/... then the extent is .fits.
* If datapath is ...project_data/... then the extent is .pkl
* If datapath is ...projects/... (default) then the extent is probably
.csv or .dat or .prd.
@param datapath : path to top of the tree where the DSS subdirectories are
@type datapath : str
@param name_pattern : pattern for selecting file names, e.g. source
@type name_pattern : str
@param load_hdf : use RA_data/HDF5 directory if True
@type load_hdf : bool
    @param auto : take all files found
@type auto : bool
@return: list of str
"""
# Get the data files to be processed
self.logger.debug("select_data_files: looking in %s", datapath)
if name_pattern:
name,extent = os.path.splitext(name_pattern)
      if extent and extent[1:].isalpha(): # a proper extent with no wildcards
# take name pattern as is
pass
else:
# only one * at front and back of pattern
name_pattern = "*"+name_pattern.rstrip('*')+"*"
else:
# no pattern specified. All files.
name_pattern = "*"
self.logger.debug("select_data_files: for pattern %s", name_pattern)
if datapath:
if re.search('HDF5', datapath):
load_hdf = True
elif re.search('project_data', datapath):
load_hdf = False
datafiles = support.text.select_files(datapath+name_pattern+"[0-9].pkl")
elif re.search('FITS', datapath):
datafiles = support.text.select_files(datapath+name_pattern+".fits")
if load_hdf:
full = datapath+name_pattern+".h*5"
else:
full = datapath+name_pattern
else:
full = self.session_dir + name_pattern
self.logger.debug("select_data_files: from: %s", full)
if auto:
datafiles = glob.glob(full)
else:
datafiles = support.text.select_files(full)
self.logger.debug("select_data_files: found %s", datafiles)
if datafiles == []:
self.logger.error(
"select_data_files: None found. Is the data directory mounted?")
raise RuntimeError('No data files found.')
if type(datafiles) == str:
datafiles = [datafiles]
self.logger.info("select_data_files: to be processed: %s", datafiles)
return datafiles
class Spectrum(Observation):
"""
Class for spectra
"""
def __init__(self):
"""
needs a spectrum attribute
"""
self.logger = logging.getLogger(logger.name+".Spectrum")
def get_num_chans(self, linefreq, bandwidth, max_vel_width):
"""
compute the base 2 number of output channels for the specified resolution
"""
kmpspMHz = 300000./linefreq
BW_kmps = bandwidth*kmpspMHz
est_num_chan_out = BW_kmps/max_vel_width
self.logger.debug("get_num_chans: estimated num chans out = %d",
est_num_chan_out)
return 2**int(math.ceil(math.log(est_num_chan_out,2)))
def reduce_spectrum_channels(self, refval, refpix, delta,
num_chan=1024, axis=0):
"""
Reduce the number of channels in the spectrum.
The default option is to reduce the spectrum to a specified number of
channels with a default of 1024. The input spectrum is presumed to have
2**N channels so that num_chan/num_chan_in is an integer.
If 'spectrum' is an N-D array, then the spectrum axis is given by 'axis'
which defaults to 0.
'delta' is negative for lower sideband or reversed double sideband spectra.
@param spectrum : spectrum values
@type spectrum : list or nparray
@param refval : X-axis value at the reference pixel of 'spectrum'
@type refval : float
@param refpix : reference pixel for 'spectrum'
@type refpix : int
@param delta : interval between pixels on the X-axis
@type delta : float
@param num_chan : optional number of channels to be returned (default: 2^10)
@type num_chan : int
@return: numpy.array
"""
if math.log(num_chan,2) % 1:
      raise RuntimeError("num_chan = %d is not a power of 2" % num_chan)
if type(self.spectrum) == NP.ndarray:
num_chans_in = self.spectrum.shape[axis]
else:
num_chans_in = len(self.spectrum)
if math.log(num_chans_in,2) % 1:
      raise RuntimeError("input spectrum length = %d is not a power of 2"
                         % num_chans_in)
self.logger.debug("reduce_spectrum_channels: %d channels in", num_chans_in)
    num_chan_avg = num_chans_in//num_chan # integer, so it can be used in slices
    newrefpix = refpix//num_chan_avg
self.logger.debug("reduce_spectrum_channels: refpix from %d to %d",
refpix, newrefpix)
newdelta = delta*num_chan_avg
self.logger.debug("reduce_spectrum_channels: delta from %.3f to %.3f",
delta, newdelta)
newrefval = refval + delta*(num_chan_avg/2 - 1)
self.logger.debug("reduce_spectrum_channels: refval from %.3f to %.3f",
refval, newrefval)
self.logger.debug("reduce_spectrum_channels: averaging %d channels", num_chan_avg)
    specout = NP.array([self.spectrum[index*num_chan_avg:(index+1)*num_chan_avg].mean()
                         for index in range(num_chan)])
self.logger.debug("reduce_spectrum_channels: %d channels out", num_chan)
return specout, newrefval, newrefpix, newdelta
def get_freq_array(self, bandwidth, n_chans):
"""
Create an array of frequencies for the channels of a backend
@param bandwidth : bandwidth
@type bandwidth : float
@param n_chans : number of channels
@type n_chans : int
@return: frequency of each channel in same units as bandwidth
"""
return NP.arange(n_chans)*float(bandwidth)/n_chans
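  # Example sketch (hypothetical 1020 MHz bandwidth, 4 channels):
  #   self.get_freq_array(1020., 4) -> array([0., 255., 510., 765.])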
def freq_to_chan(frequency,bandwidth,n_chans):
"""
Returns the channel number where a given frequency is to be found.
@param frequency : frequency of channel in sane units as bandwidth.
@type frequency : float
@param bandwidth : upper limit of spectrometer passband
@type bandwidth : float
@param n_chans : number of channels in the spectrometer
@type n_chans : int
@return: channel number (int)
"""
if frequency < 0:
frequency = bandwidth + frequency
if frequency > bandwidth:
raise RuntimeError("that frequency is too high.")
return round(float(frequency)/bandwidth*n_chans) % n_chans
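  # Example sketch (hypothetical 1020 MHz band with 1024 channels):
  #   freq_to_chan(300., 1020., 1024) -> 301
  #   freq_to_chan(-10., 1020., 1024) -> 1014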
def get_smoothed_bandshape(self, degree = None, poly_order=15):
"""
Do a Gaussian smoothing of the spectrum and then fit a polynomial.
Optionally, the raw and smoothed data and the fitted polynomial can be
plotted.
Note
====
``numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)``
Least squares polynomial fit.
Fit a polynomial::
p(x) = p[0] * x**deg + ... + p[deg]
of degree deg to points (x, y).
Returns a vector of coefficients p that minimises the squared error.
@param spectrum : input data
@type spectrum : list of float
@param degree : number of samples to smoothed (Gaussian FWHM)
@type degree : int
@param poly_order : order of the polynomial
@type poly_order : int
@param plot : plotting option
@type plot : boolean
@return: (polynomial_coefficient, smoothed_spectrum)
"""
    if degree is None:
      degree = len(self.spectrum)//100
# normalize the spectrum so max is 1 and convert to dB.
max_lev = NP.max(self.spectrum)
norm_spec = NP.array(self.spectrum)/float(max_lev)
norm_spec_db = 10*NP.log10(norm_spec)
# do a Gaussian smoothing
norm_spec_db_smoothed = smoothListGaussian(norm_spec_db, degree=degree)
# deal with the edges by making them equal to the smoothed end points
norm_spec_db_smoothed_resized = NP.ones(len(self.spectrum))
# left end
norm_spec_db_smoothed_resized[0:degree] = norm_spec_db_smoothed[0]
# middle
norm_spec_db_smoothed_resized[degree:degree+len(norm_spec_db_smoothed)] = \
norm_spec_db_smoothed
# right end
norm_spec_db_smoothed_resized[degree+len(norm_spec_db_smoothed):] = \
norm_spec_db_smoothed[-1]
    # polynomial fit of the smoothed spectrum; the channel index is assumed to
    # be the X-axis here
    poly = NP.polyfit(NP.arange(len(norm_spec_db_smoothed_resized)),
                      norm_spec_db_smoothed_resized, poly_order)
    return poly, norm_spec_db_smoothed_resized
# ------------------------ module functions -------------------------------
def examine_text_data_file(filename):
"""
Examine a file to guide ``genfromtxt()``
Things to look for::
* Is there a header line with column names? If not, use argument ``names``.
* Is the number of names equal to the number of columns? If not::
- use argument ``names`` and ``skip_header=1``, or
- use argument ``delimiter`` with a list of column widths
and ``skip_header=1``.
"""
print(examine_text_data_file.__doc__)
fd = open(filename, "r")
lines = fd.readlines()
fd.close()
topline = lines[0].strip().split()
print(" 1 2 3 4 5 6 7")
print("01234567890123456789012345678901234567890123456789012345678901234567890123456789")
print(lines[0].strip())
print(lines[1].strip())
print(" ...")
print(lines[-1].strip())
data = NP.genfromtxt(filename, dtype=None, names=None, skip_header=1, encoding=None)
print("%d datatypes:" % len(data.dtype.fields))
for item in data.dtype.fields:
print(item, data.dtype.fields[item])
def get_obs_dirs(project, station, year, DOY, datafmt=None):
"""
Returns the directories where data and working files are kept
@param project : project code string, e.g., RRL
@type project : str
@param station : DSN station number
@type station : int
@param year : year of observation
@type year : int
@param DOY : day of year of observations
@type DOY : int
@param datafmt : raw data format
@type datafmt : str
"""
#logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d",
# datafmt, project, station, year, DOY)
obspath = "dss%2d/%4d/%03d/" % (station,year,DOY)
if project:
projdatapath = "/usr/local/project_data/"+project+"/"+obspath
projworkpath = "/usr/local/projects/"+project+"/Observations/"+obspath
else:
projdatapath = ""
projworkpath = ""
if datafmt:
rawdatapath = "/usr/local/RA_data/"+datafmt+"/"+obspath
else:
rawdatapath = ""
return projdatapath, projworkpath, rawdatapath
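# Example sketch (project, station and date are hypothetical):
#   get_obs_dirs("SolarPatrol", 28, 2012, 127, datafmt="HDF5")
# would return
#   ('/usr/local/project_data/SolarPatrol/dss28/2012/127/',
#    '/usr/local/projects/SolarPatrol/Observations/dss28/2012/127/',
#    '/usr/local/RA_data/HDF5/dss28/2012/127/')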
# --------- old stuff to be discarded still needed for now ---------------
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
"""
Provides project, station, year and DOY, asking as needed.
It follows one of several possible paths to get to the session::
proj - path through /usr/local/projects/<project>
hdf5 - path through /usr/local/RA_data/HDF5
fits - path through /usr/local/RA_data/FITS
wvsr - path through /data
@param project : optional name as defined in /usr/local/projects
@type project : str
@param dss : optional station number
@type dss : int
@param date : optional YYYY/DDD
@type date : str
@return: project, DSS, year, DOY.
"""
def get_directory(path):
"""
"""
# only one trailing /
path = path.rstrip('/')+"/*"
logger.debug("get_obs_session:get_directory: from %s", path)
names = glob.glob(path)
if names:
dirs = []
for name in names:
if os.path.isdir(name):
dirs.append(os.path.basename(name))
dirs.sort()
for name in dirs:
print((name), end=' ')
return input('\n>')
else:
return []
def from_wvsr_dir():
"""
this needs to be completed and tested on crab14 or an auto host
"""
session = get_directory(local_dirs.wvsr_dir)
return session
cwd = os.getcwd()
# get the project
if project:
pass
else:
os.chdir(local_dirs.projects_dir)
project = get_directory(local_dirs.projects_dir)
logger.debug("from_wvsr_dir: project is %s", project)
projectpath = local_dirs.projects_dir+project
# get the station
if path[:4].lower() == 'wvsr':
# special call
print("from_wvsr_dir()")
if path[:4].lower() == 'proj':
os.chdir(projectpath+"/Observations/")
elif path[:4].lower() == 'hdf5':
os.chdir(local_dirs.hdf5_dir)
elif path[:4].lower() == 'fits':
os.chdir(local_dirs.fits_dir)
# get the station
if dss:
pass
else:
# This seems odd but get_directory() needs '/' and int does not
station = get_directory(os.getcwd()+"/").rstrip('/')
dss = int(station[-2:])
stationpath = os.getcwd()+"/dss"+str(dss)
# get the date
if date:
items = date.split('/')
year = int(items[0])
DOY = int(items[1])
else:
year = int(get_directory(stationpath))
yearpath = stationpath+"/"+str(year)
DOY = int(get_directory(yearpath))
os.chdir(cwd)
return project, dss, year, DOY
| [((2597, 2637), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: complete"""'], {}), "('tab: complete')\n", (2620, 2637), False, 'import readline\n'), ((2648, 2675), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2665, 2675), False, 'import logging\n'), ((43188, 43265), 'numpy.genfromtxt', 'NP.genfromtxt', (['filename'], {'dtype': 'None', 'names': 'None', 'skip_header': '(1)', 'encoding': 'None'}), '(filename, dtype=None, names=None, skip_header=1, encoding=None)\n', (43201, 43265), True, 'import numpy as NP\n'), ((45758, 45769), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (45767, 45769), False, 'import os\n'), ((46816, 46829), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (46824, 46829), False, 'import os\n'), ((7573, 7620), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Observation')"], {}), "(logger.name + '.Observation')\n", (7590, 7620), False, 'import logging\n'), ((8097, 8144), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Observation')"], {}), "(logger.name + '.Observation')\n", (8114, 8144), False, 'import logging\n'), ((14217, 14378), 'numpy.genfromtxt', 'NP.genfromtxt', (['(self.sessionpath + filename)'], {'delimiter': 'delimiter', 'dtype': 'None', 'names': 'names', 'case_sensitive': '"""lower"""', 'skip_header': 'skip_header', 'encoding': 'None'}), "(self.sessionpath + filename, delimiter=delimiter, dtype=None,\n names=names, case_sensitive='lower', skip_header=skip_header, encoding=None\n )\n", (14230, 14378), True, 'import numpy as NP\n'), ((22713, 22778), 'numpy.arange', 'NP.arange', (['(-width / 2)', '(width / 2 + self.xstep / 2)', '(self.xstep / 2)'], {}), '(-width / 2, width / 2 + self.xstep / 2, self.xstep / 2)\n', (22722, 22778), True, 'import numpy as NP\n'), ((22830, 22897), 'numpy.arange', 'NP.arange', (['(-height / 2)', '(height / 2 + self.ystep / 2)', '(self.ystep / 2)'], {}), '(-height / 2, height / 2 + self.ystep / 2, self.ystep / 2)\n', (22839, 22897), True, 'import numpy as NP\n'), ((28963, 28997), 'numpy.array', 'NP.array', (["self.data['xdec_offset']"], {}), "(self.data['xdec_offset'])\n", (28971, 28997), True, 'import numpy as NP\n'), ((29028, 29061), 'numpy.array', 'NP.array', (["self.data['dec_offset']"], {}), "(self.data['dec_offset'])\n", (29036, 29061), True, 'import numpy as NP\n'), ((30517, 30562), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Recording')"], {}), "(logger.name + '.Recording')\n", (30534, 30562), False, 'import logging\n'), ((31174, 31209), 'h5py.File.__init__', 'h5py.File.__init__', (['self', 'name', '"""w"""'], {}), "(self, name, 'w')\n", (31192, 31209), False, 'import h5py\n'), ((32576, 32619), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Session')"], {}), "(logger.name + '.Session')\n", (32593, 32619), False, 'import logging\n'), ((36501, 36545), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Spectrum')"], {}), "(logger.name + '.Spectrum')\n", (36518, 36545), False, 'import logging\n'), ((41525, 41546), 'numpy.max', 'NP.max', (['self.spectrum'], {}), '(self.spectrum)\n', (41531, 41546), True, 'import numpy as NP\n'), ((45289, 45304), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (45298, 45304), False, 'import glob\n'), ((45825, 45858), 'os.chdir', 'os.chdir', (['local_dirs.projects_dir'], {}), '(local_dirs.projects_dir)\n', (45833, 45858), False, 'import os\n'), ((46154, 46194), 'os.chdir', 'os.chdir', (["(projectpath + '/Observations/')"], {}), "(projectpath + '/Observations/')\n", (46162, 
46194), False, 'import os\n'), ((7710, 7725), 'Astronomy.DSN_coordinates.DSS', 'coords.DSS', (['dss'], {}), '(dss)\n', (7720, 7725), True, 'import Astronomy.DSN_coordinates as coords\n'), ((11986, 12024), 'support.PropertiedClass.__init__', 'support.PropertiedClass.__init__', (['self'], {}), '(self)\n', (12018, 12024), False, 'import support\n'), ((12072, 12120), 'logging.getLogger', 'logging.getLogger', (["(self.parent.name + '.Channel')"], {}), "(self.parent.name + '.Channel')\n", (12089, 12120), False, 'import logging\n'), ((23444, 23497), 'numpy.meshgrid', 'NP.meshgrid', (["self.data['grid_x']", "self.data['grid_y']"], {}), "(self.data['grid_x'], self.data['grid_y'])\n", (23455, 23497), True, 'import numpy as NP\n'), ((24731, 24810), 'Astronomy.AzEl_to_RaDec', 'A.AzEl_to_RaDec', (['azimuth', 'elevation', 'self.latitude', '(-self.longitude)', 'time_tuple'], {}), '(azimuth, elevation, self.latitude, -self.longitude, time_tuple)\n', (24746, 24810), True, 'import Astronomy as A\n'), ((25326, 25348), 'DatesTimes.UnixTime_to_MJD', 'DT.UnixTime_to_MJD', (['tm'], {}), '(tm)\n', (25344, 25348), True, 'import DatesTimes as DT\n'), ((25489, 25557), 'Astronomy.apparent_to_J2000', 'A.apparent_to_J2000', (['MJD', 'UT', 'ra', 'dec', 'self.longitude', 'self.latitude'], {}), '(MJD, UT, ra, dec, self.longitude, self.latitude)\n', (25508, 25557), True, 'import Astronomy as A\n'), ((26099, 26121), 'DatesTimes.UnixTime_to_MJD', 'DT.UnixTime_to_MJD', (['tm'], {}), '(tm)\n', (26117, 26121), True, 'import DatesTimes as DT\n'), ((26281, 26357), 'Astronomy.J2000_to_apparent', 'A.J2000_to_apparent', (['MJD', 'UT', '(ra2000 * math.pi / 12)', '(dec2000 * math.pi / 180)'], {}), '(MJD, UT, ra2000 * math.pi / 12, dec2000 * math.pi / 180)\n', (26300, 26357), True, 'import Astronomy as A\n'), ((27075, 27143), 'Astronomy.RaDec_to_AzEl', 'A.RaDec_to_AzEl', (['ra', 'dec', 'self.latitude', 'self.longitude', '(year, doy)'], {}), '(ra, dec, self.latitude, self.longitude, (year, doy))\n', (27090, 27143), True, 'import Astronomy as A\n'), ((28094, 28108), 'Astronomy.Ephem.ephem.Sun', 'AE.ephem.Sun', ([], {}), '()\n', (28106, 28108), True, 'import Astronomy.Ephem as AE\n'), ((28131, 28152), 'Astronomy.Ephem.calibrator', 'AE.calibrator', (['source'], {}), '(source)\n', (28144, 28152), True, 'import Astronomy.Ephem as AE\n'), ((28283, 28347), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (["self.data['unixtime'][count]"], {}), "(self.data['unixtime'][count])\n", (28317, 28347), False, 'import datetime\n'), ((33606, 33638), 'os.path.exists', 'os.path.exists', (['self.session_dir'], {}), '(self.session_dir)\n', (33620, 33638), False, 'import os\n'), ((33646, 33685), 'os.makedirs', 'os.makedirs', (['self.session_dir'], {'mode': '(509)'}), '(self.session_dir, mode=509)\n', (33657, 33685), False, 'import os\n'), ((34858, 34888), 'os.path.splitext', 'os.path.splitext', (['name_pattern'], {}), '(name_pattern)\n', (34874, 34888), False, 'import os\n'), ((35293, 35320), 're.search', 're.search', (['"""HDF5"""', 'datapath'], {}), "('HDF5', datapath)\n", (35302, 35320), False, 'import re\n'), ((35874, 35889), 'glob.glob', 'glob.glob', (['full'], {}), '(full)\n', (35883, 35889), False, 'import glob\n'), ((35918, 35949), 'support.text.select_files', 'support.text.select_files', (['full'], {}), '(full)\n', (35943, 35949), False, 'import support\n'), ((38060, 38081), 'math.log', 'math.log', (['num_chan', '(2)'], {}), '(num_chan, 2)\n', (38068, 38081), False, 'import math\n'), ((38304, 38329), 'math.log', 
'math.log', (['num_chans_in', '(2)'], {}), '(num_chans_in, 2)\n', (38312, 38329), False, 'import math\n'), ((41563, 41586), 'numpy.array', 'NP.array', (['self.spectrum'], {}), '(self.spectrum)\n', (41571, 41586), True, 'import numpy as NP\n'), ((41624, 41643), 'numpy.log10', 'NP.log10', (['norm_spec'], {}), '(norm_spec)\n', (41632, 41643), True, 'import numpy as NP\n'), ((46232, 46261), 'os.chdir', 'os.chdir', (['local_dirs.hdf5_dir'], {}), '(local_dirs.hdf5_dir)\n', (46240, 46261), False, 'import os\n'), ((46550, 46561), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46559, 46561), False, 'import os\n'), ((17776, 17814), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['tm'], {}), '(tm)\n', (17810, 17814), False, 'import datetime\n'), ((21262, 21286), 'Math.clusters.find_clusters', 'VQ.find_clusters', (['coords'], {}), '(coords)\n', (21278, 21286), True, 'import Math.clusters as VQ\n'), ((35357, 35392), 're.search', 're.search', (['"""project_data"""', 'datapath'], {}), "('project_data', datapath)\n", (35366, 35392), False, 'import re\n'), ((39777, 39795), 'numpy.arange', 'NP.arange', (['n_chans'], {}), '(n_chans)\n', (39786, 39795), True, 'import numpy as NP\n'), ((45371, 45390), 'os.path.isdir', 'os.path.isdir', (['name'], {}), '(name)\n', (45384, 45390), False, 'import os\n'), ((46301, 46330), 'os.chdir', 'os.chdir', (['local_dirs.fits_dir'], {}), '(local_dirs.fits_dir)\n', (46309, 46330), False, 'import os\n'), ((17893, 17910), 'matplotlib.dates.date2num', 'MPLd.date2num', (['dt'], {}), '(dt)\n', (17906, 17910), True, 'import matplotlib.dates as MPLd\n'), ((24407, 24448), 'DatesTimes.day_of_year', 'DT.day_of_year', (['dt.year', 'dt.month', 'dt.day'], {}), '(dt.year, dt.month, dt.day)\n', (24421, 24448), True, 'import DatesTimes as DT\n'), ((35439, 35503), 'support.text.select_files', 'support.text.select_files', (["(datapath + name_pattern + '[0-9].pkl')"], {}), "(datapath + name_pattern + '[0-9].pkl')\n", (35464, 35503), False, 'import support\n'), ((35511, 35538), 're.search', 're.search', (['"""FITS"""', 'datapath'], {}), "('FITS', datapath)\n", (35520, 35538), False, 'import re\n'), ((36953, 36982), 'math.log', 'math.log', (['est_num_chan_out', '(2)'], {}), '(est_num_chan_out, 2)\n', (36961, 36982), False, 'import math\n'), ((28771, 28787), 'math.cos', 'math.cos', (['decrad'], {}), '(decrad)\n', (28779, 28787), False, 'import math\n'), ((35560, 35620), 'support.text.select_files', 'support.text.select_files', (["(datapath + name_pattern + '.fits')"], {}), "(datapath + name_pattern + '.fits')\n", (35585, 35620), False, 'import support\n'), ((45414, 45436), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (45430, 45436), False, 'import os\n'), ((46477, 46488), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46486, 46488), False, 'import os\n')] |
HughPaynter/PyGRB | PyGRB/__init__.py | 2eaf834cf3c62a639a056285ca9518456daa4b7c | """
PyGRB.
A GRB light-curve analysis package.
"""
__version__ = "0.0.5"
__author__ = 'James Paynter'
from . import backend
from . import fetch
from . import main
from . import postprocess
from . import preprocess
| [] |
john9384/PyblogRestAPI | src/config.py | f8cd42b6ffd5ccc3224d18f71cbea654f05023d0 | import os
from dotenv import load_dotenv
load_dotenv()
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI')
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('EMAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('EMAIL_PASSWORD')
| [((41, 54), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (52, 54), False, 'from dotenv import load_dotenv\n'), ((88, 116), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (102, 116), False, 'import os\n'), ((147, 177), 'os.environ.get', 'os.environ.get', (['"""DATABASE_URI"""'], {}), "('DATABASE_URI')\n", (161, 177), False, 'import os\n'), ((277, 309), 'os.environ.get', 'os.environ.get', (['"""EMAIL_USERNAME"""'], {}), "('EMAIL_USERNAME')\n", (291, 309), False, 'import os\n'), ((330, 362), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PASSWORD"""'], {}), "('EMAIL_PASSWORD')\n", (344, 362), False, 'import os\n')] |
Huda-Hakami/Context-Guided-Relation-Embeddings | Context_Guided_RelRep/train.py | 520ce89fe7bad3aba2f3eb112329300625bb55f7 | import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import CGRE_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
class Training():
def __init__(self):
# Compositional relation embeddings (G1) Hyperparameters
self.batchSize=100
G1_HL=3
G1_Hdim=WR.dim
G1_BN=True #boolean variable T/F for batch normalization on G1 MLP
G1_l2_reg=0.001 # L2 regularization coefficient
self.G1_pkeep=1.0 # 1.0 means no Dropout applied during training on G1
# LSTM pattern encoding (G2) Hyperparameters
G2_HL=1
G2_Hdim=WR.dim
self.G2_pkeep=1.0 # 1.0 means no Dropout applied during training on G2
activ='tanh'
# Create relational model instance
self.RelModel=CGRE_Model.CGRE(activ,self.batchSize)
self.RelModel.G1_model(Ea,G1_BN,G1_HL,G1_Hdim,G1_l2_reg)
self.RelModel.G2_rnn_model(DS.max_length,G2_HL,G2_Hdim)
# --------------------------------------------------
def Train_Model(self):
# Hyperparameters
epochs=500
hist_loss=[]
hist_acc=[]
winn_loss=1e7
win_acc=-1
# Discriminator Hyperparameters (for Rel-Rep-alignment model)
D_HL=0
D_Hdim=WR.dim
D_BN=False # boolean variable T/F for batch normalization on D
self.D_pkeep=1.0 # 1.0 means no Dropout applied during training on the Discriminator D
D_l2_reg=0.001 # L2 regularization coefficient (to perform l2 regularized cross-entropy)
Train = DS.Training_triplesIDs
Train_Relations=set([rel for (a,b,p,w,rel) in Train])
Num_of_Classes=len(Train_Relations)
print ("Number of relation labels for cross-entropy objective=",Num_of_Classes)
# Assign ids to relations
Rel2id={}
i=0
for rel in Train_Relations:
Rel2id[rel]=i
i+=1
Train_dic={}
for (a,b,p,w,rel) in Train:
Train_dic.setdefault((a,b,rel),[])
Train_dic[(a,b,rel)].append((p,w))
Training_patterns=set([p for (_,_,p,_,_) in Train])
print ('Number of training patterns after removing test instances=',len(Training_patterns))
Train_list=list(Train_dic.keys())
print ("Number of training word-pairs (a,b,[(p,w)])",len(Train_list))
self.RelModel.define_loss(D_HL,D_Hdim,D_BN,D_l2_reg,Num_of_Classes)
self.RelModel.optimize()
self.sess=tf.Session()
self.sess.run(tf.global_variables_initializer())
print ("==========================================================================")
for epoch in range(epochs):
# Randomly shuffle training instances for each epoch
random.shuffle(Train_list)
# performance every 20 steps
if epoch%1==0:
Pair_Embeddings=self.Gen_Pair_Embeddings()
acc_1,corr_1=eval_SemEval(Pair_Embeddings,'Test')
acc_2,corr_2=eval_SemEval(Pair_Embeddings,'Valid')
acc_3,corr_3=eval_SemEval(Pair_Embeddings,'All')
print ("Epoch:%d, Acc_Test:%f, Acc_Valid:%f, Acc_All:%f, Corr_Test:%f, Corr_Valid:%f, Corr_All:%f"%(epoch,acc_1,acc_2,acc_3,corr_1,corr_2,corr_3))
hist_acc.append(acc_2)
# For early stopping
if acc_2>win_acc:
win_acc=acc_2
self.Save_Trained_Model()
print ("Parameters and Pair-Embeddings are changed...")
best_epoch=epoch
patient_cnt=0
else:
patient_cnt+=1
if patient_cnt>10:
print ("early stopping ... epoch number %d"%epoch)
print ("Winner acc:%f at epoch:%d"%(win_acc,best_epoch))
# break
# Training
for minibatch in next_batch(self.batchSize,Train_list):
a_ids,b_ids,labels=shred_tuples(minibatch)
Train_Y=np.zeros((len(minibatch),Num_of_Classes))
for i,rel in enumerate(labels):
rel_id=Rel2id[rel]
Train_Y[i,rel_id]=1.0
train_data={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:self.G1_pkeep,\
self.RelModel.is_training:True,self.RelModel.D_pkeep:self.D_pkeep}
minibatch_patterns=[Train_dic[(a,b,rel)] for (a,b,rel) in minibatch]
max_num_of_patterns,pattern_seq,early_stop,weights=Pattern_Sequences(a_ids,b_ids,minibatch_patterns)
train_data[self.RelModel.max_num_of_patterns]=max_num_of_patterns
train_data[self.RelModel.patterns_ids]=pattern_seq
train_data[self.RelModel.early_stop]=early_stop
train_data[self.RelModel.weights]=weights
train_data[self.RelModel.G2_pkeep]=self.G2_pkeep
# Loss options
train_data[self.RelModel.Y_]=Train_Y
self.sess.run(self.RelModel.train_step,feed_dict=train_data)
# --------------------------------------------------
def Save_Trained_Model(self):
Pair_Embeddings_dic=self.Gen_Pair_Embeddings()
np.save("res/Pair_Embeddings.npy",Pair_Embeddings_dic)
# --------------------------------------------------
def Gen_Pair_Embeddings(self):
word_pairs_ids=[(DS.word2id[a],DS.word2id[b]) for (a,b) in DS.Test_Pairs]
a_ids=[t[0] for t in word_pairs_ids]
b_ids=[t[1] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings1=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings1=sklearn.preprocessing.normalize(Pair_Embeddings1,axis=1,norm='l2') #L2 norm of r(a,b)
a_ids=[t[1] for t in word_pairs_ids]
b_ids=[t[0] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings2=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings2=sklearn.preprocessing.normalize(Pair_Embeddings2,axis=1,norm='l2') #L2 norm of r(b,a)
Pair_Embeddings=np.hstack((Pair_Embeddings1,Pair_Embeddings2))
Pair_Embeddings_dic={}
for i,(a,b) in enumerate(DS.Test_Pairs):
Pair_Embeddings_dic[(a,b)]=Pair_Embeddings[i]
return Pair_Embeddings_dic
# ============ End of the Evaluation class ============
def next_batch(batchSize,data):
# loop over our dataset in mini-batches of size `batchSize`
for i in np.arange(0, len(data), batchSize):
# yield the current batched data
yield data[i:i + batchSize]
# -------------------------------------------------------
def shred_tuples(tuples):
a_ids=[t[0] for t in tuples]
b_ids=[t[1] for t in tuples]
labels=[t[2] for t in tuples]
return a_ids,b_ids,labels
# -------------------------------------------------------
def Pattern_Sequences(a_ids,b_ids,minibatch_patterns):
max_num_of_patterns=np.max([len(L) for L in minibatch_patterns])
min_num_of_patterns=np.min([len(L) for L in minibatch_patterns])
# print ("Max num of patterns:",max_num_of_patterns)
# print ("Min num of patterns:",min_num_of_patterns)
pattern_seq=np.zeros((len(a_ids)*max_num_of_patterns,DS.max_length+2),dtype=int) #+2 is for the targeted two entities a and b
early_stop=[0 for i in range(len(a_ids)*max_num_of_patterns)]
weights=[0.0 for i in range(len(a_ids)*max_num_of_patterns)]
for i in range(len(a_ids)):
set_of_patterns=minibatch_patterns[i]
for j in range(max_num_of_patterns):
if j<len(set_of_patterns):
pattern_id,w=set_of_patterns[j][0],set_of_patterns[j][1]
pattern=DS.id2Patterns[pattern_id]
words=pattern.strip().split(' ')
words.insert(0,DS.id2word[a_ids[i]])
words.append(DS.id2word[b_ids[i]])
early_stop[(i*max_num_of_patterns)+j]=len(words)
weights[(i*max_num_of_patterns)+j]=w
for k,word in enumerate(words):
pattern_seq[(i*max_num_of_patterns)+j,k]=DS.word2id[word]
return max_num_of_patterns,pattern_seq,early_stop,weights
# -----------------------------------------------------------
if __name__=="__main__":
'''
Word Embeddings
'''
pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
WR=WordReps()
norm=1
standardise=0
WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
WR.vects['<PAD>']=np.zeros(WR.dim)
# WR.vects['X']=np.random.rand(WR.dim)
# WR.vects['Y']=np.random.rand(WR.dim)
WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
'''
Dataset
'''
corpus='Wikipedia_English'
Train_dataset=('DiffVec',"DiffVec_Pairs")
Test_dataset=('SemEval',"SemEval_Pairs.txt")
labels_type='proxy'
Reverse_pairs=True
DS=DataSet(corpus,Train_dataset,Test_dataset,labels_type,Reverse_pairs)
id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
Ea=DS.Generate_Embedding_Matrix(WR)
'''
Training & Evaluation
'''
Eval=Training()
Eval.Train_Model()
| [((7593, 7603), 'wordreps.WordReps', 'WordReps', ([], {}), '()\n', (7601, 7603), False, 'from wordreps import WordReps\n'), ((7714, 7730), 'numpy.zeros', 'np.zeros', (['WR.dim'], {}), '(WR.dim)\n', (7722, 7730), True, 'import numpy as np\n'), ((8127, 8199), 'dataset.DataSet', 'DataSet', (['corpus', 'Train_dataset', 'Test_dataset', 'labels_type', 'Reverse_pairs'], {}), '(corpus, Train_dataset, Test_dataset, labels_type, Reverse_pairs)\n', (8134, 8199), False, 'from dataset import DataSet\n'), ((828, 866), 'CGRE_Model.CGRE', 'CGRE_Model.CGRE', (['activ', 'self.batchSize'], {}), '(activ, self.batchSize)\n', (843, 866), False, 'import CGRE_Model\n'), ((2289, 2301), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2299, 2301), True, 'import tensorflow as tf\n'), ((4539, 4594), 'numpy.save', 'np.save', (['"""res/Pair_Embeddings.npy"""', 'Pair_Embeddings_dic'], {}), "('res/Pair_Embeddings.npy', Pair_Embeddings_dic)\n", (4546, 4594), True, 'import numpy as np\n'), ((5540, 5587), 'numpy.hstack', 'np.hstack', (['(Pair_Embeddings1, Pair_Embeddings2)'], {}), '((Pair_Embeddings1, Pair_Embeddings2))\n', (5549, 5587), True, 'import numpy as np\n'), ((2318, 2351), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2349, 2351), True, 'import tensorflow as tf\n'), ((2529, 2555), 'random.shuffle', 'random.shuffle', (['Train_list'], {}), '(Train_list)\n', (2543, 2555), False, 'import random\n'), ((7827, 7856), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'WR.dim'}), '(size=WR.dim)\n', (7843, 7856), True, 'import numpy as np\n'), ((7892, 7921), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'WR.dim'}), '(size=WR.dim)\n', (7908, 7921), True, 'import numpy as np\n'), ((2671, 2708), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""Test"""'], {}), "(Pair_Embeddings, 'Test')\n", (2683, 2708), False, 'from Eval import eval_SemEval\n'), ((2725, 2763), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""Valid"""'], {}), "(Pair_Embeddings, 'Valid')\n", (2737, 2763), False, 'from Eval import eval_SemEval\n'), ((2780, 2816), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""All"""'], {}), "(Pair_Embeddings, 'All')\n", (2792, 2816), False, 'from Eval import eval_SemEval\n')] |
HerculesJack/grtrans | synch_integrate.py | bc005307d81dac1bdb9520e776e7627126dd690a | from radtrans_integrate import radtrans_integrate
from polsynchemis import polsynchemis
import numpy as np
import scipy.integrate
# calculate synchrotron emissivity for given coefficients
def synch_jarho(nu,n,B,T,theta):
    if (not np.isscalar(nu)) and np.isscalar(n):
n = n + np.zeros(len(nu))
B = B + np.zeros(len(nu))
T = T + np.zeros(len(nu))
theta = theta + np.zeros(len(nu))
e = polsynchemis.polsynchth(nu,n,B,T,theta)
j = e[:,:4]; a = e[:,4:8]; rho = e[:,8:]
return j,a,rho
def run(x,jarr,aarr,rhoarr,sphstokes=-1,atol=1e-8,rtol=1e-6,max_tau=10):
if sphstokes==-1:
method=0
else:
method=3
radtrans_integrate.init_radtrans_integrate_data(method,4,len(x),len(x),max_tau,0.1,atol,rtol,1e-2,100000)
Karr = (np.append(aarr,rhoarr,axis=1))
tau = np.append(0.,scipy.integrate.cumtrapz(Karr[:,0],x))
radtrans_integrate.integrate(x[::-1],jarr[:,:],Karr[:,:],tau,4)
i = radtrans_integrate.intensity.copy()
radtrans_integrate.del_radtrans_integrate_data()
return i
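# Usage sketch (all plasma parameters are hypothetical): emissivities for a
# 500-point ray through a thermal plasma, then polarized transfer along it --
#   nu = np.full(500, 230.e9)
#   j, a, rho = synch_jarho(nu, 1.e6, 30., 6.e10, np.pi/3.)
#   stokes = run(np.linspace(0., 1.e13, 500), j, a, rho)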
| [((434, 477), 'polsynchemis.polsynchemis.polsynchth', 'polsynchemis.polsynchth', (['nu', 'n', 'B', 'T', 'theta'], {}), '(nu, n, B, T, theta)\n', (457, 477), False, 'from polsynchemis import polsynchemis\n'), ((800, 831), 'numpy.append', 'np.append', (['aarr', 'rhoarr'], {'axis': '(1)'}), '(aarr, rhoarr, axis=1)\n', (809, 831), True, 'import numpy as np\n'), ((897, 966), 'radtrans_integrate.radtrans_integrate.integrate', 'radtrans_integrate.integrate', (['x[::-1]', 'jarr[:, :]', 'Karr[:, :]', 'tau', '(4)'], {}), '(x[::-1], jarr[:, :], Karr[:, :], tau, 4)\n', (925, 966), False, 'from radtrans_integrate import radtrans_integrate\n'), ((969, 1004), 'radtrans_integrate.radtrans_integrate.intensity.copy', 'radtrans_integrate.intensity.copy', ([], {}), '()\n', (1002, 1004), False, 'from radtrans_integrate import radtrans_integrate\n'), ((1009, 1057), 'radtrans_integrate.radtrans_integrate.del_radtrans_integrate_data', 'radtrans_integrate.del_radtrans_integrate_data', ([], {}), '()\n', (1055, 1057), False, 'from radtrans_integrate import radtrans_integrate\n'), ((231, 246), 'numpy.isscalar', 'np.isscalar', (['nu'], {}), '(nu)\n', (242, 246), True, 'import numpy as np\n'), ((258, 272), 'numpy.isscalar', 'np.isscalar', (['n'], {}), '(n)\n', (269, 272), True, 'import numpy as np\n')] |
pjimmybrcd/campus_ztp_nps | actions/lib/Template_Parser.py | 2ab266b32fbcddcbdf9031138aabc40942914c3a | """
Copyright 2016 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from jinja2 import Template, Environment, StrictUndefined, UndefinedError, meta
class Template_Parser(object):
def __init__(self, configuration_template_file, variables={}):
''' Loads the configuration file '''
self.profile = ""
self.variables = variables
try:
with open(configuration_template_file, 'r') as f:
self.profile = "".join(line for line in f)
except:
raise IOError("Template file '%s' not found!", configuration_template_file)
def set_variables(self, variables):
''' Sets the variables '''
self.variables = variables
def get_required_variables(self):
''' Returns a set of the required variables in the template '''
return meta.find_undeclared_variables(Environment().parse(self.profile))
def get_parsed_lines(self):
''' Returns a set of lines with all variables filed in '''
try:
return Template(self.profile, undefined=StrictUndefined).render(self.variables)
except UndefinedError as e:
raise Exception(e)
| [((1376, 1389), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (1387, 1389), False, 'from jinja2 import Template, Environment, StrictUndefined, UndefinedError, meta\n'), ((1543, 1592), 'jinja2.Template', 'Template', (['self.profile'], {'undefined': 'StrictUndefined'}), '(self.profile, undefined=StrictUndefined)\n', (1551, 1592), False, 'from jinja2 import Template, Environment, StrictUndefined, UndefinedError, meta\n')] |
line-mind/lca_writer | lca_writer/data/loader.py | 0f356cf20285ba684826dfdd18b75d0f0ebea120 | import os
__all__ = ['DATA_FOLDER', 'load_data']
DATA_FOLDER = os.path.dirname(os.path.abspath(__file__))
def load_data(name):
"""
Loads an Excel form from the data folder with the specified name.
Parameters
----------
name : str
The name of the form without file extension.
"""
from ..lca_writer import LCAWriter # to prevent recursive import
p = os.path.join(DATA_FOLDER, name + '.xlsx')
return LCAWriter(p)
| [((82, 107), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (97, 107), False, 'import os\n'), ((395, 436), 'os.path.join', 'os.path.join', (['DATA_FOLDER', "(name + '.xlsx')"], {}), "(DATA_FOLDER, name + '.xlsx')\n", (407, 436), False, 'import os\n')] |
Dephilia/pipenv-docker-development | main.py | 3be5f63120638922fe98336b6ee5b3b0f6f182dc | var = "Docker"
print(f"Hello {var} world!")
| [] |
pndemo/yummy-recipes-api | app/v1/utils/mixins.py | ae6729bd1c886ce9872d83488a6eaa99e92be513 | """ Model mixin classes for auth, category and recipe modules """
from app import db
# pylint: disable=C0103
# pylint: disable=E1101
class BaseMixin(object):
""" Define the 'BaseModel' mapped to all database tables. """
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
def save(self):
"""Save to database table"""
db.session.add(self)
db.session.commit()
def delete(self):
"""Delete from database table"""
db.session.delete(self)
db.session.commit()
class TimestampMixin(object):
""" Database logging of data manipulation timestamps. """
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(), \
onupdate=db.func.current_timestamp())
| [((237, 296), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(db.Integer, primary_key=True, autoincrement=True)\n', (246, 296), False, 'from app import db\n'), ((363, 383), 'app.db.session.add', 'db.session.add', (['self'], {}), '(self)\n', (377, 383), False, 'from app import db\n'), ((392, 411), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (409, 411), False, 'from app import db\n'), ((484, 507), 'app.db.session.delete', 'db.session.delete', (['self'], {}), '(self)\n', (501, 507), False, 'from app import db\n'), ((516, 535), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (533, 535), False, 'from app import db\n'), ((680, 707), 'app.db.func.current_timestamp', 'db.func.current_timestamp', ([], {}), '()\n', (705, 707), False, 'from app import db\n'), ((760, 787), 'app.db.func.current_timestamp', 'db.func.current_timestamp', ([], {}), '()\n', (785, 787), False, 'from app import db\n'), ((812, 839), 'app.db.func.current_timestamp', 'db.func.current_timestamp', ([], {}), '()\n', (837, 839), False, 'from app import db\n')] |
JeroenvdSande/dash-sample-apps | apps/dash-port-analytics/app/ui/tab_map_controls.py | 106fa24693cfdaf47c06466a0aed78e642344f91 | import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_tab_port_map_controls(
port_arr: list,
port_val: str,
vessel_types_arr: list,
vessel_type_val: str,
year_arr: list,
year_val: int,
month_arr: list,
month_val: int,
) -> html.Div:
"""
Returns a HTML div of user controls found on top of the map tab.
:param port_arr: list, all possible ports
:param port_val: str, current port value
:param vessel_types_arr: list, all possible vessel types
:param vessel_type_val: str, current vessel type value
:param year_arr: list, all possible years
    :param year_val: int, current year value
    :param month_arr: list, all possible months
    :param month_val: int, current month value
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-map-dropdown-port",
clearable=False,
options=[{"label": port, "value": port} for port in port_arr],
value=port_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-map-dropdown-vessel-type",
clearable=False,
options=[
{"label": vessel_type, "value": vessel_type}
for vessel_type in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container date-grid area-e",
children=[
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label", children=[strings.LABEL_YEAR]
),
dcc.Dropdown(
id="port-map-dropdown-year",
clearable=False,
options=[
{"label": year, "value": year} for year in year_arr
],
value=year_val,
),
],
),
html.Div(
className="tab-port-map-single-control-separator smaller-line"
),
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label",
children=[strings.LABEL_MONTH],
),
dcc.Dropdown(
id="port-map-dropdown-month",
clearable=False,
options=[
{"label": month, "value": month}
for month in month_arr
],
value=month_val,
),
],
),
],
),
],
)
| [((1491, 1557), 'dash_html_components.Div', 'html.Div', ([], {'className': '"""tab-port-map-single-control-separator area-b"""'}), "(className='tab-port-map-single-control-separator area-b')\n", (1499, 1557), True, 'import dash_html_components as html\n'), ((2269, 2335), 'dash_html_components.Div', 'html.Div', ([], {'className': '"""tab-port-map-single-control-separator area-d"""'}), "(className='tab-port-map-single-control-separator area-d')\n", (2277, 2335), True, 'import dash_html_components as html\n'), ((1051, 1119), 'dash_html_components.Label', 'html.Label', ([], {'className': '"""control-label"""', 'children': '[strings.LABEL_PORT]'}), "(className='control-label', children=[strings.LABEL_PORT])\n", (1061, 1119), True, 'import dash_html_components as html\n'), ((1187, 1329), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""port-map-dropdown-port"""', 'clearable': '(False)', 'options': "[{'label': port, 'value': port} for port in port_arr]", 'value': 'port_val'}), "(id='port-map-dropdown-port', clearable=False, options=[{\n 'label': port, 'value': port} for port in port_arr], value=port_val)\n", (1199, 1329), True, 'import dash_core_components as dcc\n'), ((1702, 1772), 'dash_html_components.Label', 'html.Label', ([], {'className': '"""control-label"""', 'children': '[strings.LABEL_VESSEL]'}), "(className='control-label', children=[strings.LABEL_VESSEL])\n", (1712, 1772), True, 'import dash_html_components as html\n'), ((1840, 2029), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""port-map-dropdown-vessel-type"""', 'clearable': '(False)', 'options': "[{'label': vessel_type, 'value': vessel_type} for vessel_type in\n vessel_types_arr]", 'value': 'vessel_type_val'}), "(id='port-map-dropdown-vessel-type', clearable=False, options=[\n {'label': vessel_type, 'value': vessel_type} for vessel_type in\n vessel_types_arr], value=vessel_type_val)\n", (1852, 2029), True, 'import dash_core_components as dcc\n'), ((3241, 3313), 'dash_html_components.Div', 'html.Div', ([], {'className': '"""tab-port-map-single-control-separator smaller-line"""'}), "(className='tab-port-map-single-control-separator smaller-line')\n", (3249, 3313), True, 'import dash_html_components as html\n'), ((2643, 2711), 'dash_html_components.Label', 'html.Label', ([], {'className': '"""control-label"""', 'children': '[strings.LABEL_YEAR]'}), "(className='control-label', children=[strings.LABEL_YEAR])\n", (2653, 2711), True, 'import dash_html_components as html\n'), ((2803, 2945), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""port-map-dropdown-year"""', 'clearable': '(False)', 'options': "[{'label': year, 'value': year} for year in year_arr]", 'value': 'year_val'}), "(id='port-map-dropdown-year', clearable=False, options=[{\n 'label': year, 'value': year} for year in year_arr], value=year_val)\n", (2815, 2945), True, 'import dash_core_components as dcc\n'), ((3534, 3603), 'dash_html_components.Label', 'html.Label', ([], {'className': '"""control-label"""', 'children': '[strings.LABEL_MONTH]'}), "(className='control-label', children=[strings.LABEL_MONTH])\n", (3544, 3603), True, 'import dash_html_components as html\n'), ((3728, 3876), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""port-map-dropdown-month"""', 'clearable': '(False)', 'options': "[{'label': month, 'value': month} for month in month_arr]", 'value': 'month_val'}), "(id='port-map-dropdown-month', clearable=False, options=[{\n 'label': month, 'value': month} for month in month_arr], 
value=month_val)\n", (3740, 3876), True, 'import dash_core_components as dcc\n')] |
TFarla/subs2srs-cross-platform | subs2srs/gui/state.py | 79158a313ca4099adb20df97207b19d7bc948697 | from typing import List
from subs2srs.core.preview_item import PreviewItem
class StatePreview:
items: List[PreviewItem] = []
inactive_items = set()
def __init__(self):
super().__init__()
self.items = []
self.inactive_items = set()
self.audio = None
class State:
deck_name = None
sub1_file = "/Users/thomasfarla/Documents/subs2srs-cross-platform/tests/fixtures/in.srt"
sub2_file = None
video_file = "/Users/thomasfarla/Documents/subs2srs-cross-platform/tests/fixtures/in.mkv"
output_file = "/Users/thomasfarla/Documents/test-subs"
preview = StatePreview()
| [] |
nirav1997/sync_ends | sync_ends/main.py | 04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb | import sys
sys.path.append("..")
from src.sync_ends_service import SyncEnd
from src.parser import Parser
def main():
    # get the arguments from command line
parser = Parser()
collection_name, api_key, trigger_interval, slack_channel, slack_token = parser.get_argumenets()
sync_end = SyncEnd(api_key, collection_name, trigger_interval, slack_channel, slack_token)
try:
sync_end.start()
except Exception as e:
print(e)
if __name__ == "__main__":
main()
| [((12, 33), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (27, 33), False, 'import sys\n'), ((176, 184), 'src.parser.Parser', 'Parser', ([], {}), '()\n', (182, 184), False, 'from src.parser import Parser\n'), ((301, 380), 'src.sync_ends_service.SyncEnd', 'SyncEnd', (['api_key', 'collection_name', 'trigger_interval', 'slack_channel', 'slack_token'], {}), '(api_key, collection_name, trigger_interval, slack_channel, slack_token)\n', (308, 380), False, 'from src.sync_ends_service import SyncEnd\n')] |
0xflotus/graphql-compiler | graphql_compiler/compiler/workarounds/orientdb_query_execution.py | 0c892f5254d0cf3d03a68012080d0b736bc49913 | # Copyright 2018-present Kensho Technologies, LLC.
"""Workarounds for OrientDB scheduler issue that causes poor query planning for certain queries.
For purposes of query planning, the OrientDB query planner ignores "where:" clauses
that hit indexes but do not use the "=" operator. For example, "CONTAINS" can be used to check
that a field covered by an index is in a specified list of values, and can therefore be covered
by an index, but OrientDB will ignore this. When no equality ("=") checks on indexed columns
are present, OrientDB will generate a query plan that starts execution at the class with
lowest cardinality, which can lead to excessive numbers of scanned and discarded records.
Assuming the query planner creates a query plan where a location with CONTAINS is
the first in the execution order, the execution system will apply indexes
to speed up this operation. Therefore, it's sufficient to trick the query planner into
always creating such a query plan, even though it thinks indexes cannot be used in the query.
Valid query execution start points for the OrientDB query planner must satisfy the following:
- Must not be "optional: true".
- Must not have a "while:" clause nor follow a location that has one.
- Must have a "class:" defined. This class is used for cardinality estimation, and to
look for available indexes that may cover any "where:" clause that may be present.
The optimizations in this file improve performance by enabling execution start points according
to the following assumptions:
1. Start points with "where:" clauses that reference only local fields (i.e. not tagged values
from other query locations) are always better than start points without a "where:".
This is because the filter will have to be applied one way or the other, so we might as well
apply it early.
2. If no such start points are available, we'd like to make available as many start points
as possible, since we'd like OrientDB to start at the start point whose class has
the lowest possible cardinality.
The process of applying the optimizations is as follows:
- Exclude and ignore all query steps that are inside a fold, optional, or recursion scope,
or have a "where:" clause that references a non-local (i.e. tagged) field.
- Find all remaining query steps with "where:" clauses that reference only local fields.
- If any are found, we guide our actions from assumption 1 above:
- Ensure they have a defined "class:" -- i.e. the OrientDB scheduler will consider them
valid start points.
- Then, prune all other query steps (ones without such "where:" clauses) by removing their
"class:" clause, making them invalid as query start points for OrientDB's scheduler.
- If none are found, we guide our actions from assumption 2 above:
- Ensure that all query points not inside fold, optional, or recursion scope contain
a "class:" clause. That increases the number of available query start points,
so OrientDB can choose the start point of lowest cardinality.
"""
from ..blocks import CoerceType, QueryRoot, Recurse, Traverse
from ..expressions import ContextField, ContextFieldExistence
from ..helpers import get_only_element_from_collection
from ..ir_lowering_match.utils import convert_coerce_type_and_add_to_where_block
def _is_local_filter(filter_block):
"""Return True if the Filter block references no non-local fields, and False otherwise."""
# We need the "result" value of this function to be mutated within the "visitor_fn".
# Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
# https://www.python.org/dev/peps/pep-3104/
# Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
# can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
# TODO(predrag): Revisit this if we drop support for Python 2.
result = {
'is_local_filter': True
}
filter_predicate = filter_block.predicate
def visitor_fn(expression):
"""Expression visitor function that looks for uses of non-local fields."""
non_local_expression_types = (ContextField, ContextFieldExistence)
if isinstance(expression, non_local_expression_types):
result['is_local_filter'] = False
# Don't change the expression.
return expression
filter_predicate.visit_and_update(visitor_fn)
return result['is_local_filter']
def _classify_query_locations(match_query):
"""Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
an optional traversal, and locations with an associated "where:" clause with non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
"""
preferred_locations = set()
eligible_locations = set()
ineligible_locations = set()
# Any query must have at least one traversal with at least one step.
# The first step in this traversal must be a QueryRoot.
first_match_step = match_query.match_traversals[0][0]
if not isinstance(first_match_step.root_block, QueryRoot):
raise AssertionError(u'First step of first traversal unexpectedly was not QueryRoot: '
u'{} {}'.format(first_match_step, match_query))
# The first step in the first traversal cannot possibly be inside an optional, recursion,
# or fold. Its location is always an eligible start location for a query.
# We need to determine whether it is merely eligible, or actually a preferred location.
if first_match_step.where_block is not None:
if _is_local_filter(first_match_step.where_block):
preferred_locations.add(first_match_step.as_block.location)
else:
# TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope.
# Either the locally-scoped tag will have to generate a LocalField
# instead of a ContextField, or we'll have to rework the local filter
# detection code in this module.
raise AssertionError(u'The first step of the first traversal somehow had a non-local '
u'filter. This should not be possible, since there is nowhere '
u'for the tagged value to have come from. Values: {} {}'
.format(first_match_step, match_query))
else:
eligible_locations.add(first_match_step.as_block.location)
# This loop will repeat the analysis of the first step of the first traversal.
# QueryRoots other than the first are required to always be at a location whose status
# (preferred / eligible / ineligible) is already known. Since we already processed
# the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that.
for current_traversal in match_query.match_traversals:
for match_step in current_traversal:
current_step_location = match_step.as_block.location
if isinstance(match_step.root_block, QueryRoot):
already_encountered_location = any((
current_step_location in preferred_locations,
current_step_location in eligible_locations,
current_step_location in ineligible_locations,
))
if not already_encountered_location:
raise AssertionError(u'Unexpectedly encountered a location in QueryRoot whose '
u'status has not been determined: {} {} {}'
.format(current_step_location, match_step, match_query))
at_eligible_or_preferred_location = (
current_step_location in preferred_locations or
current_step_location in eligible_locations)
# This location has already been encountered and processed.
# Other than setting the "at_eligible_or_preferred_location" state for the sake of
# the following MATCH steps, there is nothing further to be done.
continue
elif isinstance(match_step.root_block, Recurse):
# All Recurse blocks cause locations within to be ineligible.
at_eligible_or_preferred_location = False
elif isinstance(match_step.root_block, Traverse):
# Optional Traverse blocks cause locations within to be ineligible.
# Non-optional Traverse blocks do not change the eligibility of locations within:
# if the pre-Traverse location was eligible, so will the location within,
# and if it was not eligible, neither will the location within.
if match_step.root_block.optional:
at_eligible_or_preferred_location = False
else:
raise AssertionError(u'Unreachable condition reached: {} {} {}'
.format(match_step.root_block, match_step, match_query))
if not at_eligible_or_preferred_location:
ineligible_locations.add(current_step_location)
elif match_step.where_block is not None:
if _is_local_filter(match_step.where_block):
# This location has a local filter, and is not otherwise ineligible (it's not
# in a recursion etc.). Therefore, it's a preferred query start location.
preferred_locations.add(current_step_location)
else:
# Locations with non-local filters are never eligible locations, since they
# depend on another location being executed before them.
ineligible_locations.add(current_step_location)
else:
# No local filtering (i.e. not preferred), but also not ineligible. Eligible it is.
eligible_locations.add(current_step_location)
return preferred_locations, eligible_locations, ineligible_locations
def _calculate_type_bound_at_step(match_step):
"""Return the GraphQL type bound at the given step, or None if no bound is given."""
current_type_bounds = []
if isinstance(match_step.root_block, QueryRoot):
# The QueryRoot start class is a type bound.
current_type_bounds.extend(match_step.root_block.start_class)
if match_step.coerce_type_block is not None:
# The CoerceType target class is also a type bound.
current_type_bounds.extend(match_step.coerce_type_block.target_class)
if current_type_bounds:
# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
return get_only_element_from_collection(current_type_bounds)
else:
# No type bound exists at this MATCH step.
return None
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query))
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
preferred_locations, eligible_locations):
"""Return a MATCH query where only preferred locations are valid as query start locations."""
preferred_location_types = dict()
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in preferred_locations:
# This location is preferred. We have to make sure that at least one occurrence
# of this location in the MATCH query has an associated "class:" clause,
# which would be generated by a type bound at the corresponding MATCH step.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = preferred_location_types.get(current_step_location, None)
if previous_type_bound is not None:
# The location is already valid. If so, make sure that this step either does
# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
# or has type bounds that match the previously-decided type bound.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
else:
# The location is not yet known to be valid. If it does not have
# a type bound in this MATCH step, add a type coercion to the type
# registered in "location_types".
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_step = match_step._replace(
coerce_type_block=CoerceType({current_type_bound}))
preferred_location_types[current_step_location] = current_type_bound
elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure
# none of the MATCH steps with this location have type bounds, and therefore
# will not produce a corresponding "class:" clause in the resulting MATCH query.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is not None:
# There is a type bound here that we need to neutralize.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
if (current_step_location not in coerced_locations or
previous_type_bound is not None):
# The type bound here is already implied by the GraphQL query structure,
# or has already been applied at a previous occurrence of this location.
# We can simply delete the QueryRoot / CoerceType blocks that impart it.
if isinstance(match_step.root_block, QueryRoot):
new_root_block = None
else:
new_root_block = match_step.root_block
new_step = match_step._replace(
root_block=new_root_block, coerce_type_block=None)
else:
# The type bound here is not already implied by the GraphQL query structure.
# This should only be possible via a CoerceType block. Lower this CoerceType
# block into a Filter with INSTANCEOF to ensure the resulting query has the
# same semantics, while making the location invalid as a query start point.
if (isinstance(match_step.root_block, QueryRoot) or
match_step.coerce_type_block is None):
raise AssertionError(u'Unexpected MATCH step applying a type bound not '
u'already implied by the GraphQL query structure: '
u'{} {}'.format(match_step, match_query))
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_step = match_step._replace(
coerce_type_block=None, where_block=new_where_block)
else:
# There is no type bound that OrientDB can find defined at this location.
# No action is necessary.
pass
else:
# This location is neither preferred nor eligible.
# No action is necessary at this location.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
| [] |
ofalk/cleware-traffic-light | traffic_light/core.py | be319fec8e190811463ade8aabc37ca2b4f17e57 | from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
BM_REQUEST_TYPE = 0x21
B_REQUEST = 0x09
W_VALUE = 0x200
W_INDEX = 0x00
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
class Color(IntEnum):
RED = 0x10
YELLOW = 0x11
GREEN = 0x12
class State(IntEnum):
OFF = 0x0
ON = 0x1
class ClewareTrafficLight:
def __init__(self, address=None):
if address:
self.address = address
self.device = usb.core.find(
address=address,
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
elif len(list(ClewareTrafficLight.find_devices())) > 1:
raise MultipleTrafficLightsError(
"No address is given and there are multiple devices conected! "
"Use 'print_devices' to see a list of connected devices."
)
else:
self.device = usb.core.find(
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
if self.device is None:
raise TrafficLightError('Cleware traffic light not found!')
self.reattach = False
def attach(self):
"""Attaches the device back to the kernel"""
usb.util.dispose_resources(self.device)
if self.reattach:
self.device.attach_kernel_driver(INTERFACE)
def detach(self):
"""Detaches the device from to kernel so it can be used"""
if self.device.is_kernel_driver_active(INTERFACE):
self.device.detach_kernel_driver(INTERFACE)
self.reattach = True
@staticmethod
def find_devices():
"""Returns the raw iterator of all found traffic lights"""
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT)
if devices:
return devices
return []
@staticmethod
def print_devices():
"""Prints a list of all connected traffic lights"""
devices = ClewareTrafficLight.get_devices()
for device in devices:
print(device)
@staticmethod
def get_devices():
"""Returns a list of ClewareTrafficLight instances"""
usb_devices = ClewareTrafficLight.find_devices()
return [ClewareTrafficLight(d.address) for d in usb_devices]
def set_led(self, color, value, timeout=1000):
"""Sets the given state and color of the attached traffic light
        Attributes:
            color -- the color to set, as the enum. E.g. Color.RED
            value -- the state to set. E.g. State.ON
            timeout -- USB control transfer timeout in milliseconds
"""
try:
self.detach()
self.device.ctrl_transfer(BM_REQUEST_TYPE, B_REQUEST, W_VALUE, W_INDEX, [0x00, color, value], timeout=timeout)
except Exception as exc:
raise TrafficLightError(str(exc)) from exc
finally:
self.attach()
def __getattr__(self, name):
"""Parses attribut calls in function"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
def __str__(self):
"""Converts instance into string with important imformations"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
| [((3429, 3474), 'functools.partial', 'functools.partial', (['self.set_led', 'color', 'state'], {}), '(self.set_led, color, state)\n', (3446, 3474), False, 'import functools\n'), ((1131, 1184), 'traffic_light.error.TrafficLightError', 'TrafficLightError', (['"""Cleware traffic light not found!"""'], {}), "('Cleware traffic light not found!')\n", (1148, 1184), False, 'from traffic_light.error import TrafficLightError, MultipleTrafficLightsError\n'), ((756, 912), 'traffic_light.error.MultipleTrafficLightsError', 'MultipleTrafficLightsError', (['"""No address is given and there are multiple devices conected! Use \'print_devices\' to see a list of connected devices."""'], {}), '(\n "No address is given and there are multiple devices conected! Use \'print_devices\' to see a list of connected devices."\n )\n', (782, 912), False, 'from traffic_light.error import TrafficLightError, MultipleTrafficLightsError\n')] |
dubiety/azure-sdk-for-python | sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py | 62ffa839f5d753594cf0fe63668f454a9d87a346 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_orchestration_app_luis_response_async.py
DESCRIPTION:
This sample demonstrates how to analyze user query using an orchestration project.
In this sample, orchestration project's top intent will map to a LUIS project.
For more info about how to setup a CLU orchestration project, see the README.
USAGE:
python sample_analyze_orchestration_app_luis_response_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.
2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.
3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME - project name for your CLU orchestration project.
4) AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME - deployment name for your CLU orchestration project.
"""
import asyncio
async def sample_analyze_orchestration_app_luis_response_async():
# [START analyze_orchestration_app_luis_response]
# import libraries
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysisClient
# get secrets
clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
clu_key = os.environ["AZURE_CONVERSATIONS_KEY"]
project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"]
deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"]
# analyze query
client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key))
async with client:
query = "Reserve a table for 2 at the Italian restaurant"
result = await client.analyze_conversation(
task={
"kind": "Conversation",
"analysisInput": {
"conversationItem": {
"participantId": "1",
"id": "1",
"modality": "text",
"language": "en",
"text": query
},
"isLoggingEnabled": False
},
"parameters": {
"projectName": project_name,
"deploymentName": deployment_name,
"verbose": True
}
}
)
# view result
print("query: {}".format(result["result"]["query"]))
print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"]))
# top intent
top_intent = result["result"]["prediction"]["topIntent"]
print("top intent: {}".format(top_intent))
top_intent_object = result["result"]["prediction"]["intents"][top_intent]
print("confidence score: {}".format(top_intent_object["confidenceScore"]))
print("project kind: {}".format(top_intent_object["targetProjectKind"]))
if top_intent_object["targetProjectKind"] == "Luis":
print("\nluis response:")
luis_response = top_intent_object["result"]["prediction"]
print("top intent: {}".format(luis_response["topIntent"]))
print("\nentities:")
for entity in luis_response["entities"]:
print("\n{}".format(entity))
# [END analyze_orchestration_app_luis_response]
async def main():
await sample_analyze_orchestration_app_luis_response_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | [((3586, 3610), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3608, 3610), False, 'import asyncio\n'), ((1731, 1758), 'azure.core.credentials.AzureKeyCredential', 'AzureKeyCredential', (['clu_key'], {}), '(clu_key)\n', (1749, 1758), False, 'from azure.core.credentials import AzureKeyCredential\n')] |
la-mar/sunstruck-api | src/sunstruck/schemas/__init__.py | 90074a55d3b243f7f0eee6e897a98699d2cebc43 | # flake8: noqa
from schemas.client_credentials import *
from schemas.message import *
from schemas.token import *
from schemas.user import *
| [] |
terziev-viktor/SolidityCourse | intro/deploy.py | 6f10852e94eec69438c5e577795d317694227337 | import json
from web3 import Web3
from solcx import compile_standard, install_solc
with open("./SimpleStorage.sol", "r") as file:
simple_storage_src = file.read()
# install solcx
install_solc("0.8.0")
# compile the source
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"SimpleStorage.sol": {"content": simple_storage_src}},
"settings":
{
"outputSelection":
{
"*":
{
"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]
}
}
},
},
solc_version = "0.8.0"
)
with open("./out.json", "w") as file:
json.dump(compiled_sol, file)
# getting the bytecode
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"]["bytecode"]["object"]
# getting the abi
abi = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["abi"]
# connecting to ganache
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:7545"))
chain_id = 1337
my_address = "0x02ECDdb09504C4d4B2ba2c7Ec80d77d44f6e631c"
private_key = "0xa9ddbecce894fdad11cd9864d9c58f794d23bd5f0d78d1c2eea204b284edfefc"
# Create the contract in python
SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)
# Get the latest test transaction
nonce = w3.eth.getTransactionCount(my_address)
# 1. Build a transaction
# 2. Sign the transaction
# 3. Send the transaction
transaction = SimpleStorage.constructor().buildTransaction({"gasPrice": w3.eth.gas_price, "chainId": chain_id, "from": my_address, "nonce": nonce})
signed_txn = w3.eth.account.sign_transaction(transaction, private_key)
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# confirm transaction is received
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
print("tx_hash=", tx_hash)
print("receipt=", tx_receipt)
# working on-chain
simple_storage = w3.eth.contract(address=tx_receipt.contractAddress, abi=abi)
print(simple_storage.functions.retrieve().call())
store_transaction = simple_storage.functions.store(15).buildTransaction({
"gasPrice": w3.eth.gas_price,
"chainId": chain_id,
"from": my_address,
"nonce": nonce + 1
}
)
singed_store_transaction = w3.eth.account.sign_transaction(store_transaction, private_key)
store_transaction_hash = w3.eth.send_raw_transaction(singed_store_transaction.rawTransaction)
store_transaction_receipt = w3.eth.wait_for_transaction_receipt(store_transaction_hash)
| [((186, 207), 'solcx.install_solc', 'install_solc', (['"""0.8.0"""'], {}), "('0.8.0')\n", (198, 207), False, 'from solcx import compile_standard, install_solc\n'), ((245, 489), 'solcx.compile_standard', 'compile_standard', (["{'language': 'Solidity', 'sources': {'SimpleStorage.sol': {'content':\n simple_storage_src}}, 'settings': {'outputSelection': {'*': {'*': [\n 'abi', 'metadata', 'evm.bytecode', 'evm.sourceMap']}}}}"], {'solc_version': '"""0.8.0"""'}), "({'language': 'Solidity', 'sources': {'SimpleStorage.sol':\n {'content': simple_storage_src}}, 'settings': {'outputSelection': {'*':\n {'*': ['abi', 'metadata', 'evm.bytecode', 'evm.sourceMap']}}}},\n solc_version='0.8.0')\n", (261, 489), False, 'from solcx import compile_standard, install_solc\n'), ((694, 723), 'json.dump', 'json.dump', (['compiled_sol', 'file'], {}), '(compiled_sol, file)\n', (703, 723), False, 'import json\n'), ((983, 1025), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', (['"""HTTP://127.0.0.1:7545"""'], {}), "('HTTP://127.0.0.1:7545')\n", (1000, 1025), False, 'from web3 import Web3\n')] |
mgp25/noise | noise/extras/meta/protocol/protocol.py | 8560849fa4a1d6e938adde27d26572f4da16e422 | from noise.dh.dh import DH
from noise.cipher.cipher import Cipher
from noise.hash.hash import Hash
from noise.processing.handshakepatterns.handshakepattern import HandshakePattern
from noise.processing.impl.handshakestate import HandshakeState
from noise.processing.impl.symmetricstate import SymmetricState
from noise.processing.impl.cipherstate import CipherState
class NoiseProtocol(object):
def __init__(self, pattern, dh, cipher, hash):
"""
        :param pattern: handshake pattern to use
        :type pattern: HandshakePattern
        :param dh: Diffie-Hellman functions implementation
        :type dh: DH
        :param cipher: cipher functions implementation
        :type cipher: Cipher
        :param hash: hash functions implementation
        :type hash: Hash
"""
self._pattern = pattern # type: HandshakePattern
self._dh = dh # type: DH
self._cipher = cipher # type: Cipher
self._hash = hash # type: Hash
self._oneway = len(HandshakePattern.parse_handshakepattern(pattern.name)[0]) == 1 # type: bool
@property
def oneway(self):
return self._oneway
@property
def pattern(self):
return self._pattern
@property
def dh(self):
return self._dh
@property
def cipher(self):
return self._cipher
@property
def hash(self):
return self._hash
def create_cipherstate(self, cipher=None):
"""
:param cipher:
:type cipher: Cipher
:return:
:rtype: CipherState
"""
return CipherState(cipher or self._cipher)
def create_symmetricstate(self, cipherstate=None, hash=None):
"""
:param cipherstate:
:type cipherstate: CipherState
:param hash:
:type hash: Hash
:return:
:rtype: SymmetricState
"""
return SymmetricState(cipherstate or self.create_cipherstate(), hash or self._hash)
def create_handshakestate(self, symmetricstate=None, dh=None):
"""
:param symmetricstate:
:type symmetricstate: SymmetricState
:param dh:
:type dh: DH
:return:
:rtype: HandshakeState
"""
return HandshakeState(symmetricstate or self.create_symmetricstate(), dh or self._dh)
| [((1424, 1459), 'noise.processing.impl.cipherstate.CipherState', 'CipherState', (['(cipher or self._cipher)'], {}), '(cipher or self._cipher)\n', (1435, 1459), False, 'from noise.processing.impl.cipherstate import CipherState\n'), ((848, 901), 'noise.processing.handshakepatterns.handshakepattern.HandshakePattern.parse_handshakepattern', 'HandshakePattern.parse_handshakepattern', (['pattern.name'], {}), '(pattern.name)\n', (887, 901), False, 'from noise.processing.handshakepatterns.handshakepattern import HandshakePattern\n')] |
cartazio/SublimeHaskell | info_popup.py | e6f12ea69de939d12212a6ec594bf0aae0603f6d | import urllib.parse
import webbrowser
import json
from xml.etree import ElementTree
import sublime
import SublimeHaskell.sublime_haskell_common as Common
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.internals.unicode_opers as UnicodeOpers
import SublimeHaskell.symbols as symbols
import SublimeHaskell.internals.backend_mgr as BackendManager
import SublimeHaskell.parseoutput as ParseOutput
import SublimeHaskell.types as types
# Unused module variable:
# style_header = "<style>" \
# "a { text-decoration: underline; }" \
# ".type { color: red; }" \
# ".tyvar { color: blue; }" \
# ".operator { color: green; }" \
# ".comment { color: gray; font-style: italic; }" \
# ".docs { color: gray; }" \
# "</style>"
class Styles(object):
"""
Loads and holds cache of scheme styles
Also generates style header
"""
def __init__(self):
self.schemes = {}
CSS_CLASSES = {
'comment': 'comment',
'function': 'entity.name.function',
'type': 'entity.name.type',
'operator': 'keyword.operator',
'keyword': 'keyword.declaration',
'tyvar': 'variable.generic',
'error': 'sublimehaskell.mark.error',
'warning': 'sublimehaskell.mark.warning',
'hint': 'sublimehaskell.mark.hint'
}
def load_scheme(self, scheme_path):
if scheme_path not in self.schemes:
scheme_res = sublime.load_resource(scheme_path)
if scheme_res:
# Go through all styles and collect scope/foreground/fontStyle etc.
# Prefer ST3 'sublime-color-scheme' JSON over older TextMate XML.
self.schemes[scheme_path] = self.collect_sublime_scheme(json.loads(scheme_res)) \
if scheme_path.endswith('.sublime-color-scheme') \
else self.collect_textmate_scheme(ElementTree.fromstring(scheme_res))
return self.schemes.get(scheme_path, {})
def collect_textmate_scheme(self, scheme_tree):
scheme = {}
for style in scheme_tree.findall(".//dict[key='scope']"):
try:
cur_style = {}
cur_tag = None
for elem in style.iter():
if elem.tag == 'key':
cur_tag = elem.text # We are going to fill it next time
elif elem.tag == 'string' and cur_tag is not None:
cur_style[cur_tag] = elem.text
cur_tag = None
if 'scope' in cur_style:
scheme[cur_style['scope']] = cur_style
except ValueError:
pass
return scheme
def collect_sublime_scheme(self, scheme_dict):
scheme = {}
for rule in scheme_dict.get('rules', []):
scope = rule.get('scope', '')
if scope:
scheme[scope] = rule
return scheme
def gen_style(self, scheme_path):
scheme = self.load_scheme(scheme_path)
parts = []
parts.append("<style>")
parts.append("a { text-decoration: underline; }")
# generate CSS style for each class
for cls, scope in self.CSS_CLASSES.items():
# find scope or its parent in scheme
scope_parts = scope.split('.')
for css_scope in reversed(['.'.join(scope_parts[0:i+1]) for i in range(0, len(scope_parts))]):
if css_scope in scheme: # Found some scope, fill style class
style_parts = []
if 'foreground' in scheme[css_scope]:
style_parts.append("color: {0}".format(scheme[css_scope]['foreground']))
# Prefer ST3 'sublime-color-scheme' JSON attribute over the older TextMate-ish name
font_style = scheme[css_scope].get('font_style', scheme[css_scope].get('fontStyle', ''))
if font_style:
style_parts.append("font-style: {0}".format(font_style))
parts.append(".{0} {{ {1} }}".format(cls, "; ".join(style_parts)))
break
parts.append("</style>")
return "".join(parts)
class SublimeHaskellHoverPopup(object):
# HTML style formatting
STYLES = Styles()
def __init__(self, view, filename, point, hover_zone):
super().__init__()
self.view = view
self.filename = filename
self.point = point
self.hover_zone = hover_zone
self.line = view.rowcol(point)[0]
self.shown = False
def do_hover(self):
if self.hover_zone == sublime.HOVER_TEXT:
qsymbol = Common.get_qualified_symbol_at_point(self.view, self.point)
## print('hover: qualified symbol {0}'.format(qsymbol))
module_word = qsymbol.module
ident = qsymbol.name
if module_word is not None and ident is None:
# TODO: Any ideas for popup about module?
pass
elif ident is not None:
whois_name = qsymbol.qualified_name()
full_name = qsymbol.full_name()
# Try get type of hovered symbol
typed_expr = None
if types.SourceHaskellTypeCache().has(self.filename):
typed_expr = self.get_type(types.SourceHaskellTypeCache().get(self.filename), whois_name)
else:
project_name = Common.locate_cabal_project_from_view(self.view)[1]
point_rgn = sublime.Region(self.point, self.point)
typed_expr = self.get_type(types.get_type_view(self.view, project_name, point_rgn), whois_name)
# Try whois
suggest_import = False
decl = Utils.head_of(BackendManager.active_backend().whois(whois_name, self.filename))
if not decl:
suggest_import = True
decl = Utils.head_of(BackendManager.active_backend().lookup(full_name, self.filename))
self.create_symbol_popup(typed_expr, decl, suggest_import)
elif self.hover_zone == sublime.HOVER_GUTTER:
errs = [err for err in ParseOutput.MARKER_MANAGER.marks_for_view(self.view) if err.region.start.line == self.line]
if errs:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
for err in errs:
msg = UnicodeOpers.use_unicode_operators(symbols.escape_text(err.message))
# Decorate first word with style
decors = {
'Error': 'error',
'Warning': 'warning',
'Hint': 'hint'
}
for dec, dec_style in decors.items():
msg = msg.replace(dec, u'<span class="{0}">{1}</span>'.format(dec_style, dec))
popup_parts.append(u'<p>{0}</p>'.format(msg))
if err.correction is not None:
popup_parts.append(err.correction.popup())
popup_text = u''.join(popup_parts)
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
def create_symbol_popup(self, typed_expr, decl, suggest_import):
if typed_expr or decl:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
if typed_expr:
popup_parts.append(u'<p><span class="function">{0}</span>{1}</p>'.format(
typed_expr.substr(self.view),
symbols.format_type(UnicodeOpers.use_unicode_operators(' :: {0}'.format(typed_expr.typename)))))
if decl:
popup_msg = [u'<a href="import:{0}">Add import</a>'.format(urllib.parse.quote_plus(decl.name))] \
if suggest_import else []
popup_parts.append(decl.popup(popup_msg))
popup_text = u''.join(popup_parts)
if not self.shown:
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
else:
self.view.update_popup(popup_text)
def get_type(self, type_list, qual_name):
filt_types = [t for t in type_list
if t.substr(self.view) == qual_name and t.region(self.view).contains(self.point)]
return Utils.head_of(filt_types)
def on_navigate(self, url):
if self.view.is_popup_visible():
self.view.hide_popup()
if url[0:4] == 'http':
webbrowser.open(url)
elif url[0:8] == 'autofix:':
rgn = symbols.Region.from_str(url[8:])
ParseOutput.MARKER_MANAGER.apply_autocorrect(self.view, rgn)
elif url[0:7] == "import:":
decl_name = urllib.parse.unquote(url[7:])
self.view.run_command('sublime_haskell_insert_import_for_symbol',
{'filename': self.view.file_name(),
'decl': decl_name})
else:
self.view.window().open_file(url, sublime.ENCODED_POSITION | sublime.TRANSIENT)
def on_hide(self):
self.shown = False
| [((8691, 8716), 'SublimeHaskell.internals.utils.head_of', 'Utils.head_of', (['filt_types'], {}), '(filt_types)\n', (8704, 8716), True, 'import SublimeHaskell.internals.utils as Utils\n'), ((1436, 1470), 'sublime.load_resource', 'sublime.load_resource', (['scheme_path'], {}), '(scheme_path)\n', (1457, 1470), False, 'import sublime\n'), ((4675, 4734), 'SublimeHaskell.sublime_haskell_common.get_qualified_symbol_at_point', 'Common.get_qualified_symbol_at_point', (['self.view', 'self.point'], {}), '(self.view, self.point)\n', (4711, 4734), True, 'import SublimeHaskell.sublime_haskell_common as Common\n'), ((8878, 8898), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (8893, 8898), False, 'import webbrowser\n'), ((8962, 8994), 'SublimeHaskell.symbols.Region.from_str', 'symbols.Region.from_str', (['url[8:]'], {}), '(url[8:])\n', (8985, 8994), True, 'import SublimeHaskell.symbols as symbols\n'), ((9011, 9071), 'SublimeHaskell.parseoutput.MARKER_MANAGER.apply_autocorrect', 'ParseOutput.MARKER_MANAGER.apply_autocorrect', (['self.view', 'rgn'], {}), '(self.view, rgn)\n', (9055, 9071), True, 'import SublimeHaskell.parseoutput as ParseOutput\n'), ((1736, 1758), 'json.loads', 'json.loads', (['scheme_res'], {}), '(scheme_res)\n', (1746, 1758), False, 'import json\n'), ((1887, 1921), 'xml.etree.ElementTree.fromstring', 'ElementTree.fromstring', (['scheme_res'], {}), '(scheme_res)\n', (1909, 1921), False, 'from xml.etree import ElementTree\n'), ((5558, 5596), 'sublime.Region', 'sublime.Region', (['self.point', 'self.point'], {}), '(self.point, self.point)\n', (5572, 5596), False, 'import sublime\n'), ((6228, 6280), 'SublimeHaskell.parseoutput.MARKER_MANAGER.marks_for_view', 'ParseOutput.MARKER_MANAGER.marks_for_view', (['self.view'], {}), '(self.view)\n', (6269, 6280), True, 'import SublimeHaskell.parseoutput as ParseOutput\n'), ((5256, 5286), 'SublimeHaskell.types.SourceHaskellTypeCache', 'types.SourceHaskellTypeCache', ([], {}), '()\n', (5284, 5286), True, 'import SublimeHaskell.types as types\n'), ((5474, 5522), 'SublimeHaskell.sublime_haskell_common.locate_cabal_project_from_view', 'Common.locate_cabal_project_from_view', (['self.view'], {}), '(self.view)\n', (5511, 5522), True, 'import SublimeHaskell.sublime_haskell_common as Common\n'), ((5644, 5699), 'SublimeHaskell.types.get_type_view', 'types.get_type_view', (['self.view', 'project_name', 'point_rgn'], {}), '(self.view, project_name, point_rgn)\n', (5663, 5699), True, 'import SublimeHaskell.types as types\n'), ((6531, 6563), 'SublimeHaskell.symbols.escape_text', 'symbols.escape_text', (['err.message'], {}), '(err.message)\n', (6550, 6563), True, 'import SublimeHaskell.symbols as symbols\n'), ((5818, 5849), 'SublimeHaskell.internals.backend_mgr.active_backend', 'BackendManager.active_backend', ([], {}), '()\n', (5847, 5849), True, 'import SublimeHaskell.internals.backend_mgr as BackendManager\n'), ((5354, 5384), 'SublimeHaskell.types.SourceHaskellTypeCache', 'types.SourceHaskellTypeCache', ([], {}), '()\n', (5382, 5384), True, 'import SublimeHaskell.types as types\n'), ((5996, 6027), 'SublimeHaskell.internals.backend_mgr.active_backend', 'BackendManager.active_backend', ([], {}), '()\n', (6025, 6027), True, 'import SublimeHaskell.internals.backend_mgr as BackendManager\n')] |
artizanatweb/ghome-assistant | modules/google_home_lights.py | dba2bc58979ebae48afc71c356ae2d40b8830eee | #!/usr/bin/env python
# Copyright (C) 2017 Seeed Technology Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modules.pixel_ring import pixel_ring
import numpy
import time
import threading
try:
import queue as Queue
except ImportError:
import Queue as Queue
class GoogleHomeLights:
def __init__(self):
self.basis = numpy.array([0] * 4 * 12)
self.basis[0 * 4 + 0] = 2
self.basis[3 * 4 + 2] = 2
self.basis[6 * 4 + 1] = 1
self.basis[6 * 4 + 2] = 1
self.basis[9 * 4 + 1] = 2
self.pixels = self.basis * 0
self.write(self.pixels)
pixel_ring.write(0, [6, 0, 0, 0])
self.next = threading.Event()
self.queue = Queue.Queue()
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
def wakeup(self, direction=0):
def f():
self._wakeup(direction)
self.queue.put(f)
def listen(self):
self.next.set()
self.queue.put(self._listen)
def think(self):
self.next.set()
self.queue.put(self._think)
def speak(self):
self.next.set()
self.queue.put(self._speak)
def off(self):
self.next.set()
self.queue.put(self._off)
def _run(self):
while True:
func = self.queue.get()
func()
    def _wakeup(self, direction=0):
        position = int((direction + 15) / 30) % 12

        basis = numpy.roll(self.basis, position * 4)
        for i in range(1, 25):
            pixels = basis * i
            self.write(pixels)
            time.sleep(0.005)

        pixels = numpy.roll(pixels, 4)
        self.write(pixels)
        time.sleep(0.1)

        for i in range(2):
            new_pixels = numpy.roll(pixels, 4)
            self.write(new_pixels * 0.5 + pixels)
            pixels = new_pixels
            time.sleep(0.1)

        self.write(pixels)
        self.pixels = pixels
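
    # Fade the current pattern up to full brightness.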
    def _listen(self):
        pixels = self.pixels
        for i in range(1, 25):
            self.write(pixels * i / 24)
            time.sleep(0.01)
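
    # Spin the pattern until the next state is requested, then fade it out
    # while it keeps spinning.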
    def _think(self):
        pixels = self.pixels

        self.next.clear()
        while not self.next.is_set():
            pixels = numpy.roll(pixels, 4)
            self.write(pixels)
            time.sleep(0.2)

        t = 0.1
        for i in range(0, 5):
            pixels = numpy.roll(pixels, 4)
            self.write(pixels * (4 - i) / 4)
            time.sleep(t)
            t /= 2

        # time.sleep(0.5)

        self.pixels = pixels
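
    # Pulse the pattern (fade up, pause, fade down) until the next state is
    # requested, then turn the ring off.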
    def _speak(self):
        pixels = self.pixels
        self.next.clear()
        while not self.next.is_set():
            for i in range(5, 25):
                self.write(pixels * i / 24)
                time.sleep(0.01)

            time.sleep(0.3)

            for i in range(24, 4, -1):
                self.write(pixels * i / 24)
                time.sleep(0.01)

            time.sleep(0.3)

        self._off()

    def _off(self):
        self.write([0] * 4 * 12)
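
    # Send one frame (4 values for each of the 12 LEDs) to the pixel ring;
    # accepts either a plain list or a numpy array.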
    def write(self, data):
        if type(data) is list:
            pixel_ring.write(3, data)
        else:
            pixel_ring.write(3, data.astype('uint8').tostring())


lights = GoogleHomeLights()
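
# Demo: cycle through the animations when this file is run directly.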
if __name__ == '__main__':
    while True:
        try:
            lights.wakeup()
            time.sleep(3)
            lights.think()
            time.sleep(3)
            lights.speak()
            time.sleep(3)
            lights.off()
            time.sleep(3)
        except KeyboardInterrupt:
            break

    pixel_ring.off() | [((4110, 4126), 'modules.pixel_ring.pixel_ring.off', 'pixel_ring.off', ([], {}), '()\n', (4124, 4126), False, 'from modules.pixel_ring import pixel_ring\n'), ((849, 874), 'numpy.array', 'numpy.array', (['([0] * 4 * 12)'], {}), '([0] * 4 * 12)\n', (860, 874), False, 'import numpy\n'), ((1124, 1157), 'modules.pixel_ring.pixel_ring.write', 'pixel_ring.write', (['(0)', '[6, 0, 0, 0]'], {}), '(0, [6, 0, 0, 0])\n', (1140, 1157), False, 'from modules.pixel_ring import pixel_ring\n'), ((1179, 1196), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1194, 1196), False, 'import threading\n'), ((1218, 1231), 'Queue.Queue', 'Queue.Queue', ([], {}), '()\n', (1229, 1231), True, 'import Queue as Queue\n'), ((1254, 1288), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._run'}), '(target=self._run)\n', (1270, 1288), False, 'import threading\n'), ((1994, 2030), 'numpy.roll', 'numpy.roll', (['self.basis', '(position * 4)'], {}), '(self.basis, position * 4)\n', (2004, 2030), False, 'import numpy\n'), ((2173, 2194), 'numpy.roll', 'numpy.roll', (['pixels', '(4)'], {}), '(pixels, 4)\n', (2183, 2194), False, 'import numpy\n'), ((2230, 2245), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2240, 2245), False, 'import time\n'), ((2136, 2153), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (2146, 2153), False, 'import time\n'), ((2299, 2320), 'numpy.roll', 'numpy.roll', (['pixels', '(4)'], {}), '(pixels, 4)\n', (2309, 2320), False, 'import numpy\n'), ((2415, 2430), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2425, 2430), False, 'import time\n'), ((2624, 2640), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2634, 2640), False, 'import time\n'), ((2779, 2800), 'numpy.roll', 'numpy.roll', (['pixels', '(4)'], {}), '(pixels, 4)\n', (2789, 2800), False, 'import numpy\n'), ((2844, 2859), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2854, 2859), False, 'import time\n'), ((2928, 2949), 'numpy.roll', 'numpy.roll', (['pixels', '(4)'], {}), '(pixels, 4)\n', (2938, 2949), False, 'import numpy\n'), ((3007, 3020), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (3017, 3020), False, 'import time\n'), ((3339, 3354), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (3349, 3354), False, 'import time\n'), ((3485, 3500), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (3495, 3500), False, 'import time\n'), ((3647, 3672), 'modules.pixel_ring.pixel_ring.write', 'pixel_ring.write', (['(3)', 'data'], {}), '(3, data)\n', (3663, 3672), False, 'from modules.pixel_ring import pixel_ring\n'), ((3881, 3894), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3891, 3894), False, 'import time\n'), ((3934, 3947), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3944, 3947), False, 'import time\n'), ((3987, 4000), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3997, 4000), False, 'import time\n'), ((4038, 4051), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4048, 4051), False, 'import time\n'), ((3309, 3325), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (3319, 3325), False, 'import time\n'), ((3455, 3471), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (3465, 3471), False, 'import time\n')]