from __future__ import division
from physicsTable import *
from physicsTable.models import PointSimulation
from get_flood_length import Flooder
from view_trial_parsing import view_trial_parsing
import glob, os, sys
MAX_WALLS_DEL = 5
MIN_WALLS_DEL = 1
def get_noncontained_walls(tr, enforce_goal_switch=False,
min_walls_del=MIN_WALLS_DEL, max_walls_del=MAX_WALLS_DEL):
orig_goal = get_goal(tr)
orig_rev_goal = get_goal(tr, reverse_dir=True)
walls = list(tr.normwalls)
new_walls = None
num_walls_del = min_walls_del
min_flood_dist = 9999999 # big integer
    while new_walls is None and num_walls_del <= max_walls_del:
for i in range(len(tr.normwalls) - num_walls_del + 1):
for _ in range(num_walls_del):
tr.normwalls.remove(tr.normwalls[i])
fl = Flooder(tr, useBallSize=True)
fl.run()
# assumes gsteps is steps to the outside goal
if fl.gsteps != -1 and fl.gsteps < min_flood_dist:
new_goal = get_goal(tr)
new_rev_goal = get_goal(tr, reverse_dir=True)
if not enforce_goal_switch or (orig_goal != new_goal
and orig_rev_goal != new_rev_goal):
min_flood_dist = fl.gsteps
new_walls = tr.normwalls
tr.normwalls = list(walls)
num_walls_del += 1
if new_walls is not None:
        print('Removed ' + str(num_walls_del - 1) + ' walls.')
return new_walls
def get_noncontained_walls_same_path(tr):
orig_path = get_path(tr)
orig_rev_path = get_path(tr, reverse_dir=True)
walls = list(tr.normwalls)
new_walls = None
min_flood_dist = 9999999 # big integer
for i in range(len(tr.normwalls)):
tr.normwalls.remove(tr.normwalls[i])
fl = Flooder(tr, useBallSize=True)
fl.run()
# assumes gsteps is steps to the outside goal
if fl.gsteps != -1 and fl.gsteps < min_flood_dist:
new_path = get_path(tr)
new_rev_path = get_path(tr, reverse_dir=True)
if orig_path == new_path and orig_rev_path == new_rev_path:
min_flood_dist = fl.gsteps
new_walls = tr.normwalls
tr.normwalls = list(walls)
return new_walls
def get_path(tr, reverse_dir=False):
tb = tr.makeTable()
if reverse_dir:
bvel = tr.ball[1]
tb.balls.setvel([-bvel[0], -bvel[1]])
return tb.simulate(return_path=True)[1]
def get_goal(tr, reverse_dir=False):
tb = tr.makeTable()
if reverse_dir:
bvel = tr.ball[1]
tb.balls.setvel([-bvel[0], -bvel[1]])
return tb.simulate()
if __name__ == '__main__':
assert len(sys.argv) == 2, "Need trial or directory"
if sys.argv[1][-4:] == 'json':
tr = loadTrialFromJSON(sys.argv[1])
tr.normwalls = get_noncontained_walls(tr)
if tr.normwalls is None:
            print('No wall can be removed.')
else:
view_trial_parsing(tr, view_parsing=False)
else:
        print('Reading trials in directory ' + sys.argv[1])
for tr_path in glob.iglob(sys.argv[1] + '/*.json'):
if tr_path[-15:] == 'trial_list.json':
continue
            print('Processing ' + tr_path)
tr = loadTrialFromJSON(tr_path)
tr.normwalls = get_noncontained_walls(tr)
if tr.normwalls is None:
                print('No wall can be removed.')
continue
tr.jsonify(tr_path[:-5] + '_noncont.json', askoverwrite=False)
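# Usage sketch (illustrative; the script's real filename is not shown here,
# 'remove_walls.py' is a placeholder):
#   python remove_walls.py trial.json     # preview a single trial with walls removed
#   python remove_walls.py trials_dir/    # write <name>_noncont.json for each trial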
|
import logging
logger = logging.getLogger(__name__)
from nymms.schemas import Result, types
from nymms.daemon import NymmsDaemon
from nymms.resources import Monitor
from nymms.utils import commands
from nymms.config.yaml_config import load_config, EmptyConfig
import arrow
TIMEOUT_OUTPUT = "Command timed out after %d seconds."
class Probe(NymmsDaemon):
state_manager = None
def get_private_context(self, private_context_file):
if not private_context_file:
return None
try:
return load_config(private_context_file)[1]
except (IOError, EmptyConfig):
logger.exception("Unable to open private context file: %s",
private_context_file)
return None
    # TODO: This calls on state_manager but setting up of the state_manager
    #       needs to be handled in the subclass. Not sure how I should handle
    #       this, but I really like the idea of these being base class
    #       methods since in reality all reactors should have some sort of
    #       state backend, even if it's a no-op.
def get_state(self, task_id):
return self.state_manager.get_state(task_id)
def get_task(self, **kwargs):
raise NotImplementedError
def resubmit_task(self, task, delay, **kwargs):
raise NotImplementedError
def submit_result(self, result, **kwargs):
raise NotImplementedError
def delete_task(self, task):
raise NotImplementedError
def execute_task(self, task, timeout, **kwargs):
log_prefix = "%s - " % (task.id,)
monitor = Monitor.registry[task.context['monitor']['name']]
command = monitor.command.command_string
current_attempt = int(task.attempt) + 1
logger.debug(log_prefix + "attempt %d, executing: %s", current_attempt,
command)
result = Result({'id': task.id,
'timestamp': task.created,
'task_context': task.context})
try:
output = monitor.execute(task.context, timeout,
self._private_context)
result.output = output
result.state = types.STATE_OK
except commands.CommandException as e:
if isinstance(e, commands.CommandFailure):
result.state = e.return_code
result.output = e.output
if isinstance(e, commands.CommandTimeout):
result.state = types.STATE_UNKNOWN
result.output = (TIMEOUT_OUTPUT % timeout)
except Exception as e:
result.state = types.STATE_UNKNOWN
result.output = str(e)
result.state_type = types.STATE_TYPE_HARD
result.validate()
return result
def expire_task(self, task, task_expiration):
if task_expiration:
now = arrow.get()
task_lifetime = now.timestamp - task.created.timestamp
if task_lifetime > task_expiration:
logger.debug("Task %s is older than expiration limit %d. "
"Skipping.", task.id, task_expiration)
return True
return False
return False
def handle_task(self, task, **kwargs):
log_prefix = "%s - " % (task.id,)
task_expiration = kwargs.get('task_expiration', None)
if self.expire_task(task, task_expiration):
return None
# Used to add the command context to the task
monitor = Monitor.registry[task.context['monitor']['name']]
command = monitor.command
task.context = command.build_context(task.context)
previous_state = self.get_state(task.id)
# check if the timeout is defined on the task first, if not then
# go with what was passed into handle_task via run
timeout = task.context.get('monitor_timeout',
kwargs.get('monitor_timeout'))
max_retries = task.context.get('max_retries',
kwargs.get('max_retries'))
last_attempt = int(task.attempt)
current_attempt = last_attempt + 1
result = self.execute_task(task, timeout, **kwargs)
# Trying to emulate this:
# http://nagios.sourceforge.net/docs/3_0/statetypes.html
if result.state == types.STATE_OK:
            if (previous_state and
                    previous_state.state != types.STATE_OK and
                    previous_state.state_type == types.STATE_TYPE_SOFT):
result.state_type = types.STATE_TYPE_SOFT
else:
logger.debug(log_prefix + "current_attempt: %d, max_retries: %d",
current_attempt, max_retries)
if current_attempt <= max_retries:
# XXX Hate this logic - hope to find a cleaner way to handle
# it someday.
if (not previous_state or
previous_state.state_type == types.STATE_TYPE_SOFT or
previous_state.state == types.STATE_OK):
result.state_type = types.STATE_TYPE_SOFT
delay = task.context.get('retry_delay',
kwargs.get('retry_delay'))
delay = max(delay, 0)
logger.debug('Resubmitting task with %ds delay.', delay)
self.resubmit_task(task, delay, **kwargs)
else:
logger.debug("Retry limit hit, not resubmitting.")
result.validate()
return result
def run(self, **kwargs):
""" This will run in a tight loop. It is expected that the subclass's
get_task() method will introduce a delay if the results queue is
empty.
"""
private_context_file = kwargs.get('private_context_file', None)
self._private_context = self.get_private_context(private_context_file)
while True:
task = self.get_task(**kwargs)
if not task:
logger.debug("Task queue is empty.")
continue
result = self.handle_task(task, **kwargs)
if result:
self.submit_result(result, **kwargs)
self.delete_task(task)
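# --- Illustrative sketch, not part of nymms: a trivial in-memory Probe ---
# A real deployment wires get_task/submit_result/delete_task to a queue
# backend (e.g. SQS) and plugs in a state backend; the deque, the no-op state
# lookup and the print() below are hypothetical stand-ins that only show which
# hooks a concrete subclass must provide for run() to work.
import collections

class InMemoryProbe(Probe):
    queue = collections.deque()

    def get_state(self, task_id):
        # no state backend in this sketch: every task looks like a first attempt
        return None

    def get_task(self, **kwargs):
        return self.queue.popleft() if self.queue else None

    def resubmit_task(self, task, delay, **kwargs):
        # a real backend would honor the delay before redelivery
        self.queue.append(task)

    def submit_result(self, result, **kwargs):
        print(result)

    def delete_task(self, task):
        pass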
|
# Modified: 2021-08-30
# Description: Implements a controller for /player
#
from fastapi import APIRouter, Path, HTTPException, status, Query
from models import player_model, game_model
from .responses import CustomJSONResponse as JSONResponse
from . import ID_REGEX, PLAYER_ID_DESC, SKIP_DESC, LIMIT_DESC
router = APIRouter()
@router.post("/", response_description="Create a new player", response_model=player_model.PlayerModel)
async def create_player(player_input: player_model.PlayerInput) -> JSONResponse:
"""Handles /player create requests."""
# ensure auth0_id is unique
if await player_model.find_by_auth0_id(player_input.auth0_id) is not None:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"User with auth0_id={player_input.auth0_id} already exists"
)
if (created := await player_model.create(player_input)) is None:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Internal server error")
return JSONResponse(status_code=status.HTTP_201_CREATED, content=created)
@router.get("/{player_id}", response_description="Get a player", response_model=player_model.PlayerModel)
async def retrieve_player(
player_id: str = Path(..., regex=ID_REGEX, description=PLAYER_ID_DESC),
) -> JSONResponse:
"""Handles /player/{player_id} retrieve requests."""
if (retrieved := await player_model.find_by_id(player_id)) is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"User with id={player_id} not found")
return JSONResponse(status_code=status.HTTP_200_OK, content=retrieved)
@router.get(
"/{player_id}/games",
response_description="Get all of a player's games (both current and completed)",
response_model=game_model.GameModelArray
)
async def retrieve_player_games(
player_id: str = Path(..., regex=ID_REGEX, description=PLAYER_ID_DESC),
skip: int = Query(..., ge=0, description=SKIP_DESC),
    limit: int = Query(..., ge=1, le=100, description=LIMIT_DESC),
) -> JSONResponse:
"""Handles /player/{player_id}/games retrieve requests."""
    # find_by_player_id should always return an array (barring database connection issues); if the array is empty, we
    # need to check whether the player exists so that we can return a specific 404 (otherwise we return the empty array)
if (retrieved := await game_model.find_by_player_id(player_id, skip, limit)) is None:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Internal server error")
    if not retrieved and await player_model.find_by_id(player_id) is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"User with id={player_id} not found")
return JSONResponse(status_code=status.HTTP_200_OK, content=retrieved)
@router.get(
"/{player_id}/games/current",
response_description="Get a player's current games",
response_model=game_model.GameModelArray
)
async def retrieve_player_games_current(
player_id: str = Path(..., regex=ID_REGEX, description=PLAYER_ID_DESC),
skip: int = Query(..., ge=0, description=SKIP_DESC),
limit: int = Query(..., ge=1, le=100, description=LIMIT_DESC),
) -> JSONResponse:
"""Handles /player/{player_id}/games/current retrieve requests."""
if (retrieved := await game_model.find_by_player_id(player_id, skip, limit, {"completed": False})) is None:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Internal server error")
    if not retrieved and await player_model.find_by_id(player_id) is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"User with id={player_id} not found")
return JSONResponse(status_code=status.HTTP_200_OK, content=retrieved)
@router.get(
"/{player_id}/games/completed",
response_description="Get a player's completed games",
response_model=game_model.GameModelArray
)
async def retrieve_player_games_completed(
player_id: str = Path(..., regex=ID_REGEX, description=PLAYER_ID_DESC),
skip: int = Query(..., ge=0, description=SKIP_DESC),
limit: int = Query(..., ge=1, le=100, description=LIMIT_DESC),
) -> JSONResponse:
"""Handles /player/{player_id}/games/current retrieve requests."""
if (retrieved := await game_model.find_by_player_id(player_id, skip, limit, {"completed": True})) is None:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Internal server error")
    if not retrieved and await player_model.find_by_id(player_id) is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"User with id={player_id} not found")
return JSONResponse(status_code=status.HTTP_200_OK, content=retrieved)
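# Illustrative wiring, not part of this module: the router above would be
# mounted on the application roughly like this (prefix and tag are assumed):
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/player", tags=["player"])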
|
import json
AJAX_URL = '/ajax_callback'
AJAX_FUNC_URL = '/ajax_func_callback'
js_manager = None
js_ajax = None
live_methods = {}
class RE:
def __init__(self, re):
self.re = re
def re(pattern):
return RE(pattern)
def js_procedure(func_id, ajax_args=''):
return '''
Ext.Ajax.request(
{
url: '%s',
method: 'GET',
params: {
fn: %d%s
},
success: function () { eval(arguments[0].responseText); }
}
);
''' % (
AJAX_URL,
func_id,
ajax_args
)
def js_function(func_id, ajax_args=''):
return '''
var response = Ext.Ajax.request(
{
url: '%s',
method: 'GET',
async: false,
params: {
fn: %d%s
},
}
);
var ajax_result = JSON.parse(response.responseText);
return ajax_result.data;
''' % (
AJAX_FUNC_URL,
func_id,
ajax_args
)
def list2extjs(l):
return '[ %s ]' % ', '.join([encode(v) for v in l])
def dict2extjs(d):
return '{ %s }' % ', '.join('%s: %s' % (k, encode(v)) for k,v in d.items())
class JsProcedure:
def __init__(self, *a, **kw):
self.func = a[0]
self.params = kw.get('params', {})
self.args = kw.get('args', [])
@property
def js(self):
return 'function (%s) { %s }' % (', '.join(self.args), js_ajax(self, arg_dict=self.params, f_type=js_procedure))
class JsFunction(JsProcedure):
@property
def js(self):
return 'function (%s) { %s }' % (', '.join(self.args), js_ajax(self, arg_dict=self.params, f_type=js_function))
class JsStrFunction:
def __init__(self, *a, **kw):
self.code = a[0]
self.args = kw.get('args', [])
@property
def js(self):
return 'function (%s) { %s }' % (', '.join(self.args), self.code)
function = JsFunction
procedure = JsProcedure
strfunction = JsStrFunction
def encode(o):
if isinstance(o, JsNode):
return str(o)
if isinstance(o, RE):
return o.re
elif isinstance(o, (list, tuple)):
return list2extjs(o)
elif isinstance(o, bool):
return str(o).lower()
elif isinstance(o, int):
return str(o)
elif isinstance(o, float):
return str(o)
elif isinstance(o, str):
return '\'%s\'' % o
elif isinstance(o, (function, procedure, strfunction)):
return o.js
elif isinstance(o, dict):
return dict2extjs(o)
elif isinstance(o, JsObject):
return o._id
else:
return str(o)
class JsBlock:
def __init__(self, *args, **kwargs):
self.code = args[0]
self.args = args[1] if len(args) > 1 else []
def __str__(self):
return self.code
def write(code):
if js_manager:
js_manager.write(str(code))
def __lshift__(self, value):
print('test')
class JsManager(object):
def __init__(self):
self.output = []
def write(self, data):
self.output.append(data)
def __str__(self):
output = self.output[:]
s = '\n'.join(output)
return s
class JsNode(object):
def __init__(self, name='', parent=None):
if parent and parent.name:
self.name = parent.name + '.' + name
else:
self.name = name
def __getattr__(self, attr):
return JsNode(attr, self)
def __setattr__(self, attr, value):
if attr == 'name':
super(JsNode, self).__setattr__(attr, value)
else:
value = encode(value)
if self is client.var:
s = 'var %s = %s' % (attr, value)
else:
name = self.name + '.' if self.name else ''
s = '%s%s = %s' % (name, attr, value)
write(s)
def __add__(self, other):
return JsNode('%s + %s' % (encode(self), encode(other)))
def __sub__(self, other):
return JsNode('%s - %s' % (encode(self), encode(other)))
def __mul__(self, other):
return JsNode('%s * %s' % (encode(self), encode(other)))
def __truediv__(self, other):
return JsNode('%s / %s' % (encode(self), encode(other)))
def __call__(self, *args, **kwargs):
l = []
d = []
for arg in args:
l.append(encode(arg))
for k, v in kwargs.items():
d.append('%s=%s' % (k, encode(v)))
_args = []
if l:
_args.extend(l)
if d:
_args.extend(d)
s = '%s(%s)' % (self.name, ','.join(_args))
self.name = s
return self
def __str__(self):
return self.name
class JsClient(JsNode):
def __init__(self, name='', parent=None):
if parent and parent.name:
self.name = parent.name + '.' + name
else:
self.name = name
self.__dict__['var'] = JsNode('var')
def __lshift__(self, other):
write(other)
class JsObjectNode(JsNode):
def __call__(self, *args, **kwargs):
super(JsObjectNode, self).__call__(*args, **kwargs)
write(str(self))
class JsOutput(object):
    def __init__(self, manager=True):
        global js_manager
        self.body = []
        if manager:
            js_manager = self
def __lshift__(self, other):
self.write(other)
def write(self, code):
self.body.append(code)
def __str__(self):
s = ';\n'.join(self.body)
return s
out = output = JsOutput
class JsObject(object):
def __init__(self, *args, **kwargs):
self._loading = True
self._id = 'js_%s' % id(self)
self._create()
self._js = kwargs
self._loading = False
def _create(self):
pass
def _update(self, config):
self._js.update(config)
def __getattr__(self, attr):
if not self.__dict__.get('_loading', True):
if attr in self._js:
return self._js.get(attr)
else:
return JsObjectNode(attr, JsNode(self._id))
def __setattr__(self, attr, value):
        if '_js' in self.__dict__ and attr not in self.__dict__:
self[attr] = value
else:
super(JsObject, self).__setattr__(attr, value)
def __setitem__(self, attr, value):
if not self._loading:
write('%s.%s = %s' % (self._id, attr, json.dumps(value)))
self._js[attr] = value
def alert(msg):
write(client.alert(msg))
def load(filename, klass=JsObject):
    with open(filename) as f:
        return klass(**json.load(f))
cli = client = JsClient()
if __name__ == '__main__':
class MyManager(JsManager):
def write(self, code):
print(code)
js_manager = MyManager()
write(client.console.log('test'))
n = JsNode('console')
write(n.print(n.log({'id': 'item id'})))
client.var.x = 1
client.x.y = client.window.open('http://www.google.com')
client << client.x.y()([client.x])
client << client.Ext.create('window', {'left': 10})
client << client.x
# test block
print(encode({'click': 'call'}))
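    # Roughly, the statements above emit JavaScript strings such as
    #   console.log('test')                  (from client.console.log('test'))
    #   var x = 1                            (from client.var.x = 1)
    #   Ext.create('window',{ left: 10 })    (from the client.Ext.create call)
    # traced by hand through encode()/JsNode, so spacing may differ slightly.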
|
# Autogenerated file.
from .client import TvocClient # type: ignore
|
#!/usr/local/bin/python
import cgi
import cgitb; cgitb.enable()
import os, sys
try: import msvcrt # are we on Windows?
except ImportError: pass # nope, no problem
else: # yep, need to set I/O to binary mode
for fd in (0, 1): msvcrt.setmode(fd, os.O_BINARY)
UPLOAD_DIR = "/tmp"
HTML_TEMPLATE = \
"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><title>Upload Files</title>
</head><body><h1>Upload Files</h1>
<form action="%(SCRIPT_NAME)s" method="POST" enctype="multipart/form-data">
File name: <input name="file_1" type="file"><br>
File name: <input name="file_2" type="file"><br>
File name: <input name="file_3" type="file"><br>
<input name="submit" type="submit">
</form> </body> </html>"""
def print_html_form():
""" print the form to stdout, with action set to this very script (a
'self-posting form': script both displays AND processes the form). """
print "content-type: text/html; charset=iso-8859-1\n"
print HTML_TEMPLATE % {'SCRIPT_NAME': os.environ['SCRIPT_NAME']}
def save_uploaded_file(form_field, upload_dir):
""" Save to disk a file just uploaded, form_field being the name of the
file input field on the form. No-op if field or file is missing. """
form = cgi.FieldStorage()
    if form_field not in form: return
fileitem = form[form_field]
if not fileitem.file: return
    # use only the base name of the uploaded filename so files cannot land outside upload_dir
    fout = open(os.path.join(upload_dir, os.path.basename(fileitem.filename)), 'wb')
while True:
chunk = fileitem.file.read(100000)
if not chunk: break
fout.write(chunk)
fout.close()
save_uploaded_file("file_1", UPLOAD_DIR)
save_uploaded_file("file_2", UPLOAD_DIR)
save_uploaded_file("file_3", UPLOAD_DIR)
print_html_form()
|
from datetime import date
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.decorators import decorator_from_middleware_with_args
from mock import Mock, patch
from regulations import url_caches
class UrlCachesTests(TestCase):
@patch('regulations.url_caches.date')
def test_daily_cache(self, patched_date):
"""Cache should be consistent within a day but not between days"""
fn = Mock(return_value=HttpResponse('response'))
request = RequestFactory().get('a-path')
mock_caches = {
'example': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
},
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
},
}
with self.settings(CACHES=mock_caches):
daily_cache = decorator_from_middleware_with_args(
url_caches.DailyCacheMiddleware)(cache_alias='example')
wrapped_fn = daily_cache(fn)
patched_date.today.return_value = date(2010, 10, 10)
self.assertEqual(fn.call_count, 0)
wrapped_fn(request)
self.assertEqual(fn.call_count, 1)
wrapped_fn(request)
self.assertEqual(fn.call_count, 1)
patched_date.today.return_value = date(2010, 10, 11)
wrapped_fn(request)
self.assertEqual(fn.call_count, 2)
wrapped_fn(request)
self.assertEqual(fn.call_count, 2)
|
# Copyright The IETF Trust 2007-2019, All Rights Reserved
from django import forms
from ietf.doc.models import Document
from ietf.meeting.models import Session
from ietf.meeting.utils import add_event_info_to_session_qs
# ---------------------------------------------
# Globals
# ---------------------------------------------
VALID_SLIDE_EXTENSIONS = ('.doc','.docx','.pdf','.ppt','.pptx','.txt','.zip')
VALID_MINUTES_EXTENSIONS = ('.txt','.html','.htm','.pdf')
VALID_AGENDA_EXTENSIONS = ('.txt','.html','.htm')
VALID_BLUESHEET_EXTENSIONS = ('.pdf','.jpg','.jpeg')
#----------------------------------------------------------
# Forms
#----------------------------------------------------------
class RecordingForm(forms.Form):
external_url = forms.URLField(label='Url')
    session = forms.ModelChoiceField(queryset=Session.objects, empty_label='')
def __init__(self, *args, **kwargs):
self.meeting = kwargs.pop('meeting')
super(RecordingForm, self).__init__(*args,**kwargs)
self.fields['session'].queryset = add_event_info_to_session_qs(
Session.objects.filter(meeting=self.meeting, type__in=['regular','plenary','other'])
).filter(current_status='sched').order_by('group__acronym')
class RecordingEditForm(forms.ModelForm):
class Meta:
model = Document
fields = ['external_url']
def __init__(self, *args, **kwargs):
super(RecordingEditForm, self).__init__(*args, **kwargs)
self.fields['external_url'].label='Url'
|
# -*- coding: utf-8 -*-
"""
mslib.conftest
~~~~~~~~~~~~~~
common definitions for py.test
This file is part of mss.
:copyright: Copyright 2016-2017 Reimar Bauer
:copyright: Copyright 2016-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import importlib.machinery
import os
import sys
# Disable pyc files
sys.dont_write_bytecode = True
import pytest
import fs
from mslib.mswms.demodata import DataFiles
import mslib._tests.constants as constants
def pytest_addoption(parser):
parser.addoption("--mss_settings", action="store")
def pytest_generate_tests(metafunc):
option_value = metafunc.config.option.mss_settings
if option_value is not None:
mss_settings_file_fs = fs.open_fs(constants.MSS_CONFIG_PATH)
mss_settings_file_fs.writetext("mss_settings.json", option_value)
mss_settings_file_fs.close()
if os.getenv("TESTS_VISIBLE") == "TRUE":
Display = None
else:
try:
from pyvirtualdisplay import Display
except ImportError:
Display = None
if not constants.SERVER_CONFIG_FS.exists(constants.SERVER_CONFIG_FILE):
print('\n configure testdata')
# ToDo check pytest tmpdir_factory
examples = DataFiles(data_fs=constants.DATA_FS,
server_config_fs=constants.SERVER_CONFIG_FS)
examples.create_server_config(detailed_information=True)
examples.create_data()
if not constants.SERVER_CONFIG_FS.exists(constants.MSCOLAB_CONFIG_FILE):
config_string = f'''# -*- coding: utf-8 -*-
"""
mslib.mscolab.conf.py.example
~~~~~~~~~~~~~~~~~~~~
config for mscolab.
This file is part of mss.
:copyright: Copyright 2019 Shivashis Padhi
:copyright: Copyright 2019-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class mscolab_settings(object):
# SQLALCHEMY_DB_URI = 'mysql://user:[email protected]/mscolab'
import os
import logging
import fs
import secrets
from werkzeug.urls import url_join
ROOT_DIR = '{constants.ROOT_DIR}'
# directory where mss output files are stored
root_fs = fs.open_fs(ROOT_DIR)
if not root_fs.exists('colabTestData'):
root_fs.makedir('colabTestData')
BASE_DIR = ROOT_DIR
DATA_DIR = fs.path.join(ROOT_DIR, 'colabTestData')
# mscolab data directory
MSCOLAB_DATA_DIR = fs.path.join(DATA_DIR, 'filedata')
# used to generate and parse tokens
SECRET_KEY = secrets.token_urlsafe(16)
SQLALCHEMY_DB_URI = 'sqlite:///' + url_join(DATA_DIR, 'mscolab.db')
# mscolab file upload settings
UPLOAD_FOLDER = fs.path.join(DATA_DIR, 'uploads')
MAX_UPLOAD_SIZE = 2 * 1024 * 1024 # 2MB
# text to be written in new mscolab based ftml files.
STUB_CODE = """<?xml version="1.0" encoding="utf-8"?>
<FlightTrack version="1.7.6">
<ListOfWaypoints>
<Waypoint flightlevel="250" lat="67.821" location="Kiruna" lon="20.336">
<Comments></Comments>
</Waypoint>
<Waypoint flightlevel="250" lat="78.928" location="Ny-Alesund" lon="11.986">
<Comments></Comments>
</Waypoint>
</ListOfWaypoints>
</FlightTrack>
"""
enable_basic_http_authentication = False
'''
ROOT_FS = fs.open_fs(constants.ROOT_DIR)
if not ROOT_FS.exists('mscolab'):
ROOT_FS.makedir('mscolab')
with fs.open_fs(fs.path.join(constants.ROOT_DIR, "mscolab")) as mscolab_fs:
# windows needs \\ or / but mixed is terrible. *nix needs /
mscolab_fs.writetext('mscolab_settings.py', config_string.replace('\\', '/'))
path = fs.path.join(constants.ROOT_DIR, 'mscolab', 'mscolab_settings.py')
parent_path = fs.path.join(constants.ROOT_DIR, 'mscolab')
importlib.machinery.SourceFileLoader('mss_wms_settings', constants.SERVER_CONFIG_FILE_PATH).load_module()
sys.path.insert(0, constants.SERVER_CONFIG_FS.root_path)
importlib.machinery.SourceFileLoader('mscolab_settings', path).load_module()
sys.path.insert(0, parent_path)
@pytest.fixture(scope="session", autouse=True)
def configure_testsetup(request):
if Display is not None:
# needs for invisible window output xvfb installed,
# default backend for visible output is xephyr
# by visible=0 you get xvfb
VIRT_DISPLAY = Display(visible=0, size=(1280, 1024))
VIRT_DISPLAY.start()
yield
VIRT_DISPLAY.stop()
else:
yield
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = "Utility_Functions",
author="Allison Wu",
author_email="[email protected]",
description="Utility functions",
notes = "Add in stats_functions",
version = "0.3.1",
packages = find_packages(),
long_description=long_description,
classifiers = ['Programming Language :: Python :: 3.7'],
)
|
import json
import os.path
from urllib import request, error
import olerror, olresult
# importorator
__all__ = ['ExamToolsLookup']
class ExamToolsLookup:
def lookup(self, call):
"""
Uses exam.tools to look up information on a US callsign
:param call: the callsign to look up
        :returns: LookupResult class filled with information from exam.tools
:raises LookupResultError: if the lookup returns no information
"""
# setup
lr = olresult.LookupResult()
# make request
        req = f'https://exam.tools/api/uls/individual/{call}'
with request.urlopen(req) as url:
data = json.loads(url.read().decode())
# check if callsign or not
if 'type' in data:
if data['type'] == 'NotFound':
raise olerror.LookupResultError('ExamTools')
# ## GET THE GOODS ## #
lr.source = 'exam.tools'
# basic info
lr.callsign = data['callsign']
first_name = data['first_name']
middle_initial = data['middle_initial']
        last_name = data['last_name']
lr.name = f'{first_name} {middle_initial} {last_name}'
lr.opclass = data['license_class']
# location
lr.country = 'United States'
lr.city = data['city']
lr.state = data['state']
lr.zip = data['zip']
# club stuff (ASK ABOUT HOW THIS PART WORKS BECAUSE IT DOES NOT RIGHT NOW)
# if data['type'] == 'CLUB':
# lr.club = True
# lr.trusteename = data['trustee']['name']
# lr.trusteecall = data['trustee']['callsign']
# uls stuff
lr.frn = data['frn']
# raw data
lr.raw = data
        return lr
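# Illustrative usage, not part of the module; 'W1AW' is just an example
# callsign, and olerror.LookupResultError is raised when nothing is found.
if __name__ == '__main__':
    result = ExamToolsLookup().lookup('W1AW')
    print(result.name, result.opclass, result.state)
|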
import sys
import logging
from vogue.build.application_tag import build_application_tag
from vogue.exceptions import MissingApplicationTag
LOG = logging.getLogger(__name__)
def load_aplication_tags(adapter, json_list):
"""Will go through all application tags in json_list and add/update them to trending-db.
Args:
adapter(adapter.VogueAdapter)
json_list(list(dict)): [{'tag':'MELPCFR030', 'category':'wgs',...},...]
"""
for application_tag in json_list:
try:
mongo_application_tag = build_application_tag(application_tag)
adapter.add_or_update_document(mongo_application_tag, adapter.app_tag_collection)
except MissingApplicationTag:
LOG.warning("ApplicationTag missing in JSON list")
|
import unittest
from functools import partial
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.metrics import pairwise_distances
from MulticoreTSNE import MulticoreTSNE
make_blobs = partial(make_blobs, random_state=0)
MulticoreTSNE = partial(MulticoreTSNE, random_state=3)
def pdist(X):
"""Condensed pairwise distances, like scipy.spatial.distance.pdist()"""
return pairwise_distances(X)[np.triu_indices(X.shape[0], 1)]
class TestMulticoreTSNE(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.Xy = make_blobs(20, 100, 2, shuffle=False)
def test_tsne(self):
X, y = self.Xy
tsne = MulticoreTSNE(perplexity=5, n_iter=500)
E = tsne.fit_transform(X)
self.assertEqual(E.shape, (X.shape[0], 2))
max_intracluster = max(pdist(E[y == 0]).max(),
pdist(E[y == 1]).max())
min_intercluster = pairwise_distances(E[y == 0],
E[y == 1]).min()
self.assertGreater(min_intercluster, max_intracluster)
def test_n_jobs(self):
X, y = self.Xy
tsne = MulticoreTSNE(n_iter=100, n_jobs=-2)
tsne.fit_transform(X)
def test_perplexity(self):
X, y = self.Xy
tsne = MulticoreTSNE(perplexity=X.shape[0], n_iter=100)
tsne.fit_transform(X)
def test_dont_change_x(self):
X = np.random.random((20, 4))
X_orig = X.copy()
MulticoreTSNE(n_iter=400).fit_transform(X)
np.testing.assert_array_equal(X, X_orig)
def test_init_from_y(self):
X, y = self.Xy
tsne = MulticoreTSNE(n_iter=500)
E = tsne.fit_transform(X)
tsne = MulticoreTSNE(n_iter=0, init=E)
E2 = tsne.fit_transform(X)
np.testing.assert_allclose(E, E2)
tsne = MulticoreTSNE(n_iter=1, init=E)
E2 = tsne.fit_transform(X)
mean_diff = np.abs((E - E2).sum(1)).mean()
self.assertLess(mean_diff, 30)
def test_attributes(self):
X, y = self.Xy
N_ITER = 200
tsne = MulticoreTSNE(n_iter=N_ITER)
E = tsne.fit_transform(X, y)
self.assertIs(tsne.embedding_, E)
self.assertGreater(tsne.kl_divergence_, 0)
self.assertEqual(tsne.n_iter_, N_ITER)
|
import pandas as pd
import numpy as np
import unittest
import decipy.executors as exe
import decipy.normalizers as norm
import decipy.weigtings as wgt
matrix = np.array([
[4, 3, 2, 4],
[5, 4, 3, 7],
[6, 5, 5, 3],
])
alts = ['A1', 'A2', 'A3']
crits = ['C1', 'C2', 'C3', 'C4']
beneficial = [True, True, True, True]
weights = [0.10, 0.20, 0.30, 0.40]
xij = pd.DataFrame(matrix, index=alts, columns=crits)
class NormalizerTestCase(unittest.TestCase):
def setUp(self):
self.vector = norm.Vector(xij=xij, beneficial=beneficial)
self.minmax = norm.MinMax(xij=xij, beneficial=beneficial)
self.minmax2 = norm.MinMax2(xij=xij, beneficial=beneficial)
self.max = norm.Max(xij=xij, beneficial=beneficial)
self.sum = norm.Sum(xij=xij, beneficial=beneficial)
self.zscore = norm.ZScore(xij=xij, beneficial=beneficial)
self.gaussian = norm.Gaussian(xij=xij, beneficial=beneficial)
self.softmax = norm.SoftMax(xij=xij, beneficial=beneficial)
self.sigmoid = norm.Sigmoid(xij=xij, beneficial=beneficial)
def test_dataframe(self):
self.assertIsInstance(
self.vector.dataframe, pd.DataFrame,
msg="Normalizer dataframe method should return pandas DataFrame instance")
self.assertIsInstance(
self.vector.dataframe, pd.DataFrame,
msg="Normalizer dataframe method should return pandas DataFrame instance")
def test_vector_values(self):
results = np.array([[0.4558, 0.4243, 0.3244, 0.4650],
[0.5698, 0.5657, 0.4867, 0.8137],
[0.6838, 0.7071, 0.8111, 0.3487]])
np.testing.assert_array_equal(self.vector.dataframe.values, results)
def test_minmax_values(self):
results = np.array([[0.0000, 0.0000, 0.0000, 0.2500],
[0.5000, 0.5000, 0.3333, 1.0000],
[1.0000, 1.0000, 1.0000, 0.0000]])
np.testing.assert_array_equal(self.minmax.dataframe.values, results)
def test_minmax2_values(self):
results = np.array([[0.0000, 0.0000, 0.0000, 0.2500],
[0.5000, 0.5000, 0.3333, 1.0000],
[1.0000, 1.0000, 1.0000, 0.0000]])
np.testing.assert_array_equal(self.minmax2.dataframe.values, results)
def test_max_values(self):
results = np.array([[0.6667, 0.6000, 0.4000, 0.5714],
[0.8333, 0.8000, 0.6000, 1.0000],
[1.0000, 1.0000, 1.0000, 0.4286]])
np.testing.assert_array_equal(self.max.dataframe.values, results)
def test_sum_values(self):
results = np.array([[0.2667, 0.2500, 0.2000, 0.2857],
[0.3333, 0.3333, 0.3000, 0.5000],
[0.4000, 0.4167, 0.5000, 0.2143]])
np.testing.assert_array_equal(self.sum.dataframe.values, results)
def test_zscore_values(self):
results = np.array([[-1.2247, -1.2247, -1.069, -0.3922],
[0.0000, 0.0000, -0.2673, 1.3728],
[1.2247, 1.2247, 1.3363, -0.9806]])
np.testing.assert_array_equal(self.zscore.dataframe.values, results)
def test_gaussian_values(self):
results = np.array([[0.1103, 0.1103, 0.1425, 0.3474],
[0.5000, 0.5000, 0.3946, 0.9151],
[0.8897, 0.8897, 0.9093, 0.1634]])
np.testing.assert_array_equal(self.gaussian.dataframe.values, results)
def test_softmax_values(self):
results = np.array([[-0.5458, -0.5458, -0.4888, -0.1936],
[0.0000, 0.0000, -0.1328, 0.5957],
[0.5458, 0.5458, 0.5838, -0.4544]])
np.testing.assert_array_equal(self.softmax.dataframe.values, results)
def test_sigmoid_values(self):
results = np.array([[0.2271, 0.2271, 0.2556, 0.4032],
[0.5000, 0.5000, 0.4336, 0.7978],
[0.7729, 0.7729, 0.7919, 0.2728]])
np.testing.assert_array_equal(self.sigmoid.dataframe.values, results)
class WeightingTestCase(unittest.TestCase):
def setUp(self):
self.rij = norm.MinMax(xij=xij, beneficial=beneficial).dataframe
self.power = wgt.Power(self.rij, weights=weights)
self.multi = wgt.Multi(self.rij, weights=weights)
self.minmax = wgt.MinMax(self.rij, weights=weights)
def test_dataframe(self):
self.assertIsInstance(
self.power.dataframe, pd.DataFrame,
msg="Normalizer dataframe method should return pandas DataFrame instance")
self.assertIsInstance(
self.power.dataframe, pd.DataFrame,
msg="Normalizer dataframe method should return pandas DataFrame instance")
def test_power_values(self):
results = np.array([[0.000, 0.0000, 0.0000, 0.5743],
[0.933, 0.8706, 0.7192, 1.0000],
[1.000, 1.0000, 1.0000, 0.0000]])
np.testing.assert_array_equal(self.power.dataframe.values, results)
def test_multi_values(self):
results = np.array([[0.0000, 0.0000, 0.0000, 0.1000],
[0.0500, 0.1000, 0.1000, 0.4000],
[0.1000, 0.2000, 0.3000, 0.0000]])
np.testing.assert_array_equal(self.multi.dataframe.values, results)
def test_minmax_values(self):
results = np.array([[0.1000, 0.2000, 0.3000, 0.3000],
[0.0500, 0.1000, 0.2000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.4000]])
np.testing.assert_array_equal(self.minmax.dataframe.values, results)
class ExecutorTestCase(unittest.TestCase):
def setUp(self):
kwargs = {
'data': xij,
'beneficial': beneficial,
'weights': weights,
'rank_reverse': True,
'rank_method': "ordinal"
}
self.wsm = exe.WSM(**kwargs)
self.wpm = exe.WPM(**kwargs)
self.moora = exe.Moora(**kwargs)
self.topsis = exe.Topsis(**kwargs)
self.vikor = exe.Vikor(**kwargs)
def test_wsm_rank(self):
results = np.array([[0.1000, 3.],
[0.6500, 1.],
[0.6000, 2.]])
np.testing.assert_array_equal(self.wsm.dataframe.values, results)
def test_wpm_rank(self):
results = np.array([[3.0672, 3.],
[4.6933, 1.],
[4.1508, 2.]])
np.testing.assert_array_equal(self.wpm.dataframe.values, results)
def test_moora_rank(self):
results = np.array([[3.2000, 0.0000, 3.2000, 3.],
[5.0000, 0.0000, 5.0000, 1.],
[4.3000, 0.0000, 4.3000, 2.]])
np.testing.assert_array_equal(self.moora.dataframe.values, results)
def test_topsis_rank(self):
results = np.array([[0.2109, 0.0465, 0.1806, 3.],
[0.1020, 0.1947, 0.6562, 1.],
[0.1860, 0.1582, 0.4596, 2.]])
np.testing.assert_array_equal(self.topsis.dataframe.values, results)
def test_vikor_rank(self):
results = np.array([[0.9000, 0.3000, 0.7500, 3.],
[0.3500, 0.2000, 0.0000, 1.],
[0.4000, 0.4000, 0.5455, 2.]])
np.testing.assert_array_equal(self.vikor.dataframe.values, results)
class RankSimilarityTestCase(unittest.TestCase):
def setUp(self):
kwargs = {
'data': xij,
'beneficial': beneficial,
'weights': weights,
'rank_reverse': True,
'rank_method': "ordinal"
}
self.wsm = exe.WSM(**kwargs)
self.wpm = exe.WPM(**kwargs)
self.moora = exe.Moora(**kwargs)
def test_rank_similarity_analysis(self):
        analyzer = exe.RankSimilarityAnalyzer()
        analyzer.add_executor(self.wsm)
        analyzer.add_executor(self.wpm)
        analyzer.add_executor(self.moora)
        results = analyzer.analyze()
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python
import glob
import argparse
import os
import sys
from psnr_analyzer import PsnrAnalyzer
from report_manager import ReportData, ReportManager
from transcoder import Transcoder
from configuration import prepare_config
def prepare_output_dir(output_dir):
if os.path.exists(output_dir):
print("Clear output folder")
filelist = glob.glob(os.path.join(output_dir, "*"))
for file in filelist:
print("Remove: " + file)
os.remove(file)
else:
print("Create output folder")
os.mkdir(output_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Ping script")
parser.add_argument("-i", dest="input", help="Origin input file", required=True)
parser.add_argument("-c", dest="config", help="Config file", default="config.json", required=False)
args = parser.parse_args()
if not vars(args):
parser.print_usage()
sys.exit()
original_file_size = os.path.getsize(args.input)
config = prepare_config(args)
prepare_output_dir(config.output_dir)
analyzers = { "PSNR": PsnrAnalyzer(config.ffmpeg_tools, config.hwaccel, config.output_dir) }
transcoder = Transcoder(config.ffmpeg_tools, config.hwaccel, config.output_dir)
report_manager = ReportManager(config.output_dir)
row_id = 1
report_datas = []
for profile in config.profiles:
transcode_file_path = transcoder.transcode(args.input, profile)
transcode_file_size = os.path.getsize(transcode_file_path)
difference_in_percent = 100 - (transcode_file_size / original_file_size) * 100
report_data = ReportData(row_id, transcode_file_path, profile.build_output_file_name(), transcode_file_size, difference_in_percent)
report_datas.append(report_data)
row_id += 1
for report_data in report_datas:
for item in config.metrics:
log_file = analyzers[item].analyze(report_data.transcode_file_path, args.input, report_data.transcode_file_name)
report_data.add_report(item, log_file)
for report_data in report_datas:
report_manager.add_report(report_data)
report_manager.close()
|
from .craigslist import (
CraigslistCommunity, CraigslistEvents, CraigslistForSale, CraigslistGigs,
CraigslistHousing, CraigslistJobs, CraigslistResumes, CraigslistServices)
__all__ = [
'CraigslistCommunity', 'CraigslistEvents', 'CraigslistForSale', 'CraigslistGigs',
'CraigslistHousing', 'CraigslistJobs', 'CraigslistResumes', 'CraigslistServices']
|
RESULTS_KEY = "results"
class ResultField(object):
PROCESSING_REQUEST_ID = "execution.processing_request.id"
|
from rest_framework import viewsets
from rest_framework import permissions
from .models import Referral
from .serializers import ReferralSerializer
from django_filters import rest_framework as filters
from healthmap.medicalofficer.permissions import IsMOOrReadOnly
# Filter Class for Referral
class ReferFilter(filters.FilterSet):
refby = filters.CharFilter(
field_name="refered_by",
lookup_expr="iexact",
help_text="Referring Person")
    reffrom = filters.CharFilter(
        field_name="refered_from",
        lookup_expr="icontains",
        help_text="Referred from Hospital / Department / Ward")
reftype = filters.CharFilter(
field_name="referral_type",
lookup_expr="iexact",
help_text="Referral Type")
class Meta:
model = Referral
fields = ['refered_date']
# View Class for Referral Records
class ReferralViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Referral.objects.all().order_by('-refered_date')
serializer_class = ReferralSerializer
filter_backends = [filters.DjangoFilterBackend]
filter_class = ReferFilter
permission_classes = [permissions.IsAuthenticated, IsMOOrReadOnly]
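# Illustrative use of the FilterSet outside the viewset (not part of this
# module); the function name and lookup values are hypothetical.
def _example_referral_filtering():
    queryset = Referral.objects.all().order_by('-refered_date')
    return ReferFilter({'reftype': 'emergency', 'refby': 'Dr. Smith'}, queryset=queryset).qs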
|
# Copyright (c) 2018 LG Electronics, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
import os.path
import time
import math
import numpy as np
import rclpy
from rclpy.node import Node
from duckietown_msgs.msg import WheelsCmdStamped, Twist2DStamped
from duckietown_msgs.srv import SetValue
from std_srvs.srv import Empty
WHEEL_GAIN = 0.5
class InverseKinematicsNode(Node):
def __init__(self, args):
self.node_name = 'inverse_kinematics_node'
super().__init__(self.node_name)
self.args = args
self.v_gain = 0.41
self.omega_gain = 8.3
self.bicycle_kinematics = False
self.steer_angle_gain = 1
self.simulated_vehicle_length = 0.18
self.gain = 0.65
self.trim = 0.0
self.baseline = 0.1
self.radius = 0.0318
self.k = 27.0
self.limit = 1.0
self.limit_max = 1.0
self.limit_min = 0.0
self.srv_set_gain = self.create_service(SetValue, 'set_gain', self.cbSrvSetGain)
self.srv_set_trim = self.create_service(SetValue, 'set_trim', self.cbSrvSetTrim)
self.srv_set_baseline = self.create_service(SetValue, 'set_baseline', self.cbSrvSetBaseline)
self.srv_set_radius = self.create_service(SetValue, 'set_radius', self.cbSrvSetRadius)
        self.srv_set_k = self.create_service(SetValue, 'set_k', self.cbSrvSetK)
        self.srv_set_limit = self.create_service(SetValue, 'set_limit', self.cbSrvSetLimit)
#self.srv_save = self.create_service(Empty, 'save_calibration', self.cbSrvSaveCalibration)
self.sub_car_cmd = self.create_subscription(Twist2DStamped, self.args.subscribe_topic, self.car_cmd_callback)
self.pub_wheels_cmd = self.create_publisher(WheelsCmdStamped, self.args.publish_topic)
self.get_logger().info('[%s] Initialized.' % self.node_name)
self.printValues()
"""
def saveCalibration(self):
# Write to yaml
data = {
"calibration_time": time.strftime("%Y-%m-%d-%H-%M-%S"),
"gain": self.gain,
"trim": self.trim,
"baseline": self.baseline,
"radius": self.radius,
"k": self.k,
"limit": self.limit,
}
# Write to file
file_name = self.getFilePath(self.veh_name)
with open(file_name, 'w') as outfile:
outfile.write(yaml.dump(data, default_flow_style=False))
# Printout
self.printValues()
self.get_logger().info("[%s] Saved to %s" %(self.node_name, file_name))
"""
    # rclpy service callbacks receive (request, response) and must return the response object
    def cbSrvSaveCalibration(self, req, response):
        return response
    def cbSrvSetGain(self, req, response):
        self.gain = req.value
        self.printValues()
        return response
    def cbSrvSetTrim(self, req, response):
        self.trim = req.value
        self.printValues()
        return response
    def cbSrvSetBaseline(self, req, response):
        self.baseline = req.value
        self.printValues()
        return response
    def cbSrvSetRadius(self, req, response):
        self.radius = req.value
        self.printValues()
        return response
    def cbSrvSetK(self, req, response):
        self.k = req.value
        self.printValues()
        return response
    def cbSrvSetLimit(self, req, response):
        self.limit = self.setLimit(req.value)
        self.printValues()
        return response
def setLimit(self, value):
if value > self.limit_max:
self.get_logger().warn("[%s] limit (%s) larger than max at %s" % (self.node_name, value, self.limit_max))
limit = self.limit_max
elif value < self.limit_min:
self.get_logger().warn("[%s] limit (%s) smaller than allowable min at %s" % (self.node_name, value, self.limit_min))
limit = self.limit_min
else:
limit = value
return limit
def printValues(self):
self.get_logger().info("[%s] gain: %s trim: %s baseline: %s radius: %s k: %s limit: %s" % (self.node_name, self.gain, self.trim, self.baseline, self.radius, self.k, self.limit))
def car_cmd_callback(self, msg_car_cmd):
k_r = self.k
k_l = self.k
k_r_inv = (self.gain + self.trim) / k_r
k_l_inv = (self.gain - self.trim) / k_l
omega_r = (msg_car_cmd.v + 0.5 * msg_car_cmd.omega * self.baseline) / self.radius
omega_l = (msg_car_cmd.v - 0.5 * msg_car_cmd.omega * self.baseline) / self.radius
# conversion from motor rotation rate to duty cycle
# u_r = (gain + trim) (v + 0.5 * omega * b) / (r * k_r)
u_r = omega_r * k_r_inv
# u_l = (gain - trim) (v - 0.5 * omega * b) / (r * k_l)
u_l = omega_l * k_l_inv
# limiting output to limit, which is 1.0 for the duckiebot
u_r_limited = max(min(u_r, self.limit), -self.limit)
u_l_limited = max(min(u_l, self.limit), -self.limit)
# Put the wheel commands in a message and publish
msg_wheels_cmd = WheelsCmdStamped()
msg_wheels_cmd.header.stamp = msg_car_cmd.header.stamp
msg_wheels_cmd.vel_right = u_r_limited * WHEEL_GAIN
msg_wheels_cmd.vel_left = u_l_limited * WHEEL_GAIN
self.pub_wheels_cmd.publish(msg_wheels_cmd)
def loginfo(self, s):
self.get_logger().info('%s' % (s))
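    # Worked example of the conversion in car_cmd_callback (numbers are only
    # illustrative): with the defaults gain=0.65, trim=0.0, baseline=0.1 m,
    # radius=0.0318 m, k=27 and a command of v=0.2 m/s, omega=0 rad/s:
    #   omega_r = omega_l = 0.2 / 0.0318     ~ 6.29 rad/s
    #   k_r_inv = k_l_inv = 0.65 / 27        ~ 0.0241
    #   u_r = u_l         = 6.29 * 0.0241    ~ 0.151
    # which is then clipped to [-limit, limit] and scaled by WHEEL_GAIN (0.5).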
def main(args=None):
if args is None:
args = sys.argv
rclpy.init(args=args)
parser = argparse.ArgumentParser()
parser.add_argument("--publish_topic",
type=str,
default="/wheels_cmd",
help="topic name to publish wheels command on")
parser.add_argument("--subscribe_topic",
type=str,
default="/car_cmd",
help="topic name to subscribe to for car commands")
args = parser.parse_args()
node = InverseKinematicsNode(args)
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# main.py
class PyList(list):
def __init__(self, content=[], size=20):
self.items = [None] * size
self.numItems = 0
self.size = size
for e in content:
self.append(e)
def __contains__(self, item):
for i in range(self.numItems):
if self.items[i] == item:
return True
return False
def __eq__(self, other):
if (type(self) != type(other)):
return False
if self.numItems != other.numItems:
return False
for i in range(self.numItems):
if (self.items[i] != other.items[i]):
return False
return True
def __setitem__(self, index, val):
if index >= 0 and index <= self.numItems - 1:
self.items[index] = val
return
raise IndexError("Pylist assignment index out of range.")
def __getitem__(self, index):
if index >= 0 and index <= self.numItems - 1:
return self.items[index]
raise IndexError("Pylist index out of range.")
def append(self, item):
if self.numItems == self.size:
self.allocate()
self.items[self.numItems] = item
self.numItems += 1
def __add__(self, other):
result = PyList(size=self.numItems + other.numItems)
for i in range(self.numItems):
result.append(self.items[i])
for i in range(other.numItems):
result.append(other.items[i])
return result
def insert(self, i, x):
if self.numItems == self.size:
self.allocate()
if i < self.numItems:
for j in range(self.numItems - 1, i - 1, -1):
self.items[j + 1] = self.items[j]
self.items[i] = x
self.numItems += 1
else:
self.append(x)
def delete(self, index):
if (self.numItems == self.size // 4):
self.deallocate()
if index >= self.numItems:
raise IndexError("PyList index out of range.")
else:
for i in range(index, self.numItems - 1):
self.items[i] = self.items[i + 1]
self.numItems -= 1
return
def allocate(self):
newlength = 2 * self.size
newList = [None] * newlength
for i in range(self.numItems):
newList[i] = self.items[i]
self.items = newList
self.size = newlength
def deallocate(self):
newlength = int(self.size / 2)
newList = [None] * newlength
for i in range(self.numItems):
newList[i] = self.items[i]
self.items = newList
self.size = newlength
def delete_last(self, k):
if k > self.numItems:
self.numItems = 0
self.size = 1
self.items = [None] * self.size
return
else:
rest = self.numItems - k
self.numItems = rest
while (self.numItems <= int(self.size / 4)):
self.deallocate()
return
def src(self, e, f, g):
if self.numItems == 0:
return e
if self.numItems == 1:
return f(self.items[0])
if self.numItems > 1:
length = self.numItems
length1 = int(length / 2)
length2 = length - length1
list1 = PyList(self.items[0:length1], length1)
list2 = PyList(self.items[length1:self.numItems], length2)
return g(list1.src(e, f, g), list2.src(e, f, g))
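    # src is a divide-and-conquer fold: it splits the stored items in half,
    # recurses on each half and combines the results with g, using e for an
    # empty list and f for a single item. For example,
    #   PyList([1, 2, 3]).src(0, lambda x: 1, lambda a, b: a + b)
    # returns 3 (the number of items), as exercised by testSRC() below.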
class DoubleStack:
class __Node:
def __init__(self, val=None, next_node=None, prev_node=None):
self.val = val
self.next = next_node
self.prev = prev_node
def getNext(self):
return self.next
def getPrev(self):
return self.prev
def getVal(self):
return self.val
def setNext(self, node):
self.next = node
def setPrev(self, node):
self.prev = node
def setVal(self, val):
self.val = val
def __init__(self, content=[]):
self.head = self.__Node(None, None, None)
self.end = self.__Node(None, None, None)
self.head.setNext(self.end)
self.head.setPrev(self.head)
self.end.setPrev(self.head)
self.end.setNext(self.end)
for i in content:
self.pushback(i)
def pushback(self, val):
newNode = self.__Node(val)
newNode.setPrev(self.end.getPrev())
newNode.setNext(self.end)
(self.end.getPrev()).setNext(newNode)
self.end.setPrev(newNode)
def pushfront(self, val):
newNode = self.__Node(val)
newNode.setNext(self.head.getNext())
newNode.setPrev(self.head)
(self.head.getNext()).setPrev(newNode)
self.head.setNext(newNode)
def popback(self):
result = self.end.getPrev().getVal()
self.end.getPrev().getPrev().setNext(self.end)
self.end.setPrev(self.end.getPrev().getPrev())
return result
def popfront(self):
result = self.head.getNext().getVal()
self.head.getNext().getNext().setPrev(self.head)
self.head.setNext(self.head.getNext().getNext())
return result
def __len__(self):
temp = self.head
counter = 0
while temp.getNext() != temp:
temp = temp.getNext()
counter += 1
return counter - 1
def testSRC():
a = list(range(50))
pylist = PyList(a, 128)
#this is test 1
print("test 1")
e = 0
f = lambda x: 1
g = lambda x, y: x + y
ans = pylist.src(e, f, g)
print("the length is %d" % ans)
#this is test2
print("test 2")
e = PyList([], 0)
f = lambda x: PyList([x + 5], 1)
g = lambda x, y: x + y
ans2 = pylist.src(e, f, g)
for i in range(ans2.numItems):
print(ans2.items[i], end=' ')
print(' ')
print("the size of the list %d" % ans2.size)
print("the numItems of the list %d" % ans2.numItems)
#this is test 3
print("test 3")
e = PyList([], 0)
f = lambda x: PyList([x], 1) if x > 10 else PyList([], 0)
g = lambda x, y: x + y
ans3 = pylist.src(e, f, g)
for i in range(ans3.numItems):
print(ans3.items[i], end=' ')
print(' ')
print("the size of the list %d" % ans3.size)
print("the numItems of the list %d" % ans3.numItems)
def testEX4():
ds = DoubleStack(list(range(20)))
print("the length of ds is %d" % len(ds))
for i in range(5):
a = ds.popfront()
b = ds.popback()
print(a, ' ', b)
new_element = [23, 3343, 5, 65, 65, 36, 547, 5, 765, 757, 533552, 342]
for i in new_element:
if (i % 2) == 1:
ds.pushfront(i)
else:
ds.pushback(i)
print("the length of ds is %d" % len(ds))
for i in range(len(ds)):
c = ds.popfront()
print(c, end=' ')
def test_delete_last():
test = PyList([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
test.delete_last(7)
print(test.items)
# test.delete_last(8)
# print(test.items)
def main():
#test_delete_last()
testEX4()
#testSRC()
if __name__ == "__main__":
    main()
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from Eir.utility import dist, Person, randEvent
from ..Hub.HubSIRVD import HubSIRVD
class StrongInfSIRVD(HubSIRVD):
def __init__(self, S0: int, I0: int, R0:int, V0: int, pss: float, gamma: float, eta:float, mu:float, rstart: float, side: float, days:int, alpha=2, w0=0.7, timeDelay=-1):
# error checking
self.intCheck([S0, I0, R0, V0, days])
self.floatCheck([pss, gamma, eta, mu, side, rstart, w0, alpha, timeDelay])
self.negValCheck([S0, I0, R0, V0, pss, gamma, eta, mu, side, rstart, days, w0, alpha])
super().__init__(S0=S0, I0=I0, R0=R0, V0=V0, pss=pss, gamma=gamma, eta=eta, mu=mu, rstart=rstart, side=side, days=days, alpha=alpha, w0=w0, hubConstant=1, timeDelay=timeDelay)
def _infect(self, inf: Person, sus: Person):
"""
        Computes the probability of infection between an infectious person and a susceptible person, based on the Strong Infectious Model assumptions
"""
# compute the distance between two Person objects
r = dist(inf, sus)
# make variable that can potentially be changed if someone is a super spreader
r0 = self.rstart
# if the susceptible is too far away from the infectious person
if r > r0:
return 0
# in range of the infected person
if inf.ss:
return self.w0
# return using the normal probability function if not a super spreader
return self.w0 * (1 - r / r0) ** self.alpha
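        # e.g. with w0=0.7, alpha=2 and rstart=3, a susceptible at distance
        # r=1.5 from a normal infectious person is infected with probability
        # 0.7 * (1 - 1.5/3)**2 = 0.175, while a super spreader in range always
        # infects with w0 = 0.7 (illustrative numbers only).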
def plot(self):
t = np.linspace(0, self.days, self.days+1)
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')
ax1.plot(t, self.S, label="Susceptible", color='r')
ax1.set_title("Strong Infectious Model SIRVD")
ax1.set_ylabel("# Susceptibles")
ax2.plot(t, self.I, label="Infected", color='g')
ax2.set_ylabel("# Active Cases")
ax3.plot(t, self.R, label="Recovered", color='c')
ax3.set_ylabel("# Recovered")
ax4.plot(t, self.V, label="Vaccinated", color='b')
ax4.set_ylabel("# Vaccinated")
ax5.set_xlabel("Days")
ax5.set_ylabel("# Dead")
ax5.plot(t, self.D, label="Dead")
ax1.legend()
ax2.legend()
ax3.legend()
ax4.legend()
ax5.legend()
        plt.show()
|
import json
from django.http import Http404
from django.shortcuts import render, redirect
# Create your views here.
from AthleticTeam.settings import MEDIA_ROOT
from SinglePagesApp.forms import EditContactUsForm, EditAboutUsForm, EditHistoryForm, EditTicketsForm, EditFacilitiesForm, EditSponsorshipsForm
def history(request):
data = read_file('history.json')
if data['visible'] != '':
return render(request, 'single_pages/history.html', context={'data': data})
else:
raise Http404("History Page isn't used")
def edit_history(request):
if request.method == 'POST':
data = request.POST
data = data.copy()
data.pop('csrfmiddlewaretoken', None)
write_file('history.json', data)
return redirect('SinglePagesApp:history')
else:
name = 'history'
data = read_file('history.json')
form = EditHistoryForm(data)
return render(request, 'single_pages/edit.html', context={'name': name, 'form': form})
def contact_us(request):
data = read_file('contact_us.json')
if data['visible'] != '':
return render(request, 'single_pages/contact_us.html', context={'data': data})
else:
raise Http404("Contact Us Page isn't used")
def about_us(request):
data = read_file('about_us.json')
if data['visible'] != '':
return render(request, 'single_pages/about_us.html', context={'data': data})
else:
raise Http404("About Us Page isn't used")
def edit_about_us(request):
if request.method == 'POST':
data = request.POST
data = data.copy()
data.pop('csrfmiddlewaretoken', None)
write_file('about_us.json', data)
return redirect('SinglePagesApp:about_us')
else:
name = 'About Us'
data = read_file('about_us.json')
form = EditAboutUsForm(data)
return render(request, 'single_pages/edit.html', context={'name': name, 'form': form})
def edit_contact_us(request):
if request.method == 'POST':
data = request.POST
data = data.copy()
data.pop('csrfmiddlewaretoken', None)
write_file('contact_us.json', data)
return redirect('SinglePagesApp:contact_us')
else:
name = 'Contact Us'
data = read_file('contact_us.json')
form = EditContactUsForm(data)
return render(request, 'single_pages/edit.html', context={'name': name, 'form': form})
def read_file(file_name):
    with open(MEDIA_ROOT + './SinglePagesData/' + file_name, mode='r') as file_handler:
        data = json.load(file_handler)  # deserialises it
    return data
def write_file(file_name, data):
    with open(MEDIA_ROOT + './SinglePagesData/' + file_name, mode='w') as file_handler:
        file_handler.write(json.dumps(data))
def tickets(request):
data = read_file('tickets.json')
if data['visible'] != '':
return render(request, 'single_pages/tickets.html', context={'data': data})
else:
raise Http404("Tickets Page isn't used")
def edit_tickets(request):
if request.method == 'POST':
data = request.POST
data = data.copy()
data.pop('csrfmiddlewaretoken', None)
write_file('tickets.json', data)
return redirect('SinglePagesApp:tickets')
else:
name = 'Tickets'
data = read_file('tickets.json')
form = EditTicketsForm(data)
return render(request, 'single_pages/edit.html', context={'name': name, 'form': form})
def events(request):
data = read_file('events.json')
if data['visible'] != '':
return render(request, 'single_pages/events.html', context={'data': data})
else:
raise Http404("events Page isn't used")
def edit_events(request):
if request.method == 'POST':
data = request.POST
data = data.copy()
data.pop('csrfmiddlewaretoken', None)
write_file('events.json', data)
return redirect('SinglePagesApp:events')
else:
name = 'events'
data = read_file('events.json')
form = EditHistoryForm(data)
return render(request, 'single_pages/edit.html', context={'name': name, 'form': form})
def facilities(request):
data = read_file('facilities.json')
if data['visible'] != '':
return render(request, 'single_pages/facilities.html', context={'data': data})
else:
raise Http404("Facilities Page isn't used")
def edit_facilities(request):
if request.method == 'POST':
data = request.POST
data = data.copy()
data.pop('csrfmiddlewaretoken', None)
write_file('facilities.json', data)
return redirect('SinglePagesApp:facilities')
else:
name = 'facilities'
data = read_file('facilities.json')
form = EditFacilitiesForm(data)
return render(request, 'single_pages/edit.html', context={'name': name, 'form': form})
def sponsorships(request):
data = read_file('sponsorships.json')
if data['visible'] != '':
return render(request, 'single_pages/sponsorships.html', context={'data': data})
    else:
        raise Http404("Sponsorships Page isn't used")
def edit_sponsorships(request):
if request.method == 'POST':
data = request.POST
data = data.copy()
data.pop('csrfmiddlewaretoken', None)
write_file('sponsorships.json', data)
return redirect('SinglePagesApp:sponsorships')
else:
name = 'sponsorships'
data = read_file('sponsorships.json')
form = EditSponsorshipsForm(data)
return render(request, 'single_pages/edit.html', context={'name': name, 'form': form})
|
#!/usr/bin/env python
'''
Module for in silico digestion of WT and Variant proteins and writing to fasta files.
'''
import re
from pyteomics import fasta
from PoolSeqProGen import dna_seq
__author__ = "Rigbe G. Weldatsadik"
__copyright__ = "Copyright (c) 2020 Rigbe G. Weldatsadik"
__license__ = "Apache 2.0"
__version__ = "0.0.1"
def writewtpro_returnwtpep(k, seq_entry,gbk_file,fastaOutput,poolID):
'''
Writes the sequences of the 'wild type' proteins to the fasta output and
returns the tryptic peptides of the 'wild type' protein.
'''
entries = []
'''protein_id_cds_index=dna_seq.index_genbank_features(gbk_file,"CDS",
"protein_id")'''
protein_id_cds_index = dna_seq.index_genbank_features(gbk_file, "CDS",
"locus_tag")
index = protein_id_cds_index[k]
cds_feature = gbk_file.features[index]
    try:  # some CDS features have no translation and/or protein_id
immu_tran = cds_feature.qualifiers['translation'][0]
protein_id = cds_feature.qualifiers['protein_id'][0]
except KeyError:
pass
else:
seq_entry += 1
if poolID:
header = protein_id + '|0_WT|' + \
str(seq_entry) + '_' + str(poolID)
else:
header = protein_id + '|0_WT|' + str(seq_entry)
entries.append((header, immu_tran))
fasta.write(entries, fastaOutput)
        # split by K or R when they are not followed by P (standard tryptic digestion rule)
        peptide_list_org = re.compile(".(?:(?<![KR](?!P)).)*").findall(str(immu_tran))
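        # e.g. (illustrative) 'MKAKPLRGG' yields ['MK', 'AKPLR', 'GG']:
        # cleavage after K/R is suppressed when the next residue is P.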
return (seq_entry, peptide_list_org)
def return_varpep(k,v,gbk_file,gbk_fileSeq,geneticCodeID):
'''
Returns the variant peptides to be written to the fasta output
'''
'''protein_id_cds_index=dna_seq.index_genbank_features(gbk_file,"CDS",\
"protein_id")'''
protein_id_cds_index = dna_seq.index_genbank_features(gbk_file, "CDS",
"locus_tag")
index = protein_id_cds_index[k]
cds_feature = gbk_file.features[index]
try:
immu_tran = cds_feature.qualifiers['translation'][0]
protein_id = cds_feature.qualifiers['protein_id'][0]
except KeyError:
pass
else:
loc = cds_feature.location
strand = cds_feature.location.strand # +1 or -1
feature_seq = cds_feature.extract(gbk_file.seq)
pr_name = cds_feature.qualifiers['product'][0]
        # unlike the extract method, this won't return the reverse complement
        # for proteins on the complement strand
mut_cds_seq = gbk_file[loc.start:loc.end].seq
mut_cds_len = len(mut_cds_seq)
del_range_list = []
ins_range_list = []
modified = False
# subset is in the form of (var_pos,alt,ref,effect,snp) for snp
# (var_pos,alt,ref,effect,ins) for insertion
# (var_pos,nt_after_del,alt,ref,del,effect) for deletion
for subset in v:
var_pos = subset[0]
alt_allele = subset[1]
ref = subset[2]
effect = subset[5]
# since var_pos is 1-based position from SnpEff and relative to
# the whole genome length
zero_based_pos = int(var_pos) - 1
pos_relativeto_cds = abs(loc.start - zero_based_pos)
diff = len(alt_allele) - len(ref)
if effect == "START_LOST":
modified = False
break
if diff >= 0: # ins and snp
# checking deletion range list first since if that particular
# position has been deleted, it doesn't make sense to
# insert there
in_del = dna_seq.check_if_in_del(pos_relativeto_cds, del_range_list)
if in_del == "True":
modified = False
break
elif in_del == "False":
pos_relativeto_cds = pos_relativeto_cds
else:
pos_relativeto_cds = in_del
pos_relativeto_cds = dna_seq.check_if_in_ins(
pos_relativeto_cds, ins_range_list)
                    # adding len(ref) to the last indices in both the insertion
                    # and deletion cases ensures we skip and delete as many as
                    # the length of the ref, so we avoid repeating the nt in the
                    # insertion case and delete the right amount of nts in
                    # the deletion case. The len(alt) in the deletion case ensures
                    # we don't delete the trailing nt that are reported in the
                    # vcf but shouldn't be deleted.
if(diff > 0):
ins_range_list.append(range(pos_relativeto_cds +
len(ref), pos_relativeto_cds +
len(alt_allele)))
try:
                        # slicing creates a copy of cds_seq, so we don't need to
                        # convert cds_seq to a mutable type explicitly in order
                        # to modify it this way
mut_cds_seq = mut_cds_seq[:pos_relativeto_cds] + \
alt_allele + \
mut_cds_seq[pos_relativeto_cds +
len(ref):]
except IndexError:
modified = False
break
else:
modified = True
else: # deletion
in_del = dna_seq.check_if_in_del(pos_relativeto_cds, del_range_list)
if in_del == "True":
modified = False
break
elif in_del == "False":
pos_relativeto_cds = pos_relativeto_cds
else:
pos_relativeto_cds = in_del
pos_relativeto_cds = dna_seq.check_if_in_ins(
pos_relativeto_cds, ins_range_list)
del_range_list.append(range(pos_relativeto_cds + len(alt_allele),
pos_relativeto_cds + len(ref)))
try:
mut_cds_seq = mut_cds_seq[:pos_relativeto_cds + len(alt_allele)] + \
mut_cds_seq[pos_relativeto_cds + len(ref):]
except IndexError:
modified = False
break
else:
modified = True
if modified:
if strand == -1:
cur_pos = len(mut_cds_seq) % 3
if cur_pos == 0:
initial = loc.start - 1
elif cur_pos == 1:
initial = loc.start # since there is a nt at loc.start of the cds
else:
initial = loc.start + 1
start_index, last_index = dna_seq.check_stopcodon_index_backward(
initial,gbk_fileSeq,geneticCodeID)
# the +1 since from the backward the indices are 1-based rather
# than 0-based
lengthmodified_cds_seq = gbk_fileSeq[last_index +
1:loc.start] + mut_cds_seq
lengthmodified_cds_seq = lengthmodified_cds_seq.reverse_complement()
else:
cur_pos = len(mut_cds_seq) % 3
initial = loc.end - cur_pos
start_index, last_index = dna_seq.check_stopcodon_index_forward(
initial,gbk_fileSeq,geneticCodeID)
lengthmodified_cds_seq = mut_cds_seq + gbk_fileSeq[loc.end:last_index]
mut_tran = str(lengthmodified_cds_seq.translate(
table=11, to_stop=True))
            peptide_list = re.compile(".(?:(?<![KR](?!P)).)*").findall(mut_tran)
return (protein_id, peptide_list)
else:
return None
|
from pydantic import BaseModel
from typing import List
class ContainerSettings(BaseModel):
name: str
network_architecture: str
dataset_path: str
gpus: List[int]
tensorboard_port: int
api_port: int
author: str
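# Minimal usage sketch (hypothetical values, not part of any shipped config):
#   ContainerSettings(name="exp1", network_architecture="resnet50",
#                     dataset_path="/data/set1", gpus=[0, 1],
#                     tensorboard_port=6006, api_port=8080, author="jane")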
|
#
# @lc app=leetcode id=724 lang=python3
#
# [724] Find Pivot Index
#
# https://leetcode.com/problems/find-pivot-index/description/
#
# algorithms
# Easy (40.58%)
# Total Accepted: 64.4K
# Total Submissions: 157.4K
# Testcase Example: '[1,7,3,6,5,6]'
#
# Given an array of integers nums, write a method that returns the "pivot"
# index of this array.
#
# We define the pivot index as the index where the sum of the numbers to the
# left of the index is equal to the sum of the numbers to the right of the
# index.
#
# If no such index exists, we should return -1. If there are multiple pivot
# indexes, you should return the left-most pivot index.
#
# Example 1:
#
#
# Input:
# nums = [1, 7, 3, 6, 5, 6]
# Output: 3
# Explanation:
# The sum of the numbers to the left of index 3 (nums[3] = 6) is equal to the
# sum of numbers to the right of index 3.
# Also, 3 is the first index where this occurs.
#
#
#
#
# Example 2:
#
#
# Input:
# nums = [1, 2, 3]
# Output: -1
# Explanation:
# There is no index that satisfies the conditions in the problem
# statement.
#
#
#
#
# Note:
#
#
# The length of nums will be in the range [0, 10000].
# Each element nums[i] will be an integer in the range [-1000, 1000].
#
#
#
#
#
from typing import List
class Solution:
def pivotIndex(self, nums: List[int]) -> int:
        # total sum of the array; temp accumulates the prefix sum to the left of i
        sums = sum(nums)
        temp = 0
        for i in range(len(nums)):
            # left sum equals right sum  <=>  temp == sums - temp - nums[i]
            if temp * 2 + nums[i] == sums:
                return i
            temp += nums[i]
return -1
|
s = '😀'
print('U+{:X}'.format(ord(s))) |
__version__ = '1.1.0dev'
|
#
# Class that parses the CONFIG files
#
# Author: Jakub Wlodek
#
import os
import re
import installSynApps.DataModel.install_config as IC
import installSynApps.DataModel.install_module as IM
class ConfigParser:
"""
    Class responsible for parsing the INSTALL_CONFIG file into an InstallConfiguration object
Attributes
----------
configure_path : str
path to installSynApps configure directory
Methods
-------
check_valid_config_path()
        Checks if configure path is valid
parse_line_to_module(line : str, current_url : str, current_url_type : str)
        parses a module line into an InstallModule object
parse_install_config(config_filename=INSTALL_CONFIG)
main top level function that parses install config file
"""
def __init__(self, configure_path):
""" Constructor for ConfigParser """
self.configure_path = configure_path
def check_valid_config_path(self):
""" Function that checks if configure path is valid """
if os.path.exists(self.configure_path) and os.path.isdir(self.configure_path):
return True
elif os.path.exists(self.configure_path) and os.path.isfile(self.configure_path):
            self.configure_path = self.configure_path.rsplit('/', 1)[0]
return False
def parse_line_to_module(self, line, current_url, current_url_type):
"""
        Function that parses a line in the INSTALL_CONFIG file into an InstallModule object
Parameters
----------
line : str
line from table in file
current_url : str
url at which module is located
current_url_type : str
either GIT_URL or WGET_URL
Returns
-------
install_module : InstallModule
module parsed from the table line
"""
line = re.sub('\t', ' ', line)
line = re.sub(' +', ' ', line)
module_components = line.split(' ')
name = module_components[0]
version = module_components[1]
rel_path = module_components[2]
repository = module_components[3]
clone = module_components[4]
build = module_components[5]
install_module = IM.InstallModule(name, version, rel_path, current_url_type, current_url, repository, clone, build)
return install_module
def parse_install_config(self, config_filename = "INSTALL_CONFIG", force_location = None):
"""
Top level install config parser function
Parses the self.path_to_configure/config_filename file
Parameters
----------
config_filename : str
defaults to INSTALL_CONFIG
Returns
-------
install_config : InstallConfiguration
valid install_config object if parse was successful, or None
"""
if os.path.exists(self.configure_path + "/" + config_filename):
install_file = open(self.configure_path + "/" + config_filename, "r")
            if install_file is None:
return None
install_config = None
current_url = ""
current_url_type = ""
epics_arch = ""
install_loc = ""
line = install_file.readline()
while line:
line = line.strip()
if not line.startswith('#') and len(line) > 1:
if line.startswith("INSTALL="):
if force_location is None:
install_loc = line.split('=')[-1]
else:
install_loc = force_location
install_config = IC.InstallConfiguration(install_loc, self.configure_path)
if install_config.is_install_valid() < 0:
return None, 'Permission Error'
elif install_config.is_install_valid() == 0:
try:
os.mkdir(install_config.install_location)
except FileNotFoundError:
return None, 'Install filepath not valid'
elif line.startswith("GIT_URL") or line.startswith("WGET_URL"):
current_url = line.split('=')[1]
current_url_type = line.split('=')[0]
else:
install_module = self.parse_line_to_module(line, current_url, current_url_type)
install_config.add_module(install_module)
line = install_file.readline()
install_file.close()
# install_config.print_installation_info()
return install_config , ''
return None, 'Configure Path not found'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import subprocess
from time import sleep
import unittest
import codecs
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from lockfile import LockFile
from appium import webdriver
import difflib
import sys
import psutil
from selenium.common.exceptions import WebDriverException
from uiauto.myexceptions import *
#import Stepper
#import conf
#from io import open
from rpActions import login
from rpActions import user_info
from uiauto.smartmonkey import SmartMonkey, Stabilizer
from tools import getLast
from tools import removeLastTested
from tools import getUiautomatorConfig
import logging
from logger import MyLogger
packageName = 'ctrip.android.view'
activityName = 'ctrip.android.view.splash.CtripSplashActivity'
authorized = False
idpName = 'sina'
resultPath = './result.txt'
lockFilePath = './lock.txt'
portNum = 4723
systemPort = 8200
running_logger = MyLogger('rpConfirm').get_logger()
#g_logger = conf.g_logger
#g_result = conf.g_result
#g_conf = conf.g_config
#g_appinfo = conf.g_appinfo
def getLockFileLength():
f = open(lockFilePath,"r")
lock = LockFile(lockFilePath)
with lock:
lines = f.readlines()
f.close()
return len(lines)
def writeResult(result):
f = open(resultPath,"r")
lock = LockFile(resultPath)
with lock:
lines = f.readlines()
f.close()
resultNum = len(lines)
f = open(resultPath,"a+")
lock = LockFile(resultPath)
with lock:
if resultNum == 0:
if result == True:
f.write("True")
else:
f.write("False")
else:
if result == True:
f.write("\nTrue")
elif result == "Alice":
f.write("\nAlice")
elif result == "Eve":
f.write("\nEve")
else:
f.write("\nFalse")
f.close()
def getClickableRegion(driver):
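    # Each widget element in the Appium page source carries an attribute of the
    # form bounds="[x1,y1][x2,y2]"; the string slicing below extracts those
    # four corner coordinates for every clickable widget.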
xRegion = []
yRegion = []
S=driver.page_source
widgets = S.split('>')
for widget in widgets:
if 'clickable="true"' in widget:
x1=int(widget[widget.find('bounds="[')+9:widget.find(',',widget.find('bounds="[')+9)])
y1=int(widget[widget.find(',',widget.find('bounds="[')+9)+1:widget.find(']',widget.find('bounds="[')+9)])
temp=widget[widget.find(']',widget.find('bounds="[')+9)+2:]
x2=int(temp[:temp.find(',')])
y2=int(temp[temp.find(',')+1:temp.find(']')])
xRegion.append([x1,x2])
yRegion.append([y1,y2])
return [xRegion,yRegion]
def findElementByName(driver, keyword):
[xRegion, yRegion] = getClickableRegion(driver)
widgets = driver.page_source.split('>')
for widget in widgets:
if keyword in widget:
x1=int(widget[widget.find('bounds="[')+9:widget.find(',',widget.find('bounds="[')+9)])
y1=int(widget[widget.find(',',widget.find('bounds="[')+9)+1:widget.find(']',widget.find('bounds="[')+9)])
temp=widget[widget.find(']',widget.find('bounds="[')+9)+2:]
x2=int(temp[:temp.find(',')])
y2=int(temp[temp.find(',')+1:temp.find(']')])
x = (x1 + x2) / 2
y = (y1 + y2) / 2
if 'clickable="true"' in widget or 'long-clickable="true"' in widget:
return [True, x1, x2, y1, y2]
if (checkClickability(x,xRegion)) and (checkClickability(y,yRegion)):
return [True, x1, x2, y1, y2]
else:
continue
return [False, 0, 0, 0, 0]
def checkClickability(coordinate, regions):
for region in regions:
if coordinate >= float(region[0]) and coordinate <= float(region[1]):
return True
return False
def tapSuccess(x,y,driver): #judge whether click action works or not
s=driver.page_source
try:
driver.tap([(x,y)],1)
except WebDriverException:
return -2
else:
sleep(1)
if difflib.SequenceMatcher(None,driver.page_source,s).ratio()>=0.95: #compare two pages
try:
driver.tap([(x,y)],1) #try once again
except WebDriverException:
pass
sleep(1)
if difflib.SequenceMatcher(None,driver.page_source,s).ratio()>=0.95:
return -1
return 0
def sinaConfirm(driver):
fileLen = getLockFileLength()
while True:
if fileLen < getLockFileLength():
break
sleep(1)
f = open(lockFilePath,"r")
command = None
lock = LockFile(lockFilePath)
with lock:
lines = f.readlines()
command = lines[len(lines) - 1]
f.close()
if command == 'x':
return False
s = driver.page_source
driver.tap([(500, 900)], 1)
sleep(10)
return True
def wechatConfirm(driver):
fileLen = getLockFileLength()
while True:
if fileLen < getLockFileLength():
break
sleep(1)
f = open(lockFilePath,"r")
command = None
lock = LockFile(lockFilePath)
with lock:
lines = f.readlines()
command = lines[len(lines) - 1]
f.close()
if command == 'x':
return False
sm = SmartMonkey(driver)
sm.tap_keyword(u'确认登录')
sleep(10)
return True
def fbConfirm(driver):
fileLen = getLockFileLength()
while True:
if fileLen < getLockFileLength():
break
sleep(1)
f = open(lockFilePath,"r")
command = None
lock = LockFile(lockFilePath)
with lock:
lines = f.readlines()
command = lines[len(lines) - 1]
f.close()
if command == 'x':
return False
stab = Stabilizer(driver)
count = 0
running_logger.debug(u'Try to handle pages after status change')
err_keywords = [u'Error', u'Invalid']
try:
while driver.current_activity == 'com.facebook.FacebookActivity'\
or driver.current_activity == 'com.facebook.LoginActivity':
count += 1
source = driver.page_source
# in case of continue appears
if 'Continue' in source:
running_logger.debug(u'Try to click Continue')
stab.find_elements_by_keyword(u'Continue', clickable_only=True,\
exact=False)[-1].click()
            # give all possible permissions to the app
elif 'would like to' in source:
running_logger.debug(u'Try to offer permission by clicking OK')
stab.find_elements_by_keyword(u'OK', clickable_only=True, exact=True)[-1].click()
sleep(1)
assert count <= 10
running_logger.debug(u'Get out of facebook login webview')
except:
running_logger.exception("exception in rpConfirm:")
finally:
return True
def googleConfirm(driver):
result = findElementByName(driver, "Alice")
if result[0] == False:
result = findElementByName(driver, "Eve")
x = (result[1] + result[2]) / 2
y = (result[3] + result[4]) / 2
if result[0] == False:
print("Fails to find user Account button!")
writeResult(False)
return
result = tapSuccess(x, y, driver)
if result != 0:
print("Fails to click user Account button!")
writeResult(False)
return
sleep(3)
writeResult(True)
def typeUserInfo(Username,Password,driver): # type the username and password when logging in to Facebook
s=driver.page_source
thelist = s.split('>')
flag = 0
for i in range(len(thelist)):
if 'EditText' in thelist[i] and flag==0:
Cls=thelist[i][thelist[i].find("class")+7:]
Cls=Cls[:Cls.find('"')]
Ins=thelist[i][thelist[i].find("instance")+10:]
Ins=Ins[:Ins.find('"')]
passEdit=driver.find_element_by_android_uiautomator('new UiSelector().className("'+Cls+'").instance('+Ins+')')
text=passEdit.get_attribute('name')
            passEdit.click() # enter the edittext widget
            driver.press_keycode(123) # move the cursor to the end of the text
            for i in range(0,len(text)+5): # delete the existing text one character at a time
driver.press_keycode(67)
passEdit.send_keys(Username)
flag=1
elif 'EditText' in thelist[i] and flag==1:
Cls=thelist[i][thelist[i].find("class")+7:]
Cls=Cls[:Cls.find('"')]
Ins=thelist[i][thelist[i].find("instance")+10:]
Ins=Ins[:Ins.find('"')]
passEdit=driver.find_element_by_android_uiautomator('new UiSelector().className("'+Cls+'").instance('+Ins+')')
passEdit.click()
passEdit.send_keys(Password)
flag=2
break
return 0
def typeContent(content,driver): # type content into the first EditText field (used during Facebook login)
e=driver.find_elements_by_class_name('android.widget.EditText')[0]
text=e.get_attribute('name')
    e.click() # enter the edittext widget
    driver.press_keycode(123) # move the cursor to the end of the text
    for i in range(0,len(text)+5): # delete the existing text one character at a time
driver.press_keycode(67)
e.send_keys(content)
return 0
def facebookInput(driver, name, word):
try:
if u'请输入邮箱' in driver.page_source:
typeContent(name, driver)
sm = SmartMonkey(driver)
sm.tap_keyword(u'登录')
typeContent(word, driver)
e=driver.find_elements_by_class_name('android.widget.Button')[1]
e.click()
elif 'Log in' in driver.page_source:
typeUserInfo(name,word,driver)
e=driver.find_elements_by_class_name('android.widget.Button')[-1]
e.click()
else:
typeUserInfo(name,word,driver)
e=driver.find_elements_by_class_name('android.widget.Button')[0]
e.click()
except:
return False
return True
class Ssotest(unittest.TestCase):
def setUp(self):
data = json.load(open('config.json', 'r'))
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '6.0'
desired_caps['deviceName'] = 'emulator'
desired_caps['appPackage'] = packageName
desired_caps['appActivity'] = activityName
desired_caps['disableWindowAnimation'] = True
desired_caps['autoGrantPermissions'] = True
desired_caps['systemPort'] = systemPort
desired_caps['noReset'] = True
if data['snapshot']:
desired_caps['autoLaunch'] = False
if getUiautomatorConfig():
desired_caps['automationName'] = 'UiAutomator2'
url = 'http://localhost:' + str(portNum) + '/wd/hub'
#load snapshot
try:
if data['snapshot'] == "True":
self.emulator = AndroidEmulator(data["emulator"])
tags_in_use = [ x['tag'] for x in self.emulator.list_snapshot()]
if data["snapshot_tag"] not in tags_in_use:
raise EmulatorActionException('No snapshot with tag {}'.format(data["snapshot_tag"]))
if not self.emulator.load_snapshot(data["snapshot_tag"]):
raise EmulatorActionException('Fail to load snapshot {}'.format(data["snapshot_tag"]))
else:
self.emulator = None
self.driver = webdriver.Remote(url, desired_caps)
except:
running_logger.exception('Driver init error')
raise Exception('Driver init error')
#SmartMonkey(self.driver).skip_irrelevant()
#self.home_activity = self.driver.current_activity
def tearDown(self):
# end the session
try:
self.driver.quit()
except Exception:
pass
def test_rpConfirm(self):
data = json.load(open('config.json', 'r'))
if data['snapshot'] == "True":
if data['idp'] == 'sina':
if data['user'] == "Eve":
UIAction(self.driver, idp='sina', config_file='uiaction.json').idp_set_session('sina', data["config"]["user"]["eve"]["weibo"]["session_file"])
else:
raise Exception("The user specified is not implemented yet.")
elif data['idp'] == 'wechat':
if data['user'] == "Eve":
UIAction(self.driver, idp='wechat', config_file='uiaction.json').idp_set_session('wechat', data["config"]["user"]["eve"]["wechat"]["session_file"])
else:
raise Exception("The user specified is not implemented yet.")
#print 'enter snapshot handler'
if data['idp'] != 'fb' or data['snapshot'] == "False":
result = login(self.driver, package=packageName)
else:
result = True
if result == False:
self.driver.reset()
print 'reset'
result = login(self.driver, package=packageName)
if not result:
'''
with open('appiumError.txt','a+') as f:
f.write('fuzzing fails in ' + str(getLast()) + '\n')
removeLastTested()
'''
p = psutil.Process(os.getpid())
p.terminate()
return
if idpName == 'fb':
data = json.load(open('config.json', 'r'))
if data['user'] == 'Eve':
username = data["config"]["user"]["eve"]["facebook"]["name"]
password = data["config"]["user"]["eve"]["facebook"]["password"]
elif data['user'] == 'Alice':
username = data["config"]["user"]["alice"]["facebook"]["name"]
password = data["config"]["user"]["alice"]["facebook"]["password"]
else:
running_logger.error('The user specified is not implemented yet.')
writeResult(False)
return
if not facebookInput(self.driver, username, password):
result = login(self.driver, package=packageName)
if result == False:
self.driver.reset()
result = login(self.driver, package=packageName)
if not result:
'''
with open('appiumError.txt','a+') as f:
f.write('fuzzing fails in ' + str(getLast()) + '\n')
removeLastTested()
'''
p = psutil.Process(os.getpid())
p.terminate()
return
if not facebookInput(self.driver, username, password):
'''
with open('appiumError.txt','a+') as f:
f.write('fuzzing fails in ' + str(getLast()) + '\n')
removeLastTested()
'''
p = psutil.Process(os.getpid())
p.terminate()
return
counter = 0
while 'Continue' not in self.driver.page_source:
counter = counter + 1
if (u'登录' in self.driver.page_source) and counter < 3:
try:
e=self.driver.find_elements_by_class_name('android.widget.Button')[0]
e.click()
except:
pass
sleep(1)
if counter == 10:
writeResult(False)
return
if authorized == False:
sleep(2)
if idpName == 'sina' and ('OK' in self.driver.page_source or u'确定' in self.driver.page_source):
self.driver.tap([(500, 900)], 1)
sleep(10)
# sleep(5)
elif idpName == 'wechat':
if u'确认登录' in self.driver.page_source:
sm = SmartMonkey(self.driver)
sm.tap_keyword(u'确认登录')
sleep(10)
else:
sleep(8)
elif idpName == 'fb':
stab = Stabilizer(self.driver)
count = 0
running_logger.debug(u'Try to handle pages after status change')
err_keywords = [u'Error', u'Invalid']
try:
while self.driver.current_activity == 'com.facebook.FacebookActivity'\
or self.driver.current_activity == 'com.facebook.LoginActivity':
count += 1
source = self.driver.page_source
# in case of continue appears
if 'Continue' in source:
running_logger.debug(u'Try to click Continue')
stab.find_elements_by_keyword(u'Continue', clickable_only=True,\
exact=False)[-1].click()
                        # give all possible permissions to the app
elif 'would like to' in source:
running_logger.debug(u'Try to offer permission by clicking OK')
stab.find_elements_by_keyword(u'OK', clickable_only=True, exact=True)[-1].click()
sleep(1)
assert count <= 10
running_logger.debug(u'Get out of facebook login webview')
except:
running_logger.exception("exception in rpConfirm:")
if idpName != 'google':
#verifyCtrip(self.driver)
#self.driver.start_activity(packageName, self.home_activity)
#user_info(self.driver, package=packageName)
writeResult(True)
return
if idpName != 'google':
writeResult(True)
if authorized:
if idpName == 'sina':
#if Stepper.actionName == 'Initialize':
# if self.driver.current_activity != '.SSOAuthorizeActivity':
# g_result.error('Alarm: the app supports webviewer only!')
result = sinaConfirm(self.driver)
if result == False:
writeResult(False)
return
else:
#verifyCtrip(self.driver)
#self.driver.start_activity(packageName, self.home_activity)
#user_info(self.driver, package=packageName)
writeResult(True)
return
elif idpName == 'wechat':
result = wechatConfirm(self.driver)
if result == False:
writeResult(False)
return
else:
#user_info(self.driver, package=packageName)
writeResult(True)
return
elif idpName == 'fb':
result = fbConfirm(self.driver)
if result == False:
writeResult(False)
return
else:
#user_info(self.driver, package=packageName)
writeResult(True)
return
if __name__ == '__main__':
if (len(sys.argv) == 4):
idpName = str(sys.argv[1])
authorized = str(sys.argv[2])
systemPort = int(sys.argv[3])
if authorized == 'True':
authorized = True
else:
authorized = False
if (len(sys.argv) == 6):
idpName = str(sys.argv[1])
authorized = str(sys.argv[2])
packageName = str(sys.argv[3])
activityName = str(sys.argv[4])
systemPort = int(sys.argv[5])
if authorized == 'True':
authorized = True
else:
authorized = False
if (len(sys.argv) == 7):
idpName = str(sys.argv[1])
authorized = str(sys.argv[2])
packageName = str(sys.argv[3])
activityName = str(sys.argv[4])
portNum = int(sys.argv[5])
systemPort = int(sys.argv[6])
if authorized == 'True':
authorized = True
else:
authorized = False
#print(getLockFileLength())
suite = unittest.TestLoader().loadTestsFromTestCase(Ssotest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
import base64
def bitfield(n):
'''
Obtains the binary array from the number
'''
return [1 if digit=='1' else 0 for digit in bin(n)[2:]]
def shifting(bitlist):
'''
Obtain the number from the binary array
'''
out = 0
for bit in bitlist:
out = (out << 1) | bit
return out
def true_in_list(l):
return [i for i,v in enumerate(l) if v]
def pad_left_list(l, size, pad_value):
for n in range(len(l), size):
l = [pad_value] + l
return l
def pad_right_list(l, size, pad_value):
for n in range(len(l), size):
l = l + [pad_value]
return l
# toggleBit() returns an integer with the bit at 'offset' inverted, 0 -> 1 and 1 -> 0.
def toggleBit(int_type, offset):
mask = 1 << offset
return(int_type ^ mask)
# setBit() returns an integer with the bit at 'offset' set to 1.
def setBit(int_type, offset):
mask = 1 << offset
return(int_type | mask)
# clearBit() returns an integer with the bit at 'offset' cleared.
def clearBit(int_type, offset):
mask = ~(1 << offset)
return(int_type & mask)
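# Minimal usage sketch (illustrative):
#   bitfield(6)              -> [1, 1, 0]
#   shifting([1, 1, 0])      -> 6
#   pad_left_list([1], 3, 0) -> [0, 0, 1]
#   setBit(0b100, 1) -> 6,  clearBit(6, 1) -> 4,  toggleBit(4, 0) -> 5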
|
import textfsm
import colorama
template_file = 'week4_exercise1.template'
template = open(template_file)
with open('week4_exercise1.txt') as f:
raw_text_data = f.read()
# The argument 'template' is a file handle and 'raw_text_data' is a string.
re_table = textfsm.TextFSM(template)
data = re_table.ParseText(raw_text_data)
print(data)
|
from setuptools import setup
setup(
name='scrapy-html-storage',
version='0.3.0',
description='Scrapy downloader middleware that stores response HTML files to disk.',
long_description=open('README.rst').read(),
url='https://github.com/povilasb/scrapy-html-storage',
author='Povilas Balciunas',
author_email='[email protected]',
license='MIT',
packages=['scrapy_html_storage'],
zip_safe=False
)
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.compute.base import NodeState, NodeLocation
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.common.gogrid import GoGridIpAddress
from libcloud.compute.drivers.gogrid import GoGridNodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize
from libcloud.test import MockHttp # pylint: disable-msg=E0611
from libcloud.test.compute import TestCaseMixin # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611
class GoGridTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
GoGridNodeDriver.connectionCls.conn_class = GoGridMockHttp
GoGridMockHttp.type = None
self.driver = GoGridNodeDriver("foo", "bar")
def _get_test_512Mb_node_size(self):
return NodeSize(
id="512Mb",
name=None,
ram=None,
disk=None,
bandwidth=None,
price=None,
driver=self.driver,
)
def test_create_node(self):
image = NodeImage(1531, None, self.driver)
node = self.driver.create_node(
name="test1", image=image, size=self._get_test_512Mb_node_size()
)
self.assertEqual(node.name, "test1")
self.assertTrue(node.id is not None)
self.assertEqual(node.extra["password"], "bebebe")
def test_list_nodes(self):
node = self.driver.list_nodes()[0]
self.assertEqual(node.id, "90967")
self.assertEqual(node.extra["password"], "bebebe")
self.assertEqual(node.extra["description"], "test server")
def test_reboot_node(self):
node = Node(90967, None, None, None, None, self.driver)
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_reboot_node_not_successful(self):
GoGridMockHttp.type = "FAIL"
node = Node(90967, None, None, None, None, self.driver)
try:
self.driver.reboot_node(node)
except Exception:
pass
else:
self.fail("Exception was not thrown")
def test_destroy_node(self):
node = Node(90967, None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_list_images(self):
images = self.driver.list_images()
image = images[0]
self.assertEqual(len(images), 4)
self.assertEqual(image.name, "CentOS 5.3 (32-bit) w/ None")
self.assertEqual(image.id, "1531")
location = NodeLocation(
id="gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img",
name="test location",
country="Slovenia",
driver=self.driver,
)
images = self.driver.list_images(location=location)
image = images[0]
self.assertEqual(len(images), 4)
self.assertEqual(image.name, "CentOS 5.3 (32-bit) w/ None")
self.assertEqual(image.id, "1531")
def test_malformed_reply(self):
GoGridMockHttp.type = "FAIL"
try:
self.driver.list_images()
except LibcloudError as e:
self.assertTrue(isinstance(e, LibcloudError))
else:
self.fail("test should have thrown")
def test_invalid_creds(self):
GoGridMockHttp.type = "FAIL"
try:
self.driver.list_nodes()
except InvalidCredsError as e:
self.assertTrue(e.driver is not None)
self.assertEqual(e.driver.name, self.driver.name)
else:
self.fail("test should have thrown")
def test_node_creation_without_free_public_ips(self):
GoGridMockHttp.type = "NOPUBIPS"
try:
image = NodeImage(1531, None, self.driver)
self.driver.create_node(
name="test1", image=image, size=self._get_test_512Mb_node_size()
)
except LibcloudError as e:
self.assertTrue(isinstance(e, LibcloudError))
self.assertTrue(e.driver is not None)
self.assertEqual(e.driver.name, self.driver.name)
else:
self.fail("test should have thrown")
def test_list_locations(self):
locations = self.driver.list_locations()
location_names = [location.name for location in locations]
self.assertEqual(len(locations), 2)
for i in 0, 1:
self.assertTrue(isinstance(locations[i], NodeLocation))
self.assertTrue("US-West-1" in location_names)
self.assertTrue("US-East-1" in location_names)
def test_ex_save_image(self):
node = self.driver.list_nodes()[0]
image = self.driver.ex_save_image(node, "testimage")
self.assertEqual(image.name, "testimage")
def test_ex_edit_image(self):
image = self.driver.list_images()[0]
ret = self.driver.ex_edit_image(
image=image, public=False, ex_description="test", name="testname"
)
self.assertTrue(isinstance(ret, NodeImage))
def test_ex_edit_node(self):
node = Node(
id=90967,
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_edit_node(node=node, size=self._get_test_512Mb_node_size())
self.assertTrue(isinstance(ret, Node))
def test_ex_list_ips(self):
ips = self.driver.ex_list_ips()
expected_ips = {
"192.168.75.66": GoGridIpAddress(
id="5348099",
ip="192.168.75.66",
public=True,
state="Unassigned",
subnet="192.168.75.64/255.255.255.240",
),
"192.168.75.67": GoGridIpAddress(
id="5348100",
ip="192.168.75.67",
public=True,
state="Assigned",
subnet="192.168.75.64/255.255.255.240",
),
"192.168.75.68": GoGridIpAddress(
id="5348101",
ip="192.168.75.68",
public=False,
state="Unassigned",
subnet="192.168.75.64/255.255.255.240",
),
}
self.assertEqual(len(expected_ips), 3)
for ip in ips:
self.assertTrue(ip.ip in expected_ips)
self.assertEqual(ip.public, expected_ips[ip.ip].public)
self.assertEqual(ip.state, expected_ips[ip.ip].state)
self.assertEqual(ip.subnet, expected_ips[ip.ip].subnet)
del expected_ips[ip.ip]
self.assertEqual(len(expected_ips), 0)
def test_get_state_invalid(self):
state = self.driver._get_state("invalid")
self.assertEqual(state, NodeState.UNKNOWN)
class GoGridMockHttp(MockHttp):
fixtures = ComputeFileFixtures("gogrid")
def _api_grid_image_list(self, method, url, body, headers):
body = self.fixtures.load("image_list.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_grid_image_list_FAIL(self, method, url, body, headers):
body = "<h3>some non valid json here</h3>"
return (
httplib.SERVICE_UNAVAILABLE,
body,
{},
httplib.responses[httplib.SERVICE_UNAVAILABLE],
)
def _api_grid_server_list(self, method, url, body, headers):
body = self.fixtures.load("server_list.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
_api_grid_server_list_NOPUBIPS = _api_grid_server_list
def _api_grid_server_list_FAIL(self, method, url, body, headers):
return (httplib.FORBIDDEN, "123", {}, httplib.responses[httplib.FORBIDDEN])
def _api_grid_ip_list(self, method, url, body, headers):
body = self.fixtures.load("ip_list.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_grid_ip_list_NOPUBIPS(self, method, url, body, headers):
body = self.fixtures.load("ip_list_empty.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_grid_server_power(self, method, url, body, headers):
body = self.fixtures.load("server_power.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_grid_server_power_FAIL(self, method, url, body, headers):
body = self.fixtures.load("server_power_fail.json")
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])
def _api_grid_server_add(self, method, url, body, headers):
body = self.fixtures.load("server_add.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
_api_grid_server_add_NOPUBIPS = _api_grid_server_add
def _api_grid_server_delete(self, method, url, body, headers):
body = self.fixtures.load("server_delete.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_grid_server_edit(self, method, url, body, headers):
body = self.fixtures.load("server_edit.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_support_password_list(self, method, url, body, headers):
body = self.fixtures.load("password_list.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
_api_support_password_list_NOPUBIPS = _api_support_password_list
def _api_grid_image_save(self, method, url, body, headers):
body = self.fixtures.load("image_save.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_grid_image_edit(self, method, url, body, headers):
# edit method is quite similar to save method from the response
# perspective
body = self.fixtures.load("image_save.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_common_lookup_list(self, method, url, body, headers):
_valid_lookups = ("ip.datacenter",)
lookup = parse_qs(urlparse.urlparse(url).query)["lookup"][0]
if lookup in _valid_lookups:
fixture_path = "lookup_list_%s.json" % (lookup.replace(".", "_"))
else:
raise NotImplementedError
body = self.fixtures.load(fixture_path)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == "__main__":
sys.exit(unittest.main())
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Basic test classes."""
import os
import unittest
import traceback
from tornado import ioloop
from aiida.common.exceptions import ConfigurationError, TestsNotAllowedError, InternalError
from aiida.common.lang import classproperty
from aiida.manage import configuration
from aiida.manage.manager import get_manager, reset_manager
TEST_KEYWORD = 'test_'
def check_if_tests_can_run():
"""Verify that the currently loaded profile is a test profile, otherwise raise `TestsNotAllowedError`."""
profile = configuration.PROFILE
if not profile.is_test_profile:
raise TestsNotAllowedError('currently loaded profile {} is not a valid test profile'.format(profile.name))
class AiidaTestCase(unittest.TestCase):
"""This is the base class for AiiDA tests, independent of the backend.
Internally it loads the AiidaTestImplementation subclass according to the current backend."""
_class_was_setup = False
__backend_instance = None
backend = None # type: aiida.orm.implementation.Backend
@classmethod
def get_backend_class(cls):
"""Get backend class."""
from aiida.backends.testimplbase import AiidaTestImplementation
from aiida.backends import BACKEND_SQLA, BACKEND_DJANGO
from aiida.manage.configuration import PROFILE
# Freeze the __impl_class after the first run
if not hasattr(cls, '__impl_class'):
if PROFILE.database_backend == BACKEND_SQLA:
from aiida.backends.sqlalchemy.testbase import SqlAlchemyTests
cls.__impl_class = SqlAlchemyTests
elif PROFILE.database_backend == BACKEND_DJANGO:
from aiida.backends.djsite.db.testbase import DjangoTests
cls.__impl_class = DjangoTests
else:
raise ConfigurationError('Unknown backend type')
# Check that it is of the right class
if not issubclass(cls.__impl_class, AiidaTestImplementation):
raise InternalError(
'The AiiDA test implementation is not of type '
'{}, that is not a subclass of AiidaTestImplementation'.format(cls.__impl_class.__name__)
)
return cls.__impl_class
@classmethod
def setUpClass(cls, *args, **kwargs): # pylint: disable=arguments-differ
# Note: this will raise an exception, that will be seen as a test
# failure. To be safe, you should do the same check also in the tearDownClass
# to avoid that it is run
check_if_tests_can_run()
# Force the loading of the backend which will load the required database environment
get_manager().get_backend()
cls.__backend_instance = cls.get_backend_class()()
cls.__backend_instance.setUpClass_method(*args, **kwargs)
cls.backend = cls.__backend_instance.backend
cls._class_was_setup = True
cls.clean_db()
cls.insert_data()
def setUp(self):
# Install a new IOLoop so that any messing up of the state of the loop is not propagated
# to subsequent tests.
# This call should come before the backend instance setup call just in case it uses the loop
ioloop.IOLoop().make_current()
def tearDown(self):
# Clean up the loop we created in set up.
# Call this after the instance tear down just in case it uses the loop
reset_manager()
loop = ioloop.IOLoop.current()
if not loop._closing: # pylint: disable=protected-access,no-member
loop.close()
def reset_database(self):
"""Reset the database to the default state deleting any content currently stored"""
from aiida.orm import autogroup
self.clean_db()
if autogroup.CURRENT_AUTOGROUP is not None:
autogroup.CURRENT_AUTOGROUP.clear_group_cache()
self.insert_data()
@classmethod
def insert_data(cls):
"""
        This method sets up the database (by creating a default user) and
inserts default data into the database (which is for the moment a
default computer).
"""
from aiida.orm import User
cls.create_user()
User.objects.reset()
cls.create_computer()
@classmethod
def create_user(cls):
cls.__backend_instance.create_user()
@classmethod
def create_computer(cls):
cls.__backend_instance.create_computer()
@classmethod
def clean_db(cls):
"""Clean up database and reset caches.
Resets AiiDA manager cache, which could otherwise be left in an inconsistent state when cleaning the database.
"""
from aiida.common.exceptions import InvalidOperation
# Note: this will raise an exception, that will be seen as a test
# failure. To be safe, you should do the same check also in the tearDownClass
# to avoid that it is run
check_if_tests_can_run()
if not cls._class_was_setup:
raise InvalidOperation('You cannot call clean_db before running the setUpClass')
cls.__backend_instance.clean_db()
reset_manager()
@classmethod
def clean_repository(cls):
"""
Cleans up file repository.
"""
from aiida.manage.configuration import get_profile
from aiida.common.exceptions import InvalidOperation
import shutil
dirpath_repository = get_profile().repository_path
base_repo_path = os.path.basename(os.path.normpath(dirpath_repository))
if TEST_KEYWORD not in base_repo_path:
raise InvalidOperation(
'Warning: The repository folder {} does not '
'seem to belong to a test profile and will therefore not be deleted.\n'
'Full repository path: '
'{}'.format(base_repo_path, dirpath_repository)
)
# Clean the test repository
shutil.rmtree(dirpath_repository, ignore_errors=True)
os.makedirs(dirpath_repository)
@classproperty
def computer(cls): # pylint: disable=no-self-argument
"""Get the default computer for this test
:return: the test computer
:rtype: :class:`aiida.orm.Computer`"""
return cls.__backend_instance.get_computer()
@classproperty
def user_email(cls): # pylint: disable=no-self-argument
return cls.__backend_instance.get_user_email()
@classmethod
def tearDownClass(cls, *args, **kwargs): # pylint: disable=arguments-differ
# Double check for double security to avoid to run the tearDown
# if this is not a test profile
from aiida.orm import autogroup
check_if_tests_can_run()
if autogroup.CURRENT_AUTOGROUP is not None:
autogroup.CURRENT_AUTOGROUP.clear_group_cache()
cls.clean_db()
cls.clean_repository()
cls.__backend_instance.tearDownClass_method(*args, **kwargs)
def assertClickSuccess(self, cli_result): # pylint: disable=invalid-name
self.assertEqual(cli_result.exit_code, 0, cli_result.output)
self.assertClickResultNoException(cli_result)
def assertClickResultNoException(self, cli_result): # pylint: disable=invalid-name
self.assertIsNone(cli_result.exception, ''.join(traceback.format_exception(*cli_result.exc_info)))
class AiidaPostgresTestCase(AiidaTestCase):
"""Setup postgres tests."""
@classmethod
def setUpClass(cls, *args, **kwargs):
"""Setup the PGTest postgres test cluster."""
from pgtest.pgtest import PGTest
cls.pg_test = PGTest()
super().setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
"""Close the PGTest postgres test cluster."""
super().tearDownClass(*args, **kwargs)
cls.pg_test.close()
|
"""
================================================================================================
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
Script which collects data from external SdamGIA Api (https://github.com/anijackich/sdamgia-api)
and executes ONLY MANUALLY IF NEEDED BUT NOT RECOMMENDED!!!!!!!!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
DO NOT UNCOMMENT SCRIPT AND EXECUTE!
================================================================================================
"""
#
#
# # Imports.
# import json
#
# from sdamgia import SdamGIA
#
# # Instance of class sdamgia (which regulates processes with external api).
# sdamgia_instance = SdamGIA()
#
# # Paths to files for different subjects (rus and math are currently here).
# file_paths = {
# 'math_lib': {
# 'math_data': '../server_api/subject_data/math_lib/math_final_tasks_bank.txt',
# 'math_statistics': '../server_api/subject_data/math_lib/math_tasks_statistics.txt'
# },
# 'rus_lib': {
# 'rus_data': '../server_api/subject_data/rus_lib/rus_final_tasks_bank.txt',
# 'rus_statistics': '../server_api/subject_data/rus_lib/rus_tasks_statistics.txt'
# }
# }
#
#
# def get_task_catalog_from_sdamgia_api(subject: str, file_path: str) -> None:
# """
# Gets data from external api and writes it to json file.
#
# :param subject: Student's subject: 'math' / 'rus'.
# :param file_path: Path to file which we will be used for writing data.
# :return: None.
# """
# data_json = []
# topic_names = []
# with open(file_path, 'w') as file:
# for topic in sdamgia_instance.get_catalog(subject):
# # Get name of theme and tasks for it.
# topic_name = topic.get('topic_name').replace('', '').replace('Задания Л', 'Другое').strip()
# topic_tasks = sdamgia_instance.search(subject, topic_name)
#
# # Avoid tasks with empty response from external api.
# if len(topic_tasks) == 0 or topic_name in topic_names:
# print(f"[INFO] Topic: {topic_name} ---> Result: no tasks!")
# continue
#
# # Add data to final list and print success result to console for comfort.
# data = {
# topic_name: topic_tasks
# }
#
# topic_names.append(topic_name)
# data_json.append(data)
# print(f"[INFO] Topic: {topic_name} ---> Result: success!")
#
# json.dump(data_json, file, indent=4, ensure_ascii=False)
#
# return None
#
#
# def get_tasks_statistics_from_data_file(file_read_path: str, file_write_path: str) -> None:
# """
# Collects statistics about current subject (informs theme -> amount of tasks in tasks pull).
#
# :param file_read_path: Path to file which we will be used for reading data.
# :param file_write_path: Path to file which we will be used for writing data.
# """
# with open(file_read_path, 'r') as read_file, open(file_write_path, 'w') as write_file:
# current_data = json.load(read_file)
#
# final_data = []
# for item in current_data:
# for theme, tasks_list in item.items():
# final_data.append(f"{theme}: {len(tasks_list)}\n")
#
# write_file.writelines(final_data)
#
# return None
#
#
# def main():
# """
# Executes all funcs in script and collects data from external SdamGIA Api.
# """
# for subject in file_paths.keys():
# file_read_path = file_paths.get(subject).get(f'{subject}_data')
# file_write_path = file_paths.get(subject).get(f'{subject}_statistics')
# get_task_catalog_from_sdamgia_api(subject, file_read_path)
# get_tasks_statistics_from_data_file(file_read_path, file_write_path)
#
#
# if __name__ == '__main__':
# main()
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RgbToBgr op in DE
"""
import numpy as np
from numpy.testing import assert_allclose
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.py_transforms_util as util
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def generate_numpy_random_rgb(shape):
# Only generate floating points that are fractions like n / 256, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
return np.random.randint(0, 256, shape) / 255.
def test_rgb_bgr_hwc_py():
# Eager
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
bgr_np_pred = util.rgb_to_bgrs(rgb_np, True)
r, g, b = rgb_np[:, :, 0], rgb_np[:, :, 1], rgb_np[:, :, 2]
bgr_np_gt = np.stack((b, g, r), axis=2)
assert bgr_np_pred.shape == rgb_np.shape
assert_allclose(bgr_np_pred.flatten(),
bgr_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_hwc_c():
# Eager
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
rgb2bgr_op = vision.RgbToBgr()
bgr_np_pred = rgb2bgr_op(rgb_np)
r, g, b = rgb_np[:, :, 0], rgb_np[:, :, 1], rgb_np[:, :, 2]
bgr_np_gt = np.stack((b, g, r), axis=2)
assert bgr_np_pred.shape == rgb_np.shape
assert_allclose(bgr_np_pred.flatten(),
bgr_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_chw_py():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((3, 8, 8))
rgb_np_pred = util.rgb_to_bgrs(rgb_np, False)
rgb_np_gt = rgb_np[::-1, :, :]
assert rgb_np_pred.shape == rgb_np.shape
assert_allclose(rgb_np_pred.flatten(),
rgb_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_pipeline_py():
# First dataset
transforms1 = [py_vision.Decode(), py_vision.Resize([64, 64]), py_vision.ToTensor()]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2 = [
py_vision.Decode(),
py_vision.Resize([64, 64]),
py_vision.ToTensor(),
py_vision.RgbToBgr()
]
transforms2 = mindspore.dataset.transforms.py_transforms.Compose(
transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds2 = ds2.map(operations=transforms2, input_columns=["image"])
num_iter = 0
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
ds2.create_dict_iterator(num_epochs=1)):
num_iter += 1
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
cvt_img_gt = ori_img[::-1, :, :]
assert_allclose(cvt_img_gt.flatten(),
cvt_img.flatten(),
rtol=1e-5,
atol=0)
assert ori_img.shape == cvt_img.shape
def test_rgb_bgr_pipeline_c():
# First dataset
transforms1 = [
vision.Decode(),
vision.Resize([64, 64])
]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2 = [
vision.Decode(),
vision.Resize([64, 64]),
vision.RgbToBgr()
]
transforms2 = mindspore.dataset.transforms.py_transforms.Compose(
transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds2 = ds2.map(operations=transforms2, input_columns=["image"])
num_iter = 0
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
ds2.create_dict_iterator(num_epochs=1)):
num_iter += 1
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
cvt_img_gt = ori_img[:, :, ::-1]
assert_allclose(cvt_img_gt.flatten(),
cvt_img.flatten(),
rtol=1e-5,
atol=0)
assert ori_img.shape == cvt_img.shape
if __name__ == "__main__":
test_rgb_bgr_hwc_py()
test_rgb_bgr_hwc_c()
test_rgb_bgr_chw_py()
test_rgb_bgr_pipeline_py()
test_rgb_bgr_pipeline_c()
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the region map endpoint."""
from unittest.mock import patch
from django.test import TestCase
from django.test.utils import override_settings
TEST_HTML = "./koku/masu/test/data/test_region_page.html"
class MockResponse:
"""A fake requests.Response object."""
status_code = None
text = None
def __init__(self, data, status):
"""Initialize a mock response."""
self.status_code = status
self.text = str(data)
@override_settings(ROOT_URLCONF="masu.urls")
class RegionMapAPIViewTest(TestCase):
"""Test Cases for the Region Map API."""
@classmethod
def setUpClass(cls):
"""Set up test class with shared objects."""
with open(TEST_HTML) as page:
cls.test_data = page.read()
@patch("koku.middleware.MASU", return_value=True)
@patch("masu.database.reporting_common_db_accessor.ReportingCommonDBAccessor", autospec=True)
@patch("masu.util.aws.region_map.requests.get")
def skip_test_update_region_map(self, mock_response, mock_accessor, _):
"""Test the region map endpoint."""
mock_response.return_value = MockResponse(self.test_data, 200)
response = self.client.get("/api/v1/regionmap/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_data(as_text=True), "true\n")
|
import random
import time
# decreasing sequences (malejące)
ilości = []
malejące=[[]]
for i in range(0,10):
k=int(input())
ilości.append(k)
print(ilości)
for i in range(len(ilości)):
for j in range(ilości[i]):
n=j
malejące[-1].append(n)
malejące[-1].sort(reverse=True)
malejące.append([])
malejące.pop(-1)
# increasing sequences (rosnące)
rosnące=[[]]
for i in range(len(ilości)):
for j in range(ilości[i]):
n=j
rosnące[-1].append(n)
rosnące.append([])
rosnące.pop(-1)
# random sequences (losowe)
losowe = [[]]
for i in range(len(ilości)):
for j in range(ilości[i]):
n = random.randint(1, 100000)
losowe[-1].append(n)
losowe.append([])
losowe.pop(-1)
# constant sequences (stałe)
stałe=[[]]
o=random.randint(1,100000)
for i in range(len(ilości)):
for j in range(ilości[i]):
stałe[-1].append(o)
stałe.append([])
stałe.pop(-1)
# A-shaped sequences (first half increasing, second half decreasing)
A=[[]]
x=1
for i in range(len(ilości)):
for j in range(ilości[i]):
if j<ilości[i]/2:
A[-1].append(j)
else:
A[-1].append(ilości[i]-x)
x=x+1
x=1
A.append([])
A.pop(-1)
# insertion sort
def insertion_sort(lst):
    for i in range(1, len(lst)):
        j = i - 1
        while j >= 0 and lst[j] > lst[j + 1]:
            lst[j], lst[j + 1] = lst[j + 1], lst[j]
            j = j - 1
# selection sort
def selectionSort(lista):
for i in range(len(lista)):
for j in range(i+1,len(lista)):
if lista[i]>lista[j]:
a=lista[i]
lista[i]=lista[j]
lista[j]=a
#shellsort
def shell_sort(list):
size = len(list)
gap=size//2
while gap>0:
for i in range(gap, size):
zaczep=list[i]
j=i
while j>=gap and list[j-gap]> zaczep:
list[j]= list[j-gap]
j-=gap
list[j]= zaczep
gap=gap//2
M=[losowe,malejące,rosnące,stałe,A]
timesins=[[]]
for j in range(len(M)):
for i in range(0,10):
start_all = time.time()
start = time.time()
        insertion_sort(M[j][i])
end = time.time()
end_all = time.time()
timesins[-1].append(end - start)
timesins.append([])
timesins.pop(-1)
timessel=[[]]
for j in range(len(M)):
for i in range(0,10):
start_all = time.time()
start = time.time()
selectionSort(M[j][i])
end = time.time()
end_all = time.time()
timessel[-1].append(end - start)
timessel.append([])
timessel.pop(-1)
timesshell=[[]]
for j in range(len(M)):
for i in range(0,10):
start_all = time.time()
start = time.time()
shell_sort(M[j][i])
end = time.time()
end_all = time.time()
timesshell[-1].append(end - start)
timesshell.append([])
timesshell.pop(-1)
print(timesins)
print(timessel)
print(timesshell)
from openpyxl import Workbook
wb = Workbook()
sheet= wb.active
sheet1=wb.active
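# NOTE: Workbook.active returns the same worksheet object both times, so `sheet` and
# `sheet1` are aliases and all three timing tables end up on the one active sheet.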
for i in range(len(timessel)):
sheet.append(timessel[i])
for i in range(len(timesins)):
sheet1.append(timesins[i])
for i in range(len(timesshell)):
sheet.append(timesshell[i])
wb.save("danedowykresó.xlsx") |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from file_system import FileNotFoundError
from in_memory_object_store import InMemoryObjectStore
from reference_resolver import ReferenceResolver
class FakeAPIDataSource(object):
def __init__(self, json_data):
self._json = json_data
def get(self, key):
if key not in self._json:
raise FileNotFoundError(key)
return self._json[key]
def GetAllNames(self):
return self._json.keys()
class APIDataSourceTest(unittest.TestCase):
def setUp(self):
self._base_path = os.path.join(sys.path[0], 'test_data', 'test_json')
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def testGetLink(self):
data_source = FakeAPIDataSource(
json.loads(self._ReadLocalFile('fake_data_source.json')))
resolver = ReferenceResolver(data_source,
data_source,
InMemoryObjectStore(''))
self.assertEqual({
'href': 'foo.html#type-foo_t1',
'text': 'foo.foo_t1',
'name': 'foo_t1'
}, resolver.GetLink('foo.foo_t1', 'baz'))
self.assertEqual({
'href': 'baz.html#event-baz_e1',
'text': 'baz_e1',
'name': 'baz_e1'
}, resolver.GetLink('baz.baz_e1', 'baz'))
self.assertEqual({
'href': 'baz.html#event-baz_e1',
'text': 'baz_e1',
'name': 'baz_e1'
}, resolver.GetLink('baz_e1', 'baz'))
self.assertEqual({
'href': 'foo.html#method-foo_f1',
'text': 'foo.foo_f1',
'name': 'foo_f1'
}, resolver.GetLink('foo.foo_f1', 'baz'))
self.assertEqual({
'href': 'foo.html#property-foo_p3',
'text': 'foo.foo_p3',
'name': 'foo_p3'
}, resolver.GetLink('foo.foo_p3', 'baz'))
self.assertEqual({
'href': 'bar.bon.html#type-bar_bon_t3',
'text': 'bar.bon.bar_bon_t3',
'name': 'bar_bon_t3'
}, resolver.GetLink('bar.bon.bar_bon_t3', 'baz'))
self.assertEqual({
'href': 'bar.bon.html#property-bar_bon_p3',
'text': 'bar_bon_p3',
'name': 'bar_bon_p3'
}, resolver.GetLink('bar_bon_p3', 'bar.bon'))
self.assertEqual({
'href': 'bar.bon.html#property-bar_bon_p3',
'text': 'bar_bon_p3',
'name': 'bar_bon_p3'
}, resolver.GetLink('bar.bon.bar_bon_p3', 'bar.bon'))
self.assertEqual({
'href': 'bar.html#event-bar_e2',
'text': 'bar_e2',
'name': 'bar_e2'
}, resolver.GetLink('bar.bar_e2', 'bar'))
self.assertEqual({
'href': 'bar.html#type-bon',
'text': 'bon',
'name': 'bon'
}, resolver.GetLink('bar.bon', 'bar'))
self.assertEqual({
'href': 'foo.html#event-foo_t3-foo_t3_e1',
'text': 'foo_t3.foo_t3_e1',
'name': 'foo_t3_e1'
}, resolver.GetLink('foo_t3.foo_t3_e1', 'foo'))
self.assertEqual({
'href': 'foo.html#event-foo_t3-foo_t3_e1',
'text': 'foo_t3.foo_t3_e1',
'name': 'foo_t3_e1'
}, resolver.GetLink('foo.foo_t3.foo_t3_e1', 'foo'))
self.assertEqual({
'href': 'foo.html#event-foo_t3-foo_t3_e1',
'text': 'foo_t3.foo_t3_e1',
'name': 'foo_t3_e1'
}, resolver.GetLink('foo.foo_p1.foo_t3_e1', 'foo'))
self.assertEqual({
'href': 'bar.html#property-bar_t1-bar_t1_p1',
'text': 'bar.bar_t1.bar_t1_p1',
'name': 'bar_t1_p1'
}, resolver.GetLink('bar.bar_p3.bar_t1_p1', 'foo'))
self.assertEqual({
'href': 'bar.html#property-bar_t1-bar_t1_p1',
'text': 'bar_t1.bar_t1_p1',
'name': 'bar_t1_p1'
}, resolver.GetLink('bar_p3.bar_t1_p1', 'bar'))
self.assertEqual(
None,
resolver.GetLink('bar.bar_p3.bar_t2_p1', 'bar'))
self.assertEqual(
None,
resolver.GetLink('bar.bon.bar_e3', 'bar'))
self.assertEqual(
None,
resolver.GetLink('bar_p3', 'baz.bon'))
self.assertEqual(
None,
resolver.GetLink('falafel.faf', 'a'))
self.assertEqual(
None,
resolver.GetLink('bar_p3', 'foo'))
self.assertEqual(
'Hello <a href="bar.bon.html#property-bar_bon_p3">bar_bon_p3</a>, '
'<a href="bar.bon.html#property-bar_bon_p3">Bon Bon</a>, '
'<a href="bar.bon.html#property-bar_bon_p3">bar_bon_p3</a>',
resolver.ResolveAllLinks(
'Hello $ref:bar_bon_p3, $ref:[bar_bon_p3 Bon Bon], $ref:bar_bon_p3',
'bar.bon'))
self.assertEqual(
'I like <a href="bar.html#property-bar_t1-bar_t1_p1">food</a>.',
resolver.ResolveAllLinks('I like $ref:[bar.bar_p3.bar_t1_p1 food].',
'foo'))
self.assertEqual(
'Ref <a href="bar.html#type-bon">bon</a>',
resolver.ResolveAllLinks('Ref $ref:[bar.bon]', 'bar'))
if __name__ == '__main__':
unittest.main()
|
# coding: utf-8
import collections
import itertools
import logging
from typing import Dict, Iterator, List, Tuple
import cached_property
import pulp
from haoda import ir, util
_logger = logging.getLogger().getChild(__name__)
class SuperSourceNode(ir.Module):
"""A node representing the super source in the dataflow graph.
A super source doesn't have parent nodes.
Attributes:
fwd_nodes (Dict[Tuple[str, int], ForwardNode]): Dict mapping tuples of
(tensor name, offset) to nodes.
cpt_nodes (Dict[Tuple[str, int], ComputeNode]): Dict mapping tuples of
(stage_name, pe_id) to nodes.
super_sink (SuperSinkNode): The super sink node of this DAG.
"""
def __init__(
self,
fwd_nodes: Dict[Tuple[str, int], 'ForwardNode'],
cpt_nodes: Dict[Tuple[str, int], 'ComputeNode'],
super_sink: 'SuperSinkNode',
):
super().__init__()
self.fwd_nodes = fwd_nodes
self.cpt_nodes = cpt_nodes
self.super_sink = super_sink
@property
def graphviz(self) -> str:
output = 'digraph {\n'
for src, dst in self.dfs_edge_gen():
output += f' "{repr(src)}" -> "{repr(dst)}"\n'
output += '}\n'
return output
def verify_mode_depths(self) -> None:
latency_table = {}
lp_problem = pulp.LpProblem('verify_fifo_depths', pulp.LpMinimize)
for node in self.tpo_valid_node_gen():
if self in node.parents:
latency_table[node] = 0
else:
latency_table[node] = pulp.LpVariable(
name=f'latency_{node.name}',
lowBound=0,
cat='Integer',
)
lp_problem.extend(
parent.get_latency(node) +
latency_table[parent] <= latency_table[node]
for parent in node.parents)
lp_problem.extend(
parent.get_latency(node) + latency_table[parent] +
parent.fifo(node).depth >= latency_table[node]
for parent in node.parents)
lp_status = lp_problem.solve()
if lp_status == pulp.LpStatusOptimal:
_logger.debug('II=1 check: PASS')
elif lp_status == pulp.LpStatusInfeasible:
_logger.warn('II=1 check: FAIL')
else:
lp_status_str = pulp.LpStatus[lp_status]
_logger.error('ILP error: %s\n%s', lp_status_str, lp_problem)
raise util.InternalError('unexpected ILP status: %s' % lp_status_str)
for node in self.tpo_valid_node_gen():
if self in node.parents:
min_capacity = 0
else:
min_capacity = min(
parent.get_latency(node) + int(pulp.value(latency_table[parent])) +
parent.fifo(node).depth for parent in node.parents)
debug_enabled = _logger.isEnabledFor(logging.DEBUG)
check_failed = int(pulp.value(latency_table[node])) > min_capacity
if debug_enabled or check_failed:
(_logger.debug if debug_enabled else _logger.warn)(
'II=1 check %s: %s: latency %d %s min capacity %d',
'✖' if check_failed else '✔',
repr(node),
int(pulp.value(latency_table[node])),
'>' if check_failed else '<=',
min_capacity,
)
def update_module_depths(
self,
depths: Dict[int, int],
) -> None:
"""Update module pipeline depths and FIFO depths.
The FIFO depths are determined by solving an ILP problem:
+ Optimization objective: minimize the sum (weighted by FIFO width) of all
FIFO depths.
+ Constraints: the whole DAG can be fully pipelined without artificial
stalls.
For every non-overlapping path between a pair of nodes,
the latency of each token is the maximum minimum latency among all paths.
To enable full pipelining,
this latency must not exceed the maximum latency of any path.
The minimum latency of each path is the sum of the FIFO write latency in
each module and the number of edges (FIFOs),
since the minimum latency of a FIFO is 1.
The maximum latency of each path is the sum of the FIFO write latency in
each module and the total depth of FIFOs.
Args:
depths (Dict[int, int]): Dict mapping module ids to pipeline depths.
"""
# update module pipeline depths
for src_node, dst_node in self.bfs_valid_edge_gen():
module_id = self.module_table[src_node][1]
depth = depths.get(module_id)
if depth is not None:
fifo = src_node.fifo(dst_node)
if fifo.write_lat != depth:
_logger.debug('%s write latency changed %s -> %d', fifo,
fifo.write_lat, depth)
fifo.write_lat = depth
# set up ILP problem, variables, and objective
lp_problem = pulp.LpProblem('optimal_fifo_depths', pulp.LpMinimize)
lp_vars = {}
for src_node, dst_node in self.bfs_valid_edge_gen():
lp_vars[(src_node, dst_node)] = pulp.LpVariable(
name=f'depth_{src_node.fifo(dst_node).c_expr}',
lowBound=0,
cat='Integer',
)
lp_problem += sum(
x.fifo(y).haoda_type.width_in_bits * v for (x, y), v in lp_vars.items())
# add ILP constraints
latency_table = {
x: pulp.LpVariable(name=f'latency_{x.name}', lowBound=0, cat='Integer')
for x in self.tpo_valid_node_gen()
}
for node in self.tpo_valid_node_gen():
if self in node.parents:
latency_table[node] = 0
else:
lp_problem.extend(
parent.get_latency(node) +
latency_table[parent] <= latency_table[node]
for parent in node.parents)
lp_problem.extend(
parent.get_latency(node) + latency_table[parent] +
lp_vars[(parent, node)] >= latency_table[node]
for parent in node.parents)
# solve ILP
lp_status = lp_problem.solve()
if lp_status != pulp.LpStatusOptimal:
lp_status_str = pulp.LpStatus[lp_status]
_logger.error('ILP error: %s\n%s', lp_status_str, lp_problem)
raise util.InternalError('unexpected ILP status: %s' % lp_status_str)
# update FIFO depths
for (src_node, dst_node), lp_var in lp_vars.items():
depth = int(pulp.value(lp_var))
fifo = src_node.fifo(dst_node)
if fifo.depth != depth:
_logger.debug('%s * depth %d -> %d', fifo, fifo.depth, depth)
fifo.depth = depth
self.verify_mode_depths()
@property
def name(self):
return 'super_source'
def __repr__(self) -> str:
return '\033[35msuper source\033[0m'
@cached_property.cached_property
def module_table(self) -> Dict[ir.Node, Tuple[ir.ModuleTrait, int]]:
"""Module table maps an IR node to (module_trait, module_id).
Returns:
A dict mapping an IR node to (module_trait, module_id) tuple.
"""
self._module_traits: Dict[ir.ModuleTrait,
List[ir.Node]] = collections.OrderedDict()
module_table: Dict[ir.Node, Tuple[ir.ModuleTrait,
int]] = collections.OrderedDict()
for node in self.tpo_valid_node_gen():
self._module_traits.setdefault(ir.ModuleTrait(node), []).append(node)
for idx, module_trait in enumerate(self._module_traits):
for node in self._module_traits[module_trait]:
module_table[node] = module_trait, idx
return module_table
@cached_property.cached_property
def module_traits(self) -> Tuple[ir.ModuleTrait, ...]:
return tuple(self.module_trait_table)
@property
def module_trait_table(self) -> Dict[ir.ModuleTrait, List[ir.Node]]:
# pylint: disable=pointless-statement
self.module_table
return self._module_traits
def tpo_valid_node_gen(self) -> Iterator[ir.Module]:
"""Traverse valid descendant nodes in tpological order.
Load and store nodes are ordered in the same way as they are specified in
soda files.
Yields:
Iterator[ir.Module]: Nodes that are not super source or super sink.
"""
yield from self.load_nodes
yield from filter(
lambda x: not isinstance(x, MemoryNode) and is_valid_node(x),
super().tpo_node_gen(),
)
yield from self.store_nodes
  def bfs_valid_edge_gen(self) -> Iterator[Tuple[ir.Module, ir.Module]]:
return filter(is_valid_edge, self.bfs_edge_gen())
@property
def load_nodes(self) -> Tuple['LoadNode', ...]:
return self.children
@property
def store_nodes(self) -> Tuple['StoreNode', ...]:
return self.super_sink.parents
class SuperSinkNode(ir.Module):
"""A node representing the super sink in the dataflow graph.
A super sink doesn't have child nodes.
"""
@property
def name(self):
return 'super_sink'
def __repr__(self) -> str:
return '\033[34msuper sink\033[0m'
class ForwardNode(ir.Module):
"""A node representing a forward module in the dataflow graph.
Attributes:
tensor: Tensor corresponding to this node.
offset: Int representing the offset of this tensor.
"""
def __init__(self, **kwargs):
super().__init__()
self.tensor = kwargs.pop('tensor')
self.offset = kwargs.pop('offset')
def __repr__(self):
return '\033[32mforward %s @%d\033[0m' % (self.tensor.name, self.offset)
@property
def name(self):
return '{}_offset_{}'.format(self.tensor.name, self.offset)
class ComputeNode(ir.Module):
"""A node representing a compute module in the dataflow graph.
Attributes:
tensor: Tensor corresponding to this node.
pe_id: Int representing the PE id.
fifo_map: {str: {idx: Node}}
"""
def __init__(self, **kwargs):
super().__init__()
self.tensor = kwargs.pop('tensor')
self.pe_id = kwargs.pop('pe_id')
self.fifo_map = collections.defaultdict(dict)
def __repr__(self):
return '\033[31mcompute %s #%d\033[0m' % (self.tensor.name, self.pe_id)
@property
def name(self):
return '{}_pe_{}'.format(self.tensor.name, self.pe_id)
class MemoryNode(ir.Module):
def __init__(self, var: str, bank: int):
super().__init__()
self.var = var
self.bank = bank
@property
def name(self) -> str:
return f'{self.var}_bank_{self.bank}'
def __str__(self) -> str:
return f'dram<bank {self.bank} {self.var}>'
class LoadNode(MemoryNode):
def __repr__(self) -> str:
return f'\033[33mload {self.var} bank {self.bank}\033[0m'
class StoreNode(MemoryNode):
def __repr__(self) -> str:
return f'\033[36mstore {self.var} bank {self.bank}\033[0m'
def is_valid_node(node: ir.Module) -> bool:
return not isinstance(node, (SuperSourceNode, SuperSinkNode))
def is_valid_edge(edge: Tuple[ir.Module, ir.Module]) -> bool:
return all(map(is_valid_node, edge))
# pylint: disable=too-many-branches,too-many-statements
def create_dataflow_graph(stencil):
chronological_tensors = stencil.chronological_tensors
super_source = SuperSourceNode(
fwd_nodes={},
cpt_nodes={},
super_sink=SuperSinkNode(),
)
load_nodes = {
stmt.name:
tuple(LoadNode(var=stmt.name, bank=bank) for bank in stmt.dram)
for stmt in stencil.input_stmts
}
store_nodes = {
stmt.name:
tuple(StoreNode(var=stmt.name, bank=bank) for bank in stmt.dram)
for stmt in stencil.output_stmts
}
for mem_node in itertools.chain(*load_nodes.values()):
super_source.add_child(mem_node)
for mem_node in itertools.chain(*store_nodes.values()):
mem_node.add_child(super_source.super_sink)
def color_id(node):
if isinstance(node, LoadNode):
return f'\033[33mload {node.var}[bank{node.bank}]\033[0m'
if isinstance(node, StoreNode):
return f'\033[36mstore {node.var}[bank{node.bank}]\033[0m'
if isinstance(node, ForwardNode):
return f'\033[32mforward {node.tensor.name} @{node.offset}\033[0m'
if isinstance(node, ComputeNode):
return f'\033[31mcompute {node.tensor.name} #{node.pe_id}\033[0m'
return 'unknown node'
def color_attr(node):
result = []
for k, v in node.__dict__.items():
if (node.__class__, k) in ((SuperSourceNode, 'parents'), (SuperSinkNode,
'children')):
continue
if k in ('parents', 'children'):
result.append('%s: [%s]' % (k, ', '.join(map(color_id, v))))
else:
result.append('%s: %s' % (k, repr(v)))
return '{%s}' % ', '.join(result)
def color_print(node):
return '%s: %s' % (color_id(node), color_attr(node))
print_node = color_id
if stencil.replication_factor > 1:
replicated_next_fifo = stencil.get_replicated_next_fifo()
replicated_all_points = stencil.get_replicated_all_points()
replicated_reuse_buffers = stencil.get_replicated_reuse_buffers()
def add_fwd_nodes(src_name):
dsts = replicated_all_points[src_name]
reuse_buffer = replicated_reuse_buffers[src_name][1:]
nodes_to_add = []
for dst_point_dicts in dsts.values():
for offset in dst_point_dicts:
if (src_name, offset) in super_source.fwd_nodes:
continue
fwd_node = ForwardNode(
tensor=stencil.tensors[src_name],
offset=offset,
depth=stencil.get_replicated_reuse_buffer_length(
src_name, offset))
_logger.debug('create %s', print_node(fwd_node))
init_offsets = [start for start, end in reuse_buffer if start == end]
if offset in init_offsets:
if src_name in [stencil.input.name]:
load_node_count = len(load_nodes[src_name])
load_nodes[src_name][load_node_count - 1 -
offset % load_node_count].add_child(fwd_node)
else:
(super_source.cpt_nodes[(src_name, 0)].add_child(fwd_node))
super_source.fwd_nodes[(src_name, offset)] = fwd_node
if offset in replicated_next_fifo[src_name]:
nodes_to_add.append(
(fwd_node, (src_name, replicated_next_fifo[src_name][offset])))
for src_node, key in nodes_to_add:
src_node.add_child(super_source.fwd_nodes[key])
add_fwd_nodes(stencil.input.name)
for stage in stencil.get_stages_chronologically():
cpt_node = ComputeNode(stage=stage, pe_id=0)
_logger.debug('create %s', print_node(cpt_node))
super_source.cpt_nodes[(stage.name, 0)] = cpt_node
for input_name, input_window in stage.window.items():
for i in range(len(input_window)):
offset = next(offset for offset, points in (
replicated_all_points[input_name][stage.name].items())
if points == i)
fwd_node = super_source.fwd_nodes[(input_name, offset)]
_logger.debug(' access %s', print_node(fwd_node))
fwd_node.add_child(cpt_node)
if stage.is_output():
super_source.cpt_nodes[stage.name,
0].add_child(store_nodes[stage.name][0])
else:
add_fwd_nodes(stage.name)
else:
next_fifo = stencil.next_fifo
all_points = stencil.all_points
reuse_buffers = stencil.reuse_buffers
def add_fwd_nodes(src_name):
dsts = all_points[src_name]
reuse_buffer = reuse_buffers[src_name][1:]
nodes_to_add = []
for dst_point_dicts in dsts.values():
for offset in dst_point_dicts:
if (src_name, offset) in super_source.fwd_nodes:
continue
fwd_node = ForwardNode(tensor=stencil.tensors[src_name],
offset=offset)
#depth=stencil.get_reuse_buffer_length(src_name, offset))
_logger.debug('create %s', print_node(fwd_node))
# init_offsets is the start of each reuse chain
init_offsets = [
next(end
for start, end in reuse_buffer
if start == unroll_idx)
for unroll_idx in reversed(range(stencil.unroll_factor))
]
_logger.debug('reuse buffer: %s', reuse_buffer)
_logger.debug('init offsets: %s', init_offsets)
if offset in init_offsets:
if src_name in stencil.input_names:
# fwd from external input
load_node_count = len(load_nodes[src_name])
load_nodes[src_name][load_node_count - 1 -
offset % load_node_count].add_child(fwd_node)
else:
# fwd from output of last stage
# tensor name and offset are used to find the cpt node
cpt_offset = next(
unroll_idx for unroll_idx in range(stencil.unroll_factor)
if init_offsets[unroll_idx] == offset)
cpt_node = super_source.cpt_nodes[(src_name, cpt_offset)]
cpt_node.add_child(fwd_node)
super_source.fwd_nodes[(src_name, offset)] = fwd_node
if offset in next_fifo[src_name]:
nodes_to_add.append(
(fwd_node, (src_name, next_fifo[src_name][offset])))
for src_node, key in nodes_to_add:
# fwd from another fwd node
src_node.add_child(super_source.fwd_nodes[key])
for input_name in stencil.input_names:
add_fwd_nodes(input_name)
for tensor in chronological_tensors:
if tensor.is_input():
continue
for unroll_index in range(stencil.unroll_factor):
pe_id = stencil.unroll_factor - 1 - unroll_index
cpt_node = ComputeNode(tensor=tensor, pe_id=pe_id)
_logger.debug('create %s', print_node(cpt_node))
super_source.cpt_nodes[(tensor.name, pe_id)] = cpt_node
for input_name, input_window in tensor.ld_indices.items():
for i in range(len(input_window)):
offset = next(offset for offset, points in all_points[input_name][
tensor.name].items() if pe_id in points and points[pe_id] == i)
fwd_node = super_source.fwd_nodes[(input_name, offset)]
_logger.debug(' access %s', print_node(fwd_node))
fwd_node.add_child(cpt_node)
if tensor.is_output():
for pe_id in range(stencil.unroll_factor):
super_source.cpt_nodes[tensor.name, pe_id].add_child(
store_nodes[tensor.name][pe_id % len(store_nodes[tensor.name])])
else:
add_fwd_nodes(tensor.name)
# pylint: disable=too-many-nested-blocks
for src_node in super_source.tpo_valid_node_gen():
for dst_node in filter(is_valid_node, src_node.children):
# 5 possible edge types:
# 1. load => fwd
# 2. fwd => fwd
# 3. fwd => cpt
# 4. cpt => fwd
# 5. cpt => store
if isinstance(src_node, LoadNode):
write_lat = 0
elif isinstance(src_node, ForwardNode):
write_lat = 2
elif isinstance(src_node, ComputeNode):
write_lat = src_node.tensor.st_ref.lat
else:
raise util.InternalError('unexpected source node: %s' % repr(src_node))
fifo = ir.FIFO(src_node, dst_node, depth=0, write_lat=write_lat)
lets: List[ir.Let] = []
if isinstance(src_node, LoadNode):
expr = ir.DRAMRef(
haoda_type=dst_node.tensor.haoda_type,
dram=(src_node.bank,),
var=dst_node.tensor.name,
offset=(stencil.unroll_factor - 1 - dst_node.offset) //
len(stencil.stmt_table[dst_node.tensor.name].dram),
)
elif isinstance(src_node, ForwardNode):
if isinstance(dst_node, ComputeNode):
dst = src_node.tensor.children[dst_node.tensor.name]
src_name = src_node.tensor.name
unroll_idx = dst_node.pe_id
point = all_points[src_name][dst.name][src_node.offset][unroll_idx]
idx = list(dst.ld_indices[src_name].values())[point].idx
_logger.debug('%s%s referenced by <%s> @ unroll_idx=%d is %s',
src_name, util.idx2str(idx), dst.name, unroll_idx,
print_node(src_node))
dst_node.fifo_map[src_name][idx] = fifo
delay = stencil.reuse_buffer_lengths[src_node.tensor.name]\
[src_node.offset]
offset = src_node.offset - delay
for parent in src_node.parents: # fwd node has only 1 parent
for fifo_r in parent.fifos:
if fifo_r.edge == (parent, src_node):
break
if delay > 0:
# TODO: build an index somewhere
for let in src_node.lets:
# pylint: disable=undefined-loop-variable
if isinstance(let.expr, ir.DelayedRef) and let.expr.ref == fifo_r:
var_name = let.name
var_type = let.haoda_type
break
else:
var_name = 'let_%d' % len(src_node.lets)
# pylint: disable=undefined-loop-variable
var_type = fifo_r.haoda_type
lets.append(
ir.Let(haoda_type=var_type,
name=var_name,
expr=ir.DelayedRef(delay=delay, ref=fifo_r)))
expr = ir.Var(name=var_name, idx=[])
expr.haoda_type = var_type
else:
expr = fifo_r # pylint: disable=undefined-loop-variable
elif isinstance(src_node, ComputeNode):
def replace_refs_callback(obj, args):
if isinstance(obj, ir.Ref):
_logger.debug(
'replace %s with %s',
obj,
# pylint: disable=cell-var-from-loop,undefined-loop-variable
src_node.fifo_map[obj.name][obj.idx])
# pylint: disable=cell-var-from-loop,undefined-loop-variable
return src_node.fifo_map[obj.name][obj.idx]
return obj
_logger.debug('lets: %s', src_node.tensor.lets)
lets = [_.visit(replace_refs_callback) for _ in src_node.tensor.lets]
_logger.debug('replaced lets: %s', lets)
_logger.debug('expr: %s', src_node.tensor.expr)
expr = src_node.tensor.expr.visit(replace_refs_callback)
_logger.debug('replaced expr: %s', expr)
if isinstance(dst_node, StoreNode):
dram_ref = ir.DRAMRef(
haoda_type=src_node.tensor.haoda_type,
dram=(dst_node.bank,),
var=src_node.tensor.name,
offset=(src_node.pe_id) //
len(stencil.stmt_table[src_node.tensor.name].dram),
)
dst_node.lets.append(ir.Let(haoda_type=None, name=dram_ref,
expr=fifo))
else:
raise util.InternalError('unexpected node of type %s' % type(src_node))
src_node.exprs[fifo] = expr
src_node.lets.extend(_ for _ in lets if _ not in src_node.lets)
_logger.debug('fifo [%d]: %s%s => %s', fifo.depth, color_id(src_node),
'' if fifo.write_lat is None else ' ~%d' % fifo.write_lat,
color_id(dst_node))
super_source.update_module_depths({})
return super_source
|
from decouple import config
from {{ project_name }}.settings import constants
from {{ project_name }}.settings.base import * # noqa
# MySql
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': config('DATABASE_NAME', default='{{ project_name }}_dev'),
# 'USER': config('DATABASE_USER', default='root'),
# 'PASSWORD': config('DATABASE_PASSWORD', default='root'),
# 'HOST': config('DATABASE_HOST', default='127.0.0.1'),
# 'PORT': config('DATABASE_PORT', default='3306'),
# 'CONN_MAX_AGE': 4 * constants.HOURS,
# }
# }
# Postgres
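# python-decouple's config() reads each value from the environment (or a .env file),
# falling back to the default shown here; cast=int converts DATABASE_PORT to an integer.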
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DATABASE_NAME', default='{{ project_name }}'),
'USER': config('DATABASE_USER', default='{{ project_name }}'),
'PASSWORD': config('DATABASE_PASSWORD', default='{{ project_name }}'),
'HOST': config('DATABASE_HOST', default='localhost'),
'PORT': config('DATABASE_PORT', default='5432', cast=int),
}
} |
"""
Copyright 2015, Institute for Systems Biology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
# given a list of CSV rows, test whether any of those rows contain the specified
# field name:value entry.
def matching_row_exists(csv_rows, field_name, field_value):
return len(find_matching_rows(csv_rows, field_name, field_value, 1)) > 0
# given a list of CSV rows (strings -- i.e. the lines from a CSV document) and
# a target field name:value pair, find and return up to match_limit rows which
# contain the specified name:value entry.
def find_matching_rows(csv_rows, field_name, field_value, match_limit=float('inf')):
    assert match_limit > 0, 'invalid match limit: {} (must be positive)'.format(match_limit)
dict_reader = csv.DictReader(csv_rows)
dict_reader.fieldnames = [field.strip() for field in dict_reader.fieldnames]
# no reason to even look at any of the rows if the target field is missing
if field_name not in dict_reader.fieldnames:
print "WARNING: '{}' is not a field in the input file (fields found: {})".format(
field_name, dict_reader.fieldnames)
return []
# process rows until either match_limit matches have been found, or all of the
# rows have been processed
matches = []
for row in dict_reader:
if row.get(field_name) == field_value:
matches.append(row)
if len(matches) >= match_limit:
break
return matches
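if __name__ == '__main__':
    # Minimal usage sketch with made-up data (not part of the original utility):
    sample_rows = [
        'name, color',  # padded header; field names are stripped before matching
        'apple,red',
        'pear,green',
    ]
    print(find_matching_rows(sample_rows, 'color', 'red'))   # one matching row (the apple line)
    print(matching_row_exists(sample_rows, 'color', 'blue')) # False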
|
"""
DICOM data elements with the Code String (CS) value-representation (VRs)
represented as *Enum* instances.
"""
from dicom_parser.utils.code_strings.modality import Modality
from dicom_parser.utils.code_strings.patient_position import PatientPosition
from dicom_parser.utils.code_strings.scanning_sequence import ScanningSequence
from dicom_parser.utils.code_strings.sequence_variant import SequenceVariant
from dicom_parser.utils.code_strings.sex import Sex
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from userprofile.models import Status
class PaymentPackage(models.Model):
STATUS_CHOICES = (
('REG', 'Regular'),
('MBR', 'IEEE Member'),
('SFT', 'Full-time Student'),
('SMB', 'IEEE Student Member')
)
code = models.CharField(max_length=3, choices=STATUS_CHOICES)
conference_price = models.IntegerField()
early = models.BooleanField()
def __unicode__(self):
return self.get_code_display()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Simple image segmentation using the K-Means clustering algorithm
#color clustering
#Image segmentation from video using OpenCV and K-means clustering
import numpy as np
import cv2
import matplotlib.pyplot as plt
import collections
import pdb
from PIL import Image
from skimage import morphology
import os
from glob import glob
import matplotlib.image as mpimg
def color_quantization(image, k):
"""Performs color quantization using K-means clustering algorithm"""
# Transform image into 'data':
data = np.float32(image).reshape((-1, 3))
# print(data.shape)
# Define the algorithm termination criteria (the maximum number of iterations and/or the desired accuracy):
# In this case the maximum number of iterations is set to 20 and epsilon = 1.0
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
# Apply K-means clustering algorithm:
ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
#print(center)
# At this point we can make the image with k colors
# Convert center to uint8:
center = np.uint8(center)
# Replace pixel values with their center value:
result = center[label.flatten()]
    result = result.reshape(image.shape)
return result
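# Example (hypothetical file name): reduce a BGR frame to its 3 dominant colours.
#   frame = cv2.imread('frame.png')
#   quantized = color_quantization(frame, k=3)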
def remove_small_objects(img, min_size=150):
# find all your connected components (white blobs in your image)
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity=8)
    # connectedComponentsWithStats yields every separated component with information on each of them, such as size
    # the following part just removes the background, which is also counted as a component but is usually unwanted
sizes = stats[1:, -1]
nb_components = nb_components - 1
# your answer image
img2 = img
# for every component in the image, you keep it only if it's above min_size
for i in range(0, nb_components):
if sizes[i] < min_size:
img2[output == i + 1] = 0
return img2
##############################
# Original CT image path
original_image_path = '/Users/monjoysaha/Downloads/CT_lung_segmentation-master/check/study_0255/'
# This is the path where images were saved after Heat Map generation and K-means.
Kmeans_image_path = '/Users/monjoysaha/Downloads/CT_lung_segmentation-master/check/heat_map/'
# This is the path where only segmented GGOs as an original image regions will be saved
save_GGO_path = '/Users/monjoysaha/Downloads/CT_lung_segmentation-master/check/only_GGO/'
origImags = glob(original_image_path + "/*.png")
kmasks= glob(Kmeans_image_path + "/*.png")
for kmask in kmasks:
fname_kmask = os.path.basename(kmask)
for origImag in origImags:
fname_origImag = os.path.basename(origImag)
if fname_kmask == fname_origImag:
# Converting from BGR Colours Space to HSV
#pdb.set_trace()
img = cv2.imread(kmask)
#orgImg = cv2.imread(origImag)
orgImg=mpimg.imread(origImag)
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
result = color_quantization(img, k=3)
gray = cv2.cvtColor(result,cv2.COLOR_BGR2GRAY)
ret1,thresh1 = cv2.threshold(gray,150,155,cv2.THRESH_BINARY)
ret2,thresh2 = cv2.threshold(gray,50,255,cv2.THRESH_BINARY)
result = thresh2-thresh1
result = result == 255
#plt.imshow(result, cmap='gray')
#plt.show()
cleaned = morphology.remove_small_objects(result, min_size=1600, connectivity=8)
#plt.imshow(cleaned, cmap='gray')
#plt.show()
img3 = np.zeros((cleaned.shape)) # create array of size cleaned
img3[cleaned > 0] = 255
#img3= np.uint8(img3)
img3= np.float32(img3)
#cv2.imshow("cleaned", img3)
#cv2.imwrite("cleaned.jpg", img3)
#
#pdb.set_trace()
# Mask input image with binary mask
result = cv2.bitwise_and(orgImg,img3)
#cv2.imwrite('pink_flower_masked2.png', result)
mpimg.imsave(save_GGO_path+fname_origImag, result, cmap='gray')
|
#!/usr/bin/env python
# encoding: utf-8
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from v1.recipe import views
class RecipeSerializerTests(TestCase):
fixtures = [
'test/users.json',
'course_data.json',
'cuisine_data.json',
'ing_data.json',
'recipe_data.json'
]
def setUp(self):
self.factory = APIRequestFactory()
def test_view_limiter(self):
"""Test to make sure we have the right fields"""
view = views.RecipeViewSet.as_view({'get': 'list'})
request = self.factory.get('/api/v1/recipe/recipes/tasty-chili?fields=id')
response = view(request)
self.assertTrue(response.data.get('id', True))
self.assertFalse(response.data.get('title', False))
view = views.RecipeViewSet.as_view({'get': 'list'})
request = self.factory.get('/api/v1/recipe/recipes/tasty-chili?fields=id,title,photo')
response = view(request)
self.assertTrue(response.data.get('id', True))
self.assertTrue(response.data.get('title', True))
self.assertTrue(response.data.get('photo', True))
self.assertFalse(response.data.get('directions', False))
self.assertFalse(response.data.get('author', False))
|
import django_filters
from models import *
class AgentFilterSet(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_type='contains')
address = django_filters.CharFilter(lookup_type='contains')
#communities__community__name = django_filters.CharFilter(label='Community Name', lookup_type='contains')
class Meta:
model = EconomicAgent
#exclude = ('latitude', 'longitude', 'slug')
fields = ('communities__community',)
@classmethod
def queryset(cls):
return EconomicAgent.objects.select_related().all()
class CommunityAgentFilterSet(django_filters.FilterSet):
agent__name = django_filters.CharFilter(label="Agent Name", lookup_type='contains')
agent__address = django_filters.CharFilter(label="Agent Address", lookup_type='contains')
class Meta:
model = CommunityAgent
fields = ('community',)
@classmethod
def queryset(cls):
return CommunityAgent.objects.select_related().all()
class AgentFunctionResourceTypeFilterSet(django_filters.FilterSet):
agent_function__agent__name = django_filters.CharFilter(label="Agent Name", lookup_type='contains')
agent_function__agent__address = django_filters.CharFilter(label="Agent Address", lookup_type='contains')
agent_function__agent__communities__group = django_filters.AllValuesFilter(label="Agent Group")
class Meta:
model = AgentFunctionResourceType
fields = ('resource_type', 'agent_function__function')
def __init__(self, *args, **kwargs):
super(AgentFunctionResourceTypeFilterSet, self).__init__(*args, **kwargs)
qs = kwargs['queryset']
rtids = list(set(qs.values_list('resource_type', flat=True)))
rts = EconomicResourceType.objects.filter(id__in=rtids)
fnids = list(set(qs.values_list('agent_function__function', flat=True)))
fns = EconomicFunction.objects.filter(id__in=fnids)
self.filters['resource_type'].field.choices = [('', '----------')] + [(rt.id, rt.name) for rt in rts]
self.filters['agent_function__function'].field.choices = [('', '----------')] + [(fn.id, fn.name) for fn in fns]
@classmethod
def queryset(cls):
return AgentFunctionResourceType.objects.select_related().all()
@classmethod
def queryset(cls, cluster):
return AgentFunctionResourceType.objects.select_related().filter(
agent_function__function__cluster=cluster)
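    # NOTE: defining `queryset` twice in a class body means this second definition replaces
    # the first; only the cluster-filtered variant is available at runtime (same pattern below).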
class FunctionResourceTypeFilterSet(django_filters.FilterSet):
function = django_filters.ChoiceFilter(label="Starting Function",)
resource_type__name = django_filters.CharFilter(label="Resource Name", lookup_type='contains')
function__aspect = django_filters.CharFilter(label="Function Aspect", lookup_type='contains')
resource_type__communities__aspect = django_filters.CharFilter(label="Resource Aspect", lookup_type='contains')
class Meta:
model = FunctionResourceType
exclude = ('resource_type', 'role', 'quantity', 'price', 'value')
def __init__(self, *args, **kwargs):
super(FunctionResourceTypeFilterSet, self).__init__(*args, **kwargs)
qs = kwargs['queryset']
fnids = list(set(qs.values_list('function', flat=True)))
fns = EconomicFunction.objects.filter(id__in=fnids)
self.filters['function'].field.choices = [('', '----------')] + [(fn.id, fn.name) for fn in fns]
@classmethod
def queryset(cls):
return FunctionResourceType.objects.select_related().all()
@classmethod
def queryset(cls, cluster):
return FunctionResourceType.objects.select_related().filter(
function__cluster=cluster)
|
import math
angulo = float(input('enter an angle '))
seno = math.sin(math.radians(angulo))
print('the angle {} has a sine of {}'.format(angulo, seno))
'''note: this exercise was originally left unfinished because the print was written as
print('...', format(angulo, seno)); the stray comma calls the built-in format() with seno
as the format spec, which raises "format() argument 2 must be str"''' |
# Numbers between 1000 and 3000 that all digits is even number
answer = []
for numbers in range(1000,3001):
numbers = str(numbers)
i = 0
candidate = 0
while i <= 3:
each_one = int(numbers[i])
if each_one % 2 == 0:
candidate += 1
i += 1
if candidate == 4:
answer.append(numbers)
else:
i += 1
print(','.join(answer))
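# A more compact alternative (not used above): keep the numbers whose digits are all even.
#   answer = [str(n) for n in range(1000, 3001) if all(int(d) % 2 == 0 for d in str(n))]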
#VERY BAD CODE HERE |
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + '../../../')
import numpy as np
from pydeformetrica.src.launch.estimate_deterministic_atlas import estimate_deterministic_atlas
from pydeformetrica.src.launch.estimate_geodesic_regression import estimate_geodesic_regression
from pydeformetrica.src.launch.compute_parallel_transport import compute_parallel_transport
from pydeformetrica.src.in_out.xml_parameters import XmlParameters
from pydeformetrica.src.support.utilities.general_settings import Settings
from pydeformetrica.src.in_out.array_readers_and_writers import *
# Functions used to script Deformetrica. WORK IN PROGRESS: many parameters are hardcoded and CUDA is not managed...
def perform_registration(source_vtk, target_vtk, object_type, attachment_type, noise_std, object_id,
deformation_kernel_width, output_dir, attachment_kernel_width, subject_id='0',
deformation_kernel_type='Torch', attachment_kernel_type='Torch', freeze_cp=True, number_of_time_points=10,
control_points_on_shape=None, initial_step_size=1e-2):
"""
    Performs a registration using the given parameters. It wraps estimate_deterministic_atlas.
    Many parameters are left at their defaults: optimization_method_type, initial_cp_spacing, use_rk2.
    WARNING: CUDA is not managed here.
"""
xml_parameters = XmlParameters()
# Model parameters
xml_parameters.freeze_template = True
xml_parameters.freeze_cp = True
xml_parameters.initial_cp_spacing = deformation_kernel_width
xml_parameters.use_cuda = True
xml_parameters.dense_mode = control_points_on_shape
# Deformation parameters
xml_parameters.deformation_kernel_width = deformation_kernel_width
xml_parameters.deformation_kernel_type = deformation_kernel_type
xml_parameters.number_of_time_points = number_of_time_points
# Optimization parameters
xml_parameters.use_rk2 = True
# xml_parameters.optimization_method_type = 'ScipyLBFGS'.lower()
xml_parameters.optimization_method_type = 'ScipyLBFGS'.lower()
xml_parameters.initial_step_size = initial_step_size
xml_parameters.max_iterations = 200
xml_parameters.save_every_n_iters = 20
xml_parameters.convergence_tolerance = 1e-5
Settings().set_output_dir(output_dir)
# Deformable objects parameters
target = {object_id : target_vtk}
xml_parameters.dataset_filenames = [[target]]
xml_parameters.subject_ids = [subject_id]
template_specifications = {}
template_object = {'deformable_object_type': object_type.lower(),
'attachment_type': attachment_type.lower(),
'kernel_width' : attachment_kernel_width,
'kernel_type': attachment_kernel_type.lower(),
'noise_std' : noise_std,
'filename': source_vtk}
template_specifications[object_id] = template_object
xml_parameters.template_specifications = template_specifications
xml_parameters._further_initialization()
estimate_deterministic_atlas(xml_parameters)
control_points = os.path.join(output_dir, "DeterministicAtlas__control_points.txt")
momenta = os.path.join(output_dir, "DeterministicAtlas__momenta.txt")
return control_points, momenta
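# Example call (hypothetical file names and values):
#   cp, mom = perform_registration('source.vtk', 'target.vtk', 'SurfaceMesh', 'Varifold',
#                                  noise_std=0.1, object_id='shape', deformation_kernel_width=10.,
#                                  output_dir='output_registration', attachment_kernel_width=5.)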
def parallel_transport(template_vtk, object_type, object_id, deformation_kernel_width,
control_points, initial_momenta, initial_momenta_to_transport,
output_dir, initial_control_points_to_transport=None):
xml_parameters = XmlParameters()
xml_parameters.deformation_kernel_width = deformation_kernel_width
xml_parameters.initial_cp_spacing = deformation_kernel_width
xml_parameters.deformation_kernel_type = 'Torch'
xml_parameters.number_of_time_points = 20
xml_parameters.concentration_of_time_points = 200
# xml_parameters.number_of_time_points = 50
# xml_parameters.concentration_of_time_points = 50
xml_parameters.tmin = 0.
xml_parameters.tmax = 1.
xml_parameters.use_rk2 = True
xml_parameters.transported_trajectory_tmin = 0
xml_parameters.transport_trajectory_t0 = 0
xml_parameters.transported_trajectory_tmax = 1.
xml_parameters.initial_control_points = control_points
xml_parameters.initial_momenta = initial_momenta
xml_parameters.initial_momenta_to_transport = initial_momenta_to_transport
xml_parameters.initial_control_points_to_transport = initial_control_points_to_transport
template_specifications = {}
template_object = {'deformable_object_type': object_type.lower(),
'attachment_type': 'Landmark'.lower(),
'kernel_width': 'not_needed',
'kernel_type': 'not_needed',
'noise_std': 1.,
'filename': template_vtk}
template_specifications[object_id] = template_object
Settings().set_output_dir(output_dir)
if not(os.path.isdir(output_dir)):
os.mkdir(output_dir)
xml_parameters.template_specifications = template_specifications
xml_parameters._further_initialization()
compute_parallel_transport(xml_parameters)
# We now extract the final file path of the parallel curve (not very generic, for personal use...)
return os.path.join(output_dir, object_id + "_parallel_curve_tp_"+
str(xml_parameters.concentration_of_time_points)+"__age_"+"1.0_.vtk")
|
import json
from unittest.mock import patch, MagicMock
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.test import Client, TestCase
from django.contrib.auth.models import Permission
from common.config import SysConfig
from sql.engines.models import ResultSet
from sql.models import Instance, ResourceGroup, ResourceGroupRelations, SqlWorkflow, QueryLog
from sql.engines.mysql import MysqlEngine
from sql import query
User = get_user_model()
class SignUpTests(TestCase):
"""注册测试"""
def setUp(self):
"""
        Create the default group to associate with newly registered users, and enable sign-up
"""
archer_config = SysConfig()
archer_config.set('sign_up_enabled','true')
archer_config.get_all_config()
self.client = Client()
Group.objects.create(id=1, name='默认组')
    def test_sign_up_not_username(self):
"""
        The username must not be empty
"""
response = self.client.post('/signup/', data={})
data = json.loads(response.content)
content = {'status': 1, 'msg': '用户名和密码不能为空', 'data': None}
self.assertEqual(data, content)
    def test_sign_up_not_password(self):
"""
        The password must not be empty
"""
response = self.client.post('/signup/', data={'username': 'test'})
data = json.loads(response.content)
content = {'status': 1, 'msg': '用户名和密码不能为空', 'data': None}
self.assertEqual(data, content)
    def test_sign_up_2password(self):
"""
        The two passwords entered do not match
"""
response = self.client.post('/signup/', data={'username': 'test', 'password': '123456', 'password2': '12345'})
data = json.loads(response.content)
content = {'status': 1, 'msg': '两次输入密码不一致', 'data': None}
self.assertEqual(data, content)
    def test_sign_up_duplicate_username(self):
"""
        The username already exists
"""
User.objects.create(username='test', password='123456')
response = self.client.post('/signup/',
data={'username': 'test', 'password': '123456', 'password2': '123456'})
data = json.loads(response.content)
content = {'status': 1, 'msg': '用户名已存在', 'data': None}
self.assertEqual(data, content)
    def test_sign_up(self):
"""
        Successful sign-up
"""
self.client.post('/signup/',
data={'username': 'test', 'password': '123456test',
'password2': '123456test', 'display': 'test', 'email': '[email protected]'})
user = User.objects.get(username='test')
self.assertTrue(user)
class QueryTest(TestCase):
def setUp(self):
self.slave1 = Instance(instance_name='test_slave_instance',type='slave', db_type='mysql',
host='testhost', port=3306, user='mysql_user', password='mysql_password')
self.slave1.save()
User = get_user_model()
self.u1 = User(username='test_user', display ='中文显示', is_active=True)
self.u1.save()
self.u2 = User(username='test_user2', display ='中文显示', is_active=True)
self.u2.save()
sql_query_perm = Permission.objects.get(codename='query_submit')
self.u2.user_permissions.add(sql_query_perm)
def testcorrectSQL(self):
c = Client()
some_sql = 'select some from some_table limit 100;'
some_db = 'some_db'
some_limit = 100
c.force_login(self.u1)
r = c.post('/query/', data={'instance_name': self.slave1.instance_name,
'sql_content': some_sql,
'db_name':some_db,
'limit_num': some_limit})
self.assertEqual(r.status_code, 403)
c.force_login(self.u2)
q_result = ResultSet(full_sql=some_sql, rows=['value'])
q_result.column_list = ['some']
mock_engine = MysqlEngine
mock_engine.query = MagicMock(return_value=q_result)
mock_engine.query_masking = MagicMock(return_value=q_result)
mock_query = query
mock_query.query_priv_check = MagicMock(return_value={'status':0, 'data':{'limit_num':100, 'priv_check':1}})
r = c.post('/query/', data={'instance_name': self.slave1.instance_name,
'sql_content': some_sql,
'db_name':some_db,
'limit_num': some_limit})
mock_engine.query.assert_called_once_with(db_name=some_db, sql=some_sql, limit_num=some_limit)
r_json = r.json()
self.assertEqual(r_json['data']['rows'], ['value'])
self.assertEqual(r_json['data']['column_list'], ['some'])
def testMasking(self):
pass
def tearDown(self):
self.u1.delete()
self.u2.delete()
self.slave1.delete()
class UserTest(TestCase):
def setUp(self):
self.u1 = User(username='test_user', display='中文显示', is_active=True)
self.u1.save()
def tearDown(self):
self.u1.delete()
def testLogin(self):
"""login 页面测试"""
c = Client()
r = c.get('/login/')
self.assertEqual(r.status_code, 200)
self.assertTemplateUsed(r, 'login.html')
c.force_login(self.u1)
        # after logging in, it redirects straight to the home page
r = c.get('/login/', follow=False)
self.assertRedirects(r, '/')
|
import logging
import webbrowser
from kivy.lang import Builder
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.recycleview import RecycleView
from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition
from kivy.uix.behaviors import ButtonBehavior
from kivy.properties import (
ListProperty, AliasProperty, StringProperty, NumericProperty)
from processing import Loader, Searcher
from definitions import Ingredient
Builder.load_file('gui.kv')
class ChooseFilePopup(Popup):
file_kind = StringProperty()
class SettingsPopup(Popup):
pass
class IngrReviewPopup(Popup):
recipe_id = NumericProperty()
recipe_title = StringProperty()
text_with_unknown = StringProperty()
def populate(self, data):
self.recipe_id = data['recipe_id']
self.recipe_title = data['recipe_title']
self.text_with_unknown = data['text_with_unknown']
def alert_wrong(self):
saved_title = self.title
saved_title_color = self.title_color
self.title = 'Verification failure, try a different word.'
self.title_color = (0.8, 0.2, 0.1)
Clock.schedule_once(lambda dt: scheduled(
saved_title, saved_title_color), 3)
def scheduled(saved_title, saved_title_color):
self.title = saved_title
self.title_color = saved_title_color
class SidePanel(BoxLayout):
num_pending_ingredients = NumericProperty()
def __init__(self, **kwargs):
super(SidePanel, self).__init__(**kwargs)
Clock.schedule_once(lambda dt: self.on_num_pending_ingredients(
self, self.num_pending_ingredients), 0)
def on_num_pending_ingredients(self, instance, value):
if value == 0:
self.ids.review_button.disabled = True
else:
self.ids.review_button.disabled = False
def update_load_label(self, num_new_recipes):
if num_new_recipes == 0:
self.ids.load_label.text = 'No new recipes were loaded.'
else:
self.ids.load_label.text = f'{num_new_recipes} '\
'new recipes loaded.'
Clock.schedule_once(lambda dt: self.clear_load_label(), 3)
def clear_load_label(self):
self.ids.load_label.text = 'Load recipes from file: '
class RecipeCard(ButtonBehavior, BoxLayout):
recipe_id = NumericProperty()
recipe_title = StringProperty()
recipe_url = StringProperty()
ingredients = ListProperty()
class ResultsScreen(Screen):
data = ListProperty()
class AvailableIngrItem(Button):
ingr_name = StringProperty()
class AvailableIngrRV(RecycleView):
pass
class SelectedIngrItem(Button):
ingr_name = StringProperty()
class SearchBox(RecycleView):
pass
class SearchHeader(BoxLayout):
pass
class SearchScreen(Screen):
data = ListProperty()
    def get_selected_ingredients(self) -> list[dict]:
return [{'ingr_name': item['ingr_name']}
for item in self.data
if item['selected']]
    def get_available_ingredients(self) -> list[dict]:
return [{'ingr_name': item['ingr_name']}
for item in self.data
if not item['selected']]
data_selected = AliasProperty(get_selected_ingredients, bind=['data'])
data_available = AliasProperty(get_available_ingredients, bind=['data'])
class Manager(ScreenManager):
pass
class MainBoard(RelativeLayout):
pass
class WtcApp(App):
def __init__(self, loader: Loader, searcher: Searcher, **kwargs):
super(WtcApp, self).__init__(**kwargs)
self.loader = loader
self.searcher = searcher
def build(self):
self.search_screen = SearchScreen(name='search_screen')
self.results_screen = ResultsScreen(name='results_screen')
self.transition = SlideTransition(duration=0.2, direction='left')
self.manager = Manager(transition=self.transition)
self.manager.add_widget(self.search_screen)
self.manager.add_widget(self.results_screen)
self.main_board = MainBoard()
self.main_board.add_widget(self.manager)
self.panel = SidePanel()
self.update_num_pending_ingredients()
# Clock.schedule_interval(
# lambda dt: self.update_num_pending_ingredients(), 2)
root = BoxLayout()
root.add_widget(self.panel)
root.add_widget(self.main_board)
self.load_ingredients()
self.review_popup = IngrReviewPopup()
return root
def load_ingredients(self):
ordered_ingr_names = sorted(ingr.name.capitalize()
for ingr in self.searcher.get_ingredients())
self.search_screen.data = [{
'ingr_name': ingr_name,
'selected': False
}
for ingr_name in ordered_ingr_names]
logging.info(f'Loaded {len(self.search_screen.data)} ingredient/s.')
def update_num_pending_ingredients(self):
num = self.loader.num_pending_review
self.panel.num_pending_ingredients = num
def review_next_ingr(self):
if self.loader.num_pending_review:
id, title, _, text = self.loader.next_pending_review()
self.review_popup.populate(
{
'recipe_id': id,
'recipe_title': title,
'text_with_unknown': text
}
)
self.review_popup.open()
Clock.schedule_once(lambda dt: scheduled(), 0.1)
def scheduled():
self.review_popup.ids.ingredient_textinput.focus = True
self.review_popup.ids.ingredient_textinput.text = ''
else:
self.review_popup.dismiss()
def save_ingr_review(self, recipe_id, text_with_unknown, ingr_name):
try:
self.loader.solve_unknown(
recipe_id, text_with_unknown, Ingredient(ingr_name))
except ValueError:
self.review_popup.alert_wrong()
self.load_ingredients()
self.refresh_search_data()
self.update_num_pending_ingredients()
self.review_next_ingr()
def delete_unknown(self, text_with_unknown):
self.loader.delete_unknown(text_with_unknown)
self.refresh_search_data()
self.update_num_pending_ingredients()
self.review_next_ingr()
def load_recipes(self):
successes, *_ = self.loader.load_recipes()
self.panel.update_load_label(successes)
self.update_num_pending_ingredients()
def delete_recipe(self, recipe_id):
self.loader.delete_recipe(recipe_id)
for index, dict in enumerate(self.results_screen.data):
if dict['recipe_id'] == recipe_id:
del self.results_screen.data[index]
return
def refresh_search_data(self):
data = self.search_screen.data
self.search_screen.data = []
self.search_screen.data = data
def select_ingr(self, ingr_name: str):
for index, item in enumerate(self.search_screen.data):
if item['ingr_name'] == ingr_name:
self.search_screen.data[index]['selected'] = True
self.refresh_search_data()
return
def deselect_ingr(self, ingr_name: str):
for index, item in enumerate(self.search_screen.data):
if item['ingr_name'] == ingr_name:
self.search_screen.data[index]['selected'] = False
self.refresh_search_data()
return
def search_recipes(self) -> dict:
"""Return dict of recipes containing currently selected ingredients."""
ingr_included = [item['ingr_name']
for item in self.search_screen.get_selected_ingredients()]
self.results_screen.data = [
{
'recipe_id': self.searcher.get_recipe_id(recipe.title, recipe.url),
'recipe_title': recipe.title,
'recipe_url': recipe.url,
'ingredients': recipe.ingredients_known
}
for recipe in self.searcher.get_recipes(ingr_included)
]
self.refresh_search_data()
self.transition.direction = 'left'
self.manager.current = 'results_screen'
def go_back_from_results(self):
self.transition.direction = 'right'
self.manager.current = 'search_screen'
def open_url(self, url):
webbrowser.open(url)
def start_app(loader: Loader, searcher: Searcher):
WtcApp(loader, searcher).run()
|
#!/usr/bin/python
"""
Utility to filter large collections of assorted media files, especially focused
on retaining information on previously black-/whitelisted files to avoid
having to re-filter duplicates.
"""
import argparse
import hashlib
import os
import os.path
import shutil
import threading
import sys
import mpv
from random import shuffle
from time import sleep
HASH_LENGTH = 32
SIZE_LENGTH = 8
IGNORED_EXT = set([
'.tags',
'.swf',
'.json'
])
BLACKLIST_FILE = '.blacklist'
WHITELIST_FILE = '.whitelist'
AIRLOCK_DIR = '.airlock'
GALLERY_DIR = 'Gallery'
BLACKLIST_KEY = '8'
WHITELIST_KEY = '4'
QUIT_KEY = 'q'
class Collection:
"""
Class to encapsulate a media collection and its corresponding data. Its
main property is the location of the collection, but the class offers
unified access to derived properties of a collection: Its blacklist and
whitelist files and the airlock and gallery directories. The class makes
sure these files exist for proper operation and implements __enter__ and
__exit__ functions that ensure black-/whitelists are saved upon exit.
"""
def __init__(self, col):
self.directory = col
if not os.path.isdir(self.directory):
os.mkdir(self.directory)
blacklist = os.path.join(self.directory, BLACKLIST_FILE)
if not os.path.isfile(blacklist):
open(blacklist, 'a').close()
whitelist = os.path.join(self.directory, WHITELIST_FILE)
if not os.path.isfile(whitelist):
open(whitelist, 'a').close()
airlock = os.path.join(self.directory, AIRLOCK_DIR)
if not os.path.isdir(airlock):
os.mkdir(airlock)
gallery = os.path.join(self.directory, GALLERY_DIR)
if not os.path.isdir(gallery):
os.mkdir(gallery)
self.blacklist = read_file_keys(blacklist)
self.whitelist = read_file_keys(whitelist)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
blacklist_file = os.path.join(self.directory, BLACKLIST_FILE)
whitelist_file = os.path.join(self.directory, WHITELIST_FILE)
write_file_keys(blacklist_file, self.blacklist)
write_file_keys(whitelist_file, self.whitelist)
def is_blacklisted(self, fkey):
"""
Returns True iff the file identified by the given key is blacklisted.
"""
return fkey in self.blacklist
def is_whitelisted(self, fkey):
"""
Returns True iff the file identified by the given key is whitelisted.
"""
return fkey in self.whitelist
def blacklist_file(self, fkey):
"""
Retains the file identified by the given key to be blacklisted.
"""
self.blacklist.update([fkey])
def whitelist_file(self, fkey):
"""
Retains the file identified by the given key to be whitelisted.
"""
self.whitelist.update([fkey])
def get_airlock(self):
"""
Returns the path to the airlock directory of this collection.
"""
return os.path.join(self.directory, AIRLOCK_DIR)
def get_gallery(self):
"""
Returns the path to the gallery directory of this collection.
"""
return os.path.join(self.directory, GALLERY_DIR)
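# Typical usage (sketch): open the collection as a context manager so the black-/whitelists
# are written back on exit, then pull new files into its airlock:
#   with Collection('my_collection') as col:
#       import_files(col, 'incoming_media')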
def parse_command_line():
"""Parses the commandline parameters and returns a dictionary of them."""
parser = argparse.ArgumentParser()
help_str = \
'The collection folder to sort files into. ' \
'If the folder does not exist, it will be created along with the ' \
'necessary contents.'
parser.add_argument('-c', '--collection', help=help_str)
help_str = \
'The source folder to import files from. Has to exist and ' \
'has to be a folder.'
parser.add_argument('-s', '--source', help=help_str, required=False)
help_str = \
        'View the gallery in random order, auto-skipping after the ' \
        'given number of seconds'
parser.add_argument('-v', '--view', help=help_str, required=False)
return parser.parse_args()
def get_file_extension(fname):
"""Returns the given file's extension as a string, . included"""
_, ext = os.path.splitext(fname)
return ext
def hex_encode(data, length):
"""
Pads the given data to fit into the given length of hexadecimal characters
and returns it encoded as one.
"""
fmt = '{0:0' + str(length) + 'x}'
return fmt.format(data)
def get_file_hash(fname, hash_length):
"""
Computes the SHA256 hash of the file at the given path and encodes its
value to a hexadecimal string of the given length. The computed value is
returned as a string.
"""
hash_sha = hashlib.sha256()
with open(fname, 'rb') as infile:
for chunk in iter(lambda: infile.read(4096), b''):
hash_sha.update(chunk)
hash_sha = hash_sha.hexdigest()
hash_sha = int(hash_sha, 16) % (2 ** (4 * hash_length))
return hex_encode(hash_sha, hash_length)
def get_file_size(fname, size_length):
"""
Computes the size of the file at the given path and encodes it as a
hexadecimal string of the given length. The computed value is returned as a
string.
"""
size = os.path.getsize(fname)
return hex_encode(size, size_length)
def get_file_key(fname, hash_length=HASH_LENGTH, size_length=SIZE_LENGTH):
"""
Computes a likely-to-be-unique key for the given file by combining its hash
and file size and returns it.
"""
fhash = get_file_hash(fname, hash_length)
fsize = get_file_size(fname, size_length)
return fhash + fsize
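# Illustrative example (not part of the original tool): for a hypothetical
# 1024-byte file, the key is the truncated SHA256 digest followed by the
# hex-encoded size, e.g. '<32 hex chars of the hash>' + '00000400',
# giving a 40-character key.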
def read_file_keys(fname):
"""
Reads the given file's list of file keys and returns them as a set.
"""
with open(fname, 'r') as infile:
fkeys = infile.read().split('\n')
return set(fkeys)
def write_file_keys(fname, fkeys):
"""
Writes the given set of file keys to the given file.
"""
with open(fname, 'w') as outfile:
for fkey in fkeys:
outfile.write(fkey + '\n')
def import_files(col, src):
"""
Imports files from the given src directory into the given collection's
airlock, ignoring previously blacklisted files.
"""
for root, _, files in os.walk(src, topdown=False):
for fil in sorted(files):
fname = os.path.join(root, fil)
ext = get_file_extension(fil)
fkey = get_file_key(fname)
if ext in IGNORED_EXT:
print('- Ignored: {}'.format(fname))
continue
if not col.is_blacklisted(fkey) and not col.is_whitelisted(fkey):
target = fkey + ext
target = os.path.join(col.get_airlock(), target)
if not os.path.exists(target):
shutil.copy(fname, target, follow_symlinks=True)
print('+ Copied: {} -> {}'.format(fname, target))
else:
print('- Ignored: {}'.format(fname))
def blacklist_handler(col, player):
"""
Helper function to create a blacklist handler for the given player and
collection.
"""
def handler(state, _):
"""
Retains the current file in the player in the collection's blacklist
and removes it from the airlock. The player is then advanced to the
next file.
"""
if state[0] == 'u':
fname = player.playlist[player.playlist_pos]['filename']
fkey = get_file_key(fname)
col.blacklist_file(fkey)
player.playlist_remove()
os.remove(fname)
print('Blacklisted: {}'.format(fname))
return handler
def whitelist_handler(col, player):
"""
Helper function to create a whitelist handler for the given player and
collection.
"""
def handler(state, _):
"""
Retains the current file in the player in the collection's whitelist
and moves the file from the airlock to the gallery directory of the
collection. The player is then advanced to the next file.
"""
if state[0] == 'u':
fname = player.playlist[player.playlist_pos]['filename']
fkey = get_file_key(fname)
col.whitelist_file(fkey)
player.playlist_remove()
basename = os.path.basename(fname)
shutil.move(fname, os.path.join(col.get_gallery(), basename))
print('Whitelisted: {}'.format(fname))
return handler
def quit_handler(playlist, player):
"""
Helper function to create quit handler for given player and playlist
"""
def handler(state, _):
"""
Empties the playlist and quits the player if the key this handler is
bound to is raised.
"""
if state[0] == 'u':
player.quit()
playlist.clear()
print('Quitting manually.')
return handler
def sort_airlock(col):
"""
Displays the contents of the airlock to the user, allowing them to either
blacklist a file to be ignored from the collection or whitelist and copy
them to the gallery.
"""
playlist = os.listdir(col.get_airlock())
playlist = [os.path.join(col.get_airlock(), fil) for fil in playlist]
total_count = len(playlist)
if not playlist:
print('{}: Airlock empty. Nothing to do.'.format(col.directory))
return
player = mpv.MPV(input_vo_keyboard=True)
player['loop-file'] = 'inf'
player['mute'] = True
player.register_key_binding(BLACKLIST_KEY, blacklist_handler(col, player))
player.register_key_binding(WHITELIST_KEY, whitelist_handler(col, player))
player.register_key_binding('\\', blacklist_handler(col, player))
player.register_key_binding('a', whitelist_handler(col, player))
player.register_key_binding(QUIT_KEY, quit_handler(playlist, player))
for fil in playlist:
player.playlist_append(fil)
player.playlist_pos = 0
while playlist:
print(playlist[0])
print('Progress: {}/{}'.format(len(playlist), total_count))
del playlist[0]
player.wait_for_playback()
del player
print('{}: Done sorting airlock.'.format(col.directory))
def view_collection(col, wait):
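    """
    Plays the gallery contents in random order, automatically advancing to the
    next file after `wait` seconds.
    """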
playlist = os.listdir(col.get_gallery())
playlist = [os.path.join(col.get_gallery(), fil) for fil in playlist]
shuffle(playlist)
total_count = len(playlist)
if not playlist:
print('{}: Gallery empty. Nothing to do.'.format(col.directory))
return
player = mpv.MPV(input_default_bindings=True, input_vo_keyboard=True)
player['loop-file'] = 'inf'
player['mute'] = False
#player.register_key_binding('a', blacklist_handler(col, player))
#player.register_key_binding(WHITELIST_KEY, whitelist_handler(col, player))
#player.register_key_binding(QUIT_KEY, quit_handler(playlist, player))
for fil in playlist:
player.playlist_append(fil)
player.playlist_pos = 0
def nexter():
while True:
sleep(wait)
player.playlist_next()
    thread = threading.Thread(target=nexter, args=())
thread.daemon = True
thread.start()
while playlist:
print('Progress: {}/{}'.format(len(playlist), total_count))
player.wait_for_playback()
del player
    print('{}: Done viewing gallery.'.format(col.directory))
if __name__ == '__main__':
ARGS = parse_command_line()
if ARGS.source:
if not os.path.isdir(ARGS.source):
            print('Source directory {} does not exist.'.format(ARGS.source))
sys.exit(-1)
with Collection(ARGS.collection) as COL:
import_files(COL, ARGS.source)
elif ARGS.view:
with Collection(ARGS.collection) as COL:
view_collection(COL, int(ARGS.view))
else:
with Collection(ARGS.collection) as COL:
sort_airlock(COL)
|
# Several variants of linear regression
import numpy as np
from scipy.stats import norm, multivariate_normal
from scipy.optimize import linprog, minimize
from numpy.linalg import inv
class LinearRegressionData:
"""
Simple class for simulating data to test linear regression methods
"""
def __init__(self, n, p):
"""
Specify the number of rows (n) and number of columns (p) of the design matrix
:param n: integer, number of rows
:param p: integer, number of columns
"""
self.n = n
self.p = p
self.X_cov = None
self.X_mean = None
self.weights = None
self.epsilon = None
self.X = None
self.y = None
def set_covariance(self):
"""
Allow users to specify a covariance matrix concisely
:return:
"""
pass
def set_weights(self):
"""
Allow users to generate weights concisely
:return:
"""
pass
def generate(self):
"""
Simulate data according to user input
:return: X, y pair of regression data
"""
z = norm.rvs(size=(self.p, self.p))
self.X_cov = np.matmul(np.transpose(z), z)
self.X_mean = np.zeros(self.p)
        self.weights = np.array([5, 4, 0, 0, 0, 0, 0, 0, 0, 10])  # fixed true weights; assumes p == 10
self.epsilon = norm.rvs(size=self.n)
self.X = multivariate_normal.rvs(mean=self.X_mean, cov=self.X_cov, size=self.n)
self.y = np.matmul(self.X, self.weights) + self.epsilon
return self.X, self.y, self.weights
class OLS:
"""
Ordinary least squares fit via matrix inversion
"""
def __init__(self):
self.weights = None
self.run = False
def fit(self, X, y):
"""
Estimate the regression coefficients, given X and y
:param X: design matrix
:param y: output to be predicted
"""
self.weights = np.matmul(np.linalg.inv(np.matmul(np.transpose(X), X)),
np.matmul(np.transpose(X), y))
self.run = True
class RidgeRegression:
"""
Ridge regression fit via QR decomposition
"""
def __init__(self):
self.weights = None
self.run = False
def augment_data(self, X, y, tau):
"""
Prepare data for estimation by augmenting both matrices with the regularization prior
:param X: design matrix
:param y: output to be predicted
:param tau: regularization hyperparameter
:return: X_tilde, y_tilde
"""
n, p = np.shape(X)
lambda_prior = np.identity(p)/(np.power(tau, 2.0))
x_tilde = np.concatenate((X, np.sqrt(lambda_prior)), axis=0)
y_tilde = np.concatenate((y, np.zeros(p)))
return x_tilde, y_tilde
def fit(self, X, y, tau):
"""
Estimate the regression coefficients, given X, y, and a tuning parameter
:param X: design matrix
:param y: output to be predicted
:param tau: regularization coefficient
"""
x_tilde, y_tilde = self.augment_data(X, y, tau)
q, r = np.linalg.qr(x_tilde)
self.weights = np.matmul(np.matmul(np.linalg.inv(r), q.T), y_tilde)
self.run = True
class BayesLinearRegression:
"""
MAP estimate for Bayesian linear regression
"""
def __init__(self):
self.weights = None
self.run = False
self.n = None
self.p = None
self.w0 = None
self.v0 = None
self.a0 = None
self.b0 = None
def prior_construct(self, X, y, prior="g", g=0.05):
"""
Construct the prior parameters based on user input
:param X: design matrix
:param y: output to be predicted
        :param prior: (default: "g") type of prior structure to use
        :param g: (default: 0.05) scale factor for the g-prior covariance
"""
if prior == "g":
            self.w0 = np.zeros(self.p)
# self.v0 = g * np.identity(self.p)
self.v0 = g * np.linalg.inv(np.matmul(np.transpose(X), X))
self.a0 = 0
self.b0 = 0
else:
print("Please provide a proper prior type")
def fit(self, X, y, prior="g", g=0.05):
"""
Estimate the regression coefficients, given X, y, and optional prior parameters
:param X: design matrix
:param y: output to be predicted
        :param prior: (default: "g") type of prior structure to use
        :param g: (default: 0.05) scale factor for the g-prior covariance
"""
# Set up the prior
self.n = np.shape(X)[0]
self.p = np.shape(X)[1]
self.prior_construct(X, y, prior=prior, g=g)
# Compute the posterior mean / mode (same for a t distribution) on the weights
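        # For reference (assuming unit observation noise, as this code does implicitly):
        #   V_n = (V_0^-1 + X'X)^-1  and  w_n = V_n (V_0^-1 w_0 + X'y)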
self.vn = np.linalg.inv(np.linalg.inv(self.v0) + np.matmul(np.transpose(X), X))
self.pmean = (np.matmul(np.linalg.inv(self.v0), self.w0) + np.matmul(np.transpose(X), y))
self.weights = np.matmul(self.vn, self.pmean)
self.run = True
class RobustLinearRegression:
"""
Robust linear regression methods
"""
def __init__(self):
self.weights = None
self.run = False
def fit_laplace(self, X, y):
"""
Provide a method for fitting a robust regression using Laplace likelihood,
rather than Gaussian likelihood
:param X: design matrix
:param y: output to be predicted
"""
n = np.shape(X)[0]
p = np.shape(X)[1]
empty_weights = np.zeros(p)
pos_resid = np.zeros(n)
neg_resid = np.zeros(n)
x = np.concatenate((empty_weights, pos_resid, neg_resid))
c = np.concatenate((np.zeros(p), np.ones(n), np.ones(n)))
A = np.concatenate((X, np.identity(n), -np.identity(n)), axis=1)
b = y
lb = np.concatenate((np.repeat(np.NINF, p), np.repeat(0.0, 2 * n)))
ub = np.repeat(None, p + 2 * n)
bounds = np.array([tuple(row) for row in np.column_stack((lb, ub))])
lp_solve = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds)
self.weights = lp_solve.x[:p]
self.run = True
def loss_huber(self, resid, delta):
"""
Huber loss function of a regression residual, evaluated at a given delta
        :param resid: residuals of a regression problem (y - Xb)
        :param delta: Huber loss parameter (l2 penalty for residuals
            smaller than delta, l1 penalty for residuals larger than delta)
        :return: total Huber loss, summed over all residuals
"""
l2_loss = np.power(resid, 2)/2.0
l1_loss = np.abs(resid)*delta - np.power(delta, 2)/2.0
return np.sum(np.where(np.abs(resid) <= delta, l2_loss, l1_loss))
def fit_huber(self, X, y, delta=0.1):
"""
Provide a method for fitting a robust regression using Huber loss, rather than
log likelihood
:param X: design matrix
:param y: output to be predicted
        :param delta: Huber loss threshold separating the l2 and l1 regions
"""
n, p = np.shape(X)
empty_weights = np.ones(p)
huber_solve = minimize(fun=lambda wgt: self.loss_huber(y - np.matmul(X, wgt), delta=delta),
x0=empty_weights)
self.weights = huber_solve.x
self.run = True
def fit(self, X, y, method="Laplace", delta=0.1):
"""
Estimate the regression coefficients, given X and y
:param X: design matrix
:param y: output to be predicted
:param method: (default: Laplace) which type of robust linear regression to implement
"""
if method == "Laplace":
self.fit_laplace(X, y)
elif method == "Huber":
self.fit_huber(X, y, delta=delta)
else:
print("No valid method provided")
if __name__ == "__main__":
# Run `n_sim` simulations, applying each method to the synthetic data in each run
n_sim = 100
error_mat = np.zeros((n_sim, 5))
for sim in range(n_sim):
# Draw linear regression data
n = 1000
p = 10
X, y, beta = LinearRegressionData(n, p).generate()
# Fit an OLS regression
ols = OLS()
ols.fit(X, y)
ols_weights = ols.weights
# print("{} weights: {}".format("OLS", ols_weights))
error_mat[sim, 0] = np.mean(np.power(ols_weights - beta, 2.0))
# Linear regression with Laplace likelihood
robust_reg = RobustLinearRegression()
robust_reg.fit(X, y, method="Laplace")
robust_weights = robust_reg.weights
# print("{} weights: {}".format("Robust regression", robust_weights))
error_mat[sim, 1] = np.mean(np.power(robust_weights - beta, 2.0))
# Linear regression with Huber loss
huber_reg = RobustLinearRegression()
huber_reg.fit(X, y, method="Huber", delta=0.1)
huber_weights = huber_reg.weights
# print("{} weights: {}".format("Huber loss", huber_weights))
error_mat[sim, 2] = np.mean(np.power(huber_weights - beta, 2.0))
# Ridge regression
ridge_reg = RidgeRegression()
ridge_reg.fit(X, y, tau=100.0)
ridge_weights = ridge_reg.weights
# print("{} weights: {}".format("Ridge", ridge_weights))
error_mat[sim, 3] = np.mean(np.power(ridge_weights - beta, 2.0))
# Bayesian linear regression (unknown sigma^2, MAP estimate of betas)
bayes_reg = BayesLinearRegression()
bayes_reg.fit(X, y, prior="g", g=100.0)
bayes_weights = bayes_reg.weights
# print("{} weights: {}".format("Bayes", bayes_weights))
error_mat[sim, 4] = np.mean(np.power(bayes_weights - beta, 2.0))
print(np.mean(error_mat, axis=0))
|
import mysql.connector as connector
credentials = {
"username": "root",
"password": ""
}
# Establish a connection with the DBMS
conn = connector.connect(user=credentials["username"],
passwd=credentials["password"],
host="localhost")
cursor = conn.cursor(buffered=True)
sql_queries = []
# Create the DB
sql_queries.append("CREATE DATABASE IF NOT EXISTS city_lines;")
for query in sql_queries:
cursor.execute(query)
conn.commit()
# Close connection
cursor.close()
conn.close()
|
import sys
for _ in [0]*int(input()):
a,b,c=map(int,input().split())
a=abs(a)
if b>0: z=a*a-2*abs(b)
else: z=a*a+2*abs(b)
print(abs(z))
|
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import tempfile
from rackclient import exceptions as exc
from rackclient.tests import utils
from rackclient.tests.v1 import fakes
from rackclient.v1 import processes
class ProcessesTest(utils.TestCase):
    def setUp(self):
        super(ProcessesTest, self).setUp()
self.cs = fakes.FakeClient()
self.process_type = processes.Process
self.gid = '11111111'
self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
self.project_id = '9sac664c198e435e9853f253lkbcd7a7'
def test_list(self):
processes = self.cs.processes.list(self.gid)
self.cs.assert_called('GET', '/groups/%s/processes' % self.gid)
for process in processes:
self.assertIsInstance(process, self.process_type)
def test_get(self):
pid = 'aaaaaaaa'
process = self.cs.processes.get(self.gid, pid)
self.cs.assert_called('GET', '/groups/%s/processes/%s' % (self.gid, pid))
self.assertEqual(self.gid, process.gid)
self.assertEqual(self.user_id, process.user_id)
self.assertEqual(self.project_id, process.project_id)
self.assertEqual(pid, process.pid)
self.assertEqual(None, process.ppid)
self.assertEqual('pppppppp', process.nova_instance_id)
self.assertEqual('process1', process.name)
self.assertEqual('xxxxxxxx', process.glance_image_id)
self.assertEqual('yyyyyyyy', process.nova_flavor_id)
self.assertEqual('iiiiiiii', process.keypair_id)
self.assertEqual(['jjjjjjjj', 'kkkkkkkk'], process.securitygroup_ids)
networks = [{
'network_id': 'mmmmmmmm',
'fixed': '10.0.0.2',
'floating': '1.1.1.1'
}]
self.assertEqual(networks, process.networks)
self.assertEqual('ACTIVE', process.app_status)
self.assertEqual('ACTIVE', process.status)
self.assertEqual('IyEvYmluL3NoICBlY2hvICJIZWxsbyI=', process.userdata)
args = {
'key1': 'value1',
'key2': 'value2'
}
self.assertEqual(args, process.args)
def _create_body(self, ppid=None, name=None, nova_flavor_id=None,
glance_image_id=None, keypair_id=None,
securitygroup_ids=None, userdata=None, args=None):
return {
'process': {
'ppid': ppid,
'name': name,
'nova_flavor_id': nova_flavor_id,
'glance_image_id': glance_image_id,
'keypair_id': keypair_id,
'securitygroup_ids': securitygroup_ids,
'userdata': userdata,
'args': args
}
}
def test_create(self):
userdata = '#!/bin/sh echo "Hello"'
f = tempfile.TemporaryFile()
f.write(userdata)
f.seek(0)
params = {
'ppid': '11111111',
'name':'process1',
'nova_flavor_id': 1,
'glance_image_id': '22222222',
'keypair_id': '33333333',
'securitygroup_ids': ['44444444', '55555555'],
'userdata': f,
'args': {
"key1": "value1",
"key2": "value2"
}
}
process = self.cs.processes.create(self.gid, **params)
body = self._create_body(**params)
body['process']['userdata'] = base64.b64encode(userdata)
self.cs.assert_called('POST', '/groups/%s/processes' % self.gid, body)
self.assertIsInstance(process, self.process_type)
def test_create_invalid_parameters(self):
ppid = 'aaaaaaaa'
self.assertRaises(exc.CommandError, self.cs.processes.create,
self.gid, ppid=ppid, securitygroup_ids='invalid')
self.assertRaises(exc.CommandError, self.cs.processes.create,
self.gid, ppid=ppid, args='invalid')
def _update_body(self, app_status):
return {
'process': {
'app_status': app_status
}
}
def test_update(self):
app_status = 'ACTIVE'
pid = 'aaaaaaaa'
process = self.cs.processes.update(self.gid,
pid, app_status)
body = self._update_body(app_status)
self.cs.assert_called('PUT', '/groups/%s/processes/%s' % (self.gid, pid), body)
self.assertIsInstance(process, self.process_type)
def test_delete(self):
pid = 'aaaaaaaa'
self.cs.processes.delete(self.gid, pid)
self.cs.assert_called('DELETE', '/groups/%s/processes/%s' % (self.gid, pid))
|
#!/usr/bin/env python
import argparse
import glob
import os.path
import pathlib
import re
import subprocess
import sys
import sysconfig
EXCLUDED_HEADERS = {
"bytes_methods.h",
"cellobject.h",
"classobject.h",
"code.h",
"compile.h",
"datetime.h",
"dtoa.h",
"frameobject.h",
"funcobject.h",
"genobject.h",
"longintrepr.h",
"parsetok.h",
"pyatomic.h",
"pytime.h",
"token.h",
"ucnhash.h",
}
MACOS = (sys.platform == "darwin")
def get_exported_symbols(library, dynamic=False):
# Only look at dynamic symbols
args = ["nm", "--no-sort"]
if dynamic:
args.append("--dynamic")
args.append(library)
proc = subprocess.run(args, stdout=subprocess.PIPE, universal_newlines=True)
if proc.returncode:
sys.stdout.write(proc.stdout)
sys.exit(proc.returncode)
stdout = proc.stdout.rstrip()
if not stdout:
raise Exception("command output is empty")
for line in stdout.splitlines():
# Split line '0000000000001b80 D PyTextIOWrapper_Type'
if not line:
continue
parts = line.split(maxsplit=2)
if len(parts) < 3:
continue
symbol = parts[-1]
if MACOS and symbol.startswith("_"):
yield symbol[1:]
else:
yield symbol
def check_library(stable_abi_file, library, abi_funcs, dynamic=False):
available_symbols = set(get_exported_symbols(library, dynamic))
missing_symbols = abi_funcs - available_symbols
if missing_symbols:
raise Exception(
f"""\
Some symbols from the limited API are missing: {', '.join(missing_symbols)}
This error means that there are some missing symbols among the ones exported
in the Python library ("libpythonx.x.a" or "libpythonx.x.so"). This normally
means that some symbol, function implementation or a prototype, belonging to
a symbol in the limited API has been deleted or is missing.
Check if this was a mistake and if not, update the file containing the limited
API symbols. This file is located at:
{stable_abi_file}
You can read more about the limited API and its contracts at:
https://docs.python.org/3/c-api/stable.html
And in PEP 384:
https://www.python.org/dev/peps/pep-0384/
"""
)
def generate_limited_api_symbols(args):
library = sysconfig.get_config_var("LIBRARY")
ldlibrary = sysconfig.get_config_var("LDLIBRARY")
if ldlibrary != library:
raise Exception("Limited ABI symbols can only be generated from a static build")
available_symbols = {
symbol for symbol in get_exported_symbols(library) if symbol.startswith("Py")
}
headers = [
file
for file in pathlib.Path("Include").glob("*.h")
if file.name not in EXCLUDED_HEADERS
]
stable_data, stable_exported_data, stable_functions = get_limited_api_definitions(
headers
)
stable_symbols = {
symbol
for symbol in (stable_functions | stable_exported_data | stable_data)
if symbol.startswith("Py") and symbol in available_symbols
}
with open(args.output_file, "w") as output_file:
output_file.write(f"# File generated by 'make regen-limited-abi'\n")
output_file.write(
f"# This is NOT an authoritative list of stable ABI symbols\n"
)
for symbol in sorted(stable_symbols):
output_file.write(f"{symbol}\n")
def get_limited_api_definitions(headers):
"""Run the preprocesor over all the header files in "Include" setting
"-DPy_LIMITED_API" to the correct value for the running version of the interpreter.
The limited API symbols will be extracted from the output of this command as it includes
the prototypes and definitions of all the exported symbols that are in the limited api.
This function does *NOT* extract the macros defined on the limited API
"""
preprocesor_output = subprocess.check_output(
sysconfig.get_config_var("CC").split()
+ [
# Prevent the expansion of the exported macros so we can capture them later
"-DPyAPI_FUNC=__PyAPI_FUNC",
"-DPyAPI_DATA=__PyAPI_DATA",
"-DEXPORT_DATA=__EXPORT_DATA",
"-D_Py_NO_RETURN=",
"-DSIZEOF_WCHAR_T=4", # The actual value is not important
f"-DPy_LIMITED_API={sys.version_info.major << 24 | sys.version_info.minor << 16}",
"-I.",
"-I./Include",
"-E",
]
+ [str(file) for file in headers],
text=True,
stderr=subprocess.DEVNULL,
)
stable_functions = set(
re.findall(r"__PyAPI_FUNC\(.*?\)\s*(.*?)\s*\(", preprocesor_output)
)
stable_exported_data = set(
re.findall(r"__EXPORT_DATA\((.*?)\)", preprocesor_output)
)
stable_data = set(
re.findall(r"__PyAPI_DATA\(.*?\)\s*\(?(.*?)\)?\s*;", preprocesor_output)
)
return stable_data, stable_exported_data, stable_functions
def check_symbols(parser_args):
with open(parser_args.stable_abi_file, "r") as filename:
abi_funcs = {
symbol
for symbol in filename.read().splitlines()
if symbol and not symbol.startswith("#")
}
try:
# static library
LIBRARY = sysconfig.get_config_var("LIBRARY")
if not LIBRARY:
raise Exception("failed to get LIBRARY variable from sysconfig")
if os.path.exists(LIBRARY):
check_library(parser_args.stable_abi_file, LIBRARY, abi_funcs)
# dynamic library
LDLIBRARY = sysconfig.get_config_var("LDLIBRARY")
if not LDLIBRARY:
raise Exception("failed to get LDLIBRARY variable from sysconfig")
if LDLIBRARY != LIBRARY:
check_library(
parser_args.stable_abi_file, LDLIBRARY, abi_funcs, dynamic=True
)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
def main():
    parser = argparse.ArgumentParser(description="Check or generate the limited API (stable ABI) symbol list.")
subparsers = parser.add_subparsers()
check_parser = subparsers.add_parser(
"check", help="Check the exported symbols against a given ABI file"
)
check_parser.add_argument(
"stable_abi_file", type=str, help="File with the stable abi functions"
)
check_parser.set_defaults(func=check_symbols)
generate_parser = subparsers.add_parser(
"generate",
help="Generate symbols from the header files and the exported symbols",
)
generate_parser.add_argument(
"output_file", type=str, help="File to dump the symbols to"
)
generate_parser.set_defaults(func=generate_limited_api_symbols)
args = parser.parse_args()
if "func" not in args:
parser.error("Either 'check' or 'generate' must be used")
sys.exit(1)
args.func(args)
if __name__ == "__main__":
main()
|
import cv2
import numpy as np
import argparse
def align_images(img, ref, max_matches, good_match_percent):
# Convert images to grayscale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ref_gray = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(max_matches)
keypoints_img, descriptors_img = orb.detectAndCompute(img_gray, None)
keypoints_ref, descriptors_ref = orb.detectAndCompute(ref_gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors_img, descriptors_ref, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
num_good_matches = int(len(matches) * good_match_percent)
matches = matches[:num_good_matches]
# Draw top matches
img_matches = cv2.drawMatches(img, keypoints_img, ref, keypoints_ref, matches, None)
cv2.imwrite("matches.jpg", img_matches)
# Extract location of good matches
points_img = np.zeros((len(matches), 2), dtype=np.float32)
points_ref = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points_img[i, :] = keypoints_img[match.queryIdx].pt
points_ref[i, :] = keypoints_ref[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points_img, points_ref, cv2.RANSAC)
# Use homography
height, width, channels = ref.shape
img_reg = cv2.warpPerspective(img, h, (width, height))
return img_reg, h
def verify_signatures(img_ref, img_reg):
img_diff = cv2.absdiff(img_ref, img_reg)
cv2.imwrite("img_diff.jpg", img_diff)
if __name__ == '__main__':
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--img", required=True, help="Path to the scanned image")
ap.add_argument("-r", "--img-ref", required=True, help="Path to the reference image")
ap.add_argument("--max-matches", default=500, type=int, help="Max matches for ORB feature detector")
ap.add_argument("--good-match-percent", default=0.15, type=float, help="Percent of good matches to keep")
args = ap.parse_args()
# Read reference image
print("Reading reference image : ", args.img_ref)
img_ref = cv2.imread(args.img_ref, cv2.IMREAD_COLOR)
# Read image to be aligned
print("Reading image to align : ", args.img);
img = cv2.imread(args.img, cv2.IMREAD_COLOR)
print("Aligning images ...")
    # The registered (aligned) image will be stored in img_reg.
    # The estimated homography will be stored in h.
img_reg, h = align_images(img, img_ref, args.max_matches, args.good_match_percent)
# Verify signatures
print("Verifying signatures ...")
verify_signatures(img_ref, img_reg)
# Write aligned image to disk.
out_filename = "aligned.jpg"
print("Saving aligned image : ", out_filename);
cv2.imwrite(out_filename, img_reg)
# Print estimated homography
print("Estimated homography matrix: \n", h)
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2015 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import unittest
import itertools as it
import os
from collections import Counter
from pydoop.mapreduce.api import Mapper, Reducer, Factory, JobConf
from pydoop.mapreduce.simulator import HadoopSimulatorLocal
from pydoop.mapreduce.simulator import TrivialRecordReader
from pydoop.test_utils import WDTestCase
from pydoop.utils.conversion_tables import mrv1_to_mrv2, mrv2_to_mrv1
DATA = \
"""Chapter One Down the Rabbit Hole: Alice is feeling bored while
sitting on the riverbank with her elder sister, when she notices a
talking, clothed White Rabbit with a pocket watch run past. She
follows it down a rabbit hole when suddenly she falls a long way to a
curious hall with many locked doors of all sizes. She finds a small
key to a door too small for her to fit through, but through it she
sees an attractive garden. She then discovers a bottle on a table
labelled "DRINK ME," the contents of which cause her to shrink too
small to reach the key which she has left on the table. She eats a
cake with "EAT ME" written on it in currants as the chapter closes."""
COUNTS = Counter(''.join(c for c in DATA.replace('1\t', ' ')
if c.isalnum() or c == ' ').lower().split())
class TMapper(Mapper):
def __init__(self, ctx):
super(TMapper, self).__init__(ctx)
self.ctx = ctx
def map(self, ctx):
words = ''.join(c for c in ctx.value
if c.isalnum() or c == ' ').lower().split()
for w in words:
ctx.emit(w, '1')
class TReducer(Reducer):
def __init__(self, ctx):
super(TReducer, self).__init__(ctx)
self.ctx = ctx
def reduce(self, ctx):
s = sum(it.imap(int, ctx.values))
ctx.emit(ctx.key, str(s))
class TReducerWithCounters(Reducer):
def __init__(self, ctx):
super(TReducerWithCounters, self).__init__(ctx)
self.ctx = ctx
ctx.get_counter("p", "n")
self.counters = {}
for n in COUNTS.keys():
self.counters[n] = self.ctx.get_counter("DEFAULT", n)
def reduce(self, ctx):
s = sum(it.imap(int, ctx.values))
ctx.emit(ctx.key, str(s))
counter = self.counters[ctx.key]
ctx.increment_counter(counter, s)
class TFactory(Factory):
def __init__(self, combiner=None, partitioner=None, reducer_class=TReducer,
record_writer=None, record_reader=None):
self.mclass = TMapper
self.rclass = reducer_class
self.cclass = combiner
self.pclass = partitioner
self.rwclass = record_writer
self.rrclass = record_reader
def create_mapper(self, context):
return self.mclass(context)
def create_reducer(self, context):
return self.rclass(context)
def create_combiner(self, context):
return None if not self.cclass else self.cclass(context)
def create_partitioner(self, context):
return None if not self.pclass else self.pclass(context)
def create_record_reader(self, context):
return None if not self.rrclass else self.rrclass(context)
def create_record_writer(self, context):
return None if not self.rwclass else self.rwclass(context)
class TestFramework(WDTestCase):
def setUp(self):
super(TestFramework, self).setUp()
self.fname = self._mkfn('alice.txt')
with open(self.fname, 'w') as fo:
fo.write(DATA)
def test_job_conf(self):
job_conf = {}
for k in mrv1_to_mrv2:
job_conf[k] = k
jc = JobConf(
[item for sublist in job_conf.iteritems() for item in sublist]
)
for k in mrv2_to_mrv1:
self.assertEqual(jc[k], job_conf[mrv2_to_mrv1[k]])
def test_job_conf_getters(self):
values = ['int', '1', 'float', '2.3', 'bool', 'false']
conv_values = [1, 2.3, False]
jc = JobConf(values)
for i, k in enumerate(values[::2]):
getter = getattr(jc, 'get_%s' % k)
self.assertEqual(getter(k), conv_values[i])
for jc in JobConf([]), JobConf(['x', 'foo']):
for d in False, True:
self.assertEqual(jc.get_bool('x', default=d), d)
self.assertRaises(RuntimeError, JobConf(['x', 'foo']).get_bool, 'x')
def test_map_only(self):
job_conf = {'this.is.not.used': '22'}
hs = HadoopSimulatorLocal(TFactory())
with open(self.fname, 'r') as fin:
with self._mkf('map_only.out') as fout:
hs.run(fin, fout, job_conf, 0)
self.assertTrue(os.stat(fout.name).st_size > 0)
def test_record_reader(self):
job_conf = {'this.is.not.used': '22'}
hs = HadoopSimulatorLocal(TFactory(record_reader=TrivialRecordReader))
foname = 'map_reduce.out'
with self._mkf(foname) as fout:
hs.run(None, fout, job_conf, 0)
self.assertTrue(os.stat(fout.name).st_size > 0)
def test_map_reduce(self):
job_conf = {'this.is.not.used': '22'}
hs = HadoopSimulatorLocal(TFactory())
foname = 'map_reduce.out'
with open(self.fname, 'r') as fin:
with self._mkf(foname) as fout:
hs.run(fin, fout, job_conf, 1)
self.assertTrue(os.stat(fout.name).st_size > 0)
with open(self._mkfn(foname)) as f:
for l in f:
k, c = l.strip().split()
self.assertEqual(COUNTS[k], int(c))
def test_map_reduce_with_counters(self):
job_conf = {'this.is.not.used': '22'}
hs = HadoopSimulatorLocal(TFactory(reducer_class=TReducerWithCounters))
foname = 'map_reduce.out'
with open(self.fname, 'r') as fin:
with self._mkf(foname) as fout:
hs.run(fin, fout, job_conf, 1)
self.assertTrue(os.stat(fout.name).st_size > 0)
with open(self._mkfn(foname)) as f:
for l in f:
k, c = l.strip().split()
if "COUNTER_" in k:
ck = int(k[8:]) - 1
key = COUNTS.keys()[ck]
self.assertEqual(COUNTS[key], int(c))
else:
self.assertEqual(COUNTS[k], int(c))
def test_map_combiner_reduce(self):
job_conf = {'this.is.not.used': '22'}
hs = HadoopSimulatorLocal(TFactory(combiner=TReducer))
foname = 'map_combiner_reduce.out'
with open(self.fname, 'r') as fin:
with self._mkf(foname) as fout:
hs.run(fin, fout, job_conf, 1)
self.assertTrue(os.stat(fout.name).st_size > 0)
with open(self._mkfn(foname)) as f:
for l in f:
k, c = l.strip().split()
self.assertEqual(COUNTS[k], int(c))
def suite():
suite_ = unittest.TestSuite()
suite_.addTest(TestFramework('test_job_conf'))
suite_.addTest(TestFramework('test_job_conf_getters'))
suite_.addTest(TestFramework('test_map_only'))
suite_.addTest(TestFramework('test_map_reduce'))
suite_.addTest(TestFramework('test_map_combiner_reduce'))
suite_.addTest(TestFramework('test_record_reader'))
suite_.addTest(TestFramework('test_map_reduce_with_counters'))
return suite_
if __name__ == '__main__':
_RUNNER = unittest.TextTestRunner(verbosity=2)
_RUNNER.run((suite()))
|
"""ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class docker_jira(ShutItModule):
def build(self, shutit):
shutit.install('apache2 postgresql wget openjdk-7-jre')
shutit.send('mkdir -p /opt/atlassian')
shutit.send('cd /opt/atlassian')
f = 'atlassian-jira-6.4.1-x64.bin'
shutit.get_url(f,['https://www.atlassian.com/software/jira/downloads/binary/'])
shutit.multisend('sh ./' + f,{', Cancel':'o','Express Install':'1','i, Enter':'i'})
return True
def module():
return docker_jira(
'shutit.tk.docker_jira.docker_jira', 782914092.00,
description='',
maintainer='',
depends=['shutit.tk.setup']
)
|
import wx
import wx.grid
from .widgets import TabPage
class AslAnalysis(TabPage):
"""
Tab page containing data analysis options
"""
def __init__(self, parent, idx, n):
TabPage.__init__(self, parent, "Analysis", idx, n)
self.distcorr_choices = ["Fieldmap", "Calibration image"]
self.section("Basic analysis options")
self.outdir_picker = self.file_picker("Output Directory", dir=True)
self.mask_picker = self.file_picker("Brain Mask", optional=True)
self.wp_cb = self.checkbox("Analysis which conforms to 'White Paper' (Alsop et al 2014)", handler=self.wp_changed)
self.section("Initial parameter values")
self.bat_num = self.number("Arterial Transit Time (s)", min=0,max=2.5,initial=1.3)
self.t1_num = self.number("T1 (s)", min=0,max=3,initial=1.3)
self.t1b_num = self.number("T1b (s)", min=0,max=3,initial=1.65)
self.ie_num = self.number("Inversion Efficiency", min=0,max=1,initial=0.85)
self.section("Analysis Options")
self.spatial_cb = self.checkbox("Adaptive spatial regularization on perfusion", initial=True)
self.infer_t1_cb = self.checkbox("Incorporate T1 value uncertainty")
self.macro_cb = self.checkbox("Include macro vascular component")
self.fixbolus_cb = self.checkbox("Fix label duration", initial=True)
self.pv_cb = self.checkbox("Partial Volume Correction")
self.mc_cb = self.checkbox("Motion Correction")
self.sizer.AddGrowableCol(1, 1)
#sizer.AddGrowableRow(5, 1)
self.SetSizer(self.sizer)
self.next_prev()
def outdir(self): return self.outdir_picker.GetPath()
def mask(self):
if self.mask_picker.checkbox.IsChecked(): return self.mask_picker.GetPath()
else: return None
def wp(self): return self.wp_cb.IsChecked()
def bat(self): return self.bat_num.GetValue()
def t1(self): return self.t1_num.GetValue()
def t1b(self): return self.t1b_num.GetValue()
def ie(self): return self.ie_num.GetValue()
def spatial(self): return self.spatial_cb.IsChecked()
def infer_t1(self): return self.infer_t1_cb.IsChecked()
def macro(self): return self.macro_cb.IsChecked()
def fixbolus(self): return self.fixbolus_cb.IsChecked()
def pv(self): return self.pv_cb.IsChecked()
def mc(self): return self.mc_cb.IsChecked()
def update(self, event=None):
self.mask_picker.Enable(self.mask_picker.checkbox.IsChecked())
self.t1_num.Enable(not self.wp())
self.bat_num.Enable(not self.wp())
TabPage.update(self)
def wp_changed(self, event):
if self.wp():
self.t1_num.SetValue(1.65)
self.bat_num.SetValue(0)
else:
self.t1_num.SetValue(1.3)
self.bat_num.SetValue(1.3)
self.calibration.update()
self.update()
def labelling_changed(self, pasl):
if pasl:
self.bat_num.SetValue(0.7)
self.ie_num.SetValue(0.98)
else:
self.bat_num.SetValue(1.3)
self.ie_num.SetValue(0.85)
|
# coding=utf8
import requests
import numpy as np
from config import modelo
print(modelo)
ACCIONES = ['EXITO'] ##, 'ECOPETROL', 'BCOLOMBIA', 'CORFICOLCF', 'PFBCOLOM','GRUPOSURA', 'PFAVAL', 'NUTRESA', 'PFGRUPSURA',
##'ISA', 'CEMARGOS', 'GRUPOARGOS', 'PFGRUPOARG', 'PFDAVVNDA', 'ICOLCAP', 'EEB', 'CLH', 'CELSIA', 'PFCEMARGOS',
##'PFAVH', 'GRUPOAVAL', 'CNEC', 'HCOLSEL', 'BOGOTA', 'ICOLRISK', 'ETB', 'MINEROS', 'BBVACOL', 'CONCONCRET',
##'PFCARPAK', 'BVC', 'ENKA', 'ELCONDOR', 'PFCORFICOL', 'CARTON', 'FABRICATO', 'COLTEJER']
#def organizar_matriz(prices, modelo):
# matriz = np.empty([365*2, len(modelo)+1])
# for row in (matriz):
# matriz[row, 0] = prices[row]
# matriz[row, 1] = prices[row + 1]
# matriz[row, 2] = prices[row + 2]
# matriz[row, 3] = prices[row + 3]
# return matriz
for ACCION in ACCIONES:
r = requests.get(('https://www.bvc.com.co/mercados/GraficosServlet?home=no&tipo=ACCION&mercInd=RV&nemo={0}&tiempo=ayer').format(ACCION))
datos = r.content
print(datos)
matriz = np.empty([len(datos), 1])
i = 0
for line in datos.split(b'\n'):
movimiento_diario = line.split(b',')
try:
matriz[i, 0] = movimiento_diario[1]
i = i + 1
except:
pass
#nueva_matriz = organizar_matriz(prices=matriz, modelo=modelo)
#print(nueva_matriz)
#results = sm.OLS(y, X).fit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-06-07 16:35:27
# @Author : Yunyu2019 ([email protected])
# @Link : ${link}
# @descp : The document description
# use Pillow
import os
import argparse
from PIL import Image
def isImage(filename):
allows=('.jpg','.jpeg','.png')
if not filename.endswith(allows) or not os.path.isfile(filename):
msg='{0} is not exist or not a image file (jpg|jpeg|png).'.format(filename)
raise argparse.ArgumentTypeError(msg)
return filename
def get_char(r, g, b, alpha=256):
if alpha == 0:
return ' '
ascii_char = list("$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. ")
length = len(ascii_char)
gray = int(0.2126 * r + 0.7152 * g + 0.0722 * b)
unit = (256.0 + 1)/length
return ascii_char[int(gray/unit)]
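# Illustrative note (not in the original): `gray` falls in [0, 255] and `unit` is
# roughly 257 / len(ascii_char), so dark pixels map to the dense characters at the
# start of the ramp and bright pixels to the sparse ones (spaces) at the end.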
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='convert a color image to strings image',prog='image2string')
    parser.add_argument('-f','--file',required=True,type=isImage,help='source image file path,allow jpg|jpeg|png') # input file
    parser.add_argument('-o','--output',default='output.txt',help='the output file path') # output file
args = parser.parse_args()
infile = args.file
output = args.output
img = Image.open(infile)
w,h=img.size
if h>100:
w = int((100/h)*w)
h = int(100 / 2)
im = img.resize((w,h),Image.NEAREST)
txt = ""
for i in range(h):
for j in range(w):
txt += get_char(*im.getpixel((j, i)))
txt += '\n'
print(txt)
    # write the ASCII art to the output file
with open(output,'w') as fp:
fp.write(txt)
|
from ithz.fetchrss import refreshRSS
def do(id):
if id=="rss":
refreshRSS()
|
import tensorflow as tf
import numpy as np
from PIL import Image
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import lattice_filter_op_loader
module = lattice_filter_op_loader.module
theta_alpha = 8.0
theta_beta = 0.125
im = Image.open('Images/input.bmp')
rgb = np.array(im)
grey = np.array(0.2989 * rgb[:,:,0] + 0.5870 * rgb[:,:,1] + 0.1140 * rgb[:,:,2]).astype(np.uint8)
grey = np.expand_dims(np.expand_dims(grey, axis=0), axis=-1)
tf_input_image = tf.constant(grey/255.0, dtype=tf.float32)
tf_reference_image = tf.constant(np.expand_dims(rgb/255.0, axis=0), dtype=tf.float32)
output = module.lattice_filter(tf_input_image, tf_reference_image, theta_alpha=theta_alpha, theta_beta=theta_beta)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
o = np.round(sess.run(output) * 255).astype(np.uint8)
im = Image.fromarray(np.squeeze(grey))
im.save('Images/gray_original.bmp')
im = Image.fromarray(np.squeeze(o))
im.save('Images/filtered_grey.bmp')
|
import json
import time
import requests as r
class DataRecord:
"""to record the data about xingtong"""
def __init__(self, url, filename):
self.url = url
self.filename = filename
self.record = {}
def get_follower(self):
response = r.get(self.url)
if response.status_code == 200:
response_dict = response.json()
response_dict = response_dict['data']['follower']
return response_dict
else:
return 0
def time_follower_dict(self):
        follower = self.get_follower()
if len(self.record) != 0:
self.record = {}
self.record[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())] = follower
return self.record
bili_url = 'https://api.bilibili.com/x/relation/stat?vmid=401315430'
file_path = 'D:/BVideoFiles/22886883-星瞳_Official/record_fans_num.json'
'''The GET request returns a dict like: {'code': 0, 'message': '0', 'ttl': 1,
 'data': {'mid': 401315430, 'following': 20, 'whisper': 0, 'black': 0, 'follower': 160679}} '''
if __name__ == '__main__':
count = 1
Data = DataRecord(bili_url, file_path)
while 1:
if count:
ctime = time.strftime("%S", time.localtime())
if ctime == '00':
count -= 1
else:
with open(file_path, 'r') as file:
data_load = list(json.load(file))
data_load.append(Data.time_follower_dict())
with open(file_path, 'w') as f:
json.dump(data_load, f, indent=4)
time.sleep(600)
|
# -*- coding: utf-8 -*-
# This file as well as the whole tspreprocess package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), 2017
from __future__ import absolute_import, division
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from tspreprocess.compress.compress import compress
from unittest import TestCase
class CompressTestCase(TestCase):
def setUp(self):
cid = np.repeat([10, 500], 10)
ckind = np.repeat(["a", "b", "a", "b"], 5)
csort = [1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10]
cval = [11, 9, 67, 45, 30,
58, 62, 19, 56, 29,
0, 27, 36, 43, 33,
2, 24, 71, 41, 28]
self.df = pd.DataFrame({"id": cid, "kind": ckind, "sort": csort, "val": cval})
self.col_naming = {"column_id": "id", "column_kind": "kind", "column_sort": "sort", "column_value": "val"}
# gives the following DataFrame
"""
id kind sort val
0 10 a 1 11
1 10 a 2 9
2 10 a 3 67
3 10 a 4 45
4 10 a 5 30
5 10 b 6 58
6 10 b 7 62
7 10 b 8 19
8 10 b 9 56
9 10 b 10 29
10 500 a 1 0
11 500 a 2 27
12 500 a 3 36
13 500 a 4 43
14 500 a 5 33
15 500 b 6 2
16 500 b 7 24
17 500 b 8 71
18 500 b 9 41
19 500 b 10 28
"""
def test_compress_max(self):
dd = compress(self.df,
compression_functions={"maximum": None},
interval_length=2,
**self.col_naming)
expected_dd = pd.DataFrame({"id": ["10"] * 3 + ["500"] * 3,
"sort": ["bin_0", "bin_1", "bin_2"] * 2,
"a_maximum": [11., 67., 30., 27., 43., 33.],
"b_maximum": [62., 56., 29., 24., 71., 28.]})
expected_dd = expected_dd[dd.columns]
expected_dd.sort_values(by=["id", "sort"], inplace=True)
assert_frame_equal(expected_dd, dd)
def test_compress_min(self):
dd = compress(self.df,
compression_functions={"minimum": None},
interval_length=2,
**self.col_naming)
expected_dd = pd.DataFrame({"id": ["10"] * 3 + ["500"] * 3,
"sort": ["bin_0", "bin_1", "bin_2"] * 2,
"a_minimum": [9., 45., 30., 0., 36., 33.],
"b_minimum": [58., 19., 29., 2., 41., 28.]})
expected_dd = expected_dd[dd.columns]
expected_dd.sort_values(by=["id", "sort"], inplace=True)
assert_frame_equal(expected_dd, dd)
# todo: we need tests for the other tsfresh formats, maybe just restructure the DF from above
|
import random
from PathsModule import AggregateOutputPath
import os
import uuid
from getImage import getImage
from GenerateAdjecentShapesPoints import GenerateAdjecentShapesPoints
from blocksWorld import drawSolid
from CreateNewObject import CreateNewObject
from SaveData import Save
if __name__ == "__main__":
# The main program parameters
NumberOfImages = 1 # the number of images you want [0-100000]
NrObjects = random.randint(3,10) # the number of objects in each image [3-10]
var = 'False' # is there gap between objects or not.
OverlapRemove = 'True' # remove any new object that overlapped with existing object.
colors = ['red', 'blue', 'black', 'yellow'] # choose a set of colors that gonna be used
# create output directory
if not os.path.exists(AggregateOutputPath):
os.makedirs(AggregateOutputPath)
# Generate N images and save their information into json file that has the same name as image file
for idx in range(NumberOfImages):
tag = 'Image(' + str(idx) + ')'+ str(uuid.uuid4())
imageName = tag + '.png'
jsonfile = tag
print("Creating Image Number {} of {}".format(idx,NumberOfImages))
data = {'layer0':[{"AggregateObjectName":tag, "AggCenter" : [], "AggrObjects":[],
"orientation": 0}]}
resultFile = os.path.join(AggregateOutputPath, imageName)
image, canvas = getImage('RGB', (640, 480), 'white')
AggpointsShape, centersShape, RadiusShape, rotationShape, NrVerticesShape = GenerateAdjecentShapesPoints(NrObjects, var, OverlapRemove)
for idy, points in enumerate(AggpointsShape):
color = random.choice(colors)
drawSolid(canvas, points, color)
ObjectName = 'object_' + str(idy)
CreateNewObject(data, ObjectName, centersShape[idy], points, RadiusShape[idy], NrVerticesShape[idy], rotationShape[idy])
# for i in range(len(points) - 1):
# draw(canvas, (points[i + 0], points[i + 1]), str(i))
# for i in range(len(c) - 1):
# draw(canvas, (c[i + 0], c[i + 1]), str(i))
image.save(resultFile)
#Save(data, jsonfile) # active this line if u want to store vertices locations in each object within each image.
# the result is file contains all vertices, centers, radius, and some other information about object being drawn.
|
import unittest
class Test0006(unittest.TestCase):
def test_problem(self):
n = 100
sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
square_of_sum = (n * (n + 1) / 2) * (n * (n + 1) / 2)
diff = square_of_sum - sum_of_squares
self.assertEqual(diff, 25164150) |
import asyncio
import threading
from .connection import MixerConnection
from .utils import get_channel_id
from chatrooms import lock
class MixerThread(threading.Thread):
def __init__(self, **kwargs):
super().__init__()
self.channel_id = get_channel_id(kwargs.pop("channel_name"))
self.mixer_connection = MixerConnection(self.channel_id,
kwargs.pop("oauth_token", None))
@property
def last_message(self):
"""
Pops the first text message from the queue.
:return: str, first message of the queue.
"""
try:
return self.mixer_connection.messages.popleft()
except IndexError:
return None
def run(self):
asyncio.set_event_loop(asyncio.new_event_loop())
with lock:
asyncio.get_event_loop().run_until_complete(self.mixer_connection.run())
def quit(self):
self.mixer_connection.running = False
asyncio.get_event_loop().close() |
#
# Copyright 2019 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import time
import datetime
from com.tricentis.continuousintegration.toscacijavaclient import DexDispatcher
from com.tricentis.continuousintegration.toscacijavaclient import Options
from com.tricentis.continuousintegration.toscacijavaclient import ClientProperties
from com.tricentis.continuousintegration.toscacixecution.utils import Util
class ToscaClient(object):
@staticmethod
def new_instance(container):
return ToscaClient(container["url"], container["username"], container["password"])
def __init__(self, url, username, password):
self.url = url
self.username = username
self.password = password
self.workdir = self._create_working_directory()
self.config_filename = '%s/testConfiguration.xml' % self.workdir
self.result_filename = '%s/result.xml' % self.workdir
def execute(self, polling_interval, client_timeout, consider_execution_result, test_configuration):
if not test_configuration:
raise Exception('Test Configuration cannot be empty.')
# write config file
with open(self.config_filename, "w") as text_file:
text_file.write(test_configuration)
args = self._create_options('Junit', self.username, self.password, self.url, consider_execution_result)
ClientProperties.LoadProperties()
options = Options(args)
dispatcher = DexDispatcher.createDispatcher(options)
print "connecting to tosca server..."
dispatcher.Connect()
print "executing tests..."
dispatcher.Execute()
# read result file and return
with open(self.result_filename, 'r') as myfile:
results = myfile.read()
return results
def _create_options(self, result_type, username, password, url, consider_execution_result):
args = []
args.append('-m')
args.append('distributed')
if self.result_filename:
args.append('-r')
args.append(str(self.result_filename))
if result_type:
args.append('-t')
args.append(str(result_type))
if self.config_filename:
args.append('-c')
args.append(str(self.config_filename))
if username:
args.append('-l')
args.append(str(username))
if password:
args.append('-p')
args.append(str(password))
if url:
args.append('-e')
args.append(str(url))
if consider_execution_result:
args.append('-x')
args.append(str(consider_execution_result))
return args
def _create_working_directory(self):
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%dT%H%M%S.%f')
workdir = 'work/tosca-%s' % st
os.makedirs(workdir)
return workdir
|
#!/bin/env python
#----------------------------------------------------------------------------
# Name: utils.py
# Purpose: utils module for Synthetico
#
# Author: Marco Visibelli
# Contact: https://github.com/marcovisibelli
#
# Created: 02/04/2017
# Copyright: (c) 2018 by Marco Visibelli
# Licence: MIT license
#----------------------------------------------------------------------------
import sys, os
import pandas as pd
import numpy as np
import random
import datetime
from tqdm import tqdm
from modules.data import *
def letters(input):
valids = []
for character in input:
if character.isalpha():
valids.append(character)
valid= ''.join(valids)
valid = valid.replace("ó","o").replace("á","a").replace("é","e").replace("ô","o").replace("ç","c")
return valid
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def email(df,context):
return letters(str(df["name"]).lower()) + "." + letters(str(df["surname"]).lower()) +"@"+ letters(str(context["company"]).lower())+".com"
# this contains the variables
email_entity = email
def phone_generator(pattern):
stringa = pattern
while 'X' in stringa:
stringa = stringa.replace('X',str(random.randint(0,9)),1)
return stringa
def statistical_select(elements):
results = []
    resolution = 1000
    for ele in elements:
        for conte in range(0, int(resolution * ele[1])):
results.append(ele[0])
return random.choice(results)
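# Illustrative note (not in the original): `elements` is expected to be a list of
# (value, probability) pairs, e.g. [('a', 0.7), ('b', 0.3)]; the pool built above
# holds roughly 1000 * probability copies of each value, so the random draw
# approximates the given distribution.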
def random_date(start, end):
"""Generate a random datetime between `start` and `end`"""
return start + datetime.timedelta(
# Get a random amount of seconds between `start` and `end`
seconds=random.randint(0, int((end - start).total_seconds())),
)
def apply_context(selectors,element,context,current_row):
#print(element," vs ",current_row)
    # apply the generic selectors
if selectors != []:
lista_selectors = [ a for a in selectors if a in context.keys()]
        # for all combinations, using the context
ritorno = []
        # keep the value only if it passes all the checks
for ele_sele in lista_selectors:
if element[ele_sele] == context[ele_sele]:
ritorno.append(element["values"])
lista_selectors_2 = [ a for a in selectors if a in current_row.keys()]
        # for all combinations, using the last row
        # keep the value only if it passes all the checks
for ele_sele in lista_selectors_2:
if element[ele_sele] == current_row[ele_sele]:
ritorno.append(element["values"])
return ritorno
else:
return element["values"]
def process_context(meta_context):
context = {}
for element in meta_context.keys():
if isinstance(meta_context[element], (list,)):
context[element] = statistical_select(meta_context[element])
else:
context[element] = meta_context[element]
return context
|
import datetime
from os import path
import numpy as np
from PINSoftware.Debugger import Debugger
from PINSoftware.Profiler import Profiler
def remove_outliers(data):
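    # Simple outlier rejection: keep only samples within two standard deviations
    # of the mean of the given section.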
u = np.mean(data)
s = np.std(data)
return [e for e in data if (u - 2 * s <= e <= u + 2 * s)]
class DataAnalyser():
"""
    This class takes care of data analysis and storage.
    Once an instance is created, call `DataAnalyser.append` to add a new data point. Use
    `DataAnalyser.ys` to get the raw data and `DataAnalyser.processed_ys` to get the peak voltages;
    `DataAnalyser.processed_timestamps` are the timestamps corresponding to the peak voltages.
    `DataAnalyser.averaged_processed_ys` are the averaged peak voltages and
    `DataAnalyser.averaged_processed_timestamps` are the timestamps corresponding to those averages.
    Lastly, `DataAnalyser.markers` and `DataAnalyser.marker_timestamps` are debug markers and their
    timestamps; they can hold anything, are only adjustable from code, and should not be used normally.
    All the timestamps used here are based on the length of `DataAnalyser.ys` at the time. This is
    useful for two reasons: it is cheap to calculate, and, more importantly, when you later plot the
    data you can plot `DataAnalyser.ys` with "x0=0" and "dx=1" and then plot the averaged peak
    voltages directly, and the data will be correctly scaled on the x axis. This does assume that the
    data comes at a precise frequency, but the NI-6002 can offer that, so it should be alright.
    Once `DataAnalyser.on_start` is called, a profiler for irregular data is also started; each second
    it prints how many irregular data issues there were.
"""
def __init__(self, data_frequency : int, plot_buffer_len : int = 200, debugger : Debugger = Debugger(),
edge_detection_threshold : float = 0.005, average_count : int = 50, correction_func=lambda x: x):
"""
`data_frequency` is the frequency of the incoming data, this is used for calculating real timestamps
and is saved if hdf5 saving is enabled.
`plot_buffer_len` determines how many datapoints should be plotted in the live plot graph (if the
server has been run with the graphing option).
`debugger` is the debugger to use.
`edge_detection_threshold`, `average_count` and `correction_func` are processing parameters. They
are described in the Help tab of the program.
`edge_detection_threshold` sets the voltage difference required to find a section transition.
`average_count` is how many peak voltages should be averaged to get the averaged peak voltages.
`correction_func` is the function to run the peak voltages through before using them. This is
to correct some systematic errors or do some calculations.
"""
self.freq = data_frequency
self.period = 1 / data_frequency
self.plot_buffer_len = plot_buffer_len
self.debugger = debugger
self.ys = [0, 0, 0]
self.markers = []
self.marker_timestamps = []
self.first_processed_timestamp = None
self.actual_append = self.actual_append_first
self.processed_ys = []
self.processed_timestamps = []
self.averaged_processed_ys = []
self.averaged_processed_timestamps = []
self.edge_detection_threshold = edge_detection_threshold
self.last_up_section = []
self.last_down_section = []
self.correction_func = correction_func
self.average_count = average_count
self.average_running_sum = 0
self.average_index = 0
self.irregular_data_prof = Profiler("Irregular data", start_delay=0)
self.ready_to_plot = True
def on_start(self):
self.irregular_data_prof.start()
def actual_append_first(self, new_processed_y):
"""
This appends the new processed value, works on the averaged processed values and
        possibly appends that too. This variant handles the first processed value:
        it sets some initial values and, after it has run once, `DataAnalyser.actual_append_main`
        is called instead.
"""
self.processed_ys.append(new_processed_y)
self.first_processed_timestamp = datetime.datetime.now().timestamp()
self.processed_timestamps.append(len(self.ys))
self.average_running_sum += new_processed_y
self.average_index += 1
self.actual_append = self.actual_append_main
def actual_append_main(self, new_processed_y):
"""
This appends the new processed value, works on the averaged processed values and
possibly appends that too. For the first processed value, `DataAnalyser.actual_append_first`
        is run instead; afterwards this one is used.
"""
self.processed_ys.append(new_processed_y)
self.processed_timestamps.append(len(self.ys))
self.average_running_sum += new_processed_y
self.average_index += 1
if self.average_index == self.average_count:
self.averaged_processed_ys.append(self.average_running_sum / self.average_count)
self.averaged_processed_timestamps.append(self.processed_timestamps[-1] - self.average_count / 2)
self.average_running_sum = 0
self.average_index = 0
def handle_processing(self, new_y):
"""
This is the main processing function. It gets the new y (which
at this point is not in `DataAnalyser.ys` yet) and does some processing on it.
        It may add new values to `DataAnalyser.processed_ys` and `DataAnalyser.averaged_processed_ys`
        if new values were found, through `DataAnalyser.actual_append`. If the data does not add up,
        it is counted as irregular data. The logic is not described here as it is covered in the manual,
        and it may still be best to look through the code.
"""
diff = new_y - self.ys[-3]
if abs(diff) > self.edge_detection_threshold:
if diff < 0 and len(self.last_up_section) > 1 and len(self.last_down_section) > 1:
last_up_section = remove_outliers(self.last_up_section)
last_down_section = remove_outliers(self.last_down_section)
last_up_diffs = [post - pre for pre, post in zip(last_up_section, last_up_section[1:])]
avg_up_diff = sum(last_up_diffs) / len(last_up_diffs)
up_avg = sum(last_up_section) / len(last_up_section)
down_avg = sum(last_down_section) / len(last_down_section)
if up_avg >= down_avg:
spike = (up_avg - avg_up_diff * (len(self.last_up_section) / 2))
self.markers.append(spike)
self.marker_timestamps.append(len(self.ys))
processed_y = self.correction_func(spike - down_avg)
self.actual_append(processed_y)
else:
# self.markers.append(down_avg)
# self.marker_timestamps.append(len(self.ys))
# self.debugger.warning("Irregular data, something may be wrong.")
self.irregular_data_prof.add_count()
if len(self.last_up_section) > 0:
self.last_down_section = self.last_up_section
self.last_up_section = []
else:
self.last_up_section.append(new_y)
def append(self, new_y):
"""
        The main append function through which new data is added. It just passes
        the value to the processing function and then appends it to `DataAnalyser.ys`.
"""
self.ready_to_plot = False
self.handle_processing(new_y)
self.ys.append(new_y)
self.ready_to_plot = True
def on_stop(self):
self.irregular_data_prof.stop()
def plot(self, plt):
"""This is what plots the data on the raw data graph if graphing is enabled"""
if self.ready_to_plot:
plt.plot(self.ys[-self.plot_buffer_len:])
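if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): feed a
    # synthetic square wave into a DataAnalyser and report how many peak
    # voltages were extracted. The 1000 Hz rate, the 50-sample half-period,
    # the 0.1 V level and average_count=5 are arbitrary assumptions.
    analyser = DataAnalyser(data_frequency=1000, average_count=5)
    analyser.on_start()
    for i in range(2000):
        # alternate between a 0 V "down" level and a 0.1 V "up" level
        analyser.append(0.0 if (i // 50) % 2 == 0 else 0.1)
    analyser.on_stop()
    print(len(analyser.processed_ys), "peak voltages,",
          len(analyser.averaged_processed_ys), "averaged peak voltages")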
|
import unittest
import os
from openmdao.utils.code_utils import get_nested_calls
from openmdao.core.group import Group
class TestCodeUtils(unittest.TestCase):
def test_get_nested_calls(self):
devnull = open(os.devnull, "w")
graph = get_nested_calls(Group, '_final_setup', stream=devnull)
self.assertIn(('Group._compute_root_scale_factors', 'System._compute_root_scale_factors'),
graph.edges(), "System._compute_root_scale_factors not called by Group._compute_root_scale_factors")
if __name__ == '__main__':
unittest.main()
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("RECO4")
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/Reconstruction_cff')
process.load('Configuration/StandardSequences/EndOfProcess_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.load('RecoJets.Configuration.RecoJPTJets_cff')
process.load('JetMETCorrections.Configuration.CorrectedJetProducersAllAlgos_cff')
process.load('JetMETCorrections.Configuration.CorrectedJetProducers_cff')
process.load('JetMETCorrections.Configuration.JetCorrectors_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2018_realistic', '')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
#################################################################
### For 219, file from RelVal
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_10_6_4/RelValProdTTbar_13_pmx25ns/MINIAODSIM/PUpmx25ns_106X_upgrade2018_realistic_v9-v1/10000/87AD30D2-F673-F54C-8974-CB916CC66098.root'
)
)
process.RECOoutput = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring("keep *_JetPlusTrackZSPCorJetAntiKt4PAT_*_*"),
fileName = cms.untracked.string('file:jptreco.root')
)
##########
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOoutput_step = cms.EndPath(process.RECOoutput)
process.load("RecoJets.JetPlusTracks.PATJetPlusTrackCorrections_cff")
process.p01=cms.Path(process.PATJetPlusTrackCorrectionsAntiKt4)
process.p1 =cms.Schedule(
process.p01,
process.endjob_step,
process.RECOoutput_step
)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 12 15:18:23 2020
@author: hoang
"""
import pandas as pd
data_NEC = pd.read_csv("./nec_data.csv")
def pulse(data):
"""
    We read in Saleae data, then convert the command field into ASCII.
    NEC IR relies on the space between pulses to encode each bit.
    Data is sent as bursts of 562.5 µs, each followed by a space of a few hundred µs to a few ms, which is extremely helpful for decoding.
    There are 4 types of space:
    1. 18.37 µs - one pulse within the burst
    2. 562.5 µs - bit 0
    3. 1.6875 ms - bit 1
    4. 4.5 ms - the leading command
    Parameters
    ----------
    data : pandas.DataFrame
        Data exported from Saleae Logic 1.2.18, which has the time stamp of each transition.
Returns
-------
None.
"""
start = False
add,cmd,count = b'',b'',0 #we use count to extract only the command part (ignore first 16 address bits)
for i in range(len(data)-1):
        #I have removed the part that processes the address portion.
#You can simply add it in using count < 16 in the appropriate space
pulse= data.iloc[i+1,0]-data.iloc[i,0]
if pulse < 20e-05: #short pulse within the burst, ignore
continue
elif (pulse > 0.55e-3 and pulse < 1.68e-3): #bit 0's space
if start == True and count >=16:
cmd += b'0'
count += 1
elif pulse > 1.68e-3 and pulse < 4.2e-3: #bit 1's space
if start == True and count >=16:
cmd += b'1'
count +=1
elif pulse > 4.2e-3 and pulse < 1: #lead's space
start = True
else:
if cmd != b'': #space between messages
print(chr(int(cmd[0:8],2))+chr(int(cmd[8:16],2)),end='')
add,cmd,count= b'',b'', 0
start = False
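# Quick illustration of the bit-string -> ASCII step used above (hypothetical
# command bits): every group of 8 bits is read as one ASCII character, e.g.
#     cmd = b'0100100001101001'
#     chr(int(cmd[0:8], 2)) + chr(int(cmd[8:16], 2))   # -> 'Hi'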
# def int2bytes(i):
# hex_string = '%x' % i
# n = len(hex_string)
# return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
# def text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):
# n = int(bits, 2)
# return int2bytes(n).decode(encoding, errors)
if __name__=="__main__":
pulse(data_NEC.head(100000)) |
FOO, BAR, BAZ = range(4, 7)
|
from typing import List
from math import pi
import cellengine as ce
from cellengine.utils.generate_id import generate_id
from cellengine.payloads.gate_utils import format_common_gate
def format_quadrant_gate(
experiment_id: str,
x_channel: str,
y_channel: str,
name: str,
x: float,
y: float,
labels: List[str] = [],
skewable: bool = False,
angles: List[float] = [0, pi / 2, pi, 3 * pi / 2],
gid: str = None,
gids: List[str] = None,
locked: bool = False,
parent_population_id: str = None,
parent_population: str = None,
tailored_per_file: bool = False,
fcs_file_id: str = None,
fcs_file: str = None,
create_population: bool = True,
):
"""Formats a quadrant gate for posting to the CellEngine API.
Quadrant gates have four sectors (upper-right, upper-left, lower-left,
lower-right), each with a unique gid and name.
Args:
x_channel (str): The name of the x channel to which the gate applies.
y_channel (str): The name of the y channel to which the gate applies.
name (str): The name of the gate
x (float): The x coordinate of the center point (after the channel's scale has
been applied).
y (float): The y coordinate (after the channel's scale has been applied).
labels (list): Positions of the quadrant labels. A list of four length-2
vectors in the order: UR, UL, LL, LR. These are set automatically to
the plot corners.
skewable (bool): Whether the quadrant gate is skewable.
angles (list): List of the four angles of the quadrant demarcations
gid (str): Group ID of the gate, used for tailoring. If this is not
specified, then a new Group ID will be created. To create a
tailored gate, the gid of the global tailored gate must be specified.
gids (list): Group IDs of each sector, assigned to ``model.gids``.
locked (bool): Prevents modification of the gate via the web interface.
parent_population_id (str): ID of the parent population. Use ``None`` for
the "ungated" population. If specified, do not specify
``parent_population``.
parent_population (str): Name of the parent population. An attempt will
be made to find the population by name. If zero or more than
one population exists with the name, an error will be thrown.
If specified, do not specify ``parent_population_id``.
tailored_per_file (bool): Whether or not this gate is tailored per FCS file.
fcs_file_id (str): ID of FCS file, if tailored per file. Use ``None`` for
the global gate in a tailored gate group. If specified, do not
specify ``fcs_file``.
fcs_file (str): Name of FCS file, if tailored per file. An attempt will be made
to find the file by name. If zero or more than one file exists with
the name, an error will be thrown. Looking up files by name is
slower than using the ID, as this requires additional requests
to the server. If specified, do not specify ``fcs_file_id``.
create_population (bool): Automatically create corresponding population.
Returns:
A QuadrantGate object.
Example:
```python
cellengine.Gate.create_quadrant_gate(experimentId, x_channel="FSC-A",
y_channel="FSC-W", name="my gate", x=160000, y=200000)
experiment.create_quadrant_gate(x_channel="FSC-A",
y_channel="FSC-W", name="my gate", x=160000, y=200000)
```
"""
# set labels based on axis scale
r = ce.APIClient().get_scaleset(experiment_id, as_dict=True)
scale_min = min(x["scale"]["minimum"] for x in r["scales"])
    scale_max = max(x["scale"]["maximum"] for x in r["scales"])
if labels == []:
labels = [
[scale_max, scale_max], # upper right
[scale_min, scale_max], # upper left
[scale_min, scale_min], # lower left
[scale_max, scale_min], # lower right
        ]
elif len(labels) == 4 and all(len(label) == 2 for label in labels):
pass
else:
raise ValueError("Labels must be a list of four length-2 lists.")
if gid is None:
gid = generate_id()
if gids is None:
gids = [
generate_id(),
generate_id(),
generate_id(),
generate_id(),
]
names = [name + append for append in [" (UR)", " (UL)", " (LL)", " (LR)"]]
model = {
"locked": locked,
"labels": labels,
"gids": gids,
"skewable": skewable,
"quadrant": {"x": x, "y": y, "angles": angles},
}
body = {
"experimentId": experiment_id,
"names": names,
"type": "QuadrantGate",
"gid": gid,
"xChannel": x_channel,
"yChannel": y_channel,
"parentPopulationId": parent_population_id,
"model": model,
}
return format_common_gate(
experiment_id,
body=body,
tailored_per_file=tailored_per_file,
fcs_file_id=fcs_file_id,
fcs_file=fcs_file,
create_population=create_population,
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-09 09:09
from __future__ import unicode_literals
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('student', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='student',
name='academic_year',
field=models.CharField(default='2017-18', max_length=7, null=True),
),
migrations.AlterField(
model_name='student',
name='blood_group',
field=models.CharField(blank=True, choices=[('a+', 'A+'), ('b+', 'B+'), ('ab', 'AB'), ('ab+', 'AB+'), ('o', 'O'), ('o+', 'O+'), ('o-', 'O-')], max_length=3, null=True),
),
migrations.AlterField(
model_name='student',
name='country',
field=django_countries.fields.CountryField(default='IN', max_length=2),
),
migrations.AlterField(
model_name='student',
name='gender',
field=models.CharField(blank=True, choices=[('m', 'Male'), ('f', 'Female'), ('o', 'Other')], max_length=1, null=True),
),
migrations.AlterField(
model_name='student',
name='programme',
field=models.CharField(choices=[('ug_cse', 'B.Tech.(CS)'), ('ug_it', 'B.Tech.(IT)'), ('pg_cs', 'M.Tech.(CS)'), ('pg_it', 'M.Tech.(IT)')], default='ug_cse', max_length=10, null=True),
),
migrations.AlterField(
model_name='student',
name='qualifying_board',
field=models.CharField(default='Central Board of Secondary Education', max_length=100, null=True),
),
migrations.AlterField(
model_name='student',
name='social_category',
field=models.CharField(choices=[('gen', 'General'), ('obc', 'OBC'), ('sc', 'SC'), ('st', 'ST'), ('pwd', 'PwD')], default='gen', max_length=3, null=True),
),
migrations.AlterField(
model_name='student',
name='state_of_origin',
field=models.CharField(default='Rajasthan', max_length=20, null=True),
),
]
|
from django.urls import path
from .views import CreateCommentView, Comments, AllCommentsView
urlpatterns = [
path('comment/', AllCommentsView.as_view()),
path('comment/<pk>/', Comments.as_view()),
path('create/comment/', CreateCommentView.as_view()),
]
|
from __future__ import absolute_import
import sys
from unittest import TestCase as BaseTestCase
from uitools import trampoline
from uitools.qt import QtCore, QtGui, Qt
from uitools.trampoline import bounce, sleep, qpath
from mayatools.test import requires_maya
try:
from maya import cmds, mel
except ImportError:
class Stub(object):
cmds = None
mel = None
utils = None
standalone = None
maya = Stub()
sys.modules['maya'] = maya
sys.modules['maya.cmds'] = None
sys.modules['maya.mel'] = None
sys.modules['maya.utils'] = None
sys.modules['maya.standalone'] = None
cmds = Stub()
has_maya = False
else:
has_maya = True
class TestCase(BaseTestCase):
@requires_maya
def setUp(self):
cmds.file(new=True, force=True)
|
import demistomock as demisto
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
def _get_incident():
return demisto.incidents()[0]
def iot_resolve_alert():
incident = _get_incident()
_id = ""
for label in incident['labels']:
if label['type'] == 'id':
_id = label['value']
break
if _id == "":
raise Exception('id was not found in the incident labels')
args = demisto.args()
close_reason = args.get('close_reason')
demisto.executeCommand('iot-security-resolve-alert', {
'id': _id,
'reason': f'resolved by XSOAR incident {incident["id"]}',
'reason_type': 'Issue Mitigated' if close_reason == "Resolved" else 'No Action Needed'
})
def main():
try:
iot_resolve_alert()
except Exception as ex:
return_error(f'Failed to execute iot-security-alert-post-processing. Error: {str(ex)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
"""
Code generation: This module is responsible for converting an SDFG into SVE code.
"""
import dace
from dace.sdfg.scope import ScopeSubgraphView
from dace.codegen.prettycode import CodeIOStream
from dace.codegen.targets.target import TargetCodeGenerator
from dace.codegen.targets.framecode import DaCeCodeGenerator
from dace.sdfg import nodes, SDFG, SDFGState, ScopeSubgraphView, graph as gr
from dace.codegen.targets.cpp import sym2cpp
from dace import dtypes, memlet as mm
from dace.sdfg import graph, state, find_input_arraynode, find_output_arraynode
from dace.sdfg.scope import is_in_scope
import itertools
import dace.codegen.targets.sve.util as util
import copy
from six import StringIO
import dace.codegen.targets.sve.unparse
from dace import registry, symbolic, dtypes
import dace.codegen.targets.cpp as cpp
from dace.frontend.operations import detect_reduction_type
import dace.symbolic
from dace.sdfg import utils as sdutil
from dace.codegen.dispatcher import DefinedType
import numpy as np
def contains_any_sve(sdfg: SDFG):
for node, _ in sdfg.all_nodes_recursive():
if isinstance(node,
nodes.Map) and node.schedule == dace.ScheduleType.SVE_Map:
return True
return False
@dace.registry.autoregister_params(name='sve')
class SVECodeGenerator(TargetCodeGenerator):
target_name = 'armv8'
title = 'sve'
language = 'cpp'
def get_load_stride(self, sdfg: SDFG, state: SDFGState, node: nodes.Node,
memlet: dace.Memlet) -> symbolic.SymExpr:
"""Determines the stride of a load/store based on:
- The memlet subset
- The array strides
- The involved SVE loop stride"""
scope = util.get_sve_scope(sdfg, state, node)
if scope is None:
raise NotImplementedError('Not in an SVE scope')
sve_param = scope.map.params[-1]
sve_range = scope.map.range[-1]
sve_sym = dace.symbolic.symbol(sve_param)
array = sdfg.arrays[memlet.data]
# 1. Flatten the subset to a 1D-offset (using the array strides)
offset_1 = memlet.subset.at([0] * len(array.strides), array.strides)
if not offset_1.has(sve_sym):
raise util.NotSupportedError("SVE param does not occur in subset")
# 2. Replace the SVE loop param with its next (possibly strided) value
offset_2 = offset_1.subs(sve_sym, sve_sym + sve_range[2])
# 3. The load stride is the difference between both
stride = (offset_2 - offset_1).simplify()
return stride
def add_header(self, function_stream: CodeIOStream):
if self.has_generated_header:
return
self.has_generated_header = True
function_stream.write('#include <arm_sve.h>\n')
# TODO: Find this automatically at compile time
function_stream.write(f'#define {util.REGISTER_BYTE_SIZE} 64\n')
def __init__(self, frame_codegen: DaCeCodeGenerator, sdfg: dace.SDFG):
dace.SCOPEDEFAULT_SCHEDULE[
dace.ScheduleType.SVE_Map] = dace.ScheduleType.Sequential
dace.SCOPEDEFAULT_STORAGE[
dace.ScheduleType.SVE_Map] = dace.StorageType.CPU_Heap
self.has_generated_header = False
self.frame = frame_codegen
self.dispatcher = frame_codegen._dispatcher
self.dispatcher.register_map_dispatcher(dace.ScheduleType.SVE_Map, self)
self.dispatcher.register_node_dispatcher(
self, lambda state, sdfg, node: is_in_scope(
state, sdfg, node, [dace.ScheduleType.SVE_Map]))
#self.dispatcher.register_state_dispatcher(self, lambda sdfg, state: contains_any_sve(sdfg))
self.cpu_codegen = self.dispatcher.get_generic_node_dispatcher()
self.state_gen = self.dispatcher.get_generic_state_dispatcher()
for src_storage, dst_storage in itertools.product(
dtypes.StorageType, dtypes.StorageType):
self.dispatcher.register_copy_dispatcher(src_storage, dst_storage,
dace.ScheduleType.SVE_Map,
self)
def create_empty_definition(self,
conn: dace.typeclass,
edge: gr.MultiConnectorEdge[mm.Memlet],
callsite_stream: CodeIOStream,
output: bool = False,
is_code_code: bool = False):
""" Creates a simple variable definition `type name;`, which works for both vectors and regular data types. """
var_name = None
var_type = None
var_ctype = None
if output:
var_name = edge.dst_conn
else:
var_name = edge.src_conn
if is_code_code:
# For edges between Tasklets (Code->Code), we use the data as name because these registers are temporary and shared
var_name = edge.data.data
if isinstance(conn, dtypes.vector):
# Creates an SVE register
if conn.type not in util.TYPE_TO_SVE:
raise util.NotSupportedError('Data type not supported')
# In case of a WCR, we must initialize it with the identity value.
# This is to prevent cases in a conditional WCR, where we don't write and it is filled with garbage.
# Currently, the initial is 0, because product reduction is not supported in SVE.
init_str = ''
if edge.data.wcr:
init_str = ' = {}(0)'.format(util.instr('dup', type=conn.type))
var_type = conn.type
var_ctype = util.TYPE_TO_SVE[var_type]
callsite_stream.write('{} {}{};'.format(var_ctype, var_name,
init_str))
else:
raise NotImplementedError(
f'Output into scalar or pointer is not supported ({var_name})')
self.dispatcher.defined_vars.add(var_name, var_type, var_ctype)
def generate_node(self, sdfg: SDFG, dfg: SDFGState, state_id: int,
node: nodes.Node, function_stream: CodeIOStream,
callsite_stream: CodeIOStream):
self.add_header(function_stream)
# Reset the mappings
self.stream_associations = dict()
# Create empty shared registers for outputs into other tasklets
for edge in dfg.out_edges(node):
if isinstance(edge.dst, dace.nodes.Tasklet):
self.create_empty_definition(node.out_connectors[edge.src_conn],
edge,
callsite_stream,
is_code_code=True)
callsite_stream.write('{')
# Create input registers (and fill them accordingly)
for edge in dfg.in_edges(node):
if isinstance(edge.src, nodes.Tasklet):
# Copy from tasklet is treated differently (because it involves a shared register)
# Changing src_node to a Tasklet will trigger a different copy
self.dispatcher.dispatch_copy(edge.src, node, edge, sdfg, dfg,
state_id, function_stream,
callsite_stream)
else:
# Copy from some array (or stream)
src_node = find_input_arraynode(dfg, edge)
self.dispatcher.dispatch_copy(src_node, node, edge, sdfg, dfg,
state_id, function_stream,
callsite_stream)
# Keep track of (edge, node) that need a writeback
requires_wb = []
# Create output registers
for edge in dfg.out_edges(node):
if isinstance(edge.dst, nodes.Tasklet):
# Output into another tasklet again is treated differently similar to the input registers
self.dispatcher.dispatch_output_definition(
node, edge.dst, edge, sdfg, dfg, state_id, function_stream,
callsite_stream)
requires_wb.append((edge, node))
else:
dst_node = find_output_arraynode(dfg, edge)
dst_desc = dst_node.desc(sdfg)
# Streams neither need an output register (pushes can happen at any time in a tasklet) nor a writeback
if isinstance(dst_desc, dace.data.Stream):
# We flag the name of the stream variable
self.stream_associations[edge.src_conn] = (dst_node.data,
dst_desc.dtype)
else:
self.dispatcher.dispatch_output_definition(
node, dst_node, edge, sdfg, dfg, state_id,
function_stream, callsite_stream)
requires_wb.append((edge, dst_node))
# Generate tasklet code
if isinstance(node, nodes.Tasklet):
self.unparse_tasklet(sdfg, dfg, state_id, node, function_stream,
callsite_stream)
# Write back output registers to memory
for edge, dst_node in requires_wb:
self.write_back(sdfg, dfg, state_id, node, dst_node, edge,
function_stream, callsite_stream)
callsite_stream.write('}')
def generate_scope(self, sdfg: dace.SDFG, scope: ScopeSubgraphView,
state_id: int, function_stream: CodeIOStream,
callsite_stream: CodeIOStream):
entry_node = scope.source_nodes()[0]
loop_type = list(set([sdfg.arrays[a].dtype for a in sdfg.arrays]))[0]
ltype_size = loop_type.bytes
long_type = copy.copy(dace.int64)
long_type.ctype = 'int64_t'
self.counter_type = {
1: dace.int8,
2: dace.int16,
4: dace.int32,
8: long_type
}[ltype_size]
callsite_stream.write('{')
# Define all input connectors of the map entry
state_dfg = sdfg.node(state_id)
for e in dace.sdfg.dynamic_map_inputs(state_dfg, entry_node):
if e.data.data != e.dst_conn:
callsite_stream.write(
self.cpu_codegen.memlet_definition(
sdfg, e.data, False, e.dst_conn,
e.dst.in_connectors[e.dst_conn]), sdfg, state_id,
entry_node)
# We only create an SVE do-while in the innermost loop
for param, rng in zip(entry_node.map.params, entry_node.map.range):
begin, end, stride = (sym2cpp(r) for r in rng)
self.dispatcher.defined_vars.enter_scope(sdfg)
# Check whether we are in the innermost loop
if param != entry_node.map.params[-1]:
# Default C++ for-loop
callsite_stream.write(
f'for(auto {param} = {begin}; {param} <= {end}; {param} += {stride}) {{'
)
else:
# Generate the SVE loop header
# The name of our loop predicate is always __pg_{param}
self.dispatcher.defined_vars.add('__pg_' + param,
DefinedType.Scalar, 'svbool_t')
# Declare our counting variable (e.g. i) and precompute the loop predicate for our range
callsite_stream.write(
f'''{self.counter_type} {param} = {begin};
svbool_t __pg_{param} = svwhilele_b{ltype_size * 8}({param}, ({self.counter_type}) {end});
do {{''', sdfg, state_id, entry_node)
# Dispatch the subgraph generation
self.dispatcher.dispatch_subgraph(sdfg,
scope,
state_id,
function_stream,
callsite_stream,
skip_entry_node=True,
skip_exit_node=True)
# Close the loops from above (in reverse)
for param, rng in zip(reversed(entry_node.map.params),
reversed(entry_node.map.range)):
# The innermost loop is SVE and needs a special while-footer, otherwise we just add the closing bracket
if param != entry_node.map.params[-1]:
# Close the default C++ for-loop
callsite_stream.write('}')
else:
# Generate the SVE loop footer
_, end, stride = (sym2cpp(r) for r in rng)
# Increase the counting variable (according to the number of processed elements)
# Then recompute the loop predicate and test for it
callsite_stream.write(
f'''{param} += svcntp_b{ltype_size * 8}(__pg_{param}, __pg_{param}) * {stride};
__pg_{param} = svwhilele_b{ltype_size * 8}({param}, ({self.counter_type}) {end});
}} while(svptest_any(svptrue_b{ltype_size * 8}(), __pg_{param}));''',
sdfg, state_id, entry_node)
self.dispatcher.defined_vars.exit_scope(sdfg)
callsite_stream.write('}')
def copy_memory(self, sdfg: SDFG, dfg: SDFGState, state_id: int,
src_node: nodes.Node, dst_node: nodes.Node,
edge: gr.MultiConnectorEdge[mm.Memlet],
function_stream: CodeIOStream,
callsite_stream: CodeIOStream):
# We should always be in an SVE scope
scope = util.get_sve_scope(sdfg, dfg, dst_node)
if scope is None:
raise NotImplementedError('Not in an SVE scope')
in_conn = dst_node.in_connectors[edge.dst_conn]
if isinstance(src_node, dace.nodes.Tasklet):
# Copy from tasklet is just copying the shared register
# Use defined_vars to get the C++ type of the shared register
callsite_stream.write(
f'{self.dispatcher.defined_vars.get(edge.data.data)[1]} {edge.dst_conn} = {edge.data.data};'
)
return
if not isinstance(src_node, dace.nodes.AccessNode):
raise util.NotSupportedError(
'Copy neither from Tasklet nor AccessNode')
src_desc = src_node.desc(sdfg)
if isinstance(src_desc, dace.data.Stream):
# A copy from a stream will trigger a vector pop
raise NotImplementedError()
# FIXME: Issue when we can pop different amounts of data!
# If we limit to the smallest amount, certain data will be lost (never processed)
"""
# SVE register where the stream will be popped to
self.create_empty_definition(in_conn, edge, callsite_stream, output=True)
var_name = edge.dst_conn
callsite_stream.write(
f'{util.TYPE_TO_SVE[in_conn.type]} {var_name};')
callsite_stream.write('{')
callsite_stream.write('// Stream pop')
# Pop into local buffer
# 256 // in_conn.vtype.bytes
n_vec = f'{util.REGISTER_BYTE_SIZE} / {in_conn.vtype.bytes}'
callsite_stream.write(f'{in_conn.vtype.ctype} __tmp[{n_vec}];')
callsite_stream.write(
f'size_t __cnt = {edge.data.data}.pop_try(__tmp, {n_vec});')
# Limit the loop predicate
loop_pred = util.get_loop_predicate(sdfg, dfg, dst_node)
callsite_stream.write(
f'{loop_pred} = svand_z({loop_pred}, {loop_pred}, svwhilelt_b{in_conn.vtype.bytes * 8}(0ll, __cnt));')
# Transfer to register
callsite_stream.write(f'{var_name} = svld1({loop_pred}, __tmp);')
callsite_stream.write('}')
"""
return
if isinstance(in_conn, dtypes.vector):
# Copy from vector, so we can use svld
if in_conn.type not in util.TYPE_TO_SVE:
raise NotImplementedError(
f'Data type {in_conn.type} not supported')
self.dispatcher.defined_vars.add(edge.dst_conn, dtypes.vector,
in_conn.ctype)
# Determine the stride of the load and use a gather if applicable
stride = self.get_load_stride(sdfg, dfg, dst_node, edge.data)
# First part of the declaration is `type name`
load_lhs = '{} {}'.format(util.TYPE_TO_SVE[in_conn.type],
edge.dst_conn)
ptr_cast = ''
if in_conn.type == np.int64:
ptr_cast = '(int64_t*) '
elif in_conn.type == np.uint64:
ptr_cast = '(uint64_t*) '
# Regular load and gather share the first arguments
load_args = '{}, {}'.format(
util.get_loop_predicate(sdfg, dfg, dst_node), ptr_cast +
cpp.cpp_ptr_expr(sdfg, edge.data, DefinedType.Pointer))
if stride == 1:
callsite_stream.write('{} = svld1({});'.format(
load_lhs, load_args))
else:
callsite_stream.write(
'{} = svld1_gather_index({}, svindex_s{}(0, {}));'.format(
load_lhs, load_args,
util.get_base_type(in_conn).bytes * 8, sym2cpp(stride)))
else:
# Any other copy (e.g. pointer or scalar) is handled by the default CPU codegen
self.cpu_codegen.copy_memory(sdfg, dfg, state_id, src_node,
dst_node, edge, function_stream,
callsite_stream)
def define_out_memlet(self, sdfg: SDFG, dfg: state.StateSubgraphView,
state_id: int, src_node: nodes.Node,
dst_node: nodes.Node, edge: graph.MultiConnectorEdge,
function_stream: CodeIOStream,
callsite_stream: CodeIOStream):
scope = util.get_sve_scope(sdfg, dfg, src_node)
if scope is None:
raise NotImplementedError('Not in an SVE scope')
self.create_empty_definition(src_node.out_connectors[edge.src_conn],
edge, callsite_stream)
def write_back(self, sdfg: SDFG, dfg: state.StateSubgraphView,
state_id: int, src_node: nodes.Node, dst_node: nodes.Node,
edge: graph.MultiConnectorEdge,
function_stream: CodeIOStream,
callsite_stream: CodeIOStream):
scope = util.get_sve_scope(sdfg, dfg, src_node)
if scope is None:
raise NotImplementedError('Not in an SVE scope')
out_conn = src_node.out_connectors[edge.src_conn]
if out_conn.type not in util.TYPE_TO_SVE:
raise NotImplementedError(
f'Data type {out_conn.type} not supported')
if edge.data.wcr is None:
# No WCR required
if isinstance(dst_node, dace.nodes.Tasklet):
# Writeback into a tasklet is just writing into the shared register
callsite_stream.write(f'{edge.data.data} = {edge.src_conn};')
return
if isinstance(out_conn, dtypes.vector):
# If no WCR, we can directly store the vector (SVE register) in memory
# Determine the stride of the store and use a scatter load if applicable
stride = self.get_load_stride(sdfg, dfg, src_node, edge.data)
ptr_cast = ''
if out_conn.type == np.int64:
ptr_cast = '(int64_t*) '
elif out_conn.type == np.uint64:
ptr_cast = '(uint64_t*) '
store_args = '{}, {}'.format(
util.get_loop_predicate(sdfg, dfg, src_node),
ptr_cast +
cpp.cpp_ptr_expr(sdfg, edge.data, DefinedType.Pointer),
)
if stride == 1:
callsite_stream.write(
f'svst1({store_args}, {edge.src_conn});')
else:
callsite_stream.write(
f'svst1_scatter_index({store_args}, svindex_s{util.get_base_type(out_conn).bytes * 8}(0, {sym2cpp(stride)}), {edge.src_conn});'
)
else:
raise NotImplementedError('Writeback into non-vector')
else:
            # TODO: Check what we are WCR'ing into
# Since we have WCR, we must determine a suitable SVE reduce instruction
# Check whether it is a known reduction that is possible in SVE
reduction_type = detect_reduction_type(edge.data.wcr)
if reduction_type not in util.REDUCTION_TYPE_TO_SVE:
raise util.NotSupportedError('Unsupported reduction in SVE')
# If the memlet contains the innermost SVE param, we have a problem, because
# SVE doesn't support WCR stores. This would require unrolling the loop.
if scope.params[-1] in edge.data.free_symbols:
raise util.NotSupportedError(
'SVE loop param used in WCR memlet')
# WCR on vectors works in two steps:
# 1. Reduce the SVE register using SVE instructions into a scalar
# 2. WCR the scalar to memory using DaCe functionality
sve_reduction = '{}({}, {})'.format(
util.REDUCTION_TYPE_TO_SVE[reduction_type],
util.get_loop_predicate(sdfg, dfg, src_node), edge.src_conn)
ptr_cast = ''
if out_conn.type == np.int64:
ptr_cast = '(long long*) '
elif out_conn.type == np.uint64:
ptr_cast = '(unsigned long long*) '
wcr_expr = self.cpu_codegen.write_and_resolve_expr(
sdfg,
edge.data,
edge.data.wcr_nonatomic,
None,
ptr_cast + sve_reduction,
dtype=out_conn.vtype)
callsite_stream.write(wcr_expr + ';')
def unparse_tasklet(self, sdfg: SDFG, dfg: state.StateSubgraphView,
state_id: int, node: nodes.Node,
function_stream: CodeIOStream,
callsite_stream: CodeIOStream):
state_dfg: SDFGState = sdfg.nodes()[state_id]
callsite_stream.write('\n///////////////////')
callsite_stream.write(f'// Tasklet code ({node.label})')
# Determine all defined symbols for the Unparser (for inference)
# Constants and other defined symbols
defined_symbols = state_dfg.symbols_defined_at(node)
defined_symbols.update({
k: v.dtype if hasattr(v, 'dtype') else dtypes.typeclass(type(v))
for k, v in sdfg.constants.items()
})
# All memlets of that node
memlets = {}
for edge in state_dfg.all_edges(node):
u, uconn, v, vconn, _ = edge
if u == node and uconn in u.out_connectors:
defined_symbols.update({uconn: u.out_connectors[uconn]})
elif v == node and vconn in v.in_connectors:
defined_symbols.update({vconn: v.in_connectors[vconn]})
body = node.code.code
for stmt in body:
stmt = copy.deepcopy(stmt)
result = StringIO()
dace.codegen.targets.sve.unparse.SVEUnparser(
sdfg, stmt, result, body, memlets,
util.get_loop_predicate(sdfg, dfg, node), self.counter_type,
defined_symbols, self.stream_associations)
callsite_stream.write(result.getvalue(), sdfg, state_id, node)
callsite_stream.write('///////////////////\n\n')
|
import pickle
import numpy as np
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from exercise_code.transforms import Transforms
import pandas as pd
def preprocess_y(data):
    mat_train_y = np.matrix(data)
prepro_y_test = MinMaxScaler()
prepro_y_test.fit(mat_train_y)
scaled_data = pd.DataFrame(prepro_y_test.transform(mat_train_y))
y_row_vector = np.array(scaled_data)
return y_row_vector.reshape(-1,1)
df_test= pd.read_csv('datasets/house_price_data.csv')
# Remove id column
df_test = df_test.drop('Id', 1)
y_raw = df_test.SalePrice
X_raw = df_test.drop('SalePrice', axis=1)
# scale the SalePrice values to [0, 1]
y_test_preprocessed = preprocess_y(y_raw)
def evaluate_regression_model(model_path):
modeldict = pickle.load(open(model_path, 'rb'))
fc_net = modeldict['fully_connected_net']
if fc_net is None:
raise ValueError('The model you have saved is of the type None. Please check')
transforms = modeldict['transforms']
# Apply the transformations on the input data
transformed_X = transforms.apply_transforms_on_test_data(test_dataset_X=X_raw)
if not transformed_X.shape[0]== X_raw.shape[0]:
raise ValueError('Invalid Transform function. You should not remove the data elements')
X_test = transformed_X.reshape(transformed_X.shape[0],-1)
y_pred =fc_net.loss(X_test)
mse_loss = np.sqrt(metrics.mean_squared_error(y_test_preprocessed, y_pred))
score = 0.001/mse_loss # improve
return score
|
from manim import *
class SquareToCircle(Scene):
def construct(self):
circle = Circle()
square = Square()
square.flip(RIGHT)
square.rotate(-3 * TAU / 8)
circle.set_fill(PINK, opacity=0.5)
self.play(ShowCreation(square))
self.play(Transform(square, circle))
self.play(FadeOut(square))
|
from extractFeatures import ExtractFeatures
import nltk.classify.util
from nltk.tokenize import word_tokenize
from nltk.classify import NaiveBayesClassifier
class SentiNaiveBayesClassifier:
def __init__(self):
self.classifier = None
'''
    Train a Naive Bayes classifier with the training data.
'''
def train(self):
# Get data from the extracter
extracter = ExtractFeatures('./data/reviewsData.txt')
tokenizedData = extracter.getTokenizedData()
trainingData = tokenizedData['train']
print ''
print 'Training Naive Bayes Classifier'
print 'Training data size = ', len(trainingData)
print ''
modifiedTrainingData = [(self.word_feats(item[0]), item[1]) for item in trainingData]
self.classifier = NaiveBayesClassifier.train(modifiedTrainingData)
print 'Training Naive Bayes Classifier Completed'
def validateClassifier(self):
extracter = ExtractFeatures('./data/reviewsData.txt')
tokenizedData = extracter.getTokenizedData()
testData = tokenizedData['test']
print ''
print 'Validating Naive Bayes Classifier'
print 'Test data size = ', len(testData)
print ''
modifiedTestData = [(self.word_feats(item[0]), item[1]) for item in testData]
print 'Accuracy: ', nltk.classify.util.accuracy(self.classifier, modifiedTestData)
def word_feats(self, words):
return dict([(word, True) for word in words])
def classify(self, statusMessage):
tokenizedwords = word_tokenize(statusMessage)
return self.classifier.classify(self.word_feats(tokenizedwords)) |
import numpy as np
from scipy.optimize import minimize
from datetime import datetime # For calibration timer
class Calibrate(object):
"""
Class for calibrating a specified rBergomi model to a Surface object.
    After running, the passed rBergomi instance must hold the calibrated results.
    Need to think about extending this more naturally for other TBSS processes,
    given that Y and v need amending in each case.
"""
def __init__(self, rbergomi, surface, gamma = False, b = 0.0001, seed = 0):
# Assign rBergomi and Surface instances to Calibrate
self.rbergomi = rbergomi # rBergomi instance should inherit all results
self.surface = surface # Only k and xi methods required
# Extract helpful results from Surface instance
self.k = np.array(surface._log_strike_surface()) # Tidy up
self.M = surface._maturities
self.xi = surface.xi(n = self.rbergomi.n) # Awful
self.actual_surface = np.array(surface.surface)
# Store scipy.optimize.minimize requirements
self.seed = seed # Not actually consumed by minimize
# Set seed here before the single call for increments
np.random.seed(self.seed)
self.dW1 = self.rbergomi.dW1() # Runs fast if already assigned?
self.dW2 = self.rbergomi.dW2()
self.gamma = gamma
self.Y = rbergomi.Y(self.dW1)
def run(self, rho0 = -0.5, eta0 = 1.5, maxiter = 10, method = 'L-BFGS-B',
rho_bnds = (-0.999,0.00), eta_bnds = (0.00,10.00)):
"""
Method for actually performing the calibration.
"""
# Begin timer
t0 = datetime.now()
self.rho0 = rho0
self.eta0 = eta0
self.rho_bnds = rho_bnds
self.eta_bnds = eta_bnds
self.maxiter = maxiter
self.method = method
# Specify objective function for minimisation
# Need to control this more: add weighting scheme
def rmse(x):
# Assign entries of array x
rho = x[0]
eta = x[1]
# Build appropriate paths for chosen x
# Assigning instead to self.S etc. slows a small amount
dZ = self.rbergomi.dB(self.dW1, self.dW2, rho = rho)
if self.gamma:
v = self.xi * self.rbergomi.v2(self.Y, eta = eta)
else:
v = self.xi * self.rbergomi.V(self.Y, eta = eta)
S = self.rbergomi.S(v, dZ)
# Compare implied with actual surface
# Should change to using rbergomi.IV
implied = self.rbergomi.surface(S, self.M, self.k)
rmse = self.rbergomi.rmse(self.actual_surface, implied)
return rmse
# Perform calibration
results = minimize(rmse, (self.rho0, self.eta0), method = self.method,
bounds = (self.rho_bnds, self.eta_bnds),
options = {'maxiter': self.maxiter})
# Assign results to instance
self.results = results
self.rho = results.x[0]
self.eta = results.x[1]
self.rmse = results.fun
# Now neatly present outcome
rmse = 100 * np.round(self.rmse, decimals = 5)
rho = np.round(self.rho, decimals = 5)
eta = np.round(self.eta, decimals = 5)
t1 = datetime.now()
dt = t1 - t0
self.time = np.round(dt.seconds + dt.microseconds/1e6, decimals = 3)
time = self.time
if results.success == True:
print('rmse:', rmse)
print('time:', time)
print('nit:', results.nit) # Tidy this up
print('rho:', rho)
print('eta:', eta)
else:
print('Minimum RMSE not found.')
def paths(self):
"""
Method for saving v and S paths to the Calibrate instance. Of
course this uses exact calibration results and same increments.
"""
# Reconstruct everything with solved parameters
# This is not very nice: now rbergomi.S is a method and calibrate.S
# is an array, really we want rbergomi.S to hold the array
if self.gamma:
self.v = self.xi * self.rbergomi.v2(self.Y, eta = self.eta)
else:
self.v = self.xi * self.rbergomi.V(self.Y, eta = self.eta)
self.dZ = self.rbergomi.dB(self.dW1, self.dW2, rho = self.rho)
self.S = self.rbergomi.S(self.v, self.dZ)
return (self.v, self.S)
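# Example usage (sketch only; rBergomi and Surface are assumed to come from the
# accompanying rbergomi package, so the constructor calls below are placeholders
# rather than a verified API):
#
#     rb = rBergomi(n=100, N=30000, T=1.0, a=-0.43)
#     surf = Surface(...)  # market implied-volatility surface
#     calib = Calibrate(rb, surf, seed=0)
#     calib.run(rho0=-0.5, eta0=1.5, maxiter=10)
#     v, S = calib.paths()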
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime
import sqlalchemy.sql.functions as func
Base = declarative_base()
class Article(Base):
"""
    Data about an article that was scraped from the web
"""
__tablename__ = 'article'
id = Column(Integer, primary_key=True)
url = Column(String,
unique=True,
nullable=False)
publication_name = Column(String,
nullable=False)
byline = Column(String,
nullable=False,
index=True)
publication_date = Column(DateTime(timezone=True),
nullable=False)
created_at = Column(DateTime(timezone=True),
nullable=False,
server_default=func.now())
updated_at = Column(DateTime(timezone=True),
nullable=False,
server_default=func.now(),
onupdate=func.current_timestamp())
body_html = Column(String,
nullable=False)
title = Column(String,
nullable=False,
index=True)
def __repr__(self):
return "<Article(url=%r, title='%s')>" % (self.url, self.title)
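if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module): create the
    # schema in an in-memory SQLite database and store one hypothetical article.
    from datetime import datetime
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Article(
        url='https://example.com/article-1',
        publication_name='Example Gazette',
        byline='Jane Doe',
        publication_date=datetime(2020, 1, 1),
        body_html='<p>Hello</p>',
        title='Hello world',
    ))
    session.commit()
    print(session.query(Article).first())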
|
# -*- coding: utf-8 -*-
from uuid import uuid4
from pyramid.security import Allow
from schematics.exceptions import ValidationError
from schematics.transforms import blacklist, whitelist
from schematics.types import StringType, BaseType, MD5Type
from schematics.types.compound import ModelType, DictType
from schematics.types.serializable import serializable
from zope.interface import implementer
from openprocurement.api.auth import ACCR_5
from openprocurement.api.constants import SANDBOX_MODE
from openprocurement.api.interfaces import IOPContent
from openprocurement.api.models import OpenprocurementSchematicsDocument, Model
from openprocurement.api.models import (
Revision,
IsoDateTimeType,
ListType,
BusinessOrganization,
Document,
Organization as BaseOrganization,
ContactPoint as BaseContactPoint
)
from openprocurement.api.utils import get_now
class IFramework(IOPContent):
""" Base framework marker interface """
@implementer(IFramework)
class Framework(OpenprocurementSchematicsDocument, Model):
class Options:
namespace = "Framework"
_edit_role = whitelist(
"title",
"title_en",
"title_ru",
"description",
"description_en",
"description_ru",
)
_create_role = _edit_role + whitelist("frameworkType", "frameworkDetails", "mode")
_view_role = _create_role + whitelist(
"date",
"prettyID",
"documents",
"doc_id",
"frameworkDetails",
"dateModified",
"frameworkType",
"owner",
)
roles = {
# Specify for inheritance when several framework types appear
"create": _create_role,
"edit_draft": _edit_role,
"view": _view_role,
"chronograph": whitelist("next_check"),
"chronograph_view": _view_role,
# "Administrator": whitelist("status", "mode"),
"default": blacklist("doc_id", "__parent__"), # obj.store() use default role
"plain": blacklist( # is used for getting patches
"_attachments", "revisions", "dateModified", "_id", "_rev", "doc_type", "__parent__"
),
"listing": whitelist("dateModified", "doc_id"),
"embedded": blacklist("_id", "_rev", "doc_type", "__parent__"),
}
prettyID = StringType()
title = StringType(required=True)
title_en = StringType()
title_ru = StringType()
description = StringType()
description_en = StringType()
description_ru = StringType()
date = IsoDateTimeType()
dateModified = IsoDateTimeType()
frameworkType = StringType(required=True)
if SANDBOX_MODE:
frameworkDetails = StringType()
owner = StringType()
owner_token = StringType()
mode = StringType(choices=["test"])
transfer_token = StringType()
status = StringType(choices=["draft"], default="draft")
_attachments = DictType(DictType(BaseType), default=dict()) # couchdb attachments
revisions = BaseType(default=list)
central_accreditations = (ACCR_5,)
edit_accreditations = (ACCR_5,)
def __repr__(self):
return "<%s:%r@%r>" % (type(self).__name__, self.id, self.rev)
def __local_roles__(self):
roles = dict([("{}_{}".format(self.owner, self.owner_token), "framework_owner")])
return roles
@serializable(serialized_name="id")
def doc_id(self):
"""A property that is serialized by schematics exports."""
return self._id
@serializable(serialized_name="date")
def old_date(self):
if self.__parent__ is None:
return get_now().isoformat()
root = self.get_root()
request = root.request
if not self.date and request.method == "POST":
return get_now().isoformat()
return self.date.isoformat()
def get_role(self):
root = self.__parent__
request = root.request
if request.authenticated_role == "Administrator":
role = "Administrator"
elif request.authenticated_role == "chronograph":
role = "chronograph"
else:
role = "edit_{}".format(request.context.status)
return role
def import_data(self, raw_data, **kw):
"""
Converts and imports the raw data into the instance of the model
according to the fields in the model.
:param raw_data:
The data to be imported.
"""
data = self.convert(raw_data, **kw)
del_keys = [
k for k in data.keys() if data[k] == self.__class__.fields[k].default or data[k] == getattr(self, k)
]
for k in del_keys:
del data[k]
self._data.update(data)
return self
def __acl__(self):
acl = [
(Allow, "{}_{}".format(self.owner, self.owner_token), "edit_framework"),
]
return acl
def validate_frameworkDetails(self, *args, **kw):
if self.mode and self.mode == "test" and self.frameworkDetails and self.frameworkDetails != "":
raise ValidationError("frameworkDetails should be used with mode test")
class ISubmission(IOPContent):
pass
class ContactPoint(BaseContactPoint):
def validate_telephone(self, data, value):
pass
class Organization(BaseOrganization):
contactPoint = ModelType(ContactPoint, required=True)
class SubmissionBusinessOrganization(BusinessOrganization):
pass
@implementer(ISubmission)
class Submission(OpenprocurementSchematicsDocument, Model):
class Options:
namespace = "Submission"
roles = {
"create": whitelist("tenderers", "documents", "frameworkID"),
"edit": whitelist("tenderers", "status", "frameworkID"),
"edit_active": whitelist(),
"edit_bot": whitelist("status", "qualificationID"),
"default": blacklist("doc_id", "__parent__"),
"plain": blacklist( # is used for getting patches
"_attachments", "revisions", "dateModified", "_id", "_rev", "doc_type", "__parent__"
),
"view": whitelist(
"doc_id",
"owner",
"status",
"tenderers",
"documents",
"qualificationID",
"frameworkID",
"dateModified",
"date",
"datePublished",
"submissionType",
),
"embedded": blacklist("_id", "_rev", "doc_type", "__parent__"),
}
tenderers = ListType(ModelType(SubmissionBusinessOrganization, required=True), required=True, min_size=1,)
documents = ListType(ModelType(Document, required=True), default=list())
qualificationID = StringType()
frameworkID = StringType(required=True)
dateModified = IsoDateTimeType()
date = IsoDateTimeType(default=get_now)
datePublished = IsoDateTimeType()
owner = StringType()
owner_token = StringType()
transfer_token = StringType()
_attachments = DictType(DictType(BaseType), default=dict())
revisions = BaseType(default=list)
mode = StringType(choices=["test"])
def get_role(self):
role = "edit"
root = self.__parent__
auth_role = root.request.authenticated_role
if auth_role == "bots":
role = "edit_bot"
elif self.status == "active":
role = "edit_active"
return role
def __repr__(self):
return "<%s:%r@%r>" % (type(self).__name__, self.id, self.rev)
def __local_roles__(self):
roles = dict([("{}_{}".format(self.owner, self.owner_token), "submission_owner")])
return roles
@serializable(serialized_name="id")
def doc_id(self):
"""A property that is serialized by schematics exports."""
return self._id
def import_data(self, raw_data, **kw):
"""
Converts and imports the raw data into the instance of the model
according to the fields in the model.
:param raw_data:
The data to be imported.
"""
data = self.convert(raw_data, **kw)
del_keys = [
k for k in data.keys() if data[k] == self.__class__.fields[k].default or data[k] == getattr(self, k)
]
for k in del_keys:
del data[k]
self._data.update(data)
return self
def __acl__(self):
acl = [
(Allow, "{}_{}".format(self.owner, self.owner_token), "edit_submission"),
]
return acl
class IQualification(IOPContent):
pass
@implementer(IQualification)
class Qualification(OpenprocurementSchematicsDocument, Model):
class Options:
namespace = "Qualification"
roles = {
"create": whitelist("submissionID", "frameworkID", "documents"),
"edit": whitelist("status", "documents"),
"default": blacklist("doc_id", "__parent__"),
"plain": blacklist( # is used for getting patches
"_attachments", "revisions", "dateModified", "_id", "_rev", "doc_type", "__parent__"
),
"view": whitelist(
"doc_id",
"status",
"submissionID",
"frameworkID",
"documents",
"date",
"dateModified",
"qualificationType",
),
"embedded": blacklist("_id", "_rev", "doc_type", "__parent__"),
}
submissionID = StringType(required=True)
frameworkID = StringType(required=True)
date = IsoDateTimeType(default=get_now)
dateModified = IsoDateTimeType()
framework_owner = StringType()
framework_token = StringType()
documents = ListType(ModelType(Document, required=True), default=list())
_attachments = DictType(DictType(BaseType), default=dict())
revisions = BaseType(default=list)
mode = StringType(choices=["test"])
def __repr__(self):
return "<%s:%r@%r>" % (type(self).__name__, self.id, self.rev)
@serializable(serialized_name="id")
def doc_id(self):
"""A property that is serialized by schematics exports."""
return self._id
def import_data(self, raw_data, **kw):
"""
Converts and imports the raw data into the instance of the model
according to the fields in the model.
:param raw_data:
The data to be imported.
"""
data = self.convert(raw_data, **kw)
del_keys = [
k for k in data.keys() if data[k] == self.__class__.fields[k].default or data[k] == getattr(self, k)
]
for k in del_keys:
del data[k]
self._data.update(data)
return self
def __acl__(self):
acl = [
(Allow, "{}_{}".format(self.framework_owner, self.framework_token), "edit_qualification"),
]
return acl
class IAgreement(IOPContent):
""" Base interface for agreement container """
@implementer(IAgreement)
class Agreement(OpenprocurementSchematicsDocument, Model):
""" Base agreement model """
id = MD5Type(required=True, default=lambda: uuid4().hex)
agreementID = StringType()
agreementType = StringType(default="electronicCatalogue")
# maybe terminated ????
status = StringType(choices=["active", "terminated"], required=True)
date = IsoDateTimeType()
dateModified = IsoDateTimeType()
revisions = BaseType(default=list)
owner_token = StringType(default=lambda: uuid4().hex)
transfer_token = StringType(default=lambda: uuid4().hex)
owner = StringType()
mode = StringType(choices=["test"])
def import_data(self, raw_data, **kw):
"""
Converts and imports the raw data into the instance of the model
according to the fields in the model.
:param raw_data:
The data to be imported.
"""
data = self.convert(raw_data, **kw)
del_keys = [
k for k in data.keys() if data[k] == self.__class__.fields[k].default or data[k] == getattr(self, k)
]
for k in del_keys:
del data[k]
self._data.update(data)
return self
def __local_roles__(self):
return dict(
[
("{}_{}".format(self.owner, self.owner_token), "agreement_owner"),
]
)
def __acl__(self):
acl = [
(Allow, "{}_{}".format(self.owner, self.owner_token), "edit_agreement"),
]
return acl
def __repr__(self):
return "<%s:%r@%r>" % (type(self).__name__, self.id, self.rev)
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scikit-learn trainer package setup."""
import setuptools
REQUIRED_PACKAGES = [
'matplotlib>=2.2.3',
'seaborn>=0.9.0',
'scikit-learn>=0.20.2',
'pandas-gbq>=0.8.0',
'cloudml-hypertune',
'scikit-plot',
'tensorflow',
'google-api-python-client'
]
setuptools.setup(
name='custom_scikit_learn',
author='Shixin Luo',
version='v1',
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
include_package_data=True,
scripts=['predictor.py'],
description='',
)
|
# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from absl import app
from iree.tf.support import tf_test_utils
import tensorflow as tf
# Empty lists and dicts are currently unsupported. IREE also currently cannot
# represent multiple sequence types, so we turn all sequences into tuples.
class PyTreeModule(tf_test_utils.TestModule):
@tf_test_utils.tf_function_unit_test(input_signature=[])
def output_tuple_len_1(self):
return (0,)
@tf_test_utils.tf_function_unit_test(input_signature=[])
def output_tuple_len_2(self):
return 0, 1
@tf_test_utils.tf_function_unit_test(input_signature=[])
def output_tuple_len_3(self):
return 0, 1, 2
@tf_test_utils.tf_function_unit_test(input_signature=[])
def output_nested_pytree(self):
return {"key_a": (0, 1, 2), "key_b": (0, 1, {"key_c": (0, 1)})}
@tf_test_utils.tf_function_unit_test(input_signature=[{
"key_a": (tf.TensorSpec([]), tf.TensorSpec([]), tf.TensorSpec([])),
"key_b": (tf.TensorSpec([]), tf.TensorSpec([]), {
"key_c": (tf.TensorSpec([]), tf.TensorSpec([]))
})
}])
def input_nested_pytree(self, input_pytree):
return input_pytree
class PyTreeTest(tf_test_utils.TracedModuleTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._modules = tf_test_utils.compile_tf_module(PyTreeModule)
def main(argv):
del argv # Unused
PyTreeTest.generate_unit_tests(PyTreeModule)
tf.test.main()
if __name__ == '__main__':
app.run(main)
|
from __future__ import unicode_literals
import os
import logging
from dvc.utils.compat import urlparse
from dvc.istextfile import istextfile
from dvc.exceptions import DvcException
from dvc.remote.local import RemoteLOCAL
from dvc.output.base import OutputBase, OutputAlreadyTrackedError
logger = logging.getLogger(__name__)
class OutputLOCAL(OutputBase):
REMOTE = RemoteLOCAL
def __init__(
self,
stage,
path,
info=None,
remote=None,
cache=True,
metric=False,
persist=False,
tags=None,
):
super(OutputLOCAL, self).__init__(
stage,
path,
info,
remote=remote,
cache=cache,
metric=metric,
persist=persist,
tags=tags,
)
if remote:
p = os.path.join(
remote.prefix, urlparse(self.url).path.lstrip("/")
)
else:
p = path
if not os.path.isabs(p):
p = self.remote.to_ospath(p)
p = os.path.join(stage.wdir, p)
p = os.path.abspath(os.path.normpath(p))
self.path_info = {"scheme": "local", "path": p}
self._dir_cache = {}
def __str__(self):
return self.rel_path
@property
def is_in_repo(self):
return urlparse(self.url).scheme != "remote" and not os.path.isabs(
self.url
)
def assign_to_stage_file(self, stage):
from dvc.repo import Repo
fullpath = os.path.abspath(stage.wdir)
self.path_info["path"] = os.path.join(fullpath, self.stage_path)
self.repo = Repo(self.path)
self.stage = stage
return self
@property
def sep(self):
return os.sep
@property
def rel_path(self):
return os.path.relpath(self.path)
@property
def stage_path(self):
return os.path.relpath(self.path, self.stage.wdir)
@property
def cache(self):
return self.repo.cache.local.get(self.checksum)
@property
def is_dir_cache(self):
return self.repo.cache.local.is_dir_cache(self.checksum)
def dumpd(self):
ret = super(OutputLOCAL, self).dumpd()
if self.is_in_repo:
path = self.remote.unixpath(
os.path.relpath(self.path, self.stage.wdir)
)
else:
path = self.url
ret[self.PARAM_PATH] = path
return ret
def verify_metric(self):
if not self.metric:
return
if not os.path.exists(self.path):
return
if os.path.isdir(self.path):
msg = "directory '{}' cannot be used as metrics."
raise DvcException(msg.format(self.rel_path))
if not istextfile(self.path):
msg = "binary file '{}' cannot be used as metrics."
raise DvcException(msg.format(self.rel_path))
def save(self):
if not os.path.exists(self.path):
raise self.DoesNotExistError(self.rel_path)
if not os.path.isfile(self.path) and not os.path.isdir(self.path):
raise self.IsNotFileOrDirError(self.rel_path)
if (os.path.isfile(self.path) and os.path.getsize(self.path) == 0) or (
os.path.isdir(self.path) and len(os.listdir(self.path)) == 0
):
msg = "file/directory '{}' is empty.".format(self.rel_path)
logger.warning(msg)
if not self.use_cache:
self.info = self.remote.save_info(self.path_info)
self.verify_metric()
if not self.IS_DEPENDENCY:
msg = "Output '{}' doesn't use cache. Skipping saving."
logger.info(msg.format(self.rel_path))
return
assert not self.IS_DEPENDENCY
if not self.changed():
msg = "Output '{}' didn't change. Skipping saving."
logger.info(msg.format(self.rel_path))
return
if self.is_in_repo:
if self.repo.scm.is_tracked(self.path):
raise OutputAlreadyTrackedError(self.rel_path)
if self.use_cache:
self.repo.scm.ignore(self.path)
self.info = self.remote.save_info(self.path_info)
@property
def dir_cache(self):
return self.repo.cache.local.load_dir_cache(self.checksum)
def get_files_number(self):
if self.cache is None:
return 0
if self.is_dir_cache:
return len(self.dir_cache)
return 1
|
from __future__ import print_function
import pyyed
g = pyyed.Graph()
g.add_node('foo', font_family="Zapfino")
g.add_node('foo2', shape="roundrectangle", font_style="bolditalic",
underlined_text="true")
g.add_edge('foo1', 'foo2')
g.add_node('abc', font_size="72", height="100")
g.add_node('bar', label="Multi\nline\ntext")
g.add_node('foobar', label="""Multi
Line
Text!""")
g.add_edge('foo', 'foo1', label="EDGE!", width="3.0", color="#0000FF",
arrowhead="white_diamond", arrowfoot="standard", line_type="dotted")
print(g.get_graph())
print("\n\n\n")
g = pyyed.Graph()
g.add_node('foo', font_family="Zapfino")
gg = g.add_group("MY_Group", shape="diamond")
gg.add_node('foo2', shape="roundrectangle", font_style="bolditalic",
underlined_text="true")
gg.add_node('abc', font_size="72", height="100")
g.add_edge('foo2', 'abc')
g.add_edge('foo', 'MY_Group')
print(g.get_graph())
print("\n\n\n")
g = pyyed.Graph()
g.add_node('Car', shape_fill="#EEEEEE",
node_type="UMLClassNode",
UML={"attributes": "Model\nManufacturer\nPrice",
"methods": "getModel()\ngetManufacturer()\ngetPrice()\nsetPrice()"})
g.add_node('ICar', shape_fill="#EEEEEE",
node_type="UMLClassNode",
UML={"stereotype": "interface",
"attributes": "",
"methods": "getModel()\ngetManufacturer()\ngetPrice()\nsetPrice()"})
g.add_node('Vehicle', shape_fill="#EEEEEE", node_type="UMLClassNode")
g.add_edge('Car', 'Vehicle', arrowhead="white_delta")
g.add_edge('Car', 'ICar', arrowhead="white_delta", line_type="dashed")
g.add_node('This is a note', shape_fill="#EEEEEE", node_type="UMLNoteNode")
print(g.get_graph())
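# The graphs above are only printed as GraphML strings; to open them in yEd,
# pyyed can also write them to disk (illustrative, assumes a writable
# working directory):
#
#     g.write_graph('example.graphml')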
|
import pandas as pd
import numpy as np
# Saiyam Lakhanpal
# Github- https://github.com/Saiyamlakhanpal
class topsis:
def __init__(self, input_file, weight_str, impact_str, out_file):
self.input_file = input_file
self.weight_str = weight_str
self.impact_str = impact_str
self.out_file = out_file
self.r_no = '101917188'
def calculate(self):
weights = self.weight_str.split(',')
try:
            weights = [float(i) for i in weights]
except ValueError:
print("Weights should only be numbers\n")
exit()
impacts = self.impact_str.split(',')
for i in impacts:
if i != '+' and i != '-':
print("impacts should be either + or -")
exit()
try:
read_file = pd.read_excel(self.input_file)
read_file.to_csv(self.r_no + '-data.csv', index=None, header=True)
df = pd.read_csv(self.r_no + "-data.csv")
except FileNotFoundError:
print("File not found")
exit()
if len(df.columns) < 3:
print("Input file must contain three or more columns.\n")
exit()
check = {len(df.columns)-1, len(weights), len(impacts)}
if len(check) != 1:
print(
"Number of weights, number of impacts and number of indicators must be same.\n")
exit()
for col in df.iloc[:, 1:]:
for i in df[col]:
                if not isinstance(i, (int, float, np.integer, np.floating)):
print("columns must contain numeric values only\n")
exit()
arr = np.array(df.iloc[:, 1:])
root_sum_of_squares = np.sqrt(np.sum(arr**2, axis=0))
arr = np.divide(arr, root_sum_of_squares)
arr = arr*weights
ideals = np.zeros((arr.shape[1], 2))
for i in range(len(impacts)):
l = np.zeros(2)
if impacts[i] == '+':
l[0] = max(arr[:, i])
l[1] = min(arr[:, i])
elif impacts[i] == '-':
l[0] = min(arr[:, i])
l[1] = max(arr[:, i])
ideals[i, 0] = l[0]
ideals[i, 1] = l[1]
ideals = ideals.T
distances = np.zeros((arr.shape[0], 2))
for i in range(arr.shape[0]):
best_dist = np.linalg.norm(arr[i, :] - ideals[0, :])
worst_dist = np.linalg.norm(arr[i, :] - ideals[1, :])
distances[i, 0] = best_dist
distances[i, 1] = worst_dist
performance_score = np.divide(
distances[:, 1], np.add(distances[:, 0], distances[:, 1]))
rank = np.zeros(arr.shape[0])
temp = list(performance_score)
count = 1
for i in range(len(performance_score)):
ind = np.argmax(temp)
rank[ind] = count
count += 1
temp[ind] = -99
df_out = df
df_out['Topsis Score'] = performance_score
df_out['Rank'] = rank
df_out.to_csv(self.out_file, index=None)
print("Completed succesfully! Check " +
self.out_file+" for the output\n")
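# Hypothetical usage sketch (file name, weights and impacts are illustrative,
# not taken from this module). The input Excel file is expected to contain an
# identifier column followed by numeric criteria columns:
#
#     t = topsis('input_data.xlsx', '1,1,1,2', '+,+,-,+', 'result.csv')
#     t.calculate()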
|
import pytest
from rest_framework.test import APIClient
@pytest.fixture(autouse=True)
def enable_db_access(db):
pass
@pytest.fixture
def api_client():
    return APIClient()
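# Hypothetical usage sketch: tests receive the fixture by declaring it as an
# argument; the endpoint below is illustrative and not part of this project.
#
#     def test_list_endpoint(api_client):
#         response = api_client.get('/api/items/')
#         assert response.status_code == 200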
|
# -*- coding: utf-8 -*-
"""
.. created on Wed Feb 14 19:40:02 2018
.. author: PE LLC [email protected]
.. copyright: 2018, Howard Dunn. Apache 2.0 v2 licensed.
"""
def have():
try:
import clr
print('Have clr as {}'.format(clr))
except Exception as ex:
print('Don"t have clr: Not in IronPython environment or ' + \
'no ip/clr access')
print('OK')
def hello():
print('hello')
if __name__ == '__main__':
have()
hello()
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from functools import partial, reduce
from typing import Any
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.types import BooleanType, StructField, StructType, to_arrow_type
from pyspark.sql.utils import AnalysisException
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function
from databricks.koalas.dask.compatibility import string_types
from databricks.koalas.dask.utils import derived_from
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.selection import SparkDataFrameLocator
from databricks.koalas.typedef import infer_pd_series_spark_type
class DataFrame(_Frame):
"""
    Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds a Spark DataFrame
    internally.
    :ivar _sdf: Spark DataFrame instance
:ivar _metadata: Metadata related to column names and index information.
"""
@derived_from(pd.DataFrame)
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, pd.DataFrame):
self._init_from_pandas(data)
elif isinstance(data, spark.DataFrame):
self._init_from_spark(data, index)
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
self._init_from_pandas(pdf)
def _init_from_pandas(self, pdf):
metadata = Metadata.from_pandas(pdf)
reset_index = pdf.reset_index()
reset_index.columns = metadata.all_fields
schema = StructType([StructField(name, infer_pd_series_spark_type(col),
nullable=bool(col.isnull().any()))
for name, col in reset_index.iteritems()])
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema),
metadata)
def _init_from_spark(self, sdf, metadata=None):
self._sdf = sdf
if metadata is None:
self._metadata = Metadata(column_fields=self._sdf.schema.fieldNames())
else:
self._metadata = metadata
@property
def _index_columns(self):
return [self._sdf.__getitem__(field)
for field in self._metadata.index_fields]
def _reduce_for_stat_function(self, sfun):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equal the
number of columns.
:param sfun: either an 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
"""
from inspect import signature
exprs = []
num_args = len(signature(sfun).parameters)
for col in self.columns:
col_sdf = self._sdf[col]
col_type = self._sdf.schema[col].dataType
if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(col))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
return row # Return first row as a Series
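    # Illustrative sfun shapes accepted by _reduce_for_stat_function (examples
    # only, not an exhaustive list):
    #   one argument:  lambda col: F.mean(col)
    #   two arguments: lambda col, dtype: F.max(col) if isinstance(dtype, BooleanType) else F.mean(col)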
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
        -------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df = df[['species', 'population']]
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]})
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df = df[['col1', 'col2']]
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently supported only when the DataFrame has a single index.
"""
from databricks.koalas.series import Series
if len(self._metadata.index_info) != 1:
raise KeyError('Currently supported only when the DataFrame has a single index.')
return Series(self._index_columns[0], self, [])
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns. By default
yields a new object.
:param keys: column label or list of column labels / arrays
:param drop: boolean, default True
Delete columns to be used as the new index
:param append: boolean, default False
Whether to append columns to existing index
:param inplace: boolean, default False
Modify the DataFrame in place (do not create a new object)
:return: :class:`DataFrame`
"""
if isinstance(keys, string_types):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
columns = [column for column in self._metadata.column_fields if column not in keys]
else:
columns = self._metadata.column_fields
if append:
index_info = self._metadata.index_info + [(column, column) for column in keys]
else:
index_info = [(column, column) for column in keys]
metadata = self._metadata.copy(column_fields=columns, index_info=index_info)
if inplace:
self._metadata = metadata
else:
kdf = self.copy()
kdf._metadata = metadata
return kdf
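    # Usage sketch for set_index (mirrors the pandas API; names are
    # illustrative):
    #   kdf = ks.DataFrame({'a': [1, 2], 'b': [3, 4]})
    #   kdf.set_index('a')                  # new DataFrame indexed by 'a'
    #   kdf.set_index('a', inplace=True)    # modifies kdf in place, returns None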
def reset_index(self, level=None, drop=False, inplace=False):
"""For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
:param level: int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by default
:param drop: boolean, default False
Do not try to insert index into dataframe columns. This resets the index to the
default integer index.
:param inplace: boolean, default False
Modify the DataFrame in place (do not create a new object)
:return: :class:`DataFrame`
"""
if len(self._metadata.index_info) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._metadata.index_info) > 1
if multi_index:
rename = lambda i: 'level_{}'.format(i)
else:
rename = lambda i: \
                'index' if 'index' not in self._metadata.column_fields else 'level_{}'.format(i)
if level is None:
index_columns = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._metadata.index_info)]
index_info = []
else:
if isinstance(level, (int, string_types)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for l in level:
if l >= len(self._metadata.index_info):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._metadata.index_info), l + 1))
idx = level
elif all(isinstance(l, string_types) for l in level):
idx = []
for l in level:
try:
i = self._metadata.index_fields.index(l)
idx.append(i)
except ValueError:
if multi_index:
                            raise KeyError('Level {} not found'.format(l))
                        else:
                            raise KeyError('Level {} must be same as name ({})'
                                           .format(l, self._metadata.index_fields[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
index_columns = []
index_info = self._metadata.index_info.copy()
for i in idx:
info = self._metadata.index_info[i]
column_field, index_name = info
index_columns.append((column_field,
                                      index_name if index_name is not None else rename(i)))
index_info.remove(info)
if drop:
index_columns = []
metadata = self._metadata.copy(
column_fields=[column for column, _ in index_columns] + self._metadata.column_fields,
index_info=index_info)
columns = [name for _, name in index_columns] + self._metadata.column_fields
if inplace:
self._metadata = metadata
self.columns = columns
else:
kdf = self.copy()
kdf._metadata = metadata
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
        NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
        This function takes a dataframe and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
See Also
--------
DataFrame.to_koalas
"""
return self._sdf
def to_pandas(self):
"""
Return a Pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.all_fields])
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
# TODO: push to OSS
pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()
for field in sdf.schema})
if len(self._metadata.index_info) > 0:
append = False
for index_field in self._metadata.index_fields:
drop = index_field not in self._metadata.column_fields
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[self._metadata.column_fields]
index_names = self._metadata.index_names
if len(index_names) > 0:
if isinstance(pdf.index, pd.MultiIndex):
pdf.index.names = index_names
else:
pdf.index.name = index_names[0]
return pdf
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
metadata = self._metadata.copy(
column_fields=(self._metadata.column_fields +
[name for name, _ in pairs if name not in self._metadata.column_fields]))
return DataFrame(sdf, metadata)
@property
def loc(self):
return SparkDataFrameLocator(self)
def copy(self):
return DataFrame(self._sdf, self._metadata.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine rows which contain missing values are removed.
* 0, or 'index' : Drop rows which contain missing values.
.. dropna currently only works for axis=0 or axis='index'
axis=1 is yet to be implemented.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]})
>>> df = df[['name', 'toy', 'born']]
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, string_types):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._metadata.column_fields]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
:param value: scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
:param axis: {0 or `index`}
1 and `columns` are not supported.
:param inplace: boolean, default False
Fill in place (do not create a new object)
:return: :class:`DataFrame`
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... })
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is None:
raise ValueError('Currently must specify value')
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
sdf = self._sdf.fillna(value)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._sdf.limit(n), self._metadata.copy())
@property
def columns(self):
"""The column labels of the DataFrame."""
return pd.Index(self._metadata.column_fields)
@columns.setter
def columns(self, names):
old_names = self._metadata.column_fields
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
sdf = self._sdf.select(self._metadata.index_fields +
[self[old_name]._scol.alias(new_name)
for (old_name, new_name) in zip(old_names, names)])
self._sdf = sdf
self._metadata = self._metadata.copy(column_fields=names)
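    # Usage sketch for the columns setter (names are illustrative): assigning a
    # list of the same length renames every column at once.
    #   kdf = ks.DataFrame({'a': [1, 2], 'b': [3, 4]})
    #   kdf.columns = ['x', 'y']
    #   list(kdf.columns)   # -> ['x', 'y']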
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
:return: :class:`pd.Series` The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)})
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._metadata.column_fields],
index=self._metadata.column_fields)
def count(self):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df = df[["Person", "Age", "Single"]]
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def unique(self):
sdf = self._sdf
return DataFrame(spark.DataFrame(sdf._jdf.distinct(), sdf.sql_ctx), self._metadata.copy())
def drop(self, labels, axis=1):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
            .. drop currently only works for axis=1 or axis='columns'
axis=0 is yet to be implemented.
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]})
>>> df = df[['x', 'y', 'z', 'w']]
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
axis = self._validate_axis(axis)
if axis == 1:
if isinstance(labels, list):
sdf = self._sdf.drop(*labels)
metadata = self._metadata.copy(
column_fields=[column for column in self._metadata.column_fields
if column not in labels])
else:
sdf = self._sdf.drop(labels)
metadata = self._metadata.copy(
column_fields=[column for column in self._metadata.column_fields
if column != labels])
return DataFrame(sdf, metadata)
raise NotImplementedError("Drop currently only works for axis=1")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']})
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by, ascending=True, inplace=False, na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 None 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 None 8 4
"""
if isinstance(by, string_types):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())
if inplace:
self._sdf = kdf._sdf
self._metadata = kdf._metadata
else:
return kdf
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._metadata.index_fields
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self[col]._scol.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self[col]._scol.isin(list(values)).alias(col) for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._sdf.select(_select_columns), self._metadata.copy())
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, string_types):
try:
return Series(self._sdf.__getitem__(key), self, self._metadata.index_info)
except AnalysisException:
raise KeyError(key)
if np.isscalar(key) or isinstance(key, (tuple, string_types)):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._sdf.__getitem__(key), self, self._metadata.index_info)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._sdf.filter(bcol), self._metadata.copy())
raise NotImplementedError(key)
def __repr__(self):
return repr(self.toPandas())
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Series):
assert value._kdf is self, \
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
kdf = self.assign(**{key: value})
self._sdf = kdf._sdf
self._metadata = kdf._metadata
def __getattr__(self, key: str) -> Any:
from databricks.koalas.series import Series
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return Series(self._sdf.__getattr__(key), self, self._metadata.index_info)
def __iter__(self):
return self.toPandas().__iter__()
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
def _repr_html_(self):
return self.head(max_display_count).toPandas()._repr_html_()
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
|
import json
from crispy_forms.layout import Field, Layout, Submit
from django import forms
from crispy_forms import layout
from cradmin_legacy import renderable
from cradmin_legacy.viewhelpers import crudbase
class AbstractEditableRenderer(renderable.AbstractRenderable, forms.Form):
"""
"""
template_name = "devilry_gradeform/advanced.editable.gradeform.django.html"
class AdvancedForm(forms.Form):
def __init__(self, *args, **kwargs):
super(AdvancedForm, self).__init__(*args, **kwargs)
counter = 0
while counter < 3:
self.fields['test{}'.format(counter)] = forms.CharField(label='Test field {}'.format(counter))
counter += 1
class AdvancedEditableGradeForm(AbstractEditableRenderer):
"""
"""
def __init__(self, assignment, feedbackset):
self.assignment = assignment
self.feedbackset = feedbackset
def get_template_context_object(self, request=None):
context = super(AdvancedEditableGradeForm, self).get_context_data()
context['gradeform_type'] = 'advanced'
print(self.assignment.gradeform_setup_json)
data = json.loads(self.assignment.gradeform_setup_json)
context['form'] = AdvancedForm()
return context
|
import logging
from string import Template
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
from babel.numbers import format_number
from eco_counter_bot.counters import counters as all_counters
from eco_counter_bot.models import CounterData, DataPoint, DateRange, Interval, YesterdaysResultsTweetParams
from eco_counter_bot.counter_service import NoDataFoundException, get_counts_for_period, extract_highlights
from eco_counter_bot.tweet_service import tweet_service
from eco_counter_bot.grapher import generate_yearly_plot
from eco_counter_bot.emojis import EMOJIS
logger = logging.getLogger(f"eco_counter_bot.{__name__}")
TWEET_TEMPLATE = Template(f"""Yesterday's {EMOJIS['BICYCLE']} counts ($yesterdays_date):
{EMOJIS['CHECKERED_FLAG']} Total: $count_total
{EMOJIS['MEDAL_1']} $counter_name_1: $counter_count_1
{EMOJIS['MEDAL_2']} $counter_name_2: $counter_count_2
{EMOJIS['MEDAL_3']} $counter_name_3: $counter_count_3
$year_reference year's total: $count_current_year_total
Preceding year's relative total: $count_preceding_year_total
Change: $percentage_change_emoji $percentage_change_number%
""")
def is_current_week(reference_date: date) -> bool:
reference_date_week = reference_date.isocalendar().week
todays_date_week = date.today().isocalendar().week
return reference_date_week == todays_date_week
def is_current_year(reference_date: date) -> bool:
return reference_date.year == date.today().year
def format_number_lb(number: float) -> str:
return format_number(number, 'lb_LU')
def to_daily_total(counter_data: CounterData) -> CounterData:
summed = [counter_data[0]]
for index, current_count in enumerate(counter_data):
if index > 0:
dp = DataPoint(
date=current_count["date"],
count=(summed[index-1]["count"] + current_count["count"])
)
summed.append(dp)
return summed
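# Illustrative behaviour of to_daily_total (counts are made up): daily counts
# of [5, 3, 2] on consecutive dates become the running totals [5, 8, 10] for
# the same dates.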
def publish_yesterdays_results() -> None:
today = date.today()
yesterday = today - timedelta(days=1)
current_week = DateRange(
start=yesterday - timedelta(days=yesterday.weekday()),
end=yesterday
)
current_year_relative = DateRange(
start=yesterday.replace(month=1, day=1),
end=yesterday
)
preceding_year_relative = DateRange(
start=yesterday.replace(year=yesterday.year - 1, month=1, day=1),
end=yesterday + relativedelta(years=-1)
)
preceding_year_full = DateRange(
start=yesterday.replace(year=yesterday.year - 1, month=1, day=1),
end=yesterday.replace(year=yesterday.year - 1, month=12, day=31)
)
try:
logger.debug("Attempting to get highlights")
current_week_counts = get_counts_for_period(all_counters, current_week, Interval.DAYS)
current_year_counts = get_counts_for_period(all_counters, current_year_relative, Interval.DAYS)
preceding_year_relative_counts = get_counts_for_period(all_counters, preceding_year_relative, Interval.MONTHS)
preceding_year_full_counts = get_counts_for_period(all_counters, preceding_year_full, Interval.DAYS)
current_week_highlights = extract_highlights(current_week_counts)
current_year_highlights = extract_highlights(current_year_counts)
preceding_year_relative_highlights = extract_highlights(preceding_year_relative_counts)
preceding_year_full_highlights = extract_highlights(preceding_year_full_counts)
except NoDataFoundException as e:
logger.warning(e, exc_info=True)
return
except Exception as e:
logger.error(f"Encountered unexpected error, aborting. {e}", exc_info=True)
return
logger.debug(f"Yesterday's date is {yesterday}")
percentage_change = (current_year_highlights["period_total_count"] - preceding_year_relative_highlights["period_total_count"]) / preceding_year_relative_highlights["period_total_count"] * 100
yesterdays_ordered_counts = current_week_highlights["most_recent_counts_sorted"]
tweet_template_params = YesterdaysResultsTweetParams(
yesterdays_date = yesterday.strftime("%d/%m"),
count_total = format_number_lb(current_week_highlights["most_recent_flattened_count"]),
counter_name_1 = yesterdays_ordered_counts[0]["counter"]["name"],
counter_name_2 = yesterdays_ordered_counts[1]["counter"]["name"],
counter_name_3 = yesterdays_ordered_counts[2]["counter"]["name"],
counter_count_1 = format_number_lb(yesterdays_ordered_counts[0]["count"]),
counter_count_2 = format_number_lb(yesterdays_ordered_counts[1]["count"]),
counter_count_3 = format_number_lb(yesterdays_ordered_counts[2]["count"]),
year_reference = "This" if is_current_year(yesterday) else "Last",
count_current_year_total = format_number_lb(current_year_highlights["period_total_count"]),
count_preceding_year_total = format_number_lb(preceding_year_relative_highlights["period_total_count"]),
percentage_change_emoji = EMOJIS["DOWN_RIGHT_ARROW"] if percentage_change < 0 else EMOJIS["UP_RIGHT_ARROW"],
percentage_change_number = format_number_lb(round(percentage_change, 1))
)
logger.debug(f"Assembled tweet params: {tweet_template_params}")
tweet_message = TWEET_TEMPLATE.substitute(tweet_template_params)
logger.info(f"Assembled tweet message: {tweet_message}")
preceding_year_daily_total = to_daily_total(preceding_year_full_highlights["flattened_counts"])
current_year_daily_total = to_daily_total(current_year_highlights["flattened_counts"])
generate_yearly_plot(preceding_year_daily_total, current_year_daily_total)
try:
logger.debug("Attempting to tweet")
tweet_service.tweet_thread(tweet_message, media_filename="tmp/daily_fig.png")
except Exception as e:
logger.error(f"Error while tweeting: {e}", exc_info=True)
|