filename (stringlengths 13-19) | text (stringlengths 134-1.04M) |
---|---|
the-stack_0_19420
|
from typing import Optional
from rlbot.agents.base_agent import SimpleControllerState
from rlbot.messages.flat import GameTickPacket, FieldInfo
from rlbot.utils.structures.quick_chats import QuickChats
from tmcp import TMCPMessage, ActionType
from strategy.objective import Objective
from utility.easing import lin_fall
from utility.rlmath import clip
from utility.vec import Vec3, Mat33, euler_to_rotation, angle_between, norm
MAX_SPEED = 2300
BOOST_ACCEL = 1060
THROTTLE_AIR_ACCEL = 66
BOOST_PR_SEC = 33
GRAVITY = Vec3(z=-650)
JUMP_SPEED = 291.667
JUMP_ACCEL = 1458.3333
JUMP_MIN_DUR = 0.025
JUMP_MAX_DUR = 0.2
class Field:
WIDTH = 8192
WIDTH2 = WIDTH / 2
LENGTH = 10240
LENGTH2 = LENGTH / 2
HEIGHT = 2044
class Ball:
RADIUS = 92
def __init__(self, pos=Vec3(), vel=Vec3(), ang_vel=Vec3(), time=0.0):
self.pos = pos
self.vel = vel
self.ang_vel = ang_vel
self.time = time
# self.last_touch # TODO
# self.last_bounce # TODO
class Car:
def __init__(self, index=-1, name="Unknown", team=0, pos=Vec3(), vel=Vec3(), ang_vel=Vec3(), rot=Mat33(), time=0.0):
self.id = index
self.name = name
self.team = team
self.team_sign = -1 if team == 0 else 1
self.pos = pos
self.vel = vel
self.rot = rot
self.ang_vel = ang_vel
self.time = time
self.is_demolished = False
self.jumped = False
self.double_jumped = False
self.on_ground = True
self.supersonic = False
self.last_quick_chat = None
self.last_expected_time_till_reach_ball = 3
self.last_input = SimpleControllerState()
# Analytic info
self.effective_pos = pos # A point a bit in front of them
self.objective = Objective.UNKNOWN
self.possession = 0
self.onsite = False
self.reach_ball_time = 0
@property
def forward(self) -> Vec3:
return self.rot.col(0)
@property
def left(self) -> Vec3:
return self.rot.col(1)
@property
def up(self) -> Vec3:
return self.rot.col(2)
def got_it_according_to_quick_chat_01(self, time) -> float:
if self.last_quick_chat is None:
return 1.0
if self.last_quick_chat.message in QuickChat.I_GOT_IT_MESSAGES:
return 1.0 + QuickChat.RELEVANCE * lin_fall(time - self.last_quick_chat.time, QuickChat.RELEVANCE_DURATION)
if self.last_quick_chat.message in QuickChat.YOU_GOT_IT_MESSAGES:
return 1.0 - QuickChat.RELEVANCE * lin_fall(time - self.last_quick_chat.time, QuickChat.RELEVANCE_DURATION)
return 1.0
def is_following_up_according_to_quick_chat_01(self, time) -> float:
if self.last_quick_chat is None:
return 1.0
if self.last_quick_chat.message in QuickChat.WONT_FOLLOW_UP_MESSAGES:
return 1.0 - QuickChat.RELEVANCE * lin_fall(time - self.last_quick_chat.time, QuickChat.RELEVANCE_DURATION)
return 1.0
class QuickChat:
I_GOT_IT_MESSAGES = [
QuickChats.Information_IGotIt
]
YOU_GOT_IT_MESSAGES = [
QuickChats.Information_NeedBoost,
QuickChats.Information_TakeTheShot,
QuickChats.Information_Defending,
QuickChats.Information_GoForIt,
QuickChats.Information_AllYours,
]
WONT_FOLLOW_UP_MESSAGES = [
QuickChats.Information_NeedBoost,
QuickChats.Information_Defending
]
RELEVANCE_DURATION = 2.5 # RL is a fast game
RELEVANCE = 0.3 # Percentage
def __init__(self, sender, sender_team, message, time):
self.sender = sender
self.sender_team = sender_team
self.message = message
self.time = time
class BoostPad:
def __init__(self, index, pos, is_big, is_active, timer):
self.index = index
self.pos = pos
self.is_active = is_active
self.timer = timer
self.is_big = is_big
class Goal:
WIDTH = 1900
WIDTH2 = 1900 / 2
HEIGHT = 640
DEPTH = 880
def __init__(self, team: int):
team_sign = -1 if team == 0 else 1
self.pos = Vec3(0, team_sign * Field.LENGTH2, 0)
self.right_post = Vec3(-(Goal.WIDTH2 - 30) * team_sign, team_sign * Field.LENGTH2, 0)
self.left_post = Vec3((Goal.WIDTH2 - 30) * team_sign, team_sign * Field.LENGTH2, 0)
self.front = self.pos * 0.86 # A spot in front of the goal
class GameInfo:
def __init__(self, index, team):
self.team = team
self.index = index
self.team_sign = -1 if team == 0 else 1
self.dt = 0.016666
self.time = 0
self.is_kickoff = False
self.last_kickoff_end_time = 0
self.time_since_last_kickoff = 0
self.ball = Ball()
self.boost_pads = []
self.small_boost_pads = []
self.big_boost_pads = []
self.convenient_boost_pad = None
self.convenient_boost_pad_score = 0
self.my_car = Car()
self.cars = []
self.teammates = []
self.team_cars = [] # Includes us
self.opponents = []
self.goals = [Goal(0), Goal(1)]
self.own_goal = self.goals[team]
self.opp_goal = self.goals[not team]
self.field_info_loaded = False
def read_field_info(self, field_info: FieldInfo):
if field_info is None or field_info.num_boosts == 0:
return
self.boost_pads = []
self.small_boost_pads = []
self.big_boost_pads = []
for i in range(field_info.num_boosts):
pad = field_info.boost_pads[i]
pos = Vec3(pad.location)
pad = BoostPad(i, pos, pad.is_full_boost, True, 0.0)
self.boost_pads.append(pad)
if pad.is_big:
self.big_boost_pads.append(pad)
else:
self.small_boost_pads.append(pad)
self.convenient_boost_pad = self.boost_pads[0]
self.convenient_boost_pad_score = 0
self.field_info_loaded = True
def read_packet(self, packet: GameTickPacket):
# Game state
self.dt = packet.game_info.seconds_elapsed - self.time
self.time = packet.game_info.seconds_elapsed
self.is_kickoff = packet.game_info.is_kickoff_pause
if self.is_kickoff:
self.last_kickoff_end_time = self.time
self.time_since_last_kickoff = self.time - self.last_kickoff_end_time
# Read ball
ball_phy = packet.game_ball.physics
self.ball.pos = Vec3(ball_phy.location)
self.ball.vel = Vec3(ball_phy.velocity)
self.ball.ang_vel = Vec3(ball_phy.angular_velocity)
self.ball.time = self.time
# self.ball.step(dt)
# Read cars
for i in range(0, packet.num_cars):
game_car = packet.game_cars[i]
car_phy = game_car.physics
car = self.cars[i] if i < len(self.cars) else Car()
car.pos = Vec3(car_phy.location)
car.vel = Vec3(car_phy.velocity)
car.ang_vel = Vec3(car_phy.angular_velocity)
car.rot = euler_to_rotation(Vec3(car_phy.rotation.pitch, car_phy.rotation.yaw, car_phy.rotation.roll))
car.is_demolished = game_car.is_demolished
car.on_ground = game_car.has_wheel_contact
car.supersonic = game_car.is_super_sonic
car.jumped = game_car.jumped
car.double_jumped = game_car.double_jumped
car.boost = game_car.boost
car.time = self.time
# car.extrapolate(dt)
if len(self.cars) <= i:
# First time we see this car
car.index = i
car.team = game_car.team
car.name = game_car.name
self.cars.append(car)
if game_car.team == self.team:
if i == self.index:
self.my_car = car
else:
self.teammates.append(car)
self.team_cars.append(car)
else:
self.opponents.append(car)
# Read boost pads
for i in range(0, len(self.boost_pads)):
boost_pad = packet.game_boosts[i]
self.boost_pads[i].is_active = boost_pad.is_active
self.boost_pads[i].timer = boost_pad.timer
self.convenient_boost_pad_score = 0
for pad in self.boost_pads:
pad_state = packet.game_boosts[pad.index]
pad.is_active = pad_state.is_active
pad.timer = pad_state.timer
score = self.get_boost_pad_convenience_score(pad)
if score > self.convenient_boost_pad_score:
self.convenient_boost_pad = pad
self.convenient_boost_pad_score = score  # remember the best score so later pads are compared against it
# self.time += dt
def get_boost_pad_convenience_score(self, pad):
if not pad.is_active:
return 0
car_to_pad = pad.pos - self.my_car.pos
angle = angle_between(self.my_car.forward, car_to_pad)
# Pads behind the car are bad
if abs(angle) > 1.3:
return 0
dist = norm(car_to_pad)
dist_score = 1 - clip((abs(dist) / 2500)**2, 0, 1)
angle_score = 1 - clip((abs(angle) / 3), 0, 1)
return dist_score * angle_score * (0.8, 1)[pad.is_big]
def closest_enemy(self, pos: Vec3):
enemy = None
dist = -1
for e in self.opponents:
d = norm(e.pos - pos)
if enemy is None or d < dist:
enemy = e
dist = d
return enemy, dist
def handle_quick_chat(self, sender, sender_team, message):
if 0 <= sender < len(self.cars):
car = self.cars[sender]
car.last_quick_chat = QuickChat(sender, sender_team, message, self.time)
def handle_tmcp_message(self, message: TMCPMessage):
if 0 <= message.index < len(self.cars):
# We transform the message into a quick chat message
mapping = {
ActionType.BALL: QuickChats.Information_IGotIt,
ActionType.READY: QuickChats.Information_AllYours,
ActionType.BOOST: QuickChats.Information_NeedBoost,
ActionType.DEMO: QuickChats.Information_TakeTheShot,
ActionType.DEFEND: QuickChats.Information_Defending
}
quickchat_version = mapping[message.action_type]
car = self.cars[message.index]
car.last_quick_chat = QuickChat(message.index, car.team, quickchat_version, self.time - 0.008333)
def tcmp_to_quick_chat(tmcp: ActionType):
return {
ActionType.BALL: QuickChats.Information_IGotIt,
ActionType.READY: QuickChats.Information_AllYours,
ActionType.BOOST: QuickChats.Information_NeedBoost,
ActionType.DEMO: QuickChats.Information_TakeTheShot,
ActionType.DEFEND: QuickChats.Information_Defending
}[tmcp]
def quick_chat_to_tcmp(qc_msg) -> Optional[ActionType]:
mapping = {
QuickChats.Information_IGotIt: ActionType.BALL,
QuickChats.Information_AllYours: ActionType.WAIT,
QuickChats.Information_NeedBoost: ActionType.BOOST,
QuickChats.Information_TakeTheShot: ActionType.DEMO,
QuickChats.Information_Defending: ActionType.DEFEND
}
return mapping[qc_msg] if qc_msg in mapping else None
def is_near_wall(point: Vec3, offset: float=110) -> bool:
return abs(point.x) > Field.WIDTH2 - offset or abs(point.y) > Field.LENGTH2 - offset # TODO Add diagonal walls
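# --- Usage sketch (added for illustration; not part of the original module). ---
# An agent would typically own a GameInfo instance and feed it the framework's field
# info and tick packets. `info`, `field_info` and `packet` stand in for values the
# RLBot framework supplies.
def _example_tick(info: GameInfo, field_info: FieldInfo, packet: GameTickPacket):
    if not info.field_info_loaded:
        info.read_field_info(field_info)  # boost pad layout, read once
    info.read_packet(packet)  # per-tick ball, car and boost pad state
    return info.closest_enemy(info.my_car.pos)  # (closest opponent, distance)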
|
the-stack_0_19421
|
"""This module contains the general information for VnicIScsiBootParams ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class VnicIScsiBootParamsConsts():
INT_ID_NONE = "none"
OWNER_MANAGEMENT = "management"
OWNER_PHYSICAL_DEFAULT_CONFIG = "physical-default-config"
OWNER_PHYSICAL_INHERIT = "physical-inherit"
OWNER_POLICY = "policy"
OWNER_SYSTEM = "system"
OWNER_TIER = "tier"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
POLICY_OWNER_UNSPECIFIED = "unspecified"
class VnicIScsiBootParams(ManagedObject):
"""This is VnicIScsiBootParams class."""
consts = VnicIScsiBootParamsConsts()
naming_props = set([])
mo_meta = MoMeta("VnicIScsiBootParams", "vnicIScsiBootParams", "iscsi-boot-params", VersionMeta.Version111a, "InputOutput", 0x1f, [], ["admin", "ls-compute", "ls-config", "ls-network", "ls-server", "ls-storage"], [u'lsServer'], [u'vnicIScsiBootVnic'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"owner": MoPropertyMeta("owner", "owner", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["management", "physical-default-config", "physical-inherit", "policy", "system", "tier"], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["local", "pending-policy", "policy", "unspecified"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"intId": "int_id",
"name": "name",
"owner": "owner",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.descr = None
self.int_id = None
self.name = None
self.owner = None
self.policy_level = None
self.policy_owner = None
self.status = None
ManagedObject.__init__(self, "VnicIScsiBootParams", parent_mo_or_dn, **kwargs)
|
the-stack_0_19422
|
# Copyright 2020 Florian Wagner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asn1crypto import x509,pem
from base64 import b64decode
from urllib.parse import unquote_to_bytes
BEGIN_LEN = 27
END_LEN = 25
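# These lengths match the PEM armor lines: len('-----BEGIN CERTIFICATE-----') == 27 and
# len('-----END CERTIFICATE-----') == 25, so slicing with them isolates the base64 body
# whose newlines were flattened to spaces when the PEM was packed into a header value.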
def decode_header(hdr):
if hdr.startswith(b'-----BEGIN'):
if hdr[10:13] == b'%20':
return unquote_to_bytes(hdr)
return (
hdr[:BEGIN_LEN] +
hdr[BEGIN_LEN:-END_LEN].replace(b' ', b'\n') +
hdr[-END_LEN:]
)
try:
return b64decode(hdr, validate=True)
except:
return None
def load_certificate(data):
try:
_,_,data = pem.unarmor(data)
finally:
try:
return x509.Certificate.load(data)
except:
return None
|
the-stack_0_19423
|
from . import db
class BaseModel(db.Model):
__abstract__ = True
__schemaname__ = ''
__table_args__ = {'autoload': True,
'autoload_with': db.engine, 'extend_existing': True}
@classmethod
def get_schema(cls):
from api import schemas
return getattr(schemas, cls.__schemaname__)
|
the-stack_0_19424
|
# -*- coding: utf-8 -*-
'''
Management of OpenStack Neutron Security Groups
===============================================
.. versionadded:: 2018.3.0
:depends: shade
:configuration: see :py:mod:`salt.modules.neutronng` for setup instructions
Example States
.. code-block:: yaml
create security group;
neutron_secgroup.present:
- name: security_group1
- description: "Very Secure Security Group"
delete security group:
neutron_secgroup.absent:
- name_or_id: security_group1
- project_name: Project1
create security group with optional params:
neutron_secgroup.present:
- name: security_group1
- description: "Very Secure Security Group"
- project_id: 1dcac318a83b4610b7a7f7ba01465548
create security group with optional params:
neutron_secgroup.present:
- name: security_group1
- description: "Very Secure Security Group"
- project_name: Project1
'''
from __future__ import absolute_import, print_function, unicode_literals
__virtualname__ = 'neutron_secgroup'
def __virtual__():
if 'neutronng.list_subnets' in __salt__:
return __virtualname__
return (False, 'The neutronng execution module failed to load: '
'shade python module is not available')
def present(name, auth=None, **kwargs):
'''
Ensure a security group exists.
You can supply either project_name or project_id.
Creating a default security group will not show up as a change;
it gets created through the lookup process.
name
Name of the security group
description
Description of the security group
project_name
Name of Project
project_id
ID of Project
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['neutronng.setup_clouds'](auth)
if 'project_name' in kwargs:
kwargs['project_id'] = kwargs['project_name']
del kwargs['project_name']
project = __salt__['keystoneng.project_get'](
name=kwargs['project_id'])
if project is None:
ret['result'] = False
ret['comment'] = "project does not exist"
return ret
secgroup = __salt__['neutronng.security_group_get'](
name=name, filters={'tenant_id': project.id})
if secgroup is None:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = kwargs
ret['comment'] = 'Security Group will be created.'
return ret
secgroup = __salt__['neutronng.security_group_create'](**kwargs)
ret['changes'] = secgroup
ret['comment'] = 'Created security group'
return ret
changes = __salt__['neutronng.compare_changes'](secgroup, **kwargs)
if changes:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = changes
ret['comment'] = 'Security Group will be updated.'
return ret
__salt__['neutronng.security_group_update'](secgroup=secgroup, **changes)
ret['changes'].update(changes)
ret['comment'] = 'Updated security group'
return ret
def absent(name, auth=None, **kwargs):
'''
Ensure a security group does not exist
name
Name of the security group
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['neutronng.setup_clouds'](auth)
kwargs['project_id'] = __salt__['keystoneng.project_get'](
name=kwargs['project_name'])
secgroup = __salt__['neutronng.security_group_get'](
name=name,
filters={'project_id': kwargs['project_id']}
)
if secgroup:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = {'id': secgroup.id}
ret['comment'] = 'Security group will be deleted.'
return ret
__salt__['neutronng.security_group_delete'](name=secgroup)
ret['changes']['id'] = name
ret['comment'] = 'Deleted security group'
return ret
|
the-stack_0_19426
|
from ansible.module_utils.basic import AnsibleModule
import os
import string
import pymysql
import io
import configparser
def main():
module = AnsibleModule(
argument_spec = dict(
mysql_dbname = dict(type='str', default='_dbispconfig'),
mysql_user = dict(type='str', default='_ispconfig'),
mysql_pass = dict(required=True, type='str', no_log=True),
server = dict(required=True, type="str"),
ini_group = dict(required=True, type="str"),
ini_option = dict(required=True, type='str'),
ini_value = dict(type='str', default="")
)
)
c = pymysql.connect(
host='localhost',
user=module.params['mysql_user'],
password=module.params['mysql_pass'],
db=module.params['mysql_dbname'],
charset='utf8',
cursorclass=pymysql.cursors.DictCursor
)
cursor = c.cursor()
sql = 'SELECT config FROM sys_ini WHERE sysini_id = 1'
cursor.execute(sql)
raw_ini = cursor.fetchone()['config']
ini = configparser.ConfigParser()
ini.read_string(raw_ini)
changed = False
ini_group = module.params['ini_group']
ini_option = module.params['ini_option']
ini_value = module.params['ini_value']
try:
current_value = ini[ini_group][ini_option]
except:
current_value = None
if current_value != ini_value:
changed = True
ini[ini_group][ini_option] = ini_value
stream = io.StringIO()
ini.write(stream, space_around_delimiters=False)
raw_ini = stream.getvalue().replace("\'", '\\\'').replace("\"", "\\\"")
sql = 'UPDATE sys_ini SET config = "%s" WHERE sysini_id = 1' % raw_ini
cursor.execute(sql)
c.commit()
cursor.close()
c.close()
if changed is None:
raise module.fail_json(msg='bug: no changed value was set')
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
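# Hypothetical playbook usage (the module name and the chosen ini group/option are
# illustrative assumptions, not taken from this file):
#
#   - name: Set an ISPConfig sys_ini option
#     ispconfig_sys_ini:
#       mysql_pass: "{{ ispconfig_db_password }}"
#       server: server1
#       ini_group: misc
#       ini_option: maintenance_mode
#       ini_value: "y"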
|
the-stack_0_19427
|
from abc import ABC, abstractmethod
from typing import Generic, Optional
from .state import State
from .typing import TReturn
class ProcessAPI(ABC, Generic[TReturn]):
#
# State
#
@property
@abstractmethod
def state(self) -> State:
...
@state.setter
def state(self, value: State) -> State:
raise NotImplementedError
@abstractmethod
async def wait_for_state(self, state: State) -> None:
...
#
# PID
#
@property
@abstractmethod
def pid(self) -> int:
...
@pid.setter
def pid(self, value: int) -> None:
raise NotImplementedError
@abstractmethod
async def wait_pid(self) -> int:
...
#
# Return Value
#
@property
@abstractmethod
def return_value(self) -> TReturn:
...
@return_value.setter
def return_value(self, value: TReturn) -> None:
raise NotImplementedError
@abstractmethod
async def wait_return_value(self) -> TReturn:
...
#
# Return Code
#
@property
@abstractmethod
def returncode(self) -> int:
...
@returncode.setter
def returncode(self, value: int) -> None:
raise NotImplementedError
@abstractmethod
async def wait_returncode(self) -> int:
...
#
# Error
#
@property
@abstractmethod
def error(self) -> Optional[BaseException]:
...
@error.setter
def error(self, value: BaseException) -> None:
raise NotImplementedError
@abstractmethod
async def wait_error(self) -> Optional[BaseException]:
...
#
# Result
#
@property
@abstractmethod
def result(self) -> TReturn:
...
@abstractmethod
async def wait_result(self) -> TReturn:
...
#
# Lifecycle management APIs
#
@abstractmethod
async def wait(self) -> None:
...
@abstractmethod
def poll(self) -> Optional[int]:
...
@abstractmethod
def kill(self) -> None:
...
@abstractmethod
def terminate(self) -> None:
...
@abstractmethod
def send_signal(self, sig: int) -> None:
...
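# --- Illustration only; not part of the original module. ---
# A concrete ProcessAPI implementation would typically back each property with plain
# storage plus an asyncio.Event so the matching `wait_*` coroutine can block until the
# setter fires. The standalone class below sketches that pattern for `state` alone; it
# is an assumption about how the interface is meant to be satisfied, not a real subclass.
import asyncio


class _StateExampleSketch:
    def __init__(self) -> None:
        self._state: Optional[State] = None
        self._state_changed = asyncio.Event()

    @property
    def state(self) -> Optional[State]:
        return self._state

    @state.setter
    def state(self, value: State) -> None:
        self._state = value
        self._state_changed.set()

    async def wait_for_state(self, state: State) -> None:
        # Re-check after every change notification until the requested state arrives.
        while self._state != state:
            self._state_changed.clear()
            await self._state_changed.wait()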
|
the-stack_0_19428
|
import csv
import itertools
import operator
import numpy as np
import nltk
import os
from rdkit import Chem
from rdkit.Chem import Draw
from IPython import display
##import matplotlib.pyplot as plt
from rdkit.Chem import Descriptors
def zinc_data_with_bracket():
sen_space=[]
#f = open('/Users/yang/smiles.csv', 'rb')
#f = open('/Users/yang/LSTM-chemical-project/smile_trainning.csv', 'rb')
f = open('/home/yang/LSTM-chemical-project/data/250k_rndm_zinc_drugs_clean.smi', 'rt')
reader = csv.reader(f)
for row in reader:
#word_space[row].append(reader[row])
#print word_sapce
sen_space.append(row)
#print sen_space
f.close()
word1=sen_space[0]
word_space=list(word1[0])
end="\n"
zinc_processed=[]
organic_smile=[]
t=0
for i in range(len(sen_space)):
word1=sen_space[i]
m = Chem.MolFromSmiles(word1[0])
Chem.Kekulize(m)
s=Chem.MolToSmiles(m,kekuleSmiles=True)
zinc_processed.append(s)
#word_space=list(word1[0])
#print len(zinc_processed)
while t <len(zinc_processed):
#print t
word2=zinc_processed[t]
word_space=list(word2)
word=[]
organic_smile.append(word_space)
t=t+1
#print len(organic_smile)
#print organic_smile
return organic_smile
def zinc_processed_with_bracket(sen_space):
#print sen_space
all_smile=[]
length=[]
end="\n"
element_table=["C","N","B","O","P","S","F","Cl","Br","I","(",")","=","#"]
ring=["1","2","3","4","5","6","7","8","9","10"]
for i in range(len(sen_space)):
#word1=sen_space[i]
word_space=sen_space[i]
word=[]
#word_space.insert(0,end)
j=0
while j<len(word_space):
word_space1=[]
#word_space1.append(word_space[j])
if word_space[j]=="[":
word_space1.append(word_space[j])
j=j+1
while word_space[j]!="]":
word_space1.append(word_space[j])
j=j+1
word_space1.append(word_space[j])
word_space2=''.join(word_space1)
word.append(word_space2)
j=j+1
else:
word_space1.append(word_space[j])
if j+1<len(word_space):
word_space1.append(word_space[j+1])
word_space2=''.join(word_space1)
else:
word_space1.insert(0,word_space[j-1])
word_space2=''.join(word_space1)
if word_space2 not in element_table:
word.append(word_space[j])
j=j+1
else:
word.append(word_space2)
j=j+2
word.append(end)
word.insert(0,"&")
len1=len(word)
length.append(len1)
all_smile.append(list(word))
#print all_smile
val=["\n"]
for i in range(len(all_smile)):
for j in range(len(all_smile[i])):
if all_smile[i][j] not in val:
val.append(all_smile[i][j])
#print val
#val.remove("\n")
#val.insert(0,"\n")
#print val
#print all_smile[0]
#print all_smile[1]
#print all_smile[2]
#print len(all_smile)
#print max(length)
#print len(val)
return val, all_smile
def zinc_logp(smile):
logp_value=[]
compound=[]
for i in range(len(smile)):
middle=[]
for j in range(len(smile[i])):
middle.append(smile[i][j])
com=''.join(middle)
compound.append(com)
for i in range(len(compound)):
m = Chem.MolFromSmiles(compound[i])
logp=Descriptors.MolLogP(m)
logp_value.append(logp)
ma=6.66134
print(max(logp_value))
print(logp_value)
def zinc_data_with_bracket_original():
sen_space=[]
#f = open('/Users/yang/smiles.csv', 'rb')
#f = open('/Users/yang/LSTM-chemical-project/smile_trainning.csv', 'rb')
f = open('../data/250k_rndm_zinc_drugs_clean.smi', 'rt')
reader = csv.reader(f)
for row in reader:
#word_space[row].append(reader[row])
#print word_sapce
sen_space.append(row)
#print sen_space
f.close()
word1=sen_space[0]
word_space=list(word1[0])
end="\n"
zinc_processed=[]
organic_smile=[]
t=0
for i in range(len(sen_space)):
word1=sen_space[i]
#m = Chem.MolFromSmiles(word1[0])
#Chem.Kekulize(m)
#s=Chem.MolToSmiles(m,kekuleSmiles=True)
zinc_processed.append(word1[0])
#word_space=list(word1[0])
#print len(zinc_processed)
#while t <len(zinc_processed):
# #print t
# word2=zinc_processed[t]
# word_space=list(word2)
# word=[]
# organic_smile.append(word_space)
# t=t+1
#print len(organic_smile)
#print organic_smile
#print zinc_processed[0]
return zinc_processed
#hi=organic()
#organic_logp(hi)
#hi=zinc_data_with_bracket_original()
#zinc_logp(hi)
#zinc_processed_with_bracket(hi)
|
the-stack_0_19429
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 11:05:20 2020
@author: ravi
"""
for t in range(int(input())):
s = input()
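# s[::-1] is the string reversed; `cond and "YES" or "NO"` is the old-style spelling of
# `"YES" if cond else "NO"` (safe here because "YES" is truthy).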
print(s == s[::-1] and "YES" or "NO")
|
the-stack_0_19430
|
"""Pavilion Test configurations, like the base Pavilion configuration,
utilize the YamlConfig library to define the config structure. Because of the
dynamic nature of test configs, there are a few extra complications this module
handles that are documented below.
"""
from collections import OrderedDict
import re
import yaml_config as yc
class TestConfigError(ValueError):
"""An exception specific to errors in configuration."""
TEST_NAME_RE_STR = r'^[a-zA-Z_][a-zA-Z0-9_-]*$'
TEST_NAME_RE = re.compile(TEST_NAME_RE_STR)
KEY_NAME_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9_-]*$')
VAR_KEY_NAME_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9_]*$')
VAR_NAME_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9_]*[?+]?$')
class PathCategoryElem(yc.CategoryElem):
"""This is for category elements that need a valid unix path regex."""
_NAME_RE = re.compile(r".+$")
class VariableElem(yc.CategoryElem):
"""This is for values in the 'variables' section of a test config.
A variable entry can be either a single string value or an
arbitrary dictionary of strings. If we get a single value, we'll return it
instead of a dict. Pavilion's variable handling code handles the
normalization of these values.
"""
_NAME_RE = VAR_KEY_NAME_RE
def __init__(self, name=None, **kwargs):
"""Just like a CategoryElem, but the sub_elem must be a StrElem
and it can't have defaults."""
super(VariableElem, self).__init__(name=name,
sub_elem=yc.StrElem(),
defaults=None,
**kwargs)
def normalize(self, value):
"""Normalize to either a dict of strings or just a string."""
if not isinstance(value, dict):
return yc.StrElem().normalize(value)
return super().normalize(value)
def validate(self, value, partial=False):
"""Check for a single item and return it, otherwise return a dict."""
if isinstance(value, str):
return value
return super().validate(value, partial=partial)
class CondCategoryElem(yc.CategoryElem):
"""Allow any key. They'll be validated later."""
_NAME_RE = re.compile(r'^.*$')
class EvalCategoryElem(yc.CategoryElem):
"""Allow keys that start with underscore. Lowercase only."""
_NAME_RE = re.compile(r'[a-z_][a-z0-9_]*')
class VarKeyCategoryElem(yc.CategoryElem):
"""Allow Pavilion variable name like keys."""
# Allow names that have multiple, dot separated components, potentially
# including a '*'.
_NAME_RE = re.compile(r'^(?:[a-zA-Z][a-zA-Z0-9_-]*)'
r'(?:\.|[a-zA-Z][a-zA-Z0-9_-]*)*')
class ResultParserCatElem(yc.CategoryElem):
_NAME_RE = re.compile(
r'^[a-zA-Z_]\w*(\s*,\s*[a-zA-Z_]\w*)*$'
)
class VarCatElem(yc.CategoryElem):
"""For describing how the variables section itself works.
Just like a regular category elem (any conforming key, but values must
be the same type), but with some special magic when merging values.
:cvar _NAME_RE: Unlike normal categoryElem keys, these can have dashes.
"""
_NAME_RE = VAR_NAME_RE
def merge(self, old, new):
"""Merge, but allow for special keys that change our merge behavior.
'key?: value'
Allows values from lower levels in the config stack to override this
one. The value is only used if no other value is given.
'key+: value/s'
The values are appended to the list of whatever is given by lower
levels of the config stack.
"""
base = old.copy()
for key, value in new.items():
# Handle special key properties
if key[-1] in '?+':
bkey = key[:-1]
new_vals = new[key]
if key.endswith('?'):
if new_vals is None:
raise TestConfigError(
"Key '{key}' in variables section must have a "
"value, either set as the default at this level or "
"provided by an underlying host or mode config."
.format(key=key)
)
# Use the new value only if there isn't an old one.
base[bkey] = base.get(bkey, new[key])
elif key.endswith('+'):
if new_vals is None:
raise TestConfigError(
"Key '{key}' in variables section is in extend "
"mode, but provided no values."
.format(key=key))
# Appending the additional (unique) values
base[bkey] = base.get(bkey, self._sub_elem.type())
for item in new_vals:
if item not in base[bkey]:
base[bkey].append(item)
elif key in old:
base[key] = self._sub_elem.merge(old[key], new[key])
else:
base[key] = new[key]
return base
class EnvCatElem(yc.CategoryElem):
"""A category element that ensures environment variables retain their
order."""
_NAME_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9_]*$')
type = OrderedDict
class TestConfigLoader(yc.YamlConfigLoader):
"""This class describes a test section in a Pavilion config file. It is
expected to be added to by various plugins.
:cvar list(yc.YamlConfig) ELEMENTS: Each YamlConfig instance in this
list defines a key for the test config.
- Each element must result in a string (which is why you see a lot of StrElem
below), or a structure that contains only strings at the lowest layer.
- So lists of dicts of strings are fine, etc.
- yc.RegexElem also produces a string.
- Everything should have a sensible default.
- An empty config should be a valid test.
- For bool values, accept ['true', 'false', 'True', 'False'].
- They should be checked with val.lower() == 'true', etc.
- Every element must have a useful 'help_text'.
"""
ELEMENTS = [
yc.StrElem(
'name', hidden=True, default='<unnamed>',
help_text="The base name of the test. Value added automatically."),
yc.StrElem(
'suite', hidden=True, default='<no_suite>',
help_text="The name of the suite. Value added automatically."),
yc.StrElem(
'suite_path', hidden=True, default='<no_suite>',
help_text="Path to the suite file. Value added automatically."),
yc.StrElem(
'host', hidden=True, default='<unknown>',
help_text="Host (typically sys.sys_name) for which this test was "
"created. Value added automatically."
),
yc.ListElem(
'modes', hidden=True, sub_elem=yc.StrElem(),
help_text="Modes used in the creation of this test. Value is added "
"automatically."
),
yc.RegexElem(
'inherits_from', regex=TEST_NAME_RE_STR,
help_text="Inherit from the given test section, and override "
"parameters those specified in this one. Lists are "
"overridden entirely"),
yc.StrElem(
'subtitle',
help_text="An extended title for this test. Required for "
"permuted tests."),
yc.StrElem(
'group', default=None,
help_text="The group under which to build and run tests. "
"Defaults to the group specified in pavilion.yaml."
),
yc.RegexElem(
'umask', regex=r'[0-7]{3}', default=None,
help_text="The octal umask to apply to files created during the "
"build and run processes. Defaults to the umask in "
"pavilion.yaml."
),
yc.KeyedElem(
'maintainer',
help_text="Information about who maintains this test.",
elements=[
yc.StrElem('name', default='unknown',
help_text="Name or organization of the maintainer."),
yc.StrElem('email',
help_text="Email address of the test maintainer."),
]
),
yc.StrElem(
'summary',
help_text="Summary of the purpose of this test."
),
yc.StrElem(
'doc',
help_text="Detailed documentation string for this test."
),
yc.ListElem(
'permute_on', sub_elem=yc.StrElem(),
help_text="List of permuted variables. For every permutation of "
"the values of these variables, a new virtual test will "
"be generated."
),
VarCatElem(
'variables', sub_elem=yc.ListElem(sub_elem=VariableElem()),
help_text="Variables for this test section. These can be "
"inserted strings anywhere else in the config through "
"the string syntax. They keys 'var', 'per', 'pav', "
"'sys' and 'sched' reserved. Each value may be a "
"single or list of strings key/string pairs."),
yc.RegexElem('scheduler', regex=r'\w+', default="raw",
help_text="The scheduler class to use to run this test."),
CondCategoryElem(
'only_if', sub_elem=yc.ListElem(sub_elem=yc.StrElem()),
key_case=EnvCatElem.KC_MIXED,
help_text="Only run this test if each of the clauses in this "
"section evaluate to true. Each clause consists of "
"a mapping key (that can contain Pavilion variable "
"references, like '{{pav.user}}' or '{{sys.sys_arch}}'"
") and one or more regex values"
"(that much match the whole key). A clause is true "
"if the value of the Pavilion variable matches one or"
" more of the values. "
),
CondCategoryElem(
'not_if', sub_elem=yc.ListElem(sub_elem=yc.StrElem()),
key_case=EnvCatElem.KC_MIXED,
help_text="Will NOT run this test if at least one of the "
"clauses evaluates to true. Each clause consists of "
"a mapping key (that can contain Pavilion variable "
"references, like '{{pav.user}}' or "
"'{{sys.sys_arch}}') and one or more "
"regex values (that much match the whole key)."
"A clause is true if the value of "
"the Pavilion variable matches one or more of the "
" values."
),
yc.StrElem(
'compatible_pav_versions', default='',
help_text="Specify compatibile pavilion versions for this "
"specific test. Can be represented as a single "
"version, ex: 1, 1.2, 1.2.3, or a range, "
"ex: 1.2-1.3.4, etc."
),
yc.StrElem(
'test_version', default='0.0',
help_text="Documented test version."
),
yc.KeyedElem(
'build', elements=[
yc.ListElem(
'cmds', sub_elem=yc.StrElem(),
help_text='The sequence of commands to run to perform '
'the build.'),
yc.ListElem(
'prepend_cmds', sub_elem=yc.StrElem(),
help_text='Commands to run before inherited build '
'commands.'),
yc.ListElem(
'append_cmds', sub_elem=yc.StrElem(),
help_text='Commands to run after inherited build '
'commands.'),
yc.ListElem(
'copy_files', sub_elem=yc.StrElem(),
help_text="When attaching the build to a test run, copy "
"these files instead of creating a symlink."
"They may include path glob wildcards, "
"including the recursive '**'."),
PathCategoryElem(
'create_files',
key_case=PathCategoryElem.KC_MIXED,
sub_elem=yc.ListElem(sub_elem=yc.StrElem()),
help_text="File(s) to create at path relative to the test's"
"test source directory"),
EnvCatElem(
'env', sub_elem=yc.StrElem(), key_case=EnvCatElem.KC_MIXED,
help_text="Environment variables to set in the build "
"environment."),
yc.ListElem(
'extra_files', sub_elem=yc.StrElem(),
help_text='File(s) to copy into the build environment. '
'Relative paths searched for in ~/.pavilion, '
'$PAV_CONFIG. Absolute paths are ok, '
'but not recommended.'),
yc.ListElem(
'modules', sub_elem=yc.StrElem(),
help_text="Modules to load into the build environment."),
yc.StrElem(
'on_nodes', default='False',
choices=['true', 'false', 'True', 'False'],
help_text="Whether to build on or off of the test "
"allocation."),
yc.ListElem(
'preamble', sub_elem=yc.StrElem(),
help_text="Setup commands for the beginning of the build "
"script. Added to the beginning of the run "
"script. These are generally expected to "
"be host rather than test specific."),
yc.StrElem(
'source_path',
help_text="Path to the test source. It may be a directory, "
"compressed file, compressed or "
"uncompressed archive (zip/tar), and is handled "
"according to the internal (file-magic) type. "
"For relative paths Pavilion looks in the "
"test_src directory "
"within all known config directories. If this "
"is left blank, Pavilion will always assume "
"there is no source to build."),
yc.StrElem(
'source_url',
help_text='Where to find the source on the internet. By '
'default, Pavilion will try to download the '
'source from the given URL if the source file '
'can\'t otherwise be found. You must give a '
'source path so Pavilion knows where to store '
'the file (relative paths will be stored '
'relative to the local test_src directory).'),
yc.StrElem(
'source_download', choices=['never', 'missing', 'latest'],
default='missing',
help_text="When to attempt to download the test source.\n"
" never - The url is for reference only.\n"
" missing - (default) Download if the source "
"can't be found.\n"
" latest - Always try to fetch the latest "
"source, tracking changes by "
"file size/timestamp/hash."
),
yc.StrElem(
'specificity',
default='',
help_text="Use this string, along with variables, to "
"differentiate builds. A common example would be "
"to make per-host specific by using the "
"sys.sys_name variable. Note _deferred_ system "
"variables aren't a good idea hereas configs are "
"compiled on the host that launches the test."),
yc.StrElem(
'timeout',
default='30',
help_text="Time (in seconds) that a build can continue "
"without generating new output before it is "
"cancelled. Can be left empty for no timeout."),
yc.StrElem(
'verbose', choices=['true', 'True', 'False', 'false'],
default='False',
help_text="Echo commands (including sourced files) in the"
" build log, and print the modules loaded and "
"environment before the cmds run."),
yc.StrElem(
'timeout_file', default=None,
help_text='Specify a different file to follow for build '
'timeouts.'),
],
help_text="The test build configuration. This will be "
"used to dynamically generate a build script for "
"building the test."),
yc.KeyedElem(
'run', elements=[
yc.ListElem('cmds', sub_elem=yc.StrElem(),
help_text='The sequence of commands to run to run '
'the test.'),
yc.ListElem(
'prepend_cmds', sub_elem=yc.StrElem(),
help_text='Commands to run before inherited build '
'commands.'),
yc.ListElem(
'append_cmds', sub_elem=yc.StrElem(),
help_text='Commands to run after inherited build '
'commands.'),
PathCategoryElem(
'create_files',
key_case=PathCategoryElem.KC_MIXED,
sub_elem=yc.ListElem(sub_elem=yc.StrElem()),
help_text="File(s) to create at path relative to the test's"
"test source directory"),
EnvCatElem(
'env', sub_elem=yc.StrElem(), key_case=EnvCatElem.KC_MIXED,
help_text="Environment variables to set in the run "
"environment."),
yc.ListElem(
'modules', sub_elem=yc.StrElem(),
help_text="Modules to load into the run environment."),
yc.ListElem(
'preamble', sub_elem=yc.StrElem(),
help_text="Setup commands for the beginning of the build "
"script. Added to the beginning of the run "
"script. These are generally expected to "
"be host rather than test specific."),
yc.StrElem(
'timeout', default='300',
help_text="Time that a build can continue without "
"generating new output before it is cancelled. "
"Can be left empty for no timeout."),
yc.StrElem(
'verbose', choices=['true', 'True', 'False', 'false'],
default='False',
help_text="Echo commands (including sourced files) in the "
"build log, and print the modules loaded and "
"environment before the cmds run."),
yc.StrElem(
'timeout_file', default=None,
help_text='Specify a different file to follow for run '
'timeouts.'),
],
help_text="The test run configuration. This will be used "
"to dynamically generate a run script for the "
"test."),
EvalCategoryElem(
'result_evaluate',
sub_elem=yc.StrElem(),
help_text="The keys and values in this section will also "
"be added to the result json. The values are "
"expressions (like in {{<expr>}} in normal Pavilion "
"strings). Other result values (including those "
"from result parsers and other evaluations are "
"available to reference as variables."),
]
# We'll append the result parsers separately, to have an easy way to
# access it.
_RESULT_PARSERS = yc.KeyedElem(
'result_parse', elements=[],
help_text="Result parser configurations go here. Each parser config "
"can occur by itself or as a list of configs, in which "
"case the parser will run once for each config given. The "
"output of these parsers will be added to the final "
"result json data.")
ELEMENTS.append(_RESULT_PARSERS)
@classmethod
def add_subsection(cls, subsection):
"""Use this method to add additional sub-sections to the config.
:param yc.ConfigElem subsection: A yaml config element to add. Keyed
elements are expected, though any ConfigElem based instance
(whose leave elements are StrElems) should work.
"""
if not isinstance(subsection, yc.ConfigElement):
raise ValueError("Tried to add a subsection to the config, but it "
"wasn't a yaml_config ConfigElement instance (or "
"an instance of a ConfigElement child "
"class).")
name = subsection.name
names = [el.name for el in cls.ELEMENTS]
if name in names:
raise ValueError("Tried to add a subsection to the config called "
"{0}, but one already exists.".format(name))
try:
cls.check_leaves(subsection)
except ValueError as err:
raise ValueError("Tried to add result parser named '{}', but "
"leaf element '{}' was not string based."
.format(name, err.args[0]))
cls.ELEMENTS.append(subsection)
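# Illustrative plugin usage (the section name and elements below are hypothetical):
#   TestConfigLoader.add_subsection(
#       yc.KeyedElem('my_plugin', elements=[yc.StrElem('option')]))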
@classmethod
def remove_subsection(cls, subsection_name):
"""Remove a subsection from the config. This is really only for use
in plugin deactivate methods."""
for section in list(cls.ELEMENTS):
if subsection_name == section.name:
cls.ELEMENTS.remove(section)
return
@classmethod
def add_result_parser_config(cls, name, config_items):
"""Add the given list of config items as a result parser
configuration named 'name'. Throws errors for invalid configurations.
"""
# Validate the config.
required_keys = {
'files': False,
'action': False,
'per_file': False,
}
for item in config_items:
for req_key in required_keys.keys():
if item.name == req_key:
required_keys[req_key] = True
for req_key, found in required_keys.items():
if not found:
raise TestConfigError(
"Result parser '{}' must have a required config "
"element named '{}'".format(name, req_key))
config = yc.KeyedElem(
'result_parser_{}'.format(name),
elements=config_items
)
list_elem = ResultParserCatElem(name, sub_elem=config)
if name in [e.name for e in cls._RESULT_PARSERS.config_elems.values()]:
raise ValueError("Tried to add result parser with name '{}'"
"to the config, but one already exists."
.format(name))
try:
cls.check_leaves(config)
except ValueError as err:
raise ValueError("Tried to add result parser named '{}', but "
"leaf element '{}' was not string based."
.format(name, err.args[0]))
cls._RESULT_PARSERS.config_elems[name] = list_elem
@classmethod
def remove_result_parser_config(cls, name):
"""Remove the given result parser from the result parser configuration
section.
:param str name: The name of the parser to remove.
"""
for section in list(cls._RESULT_PARSERS.config_elems.values()):
if section.name == name:
del cls._RESULT_PARSERS.config_elems[section.name]
return
@classmethod
def check_leaves(cls, elem):
"""Make sure all of the config elements have a string element or
equivalent as the final node.
:param yc.ConfigElement elem:
"""
# pylint: disable=protected-access
if hasattr(elem, 'config_elems'):
for sub_elem in elem.config_elems.values():
cls.check_leaves(sub_elem)
elif hasattr(elem, '_sub_elem') and elem._sub_elem is not None:
cls.check_leaves(elem._sub_elem)
elif issubclass(elem.type, str):
return
else:
raise ValueError(elem)
def TestSuiteLoader(): # pylint: disable=invalid-name
"""Create a new test suite loader instance. This is a function
masquerading as a constructor because the class has to be defined
dynamically after plugins have modified the test config.
"""
class _TestSuiteLoader(yc.CatYamlConfigLoader):
"""An actual test config file consists of multiple config sections."""
_NAME_RE = TEST_NAME_RE
# We use the list of ELEMENTS from TestConfigLoader. since this is the
# same object, subsections added to TestConfigLoader will get picked up
# here too.
BASE = yc.KeyedElem(elements=TestConfigLoader.ELEMENTS)
return _TestSuiteLoader()
|
the-stack_0_19432
|
import torch
from tfrecord.torch.dataset import MultiTFRecordDataset
from tfrecord.tools.tfrecord2idx import create_index
import tensorflow as tf
import webdataset as wds
from pathlib import Path
import argparse
import timeit
import os
parser = argparse.ArgumentParser("""Generate sharded dataset from tfrecord-files.""")
parser.add_argument("--maxsize", type=float, default=1e9)
parser.add_argument("--maxcount", type=float, default=100000)
parser.add_argument(
"--compression",
dest="compression",
action="store_true",
help="Creates compressed .tar.gz files instead of uncompressed .tar files."
)
parser.add_argument(
"--keep_keys",
type=str,
default="",
help="Only keep the columns from the comma separated keys from that argument."
)
parser.add_argument(
"--report_every",
type=int,
default="1000",
help="Report every n iterations."
)
parser.add_argument(
"--shards",
default="./shards",
help="directory where shards are written"
)
parser.add_argument(
"--shard_prefix",
default="ds_",
help="prefix of shards' filenames created in the shards-folder"
)
parser.add_argument(
"--data",
default="./tfr",
help="directory path containing tfrecord files",
)
args = parser.parse_args()
KEEP_KEYS = []
if args.keep_keys != '':
KEEP_KEYS = args.keep_keys.split(',')
assert args.maxsize > 10000000
assert args.maxcount < 1000000
assert os.path.isdir(os.path.join(args.data)), '{} does not exist.'.format(args.data)
os.makedirs(Path(args.shards), exist_ok=True)
index_path = args.data
tfrecord_pattern = args.data + '/{}.tfrecord'
index_pattern = index_path + '/{}.index'
os.makedirs(index_path, exist_ok=True)
tfrecord_files = [x[:-9] for x in os.listdir(args.data) if x.split('.')[-1] == 'tfrecord']
total_files = len(tfrecord_files)
splits = {k: 1/total_files for k in tfrecord_files}
tfrecord_index_files = [x[:-6] for x in os.listdir(index_path) if x.split('.')[-1] == 'index']
total_index_files = len(tfrecord_index_files)
TFR_MATCH_INDEX = len([x for x in tfrecord_files if x not in tfrecord_index_files]) == 0
if not TFR_MATCH_INDEX:
print('Index files must be provided when using multiple workers, otherwise the loader may return duplicate records.')
print('Generating index files in {}...'.format(index_path))
for tfrecord_file in tfrecord_files:
create_index(args.data + '/' + tfrecord_file + '.tfrecord', index_path + '/' + tfrecord_file + '.index')
print('Finished generating index files!')
else:
print('Found matching number of index and tfrecord files.')
raw_dataset = tf.data.TFRecordDataset(args.data + '/' + [x for x in os.listdir(args.data) if x.split('.')[-1] == 'tfrecord'][0])
keys = {}
for raw_record in raw_dataset.take(1):
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
for key, value in example.features.feature.items():
keys[key] = True if value.WhichOneof('kind') == 'bytes_list' else False
if len(KEEP_KEYS) > 0:
keys = {k: v for k, v in keys.items() if k in KEEP_KEYS}
assert len(keys.items()) > 0, 'No keys left to convert to WebDataset.'
def _parse_example(example_proto):
"""Return the example_proto as a tuple of the image and its label."""
return {key: example_proto[key].tobytes() for key in keys}
# return {key: example_proto[key].tobytes() if keys[key] else example_proto[key] for key in keys}
def _collate_fn(batch):
return batch[0]
dataset = MultiTFRecordDataset(tfrecord_pattern, index_pattern, splits, transform=_parse_example, infinite=False)
loader = torch.utils.data.DataLoader(dataset, batch_size=1, collate_fn=_collate_fn, drop_last=False)
# This is the output pattern under which we write shards.
pattern = os.path.join(args.shards, args.shard_prefix + f"%06d.tar" + (".gz" if args.compression else ''))
count = 0
start = timeit.default_timer()
with wds.ShardWriter(pattern, maxsize=int(args.maxsize), maxcount=int(args.maxcount)) as sink:
for i, item in enumerate(iter(loader)):
count = i
ds_key = "%09d" % i
sample = {
"__key__": ds_key,
}
for key in keys:
sample[key] = item[key]
sink.write(sample)
if count % args.report_every == 0:
print(' {:,}'.format(count), end='\r')
stop = timeit.default_timer()
print('###################################################################')
print('Finished converting {:,} samples from tfrecord files to webdataset.'.format(count))
print('Process took {:.2f} seconds to finish.'.format(stop - start))
print('###################################################################')
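# Illustrative follow-up (not part of the original script): the generated shards can be
# read back with webdataset's brace notation, e.g. for ten shards written with the
# default "ds_" prefix:
#
#     import webdataset as wds
#     readback = wds.WebDataset("shards/ds_{000000..000009}.tar")
#     for sample in readback:
#         ...  # dict of raw bytes keyed by the original tfrecord feature names (plus __key__)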
|
the-stack_0_19436
|
from flask import render_template
from flask_cors import CORS
import connexion
# Create the application instance
app = connexion.App(__name__, specification_dir='./')
# Read the swagger.yml file to configure the endpoints
app.add_api('swagger.yml')
CORS(app.app)
@app.route('/')
def home():
"""
This function just responds to the browser URL
localhost:5002/
:return: the rendered template 'home.html'
"""
return render_template('home.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5002, debug=True)
|
the-stack_0_19440
|
import unittest
import os
import requests_mock
import tableauserverclient as TSC
import xml.etree.ElementTree as ET
from tableauserverclient.datetime_helpers import format_datetime
from tableauserverclient.server.endpoint.exceptions import InternalServerError
from tableauserverclient.server.request_factory import RequestFactory
from tableauserverclient.models.permissions_item import PermissionsRule
from tableauserverclient.models.user_item import UserItem
from tableauserverclient.models.group_item import GroupItem
from ._utils import asset
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_add_tags.xml')
GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_by_id.xml')
GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_empty.xml')
GET_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get.xml')
POPULATE_CONNECTIONS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_connections.xml')
POPULATE_PDF = os.path.join(TEST_ASSET_DIR, 'populate_pdf.pdf')
POPULATE_PERMISSIONS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_permissions.xml')
POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, 'RESTAPISample Image.png')
POPULATE_VIEWS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views.xml')
POPULATE_VIEWS_USAGE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views_usage.xml')
PUBLISH_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish.xml')
PUBLISH_ASYNC_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish_async.xml')
REFRESH_XML = os.path.join(TEST_ASSET_DIR, 'workbook_refresh.xml')
UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_update.xml')
UPDATE_PERMISSIONS = os.path.join(TEST_ASSET_DIR, 'workbook_update_permissions.xml')
class WorkbookTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
# Fake sign in
self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67'
self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM'
self.baseurl = self.server.workbooks.baseurl
def test_get(self):
with open(GET_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl, text=response_xml)
all_workbooks, pagination_item = self.server.workbooks.get()
self.assertEqual(2, pagination_item.total_available)
self.assertEqual('6d13b0ca-043d-4d42-8c9d-3f3313ea3a00', all_workbooks[0].id)
self.assertEqual('Superstore', all_workbooks[0].name)
self.assertEqual('Superstore', all_workbooks[0].content_url)
self.assertEqual(False, all_workbooks[0].show_tabs)
self.assertEqual(1, all_workbooks[0].size)
self.assertEqual('2016-08-03T20:34:04Z', format_datetime(all_workbooks[0].created_at))
self.assertEqual('description for Superstore', all_workbooks[0].description)
self.assertEqual('2016-08-04T17:56:41Z', format_datetime(all_workbooks[0].updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[0].project_id)
self.assertEqual('default', all_workbooks[0].project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[0].owner_id)
self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', all_workbooks[1].id)
self.assertEqual('SafariSample', all_workbooks[1].name)
self.assertEqual('SafariSample', all_workbooks[1].content_url)
self.assertEqual(False, all_workbooks[1].show_tabs)
self.assertEqual(26, all_workbooks[1].size)
self.assertEqual('2016-07-26T20:34:56Z', format_datetime(all_workbooks[1].created_at))
self.assertEqual('description for SafariSample', all_workbooks[1].description)
self.assertEqual('2016-07-26T20:35:05Z', format_datetime(all_workbooks[1].updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[1].project_id)
self.assertEqual('default', all_workbooks[1].project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[1].owner_id)
self.assertEqual(set(['Safari', 'Sample']), all_workbooks[1].tags)
def test_get_before_signin(self):
self.server._auth_token = None
self.assertRaises(TSC.NotSignedInError, self.server.workbooks.get)
def test_get_empty(self):
with open(GET_EMPTY_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl, text=response_xml)
all_workbooks, pagination_item = self.server.workbooks.get()
self.assertEqual(0, pagination_item.total_available)
self.assertEqual([], all_workbooks)
def test_get_by_id(self):
with open(GET_BY_ID_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', text=response_xml)
single_workbook = self.server.workbooks.get_by_id('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', single_workbook.id)
self.assertEqual('SafariSample', single_workbook.name)
self.assertEqual('SafariSample', single_workbook.content_url)
self.assertEqual(False, single_workbook.show_tabs)
self.assertEqual(26, single_workbook.size)
self.assertEqual('2016-07-26T20:34:56Z', format_datetime(single_workbook.created_at))
self.assertEqual('description for SafariSample', single_workbook.description)
self.assertEqual('2016-07-26T20:35:05Z', format_datetime(single_workbook.updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', single_workbook.project_id)
self.assertEqual('default', single_workbook.project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', single_workbook.owner_id)
self.assertEqual(set(['Safari', 'Sample']), single_workbook.tags)
self.assertEqual('d79634e1-6063-4ec9-95ff-50acbf609ff5', single_workbook.views[0].id)
self.assertEqual('ENDANGERED SAFARI', single_workbook.views[0].name)
self.assertEqual('SafariSample/sheets/ENDANGEREDSAFARI', single_workbook.views[0].content_url)
def test_get_by_id_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.get_by_id, '')
def test_refresh_id(self):
self.server.version = '2.8'
self.baseurl = self.server.workbooks.baseurl
with open(REFRESH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42/refresh',
status_code=202, text=response_xml)
self.server.workbooks.refresh('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
def test_refresh_object(self):
self.server.version = '2.8'
self.baseurl = self.server.workbooks.baseurl
workbook = TSC.WorkbookItem('')
workbook._id = '3cc6cd06-89ce-4fdc-b935-5294135d6d42'
with open(REFRESH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42/refresh',
status_code=202, text=response_xml)
self.server.workbooks.refresh(workbook)
def test_delete(self):
with requests_mock.mock() as m:
m.delete(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', status_code=204)
self.server.workbooks.delete('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
def test_delete_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.delete, '')
def test_update(self):
with open(UPDATE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=response_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74', show_tabs=True)
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
single_workbook.owner_id = 'dd2239f6-ddf1-4107-981a-4cf94e415794'
single_workbook.name = 'renamedWorkbook'
single_workbook.data_acceleration_config = {'acceleration_enabled': True,
'accelerate_now': False,
'last_updated_at': None,
'acceleration_status': None}
single_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual('1f951daf-4061-451a-9df1-69a8062664f2', single_workbook.id)
self.assertEqual(True, single_workbook.show_tabs)
self.assertEqual('1d0304cd-3796-429f-b815-7258370b9b74', single_workbook.project_id)
self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', single_workbook.owner_id)
self.assertEqual('renamedWorkbook', single_workbook.name)
self.assertEqual(True, single_workbook.data_acceleration_config['acceleration_enabled'])
self.assertEqual(False, single_workbook.data_acceleration_config['accelerate_now'])
def test_update_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.update, single_workbook)
def test_update_copy_fields(self):
with open(POPULATE_CONNECTIONS_XML, 'rb') as f:
connection_xml = f.read().decode('utf-8')
with open(UPDATE_XML, 'rb') as f:
update_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=connection_xml)
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_connections(single_workbook)
updated_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual(single_workbook._connections, updated_workbook._connections)
self.assertEqual(single_workbook._views, updated_workbook._views)
self.assertEqual(single_workbook.tags, updated_workbook.tags)
self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags)
self.assertEqual(single_workbook._preview_image, updated_workbook._preview_image)
def test_update_tags(self):
with open(ADD_TAGS_XML, 'rb') as f:
add_tags_xml = f.read().decode('utf-8')
with open(UPDATE_XML, 'rb') as f:
update_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags', text=add_tags_xml)
m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/b', status_code=204)
m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/d', status_code=204)
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
single_workbook._initial_tags.update(['a', 'b', 'c', 'd'])
single_workbook.tags.update(['a', 'c', 'e'])
updated_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual(single_workbook.tags, updated_workbook.tags)
self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags)
def test_download(self):
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content',
headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'})
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2')
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_sanitizes_name(self):
filename = "Name,With,Commas.twbx"
disposition = 'name="tableau_workbook"; filename="{}"'.format(filename)
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content',
headers={'Content-Disposition': disposition})
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2')
self.assertEqual(os.path.basename(file_path), "NameWithCommas.twbx")
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_extract_only(self):
# Pretend we're 2.5 for 'extract_only'
self.server.version = "2.5"
self.baseurl = self.server.workbooks.baseurl
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content?includeExtract=False',
headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'},
complete_qs=True)
# Technically this shouldn't download a twbx, but we are interested in the qs, not the file
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2', include_extract=False)
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.download, '')
def test_populate_views(self):
with open(POPULATE_VIEWS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_views(single_workbook)
views_list = single_workbook.views
self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id)
self.assertEqual('GDP per capita', views_list[0].name)
self.assertEqual('RESTAPISample/sheets/GDPpercapita', views_list[0].content_url)
self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id)
self.assertEqual('Country ranks', views_list[1].name)
self.assertEqual('RESTAPISample/sheets/Countryranks', views_list[1].content_url)
self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id)
self.assertEqual('Interest rates', views_list[2].name)
self.assertEqual('RESTAPISample/sheets/Interestrates', views_list[2].content_url)
def test_populate_views_with_usage(self):
with open(POPULATE_VIEWS_USAGE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views?includeUsageStatistics=true',
text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_views(single_workbook, usage=True)
views_list = single_workbook.views
self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id)
self.assertEqual(2, views_list[0].total_views)
self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id)
self.assertEqual(37, views_list[1].total_views)
self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id)
self.assertEqual(0, views_list[2].total_views)
def test_populate_views_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.populate_views, single_workbook)
def test_populate_connections(self):
with open(POPULATE_CONNECTIONS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_connections(single_workbook)
self.assertEqual('37ca6ced-58d7-4dcf-99dc-f0a85223cbef', single_workbook.connections[0].id)
self.assertEqual('dataengine', single_workbook.connections[0].connection_type)
self.assertEqual('4506225a-0d32-4ab1-82d3-c24e85f7afba', single_workbook.connections[0].datasource_id)
self.assertEqual('World Indicators', single_workbook.connections[0].datasource_name)
def test_populate_permissions(self):
with open(POPULATE_PERMISSIONS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/21778de4-b7b9-44bc-a599-1506a2639ace/permissions', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '21778de4-b7b9-44bc-a599-1506a2639ace'
self.server.workbooks.populate_permissions(single_workbook)
permissions = single_workbook.permissions
self.assertEqual(permissions[0].grantee.tag_name, 'group')
self.assertEqual(permissions[0].grantee.id, '5e5e1978-71fa-11e4-87dd-7382f5c437af')
self.assertDictEqual(permissions[0].capabilities, {
TSC.Permission.Capability.WebAuthoring: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.Filter: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.AddComment: TSC.Permission.Mode.Allow
})
self.assertEqual(permissions[1].grantee.tag_name, 'user')
self.assertEqual(permissions[1].grantee.id, '7c37ee24-c4b1-42b6-a154-eaeab7ee330a')
self.assertDictEqual(permissions[1].capabilities, {
TSC.Permission.Capability.ExportImage: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.ShareView: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Deny,
TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Deny
})
def test_add_permissions(self):
with open(UPDATE_PERMISSIONS, 'rb') as f:
response_xml = f.read().decode('utf-8')
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '21778de4-b7b9-44bc-a599-1506a2639ace'
bob = UserItem.as_reference("7c37ee24-c4b1-42b6-a154-eaeab7ee330a")
group_of_people = GroupItem.as_reference("5e5e1978-71fa-11e4-87dd-7382f5c437af")
new_permissions = [
PermissionsRule(bob, {'Write': 'Allow'}),
PermissionsRule(group_of_people, {'Read': 'Deny'})
]
with requests_mock.mock() as m:
m.put(self.baseurl + "/21778de4-b7b9-44bc-a599-1506a2639ace/permissions", text=response_xml)
permissions = self.server.workbooks.update_permissions(single_workbook, new_permissions)
self.assertEqual(permissions[0].grantee.tag_name, 'group')
self.assertEqual(permissions[0].grantee.id, '5e5e1978-71fa-11e4-87dd-7382f5c437af')
self.assertDictEqual(permissions[0].capabilities, {
TSC.Permission.Capability.Read: TSC.Permission.Mode.Deny
})
self.assertEqual(permissions[1].grantee.tag_name, 'user')
self.assertEqual(permissions[1].grantee.id, '7c37ee24-c4b1-42b6-a154-eaeab7ee330a')
self.assertDictEqual(permissions[1].capabilities, {
TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow
})
def test_populate_connections_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError,
self.server.workbooks.populate_connections,
single_workbook)
def test_populate_pdf(self):
self.server.version = "3.4"
self.baseurl = self.server.workbooks.baseurl
with open(POPULATE_PDF, "rb") as f:
response = f.read()
with requests_mock.mock() as m:
m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/pdf?type=a5&orientation=landscape",
content=response)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
type = TSC.PDFRequestOptions.PageType.A5
orientation = TSC.PDFRequestOptions.Orientation.Landscape
req_option = TSC.PDFRequestOptions(type, orientation)
self.server.workbooks.populate_pdf(single_workbook, req_option)
self.assertEqual(response, single_workbook.pdf)
def test_populate_preview_image(self):
with open(POPULATE_PREVIEW_IMAGE, 'rb') as f:
response = f.read()
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/previewImage', content=response)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_preview_image(single_workbook)
self.assertEqual(response, single_workbook.preview_image)
def test_populate_preview_image_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError,
self.server.workbooks.populate_preview_image,
single_workbook)
def test_publish(self):
with open(PUBLISH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl, text=response_xml)
new_workbook = TSC.WorkbookItem(name='Sample',
show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
sample_workbook = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx')
publish_mode = self.server.PublishMode.CreateNew
new_workbook = self.server.workbooks.publish(new_workbook,
sample_workbook,
publish_mode)
self.assertEqual('a8076ca1-e9d8-495e-bae6-c684dbb55836', new_workbook.id)
self.assertEqual('RESTAPISample', new_workbook.name)
self.assertEqual('RESTAPISample_0', new_workbook.content_url)
self.assertEqual(False, new_workbook.show_tabs)
self.assertEqual(1, new_workbook.size)
self.assertEqual('2016-08-18T18:33:24Z', format_datetime(new_workbook.created_at))
self.assertEqual('2016-08-18T20:31:34Z', format_datetime(new_workbook.updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', new_workbook.project_id)
self.assertEqual('default', new_workbook.project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', new_workbook.owner_id)
self.assertEqual('fe0b4e89-73f4-435e-952d-3a263fbfa56c', new_workbook.views[0].id)
self.assertEqual('GDP per capita', new_workbook.views[0].name)
self.assertEqual('RESTAPISample_0/sheets/GDPpercapita', new_workbook.views[0].content_url)
def test_publish_async(self):
self.server.version = '3.0'
baseurl = self.server.workbooks.baseurl
with open(PUBLISH_ASYNC_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl, text=response_xml)
new_workbook = TSC.WorkbookItem(name='Sample',
show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
sample_workbook = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx')
publish_mode = self.server.PublishMode.CreateNew
new_job = self.server.workbooks.publish(new_workbook,
sample_workbook,
publish_mode,
as_job=True)
self.assertEqual('7c3d599e-949f-44c3-94a1-f30ba85757e4', new_job.id)
self.assertEqual('PublishWorkbook', new_job.type)
self.assertEqual('0', new_job.progress)
self.assertEqual('2018-06-29T23:22:32Z', format_datetime(new_job.created_at))
self.assertEqual('1', new_job.finish_code)
def test_publish_invalid_file(self):
new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
self.assertRaises(IOError, self.server.workbooks.publish, new_workbook, '.',
self.server.PublishMode.CreateNew)
def test_publish_invalid_file_type(self):
new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
self.assertRaises(ValueError, self.server.workbooks.publish,
new_workbook, os.path.join(TEST_ASSET_DIR, 'SampleDS.tds'),
self.server.PublishMode.CreateNew)
def test_publish_multi_connection(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection1 = TSC.ConnectionItem()
connection1.server_address = 'mysql.test.com'
connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
connection2 = TSC.ConnectionItem()
connection2.server_address = 'pgsql.test.com'
connection2.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
response = RequestFactory.Workbook._generate_xml(new_workbook, connections=[connection1, connection2])
# Can't use ConnectionItem parser due to xml namespace problems
connection_results = ET.fromstring(response).findall('.//connection')
self.assertEqual(connection_results[0].get('serverAddress', None), 'mysql.test.com')
self.assertEqual(connection_results[0].find('connectionCredentials').get('name', None), 'test')
self.assertEqual(connection_results[1].get('serverAddress', None), 'pgsql.test.com')
self.assertEqual(connection_results[1].find('connectionCredentials').get('password', None), 'secret')
def test_publish_single_connection(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection_creds = TSC.ConnectionCredentials('test', 'secret', True)
response = RequestFactory.Workbook._generate_xml(new_workbook, connection_credentials=connection_creds)
# Can't use ConnectionItem parser due to xml namespace problems
credentials = ET.fromstring(response).findall('.//connectionCredentials')
self.assertEqual(len(credentials), 1)
self.assertEqual(credentials[0].get('name', None), 'test')
self.assertEqual(credentials[0].get('password', None), 'secret')
self.assertEqual(credentials[0].get('embed', None), 'true')
def test_credentials_and_multi_connect_raises_exception(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection_creds = TSC.ConnectionCredentials('test', 'secret', True)
connection1 = TSC.ConnectionItem()
connection1.server_address = 'mysql.test.com'
connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
with self.assertRaises(RuntimeError):
response = RequestFactory.Workbook._generate_xml(new_workbook,
connection_credentials=connection_creds,
connections=[connection1])
def test_synchronous_publish_timeout_error(self):
with requests_mock.mock() as m:
m.register_uri('POST', self.baseurl, status_code=504)
new_workbook = TSC.WorkbookItem(project_id='')
publish_mode = self.server.PublishMode.CreateNew
self.assertRaisesRegex(InternalServerError, 'Please use asynchronous publishing to avoid timeouts',
self.server.workbooks.publish, new_workbook, asset('SampleWB.twbx'), publish_mode)
|
the-stack_0_19441
|
from poetry.toml import dumps
from poetry.toml import loads
from poetry.utils.helpers import module_name
TESTS_DEFAULT = u"""from {package_name} import __version__
def test_version():
assert __version__ == '{version}'
"""
POETRY_DEFAULT = """\
[tool.poetry]
name = ""
version = ""
description = ""
authors = []
[tool.poetry.dependencies]
[tool.poetry.dev-dependencies]
"""
POETRY_WITH_LICENSE = """\
[tool.poetry]
name = ""
version = ""
description = ""
authors = []
license = ""
[tool.poetry.dependencies]
[tool.poetry.dev-dependencies]
"""
class Layout(object):
def __init__(
self,
project,
version="0.1.0",
description="",
readme_format="md",
author=None,
license=None,
python="*",
dependencies=None,
dev_dependencies=None,
):
self._project = project
self._package_name = module_name(project)
self._version = version
self._description = description
self._readme_format = readme_format
self._license = license
self._python = python
self._dependencies = dependencies or {}
self._dev_dependencies = dev_dependencies or {"pytest": "^3.5"}
if not author:
author = "Your Name <[email protected]>"
self._author = author
def create(self, path, with_tests=True):
path.mkdir(parents=True, exist_ok=True)
self._create_default(path)
self._create_readme(path)
if with_tests:
self._create_tests(path)
self._write_poetry(path)
def generate_poetry_content(self):
template = POETRY_DEFAULT
if self._license:
template = POETRY_WITH_LICENSE
content = loads(template)
poetry_content = content["tool"]["poetry"]
poetry_content["name"] = self._project
poetry_content["version"] = self._version
poetry_content["description"] = self._description
poetry_content["authors"].append(self._author)
if self._license:
poetry_content["license"] = self._license
poetry_content["dependencies"]["python"] = self._python
for dep_name, dep_constraint in self._dependencies.items():
poetry_content["dependencies"][dep_name] = dep_constraint
for dep_name, dep_constraint in self._dev_dependencies.items():
poetry_content["dev-dependencies"][dep_name] = dep_constraint
return dumps(content)
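    # Example (illustrative only; the project name, version, and dependency
    # constraint below are assumptions, not values taken from this module):
    #
    #   layout = Layout("my-project", version="0.1.0",
    #                   dependencies={"requests": "^2.18"})
    #   print(layout.generate_poetry_content())
    #
    # This emits a [tool.poetry] table with name, version, description and the
    # configured author, python = "*" plus requests under
    # [tool.poetry.dependencies], and pytest = "^3.5" under
    # [tool.poetry.dev-dependencies].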
def _create_default(self, path, src=True):
raise NotImplementedError()
def _create_readme(self, path):
if self._readme_format == "rst":
readme_file = path / "README.rst"
else:
readme_file = path / "README.md"
readme_file.touch()
def _create_tests(self, path):
self._dev_dependencies["pytest"] = "^3.0"
tests = path / "tests"
tests_init = tests / "__init__.py"
tests_default = tests / "test_{}.py".format(self._package_name)
tests.mkdir()
tests_init.touch(exist_ok=False)
with tests_default.open("w") as f:
f.write(
TESTS_DEFAULT.format(
package_name=self._package_name, version=self._version
)
)
def _write_poetry(self, path):
content = self.generate_poetry_content()
poetry = path / "pyproject.toml"
with poetry.open("w") as f:
f.write(content)
|
the-stack_0_19442
|
from hashlib import sha256
from django.conf import settings
from django.core.mail import send_mail
from django.template import loader
from django.urls import reverse
def send_activation_email(request, username, email):
template = loader.get_template('profiles/activation_email.html')
activation_url = reverse('profiles:activate', kwargs={'username': username}) + \
'?hash=' + get_activation_hash(username)
context = {
'url': request.build_absolute_uri(activation_url),
'username': username,
}
send_mail('University forum: activating account', template.render(context),
settings.SERVICE_EMAIL, [email])
def get_activation_hash(username):
return sha256(('activation:' + username + ':' + settings.SECRET_KEY).encode()).hexdigest()
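# Example of what these helpers produce (hypothetical username; the SECRET_KEY
# value is whatever the Django settings define):
#
#   get_activation_hash("alice")
#   == sha256(("activation:alice:" + settings.SECRET_KEY).encode()).hexdigest()
#
# send_activation_email() then mails a link of the form
#   <absolute URI of profiles:activate for "alice">?hash=<that hexdigest>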
|
the-stack_0_19445
|
import pathlib
import pandas as pd
project = None
def load_VISSIM_file(path=None, columns=None, use_cols=None, skiprows=0, nrows=None, index_col=False, sep=r"\s+",
skipfooter=0, header=None):
"""
Function to load data from the VISSIM data files format.
Parameters:
path (Path): File location.
columns (list): Column names.
use_cols (list): Columns to be used.
skiprows (int): Rows to skip from the top when reading DataFrame.
nrows (int): Number of rows to include.
index_col (Bool): Boolean, informs whether to treat first column as the index column.
sep (regEx): custom delimiter to define the separator(s) between useful data.
        skipfooter (int): Rows to skip from the bottom when reading DataFrame.
Returns:
raw_data: A pandas DataFrame.
"""
raw_data = pd.read_csv(filepath_or_buffer=path, sep=sep, names=columns, header=header, engine="python",
skiprows=skiprows,
skipfooter=skipfooter, usecols=use_cols, index_col=index_col, nrows=nrows)
return raw_data
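# Example usage (a sketch only; the file path, column names, and skiprows value
# are assumptions, not taken from any real VISSIM export):
#
#   import pathlib
#   path = pathlib.Path("network_001.mer")
#   df = load_VISSIM_file(path=path, columns=["t", "veh_no", "speed"], skiprows=10)
#
# All keyword arguments are forwarded to pandas.read_csv as documented above.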
def get_project_name(path):
"""
    Function used to get the name of the data set from the loaded DataFrame; it is used to name the output file.
Parameters:
path (Path): File path object.
Returns:
file_name: a string containing the name of the project.
"""
    df = load_VISSIM_file(path=path, columns=None, use_cols=None, skiprows=4, nrows=1, index_col=False, sep=r"\s|:")
df = df.values.tolist()[0]
file_name = [element for element in df if element != "Comment" and type(element) == str]
file_name = " ".join(file_name)
return file_name
def df_writer(project_name, analysis, data_directory):
"""
Function returns full file path and name of the save location.
Parameters:
project_name (str): The returned string from get_project_name()
analysis (str): The analysis type being performed; it is used to inform the filename.
data_directory: The directory path gathered from the ask dialogue in gui.py
Returns:
writer: a Pandas Excel writer object containing the file path of the project and where to save.
"""
save_filename = f"{analysis}_{project_name}.xlsx"
writer = pathlib.Path(data_directory).joinpath(save_filename)
return writer
def check_project_name(project, path):
    """Checks whether a project name exists and, if not, returns it using get_project_name()."""
if project is None:
project = 1
return get_project_name(path)
def df_to_numeric(columns, *dfs):
""" Converts any number of DataFrames with data as Python Objects to numerical data. """
for df in dfs:
for col in columns:
try:
df[col] = pd.to_numeric(df[col], errors="coerce")
except KeyError:
continue
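# Example (a sketch; the column names and DataFrames are assumptions):
#
#   df_to_numeric(["t", "speed"], raw_df, merged_df)
#
# Cells that cannot be converted become NaN (errors="coerce"), and DataFrames
# missing one of the listed columns are simply skipped via the KeyError handler.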
|
the-stack_0_19446
|
import csv, sys
from apiRequester import ApiRequester
class WeatherReporter:
"""
Class used to represent an weather reporter program
...
Attributes
----------
citiesCoordinates : dict
Dictionary with city code as key, and a tuple (latitude,longitude) as value
citiesTemperature : dict
Dictionary with city code as key, and city current temperature as value
destinationOriginForFlights : list
List of lists of the format [origin_city, destination_city], associated to each flight of
the input
numberOfApiCalls : int
Counter used to keep track of how much calls are made in total
to the OpenWeather API
"""
def __init__(self, apiRequester):
"""
Parameters
----------
apiRequester : ApiRequester
Api requester object used to fetch the temperature for an specific city
"""
self.apiRequester = apiRequester
self.citiesCoordinates = {}
self.citiesTemperature = {}
self.destinationOriginForFlights = []
self.numberOfApiCalls = 0
def readCsv(self, filename):
"""
        Extract city coordinates and flight information from the input csv file
...
Saves the coordinates of each city into the citiesCoordinates dictionary
with city code as key, and a tuple (latitude, longitude) as value
Then, adds a list of the format [origin_city, destination_city]
for each flight corresponding to each row into the destinationOriginForFlights list.
Parameters
----------
filename : str
The input csv filename
Raises
-------
        FileNotFoundError: If the specified file could not be found
        OSError: If an I/O error occurred when trying to read the file
"""
try:
file = open(filename)
except FileNotFoundError:
print("File not found")
sys.exit(1)
except OSError:
print(f"Error occurred when trying to open {filename}")
sys.exit(1)
else:
with file:
csvreader = csv.reader(file)
next(csvreader, None)
for row in csvreader:
for i in range(2):
currCity = row[i]
if currCity not in self.citiesCoordinates:
currCityLatitude = row[(i * 2) + 2]
currCityLongitude = row[(i * 2) + 3]
self.citiesCoordinates[currCity] = (currCityLatitude, currCityLongitude)
self.destinationOriginForFlights.append([row[0], row[1]])
        finally:
            if 'file' in locals():
                file.close()
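    # Expected input row layout, after the header that readCsv skips (a sketch;
    # the column names and coordinate values are assumptions inferred from the
    # indices used above):
    #
    #   origin, destination, origin_lat, origin_lon, dest_lat, dest_lon, ...
    #   TLC,    MEX,         19.33,      -99.56,     19.43,    -99.13
    #
    # row[0]/row[1] hold the city codes and row[2..5] the two coordinate pairs.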
def getTemperature(self, cityCode):
"""
Fetch a city temperature
Set the request url for the apiRequester object,
using the city latitude and longitude values as parameters,
then makes the request
Parameters
----------
cityCode : str
            A 3-character city code for the city whose temperature we want to get
Returns
-------
A string containing the temperature for this city.
        "N/A" if the temperature could not be fetched from the OpenWeather API
"""
temperature = "N/A"
currCityCoordinates = self.citiesCoordinates[cityCode]
self.apiRequester.setRequestUrl(currCityCoordinates[0], currCityCoordinates[1])
responseData = self.apiRequester.makeRequest()
        if responseData is not None:
temperature = responseData['main']['temp']
return temperature
def updateTemperatureRecords(self):
"""
        Fetch every city's temperature (if it is not already cached)
        and save it into the citiesTemperature dictionary used as a cache
Also, update the numberOfApiCalls counter on each request to the API.
"""
for city in self.citiesCoordinates:
if city not in self.citiesTemperature:
self.citiesTemperature[city] = self.getTemperature(city)
self.numberOfApiCalls+=1
def writeOutputCsv(self, output):
"""
        Appends to each list [origin_city, destination_city]
        associated with each flight inside destinationOriginForFlights
        the current temperature of both cities obtained from OpenWeather.
        Finally, writes this modified list of lists into a csv file
Raises
-------
        OSError: If an I/O error occurred when trying to open the file
"""
for i in range(len(self.destinationOriginForFlights)):
destinationOriginList = self.destinationOriginForFlights[i]
destinationOriginList+= [str(self.citiesTemperature[destinationOriginList[0]]) + "°C", str(self.citiesTemperature[destinationOriginList[1]]) + "°C"]
try:
with open(output, "w", newline="") as outputCsv:
writer = csv.writer(outputCsv)
writer.writerows(self.destinationOriginForFlights)
except OSError:
print(f"Error occurred when trying to open {output}")
sys.exit(1)
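    # Output row layout written above (illustrative; city codes and temperature
    # values are assumptions):
    #
    #   TLC, MEX, 22.5°C, 18.3°C
    #
    # i.e. origin, destination, origin temperature, destination temperature.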
def main():
apiRequester = ApiRequester()
weatherReporter = WeatherReporter(apiRequester)
weatherReporter.readCsv("../data/dataset1.csv")
print("Input csv read. Fetching temperature info for each city ...")
weatherReporter.updateTemperatureRecords()
    print("Number of calls to OpenWeather API", weatherReporter.numberOfApiCalls)
weatherReporter.writeOutputCsv("out.csv")
print("Results written in out.csv file in current folder")
if __name__ == "__main__":
main()
|
the-stack_0_19447
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Config files parser
"""
import argparse
import yaml
from geneva.utils.logger import Logger
# Global Keys
with open('geneva/config.yml', 'r') as f:
    keys = yaml.safe_load(f)
# Experiment Configurations (Sorted by type then alphabetically)
def parse_config():
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
# Integers
parser.add_argument('--batch_size',
type=int,
default=64,
help="Batch size used in training. Default: 64")
parser.add_argument('--conditioning_dim',
type=int,
default=128,
help='Dimensionality of the projected text \
representation after the conditional augmentation. \
Default: 128')
parser.add_argument('--disc_cond_channels',
type=int,
default=256,
help='For `conditioning` == concat, this flag'
'decides the number of channels to be concatenated'
'Default: 256')
parser.add_argument('--embedding_dim',
type=int,
default=1024,
help='The dimensionality of the text representation. \
Default: 1024')
parser.add_argument('--epochs',
type=int,
default=300,
help='Number of epochs for the experiment.\
Default: 300')
parser.add_argument('--hidden_dim',
type=int,
default=256,
help='Dimensionality of the RNN hidden state which is'
'used as condition for the generator. Default: 256')
parser.add_argument('--img_size',
type=int,
default=128,
help='Image size to use for training. \
Options = {128}')
parser.add_argument('--image_feat_dim',
type=int,
default=512,
help='image encoding number of channels for the'
'recurrent setup. Default: 512')
parser.add_argument('--input_dim',
type=int,
default=1024,
help='RNN condition dimension, the dimensionality of'
'the image encoder and question projector as well.')
parser.add_argument('--inception_count',
type=int,
default=5000,
help='Number of images to use for inception score.')
parser.add_argument('--noise_dim',
type=int,
default=100,
help="Dimensionality of the noise vector that is used\
as input to the generator: Default: 100")
parser.add_argument('--num_workers',
type=int,
default=8,
help="Degree of parallelism to use. Default: 8")
parser.add_argument('--num_objects',
type=int,
default=58,
help='Number of object the auxiliary objective of '
'object detection is trained on')
parser.add_argument('--projected_text_dim',
type=int,
default=1024,
help='Pre-fusion text projection dimension for the'
'recurrent setup. Default: 1024')
parser.add_argument('--save_rate',
type=int,
default=5000,
help='Number of iterations between saving current\
model sample generations. Default: 5000')
parser.add_argument('--sentence_embedding_dim',
type=int,
default=1024,
help='Dimensionality of the sentence encoding training'
'on top of glove word embedding. Default: 1024')
parser.add_argument('--vis_rate',
type=int,
default=20,
help='Number of iterations between each visualization.\
Default: 20')
# Floats
parser.add_argument('--aux_reg',
type=float,
default=5,
help='Weighting factor for the aux loss. Default: 5')
parser.add_argument('--cond_kl_reg',
type=float,
default=None,
help='CA Net KL penalty regularization weighting'
'factor. Default: None, means no regularization.')
parser.add_argument('--discriminator_lr',
type=float,
default=0.0004,
help="Learning rate used for optimizing the \
discriminator. Default = 0.0004")
parser.add_argument('--discriminator_beta1',
type=float,
default=0,
help="Beta1 value for Adam optimizers. Default = 0")
parser.add_argument('--discriminator_beta2',
type=float,
default=0.9,
help="Beta2 value for Adam optimizer. Default = 0.9")
parser.add_argument('--discriminator_weight_decay',
type=float,
default=0,
help='Weight decay for the discriminator. Default= 0')
parser.add_argument('--feature_encoder_lr',
type=float,
default=2e-3,
help='Learning rate for image encoder and condition'
'encoder. Default= 2e-3')
parser.add_argument('--generator_lr',
type=float,
default=0.0001,
help="Learning rate used for optimizing the generator \
Default = 0.0001")
parser.add_argument('--generator_beta1',
type=float,
default=0,
help="Beta1 value for Adam optimizers. Default = 0")
parser.add_argument('--generator_beta2',
type=float,
default=0.9,
help="Beta2 value for Adam optimizer. Default = 0.9")
parser.add_argument('--generator_weight_decay',
type=float,
default=0,
help='Weight decay for the generator. Default= 0')
parser.add_argument('--gp_reg',
type=float,
default=None,
help='Gradient penalty regularization weighting'
'factor. Default: None, means no regularization.')
parser.add_argument('--grad_clip',
type=float,
default=4,
help='Gradient clipping threshold for RNN and GRU.'
'Default: 4')
parser.add_argument('--gru_lr',
type=float,
default=0.0001,
help='Sentence encoder optimizer learning rate')
parser.add_argument('--rnn_lr',
type=float,
default=0.0005,
help='RNN optimizer learning rate')
parser.add_argument('--wrong_fake_ratio',
type=float,
default=0.5,
help='Ratio of wrong:fake losses.'
'Default: 0.5')
# Strings
parser.add_argument('--activation',
type=str,
default='relu',
help='Activation function to use.'
'Options = [relu, leaky_relu, selu]')
parser.add_argument('--arch',
type=str,
default='resblocks',
help='Network Architecture to use. Two options are'
'available {resblocks}')
parser.add_argument('--conditioning',
type=str,
default=None,
help='Method of Conditioning text. Default is None.'
'Options: {concat, projection}')
parser.add_argument('--condition_encoder_optimizer',
type=str,
default='adam',
help='Image encoder and text projection optimizer')
parser.add_argument('--criterion',
type=str,
default='hinge',
help='Loss function to use. Options:'
'{classical, hinge} Default: hinge')
parser.add_argument('--dataset',
type=str,
default='codraw',
help='Dataset to use for training. \
Options = {codraw, iclevr}')
parser.add_argument('--discriminator_optimizer',
type=str,
default='adam',
help="Optimizer used while training the discriminator. \
Default: Adam")
parser.add_argument('--disc_img_conditioning',
type=str,
default='subtract',
help='Image conditioning for discriminator, either'
'channel subtraction or concatenation.'
'Options = {concat, subtract}'
'Default: subtract')
parser.add_argument('--exp_name',
type=str,
default='TellDrawRepeat',
help='Experiment name that will be used for'
'visualization and Logging. Default: TellDrawRepeat')
parser.add_argument('--embedding_type',
type=str,
default='gru',
help='Type of sentence encoding. Train a GRU'
'over word embedding with \'gru\' option.'
'Default: gru')
parser.add_argument('--gan_type',
type=str,
default='recurrent_gan',
help='Gan type: recurrent. Options:'
'[recurrent_gan]. Default: recurrent_gan')
parser.add_argument('--generator_optimizer',
type=str,
default='adam',
help="Optimizer used while training the generator. \
Default: Adam")
parser.add_argument('--gen_fusion',
type=str,
default='concat',
help='Method to use when fusing the image features in'
'the generator. options = [concat, gate]')
parser.add_argument('--gru_optimizer',
type=str,
default='rmsprop',
help='Sentence encoder optimizer type')
parser.add_argument('--img_encoder_type',
type=str,
default='res_blocks',
help='Building blocks of the image encoder.'
' Default: res_blocks')
parser.add_argument('--load_snapshot',
type=str,
default=None,
help='Snapshot file to load model and optimizer'
'state from')
parser.add_argument('--log_path',
type=str,
default='logs',
help='Path where to save logs and image generations.')
parser.add_argument('--results_path',
type=str,
default='results/',
help='Path where to save the generated samples')
parser.add_argument('--rnn_optimizer',
type=str,
default='rmsprop',
help='Optimizer to use for RNN')
parser.add_argument('--test_dataset',
type=str,
help='Test dataset path key.')
parser.add_argument('--val_dataset',
type=str,
help='Validation dataset path key.')
parser.add_argument('--vis_server',
type=str,
default='http://localhost',
help='Visdom server address')
# Boolean
parser.add_argument('-debug',
action='store_true',
help='Debugging flag.(e.g, Do not save weights)')
parser.add_argument('-disc_sn',
action='store_true',
help='A flag that decides whether to use spectral norm'
'in the discriminator')
parser.add_argument('-generator_sn',
action='store_true',
help='A flag that decides whether to use'
'spectral norm in the generator.')
parser.add_argument('-generator_optim_image',
action='store_true',
help='A flag of whether to optimize the image encoder'
'w.r.t the generator.')
parser.add_argument('-inference_save_last_only',
action='store_true',
help='A flag that decides whether to only'
'save the last image for each dialogue.')
parser.add_argument('-metric_inception_objects',
action='store_true',
help='A flag that decides whether to evaluate & report'
'object detection accuracy')
parser.add_argument('-teacher_forcing',
action='store_true',
                        help='a flag to indicate whether to train using '
                        'teacher_forcing. NOTE: With teacher_forcing=False '
                        'more GPU memory will be used.')
parser.add_argument('-self_attention',
action='store_true',
help='A flag that decides whether to use'
'self-attention layers.')
parser.add_argument('-skip_connect',
action='store_true',
help='A flag that decides whether to have a skip'
'connection between the GRU output and the LSTM input')
parser.add_argument('-use_fd',
action='store_true',
help='a flag which decide whether to use image'
'features conditioning in the discriminator.')
parser.add_argument('-use_fg',
action='store_true',
help='a flag which decide whether to use image'
'features conditioning in the generator.')
args = parser.parse_args()
logger = Logger(args.log_path, args.exp_name)
logger.write_config(str(args))
return args
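# Example invocation (illustrative; the script name and chosen values are
# assumptions -- any subset of the flags defined above may be passed):
#
#   python train.py --batch_size 64 --dataset codraw --img_size 128 \
#       --gan_type recurrent_gan -teacher_forcing -use_fd -use_fg
#
# parse_config() returns the parsed argparse Namespace and also writes the full
# configuration string to the experiment log via Logger.write_config().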
|
the-stack_0_19449
|
# Copyright 2021 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EnvPool meta class for dm_env API."""
from abc import ABC, ABCMeta
from typing import Any, Dict, List, Tuple, Union, no_type_check
import dm_env
import numpy as np
import tree
from dm_env import TimeStep
from .data import dm_structure
from .envpool import EnvPoolMixin
from .utils import check_key_duplication
class DMEnvPoolMixin(ABC):
"""Special treatment for dm_env API."""
def observation_spec(self: Any) -> Tuple:
"""Observation spec from EnvSpec."""
return self.spec.observation_spec()
def action_spec(self: Any) -> Union[dm_env.specs.Array, Tuple]:
"""Action spec from EnvSpec."""
return self.spec.action_spec()
class DMEnvPoolMeta(ABCMeta):
"""Additional wrapper for EnvPool dm_env API."""
  def __new__(cls: Any, name: str, parents: Tuple, attrs: Dict) -> Any:
    """Check internal config and initialize data format conversion."""
base = parents[0]
parents = (base, DMEnvPoolMixin, EnvPoolMixin, dm_env.Environment)
state_keys = base._state_keys
action_keys = base._action_keys
check_key_duplication(name, "state", state_keys)
check_key_duplication(name, "action", action_keys)
state_structure, state_idx = dm_structure("State", state_keys)
def _to_dm(
self: Any,
state_values: List[np.ndarray],
reset: bool,
return_info: bool,
) -> TimeStep:
state = tree.unflatten_as(
state_structure, [state_values[i] for i in state_idx]
)
done = state.done
elapse = state.elapsed_step
discount = getattr(state, "discount", (1.0 - done).astype(np.float32))
step_type = np.full(done.shape, dm_env.StepType.MID)
step_type[(elapse == 0)] = dm_env.StepType.FIRST
step_type[done] = dm_env.StepType.LAST
timestep = TimeStep(
step_type=step_type,
observation=state.State,
reward=state.reward,
discount=discount,
)
return timestep
attrs["_to"] = _to_dm
subcls = super().__new__(cls, name, parents, attrs)
@no_type_check
def init(self: Any, spec: Any) -> None:
"""Set self.spec to EnvSpecMeta."""
super(subcls, self).__init__(spec)
self.spec = spec
setattr(subcls, "__init__", init) # noqa: B010
return subcls
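# Sketch of the step_type mapping performed by _to_dm above (a restatement of
# the logic, not an additional API): for each environment in the batch,
#
#   elapsed_step == 0  ->  dm_env.StepType.FIRST
#   done               ->  dm_env.StepType.LAST
#   otherwise          ->  dm_env.StepType.MID
#
# and discount falls back to (1.0 - done).astype(np.float32) whenever the state
# carries no explicit "discount" field.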
|
the-stack_0_19451
|
from gameskit.ui_elements import *
pygame.font.init()
class HUDLabel(UIElement):
def update_appearance(self):
brush = UIBrush(self._surface)
self._surface.fill((20, 150, 255))
font_surface = self.__font.render(self.__text, 1, self.__font_colour)
brush.draw_image((0.5, 0.5), (1, 1), font_surface, scaled_mode=True)
def __init__(self, position, size, text, font, font_colour):
UIElement.__init__(self, position, size)
self.__text = text
self.__font = font
self.__font_colour = font_colour
self.update_appearance()
def set_text(self, text):
self.__text = text
|
the-stack_0_19453
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Java/JARFLAGS.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import TestSCons
test = TestSCons.TestSCons()
test.subdir('src')
where_javac, java_version = test.java_where_javac()
where_jar = test.java_where_jar()
test.write('SConstruct', """
env = Environment(tools = ['javac', 'jar'],
JAVAC = r'%(where_javac)s',
JAR = r'%(where_jar)s',
JARFLAGS = 'cvf')
env['JARFLAGS'] = 'cvf'
class_files = env.Java(target = 'classes', source = 'src')
env.Jar(target = 'test.jar', source = class_files)
""" % locals())
test.write(['src', 'Example1.java'], """\
package src;
public class Example1
{
public static void main(String[] args)
{
}
}
""")
expect = test.wrap_stdout("""\
%(where_javac)s -d classes -sourcepath src src/Example1\.java
%(where_jar)s cvf test.jar -C classes src/Example1\.class
.*
adding: src/Example1\.class.*
""" % locals())
expect = expect.replace('/', os.sep)
test.run(arguments = '.',
match=TestSCons.match_re_dotall,
stdout = expect)
test.must_exist('test.jar')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_19455
|
"""
Tasks for conda world
"""
# TODO: must have already said this somewhere, but the parameter
# handling is a nightmare. For so many reasons. Like when chaining
# tasks, all must support the same parameters.
# TODO: conda env file/package generation: sort within each section (e.g. run dependencies, build dependencies)
# TODO: pkg name addition needs to support multiple packages e.g. gv gv-core
# TODO: pkg name addition needs to support existing recipe if present
# TODO: clean up list v. string form for _pin
# TODO: be able to generate env file (not just env)
# TODO: no support for pin deps etc in pip
# pip can install an "env" from remote file; what about conda?
# note: conda's api at https://github.com/conda/conda/issues/7059
# TODO: move tasks to conda.py and leave hacks here.
import platform
import os
import glob
import json
import re
import sys
import warnings
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
# TODO: Some conda stuff not imported until later because this file
# should be importable even without conda. Will deal with that in the
# future.
try:
import yaml
except ImportError:
yaml = None
from doit.action import CmdAction
from .util import _options_param,_options_param2, test_python, test_group, test_requires, get_tox_deps, get_tox_cmds, get_tox_python, get_env, pkg_tests, test_matrix, echo, get_buildreqs, read_pins, read_conda_packages,_all_extras_param, read_conda_namespace_map, _get_setup_metadata2
# TODO: for caching env on travis, what about links? option to copy?
try:
from conda.models.match_spec import MatchSpec
except ImportError:
pass # TODO: things will go wrong later...
########## UTIL/CONFIG ##########
## TODO: rename, plus link to hack about parameter sharing :(
name = {
'name':'name',
'long':'name',
'type':str,
'default':'test-environment'}
env_name = {
'name':'env_name',
'long':'env-name',
'type':str,
'default':'test-environment'}
env_name_again = {
'name':'env_name_again',
'long':'env-name-again',
'type':str,
'default':''}
##
# this is about selecting groups of optional extras
package_name = {'name':'package_name',
'long':'package-name',
'type':str,
'default':'' }
_channel_param = {
'name':'channel',
'long':'channel',
'short': 'c',
'type':list,
'default':[] # note: no channel means user's defaults (currently
# typically what comes with ana/miniconda)...is that
# what we want?
}
# TODO: pinning not supported for pip world yet
no_pin_deps = {
'name':'no_pin_deps',
'long':'no-pin-deps',
'type':bool,
'default':True,
'inverse':'pin-deps'
}
# hack around envs inside envs etc
CONDA_ROOT_EXE = os.environ.get('CONDA_EXE','conda') # TODO should at least warn if conda_exe not there; will be fixed as part of 0.7
# TODO: not sure what conda-using developers do/prefer...
# pip develop and don't install missing install deps or any build deps
python_develop = "pip install --no-deps --no-build-isolation -e ."
# pip develop and pip install missing deps
# python_develop = "pip install -e ."
# setuptools develop and don't install missing deps
# python_develop = "python setup.py develop --no-deps"
# setuptools develop and easy_install missing deps:
# python_develop = "python setup.py develop"
from .util import _get_dependencies
def _conda_build_deps(channel):
buildreqs = get_buildreqs()
deps = " ".join('"%s"'%_join_the_club(dep) for dep in buildreqs)
if len(buildreqs)>0:
return "conda install -y %s %s"%(" ".join(['-c %s'%c for c in channel]),deps)
else:
return echo("Skipping conda install (no build dependencies)")
def _pin(deps):
pins = read_pins('setup.cfg')
pins = { _join_the_club(d):pins[d] for d in pins }
if len(pins)==0:
warnings.warn("Pins requested, but no pins in setup.cfg")
return deps
deps = [_join_the_club(d) for d in deps]
pinned_but_missing = set(pins).difference(set([MatchSpec(d).name for d in deps]))
if len(pinned_but_missing)!=0:
raise ValueError("Pins specified for non-existent dependencies %s"%pinned_but_missing)
pinneddeps = []
for d in deps:
dname = MatchSpec(d).name
if dname in pins:
pinneddeps.append("%s ==%s"%(dname,pins[dname]))
else:
pinneddeps.append("%s"%dname)
return pinneddeps
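# Example (a sketch; the pin and dependency specs are assumptions, not read
# from any real setup.cfg): with a pin of numpy = 1.16 declared in setup.cfg,
#
#   _pin(["numpy >=1.0", "pandas"])  ->  ["numpy ==1.16", "pandas"]
#
# while a pin naming a package that is not in the dependency list raises
# ValueError.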
def _conda_install_with_options(options,channel,env_name_again,no_pin_deps,all_extras):
# TODO: list v string form for _pin
deps = _get_dependencies(['install_requires']+options,all_extras=all_extras)
deps = [_join_the_club(d) for d in deps]
if len(deps)>0:
deps = _pin(deps) if no_pin_deps is False else deps
deps = " ".join('"%s"'%dep for dep in deps)
# TODO and join the club?
e = '' if env_name_again=='' else '-n %s'%env_name_again
return "conda install -y " + e + " %s %s"%(" ".join(['-c %s'%c for c in channel]),deps)
else:
return echo("Skipping conda install (no dependencies)")
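# Example of the command string built above (illustrative; the package names,
# channel, and environment name are assumptions): with install_requires of
# ["requests", "numpy >=1.0"], channel=["conda-forge"], and
# env_name_again="test-env", the returned command is roughly
#
#   conda install -y -n test-env -c conda-forge "requests" "numpy >=1.0"
#
# subject to version pins when --pin-deps is passed and to conda package name
# remapping via _join_the_club.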
# TODO: another parameter workaround
def _conda_install_with_options_hacked(options,channel,no_pin_deps,all_extras):
return _conda_install_with_options(options,channel,'',no_pin_deps,all_extras)
############################################################
# TASKS...
########## MISC ##########
def task_env_capture():
"""Report all information required to recreate current conda environment"""
return {'actions':["conda info","conda list","conda env export"]}
def task_env_export2():
# TODO: support channel pins too maybe. Either as a separate thing that can
# also be requested (like version pins), or maybe just use channel::pkg in
# pins?
# TODO: required, rename, friendlier
env_file = {
'name':'env_file',
'long':'env-file',
'type':str,
'default':''}
no_advert = {
'name':'no_advert',
'long':'no-advert',
'type':bool,
'default':False,
'inverse':'advert'
}
def x(no_pin_deps,package_name,options2,channel,all_extras,env_file,env_name_again,no_advert):
from conda_env.env import Environment
options = set(options2).union(set(read_conda_packages('setup.cfg',package_name)))
deps = [d for d in _get_dependencies(['install_requires']+list(options),all_extras=all_extras)]
if no_pin_deps is False:
deps = _pin(deps)
deps = [_join_the_club(d) for d in deps]
e = Environment(
name=env_name_again,
channels=channel,
filename = env_file,
dependencies=sorted(deps))
e.save()
if not no_advert:
# hack in link back
with open(env_file,'r+') as f:
content = f.read()
f.seek(0)
# probably more useful info could be put here
f.write("# file created by pyctdev:\n# " + " ".join(sys.argv) + "\n\n" + content)
return {'actions':[x],
'params': [_options_param2,_channel_param,_all_extras_param,no_pin_deps,package_name,env_file,env_name_again,no_advert]
}
def task_env_export():
"""
Generate a pinned environment.yaml from specified env, filtering
against specified groups of deps.
If env does not exist, will be created first.
Pretty awkward right now! Have to run something like this...
doit ecosystem=conda env_export --env-name [ENV_NAME] --env-file [SOME_FILE.yaml] --env-name-again [ENV_NAME] env_create --name [ENV_NAME]
e.g.
doit ecosystem=conda env_export --env-name _pyctdev_test_one --env-file pyctdev_test_one.yaml --env-name-again _pyctdev_test_one --options examples env_create --name _pyctdev_test_one
"""
# TODO: required, rename, friendlier
env_file = {
'name':'env_file',
'long':'env-file',
'type':str,
'default':''}
def x(env_name,options,env_file,all_extras):
import collections
from conda_env.env import from_environment
from conda.cli.python_api import Commands, run_command
env_names = [(os.path.basename(e),e) for e in json.loads(run_command(Commands.INFO,"--json")[0])['envs']]
counts = collections.Counter([x[0] for x in env_names])
assert counts[env_name]==1 # would need more than name to be sure...
prefix = dict(env_names)[env_name]
E = from_environment(env_name, prefix, no_builds=True, ignore_channels=False)
deps = [_join_the_club(d) for d in _get_dependencies(['install_requires']+options,all_extras=all_extras)]
deps = set([MatchSpec(d).name for d in deps])
for what in E.dependencies:
E.dependencies[what] = [d for d in E.dependencies[what] if MatchSpec(d).name in deps]
# fix up conda channels TODO: should probably just use non-env
# commands all along instead of conda env
if 'conda' in E.dependencies:
packages = {package['name']:package for package in json.loads(run_command(Commands.LIST,"-p", prefix, "--json")[0])}
E.dependencies['conda'] = ["%s%s"%( (packages[MatchSpec(x).name]['channel']+"::" if packages[MatchSpec(x).name]['channel']!="defaults" else '') ,x) for x in E.dependencies['conda']]
E.channels = ["defaults"]
# what could go wrong?
E.dependencies.raw = []
if len(E.dependencies.get('conda',[]))>0:
E.dependencies.raw += E.dependencies['conda']
if len(E.dependencies.get('pip',[]))>0:
E.dependencies.raw += [{'pip':E.dependencies['pip']}]
# TODO: add python_requires to conda deps?
E.prefix = None
# TODO: win/unicode
with open(env_file,'w') as f:
f.write(E.to_yaml())
return {'actions':[
CmdAction(_hacked_conda_install_with_options),
x],
'task_dep': ['env_create'],
'params': [env_name, _options_param, env_file, env_name_again,_options_param,_channel_param,_all_extras_param, no_pin_deps]}
# because of default options value...removing 'tests'
def _hacked_conda_install_with_options(task,options,channel,env_name_again,no_pin_deps,all_extras):
if 'tests' in task.options.get('options',[]):
task.options['options'].remove('tests')
return _conda_install_with_options(options,channel,env_name_again,no_pin_deps,all_extras)
miniconda_url = {
"Windows": "https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe",
"Linux": "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh",
"Darwin": "https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh"
}
# Download & install miniconda...Requires python already, so it might
# seem odd to have this. But many systems (including generic
# (non-python) travis and appveyor images) now include at least some
# system python, in which case this command can be used. But generally
# people will have installed python themselves, so the download and
# install miniconda tasks can be ignored.
def task_miniconda_download():
"""Download Miniconda3-latest"""
url = miniconda_url[platform.system()]
miniconda_installer = url.split('/')[-1]
def download_miniconda(targets):
urlretrieve(url,miniconda_installer)
return {'targets': [miniconda_installer],
'uptodate': [True], # (as has no deps)
'actions': [download_miniconda]}
def task_miniconda_install():
"""Install Miniconda3-latest to location if not already present"""
# NOTE: if caching on CI, will result in no new mc being installed
# ever until cache is cleared
location = {
'name':'location',
'long':'location',
'short':'l',
'type':str,
'default':os.path.abspath(os.path.expanduser('~/miniconda'))}
miniconda_installer = miniconda_url[platform.system()].split('/')[-1]
return {
'file_dep': [miniconda_installer],
'uptodate': [_mc_installed],
'params': [location],
'actions':
# TODO: check windows situation with update
['START /WAIT %s'%miniconda_installer + " /S /AddToPath=0 /D=%(location)s"] if platform.system() == "Windows" else ["bash %s"%miniconda_installer + " -b -u -p %(location)s"]
}
# TODO: this is another doit param hack :(
def _mc_installed(task,values):
if task.options is not None:
return os.path.exists(task.options['location'])
else:
for p in task.params:
if p['name']=='location':
return os.path.exists(p['default'])
return False
def task_ecosystem_setup():
"""Common conda setup (must be run in base env).
Updates to latest conda, and anaconda-client (cb is pinned)
"""
def thing1(channel):
return "conda update -y %s conda"%" ".join(['-c %s'%c for c in channel])
def thing2(channel):
# TODO: beware pin here and in setup.py!
return 'conda install -y %s anaconda-client conda-build'%" ".join(['-c %s'%c for c in channel])
return {
'actions': [
CmdAction(thing1),
CmdAction(thing2)
],
'params': [_channel_param]}
########## PACKAGING ##########
recipe_param = {
'name':'recipe',
'long':'recipe',
'type':str,
'default':''
}
# TODO: python2conda or something, and would be nice to use param ;)
def _join_the_club(dep):
# note: using conda's matchspec to read python package spec; should use
# fn from python packaging instead
# cb at least at 3.10.1 interprets square brackets as selectors
# even if not after a # and then silently drops...not sure what's
# accidental and what's deliberate difference between cb and
# conda. Meanwhile, I've been using the fact that e.g. conda
# install "dask[complete]" results in installing "dask" to
# implement the convention that conda packages contain everything
# i.e. any pip install x[option1,option2,...] is covered by conda
# install x. see https://github.com/pyviz/pyct/issues/42
new = re.sub(r'\[.*?\]','',dep)
# not much point warning only here, since it happens in other places too
#if new!=dep:warnings.warn("Changed your dep from %s to %s"%(dep,new))
# should be read just once rather than for each dep!
nsmap = read_conda_namespace_map('setup.cfg')
ms = MatchSpec(new)
out = "%s"%nsmap.get(ms.name,ms.name)
if ms.version is not None:
# why it doesn't include == already?
if '==' in new:
assert "===" not in new # sorry
out+= " =="
else:
out+= " "
out+= "%s"%ms.version
return out
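# Examples of the conversion above (assuming no entries in the setup.cfg conda
# namespace map; package names are illustrative):
#
#   _join_the_club("dask[complete]")  ->  "dask"
#   _join_the_club("somepkg ==1.2")   ->  "somepkg ==1.2"
#
# i.e. pip extras in square brackets are dropped and any exact version spec is
# re-emitted with conda's "==" syntax.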
# TODO: (almost) duplicates some bits of package_build
# TODO: missing from pip version
def task_package_test():
"""Test existing package
Specify a "test matrix" (kind of) via repeated --test-python,
--test-group, and --test-requires.
"""
def thing(channel,recipe):
cmd = "conda build %s conda.recipe/%s"%(" ".join(['-c %s'%c for c in channel]),
"%(recipe)s")
return cmd
def thing2(channel,pkg_tests,test_python,test_group,test_requires,recipe):
cmds = []
if pkg_tests:
# TODO: should test groups just be applied across others rather than product?
# It's about test isolation vs speed of running tests...
for (p,g,r,w) in test_matrix(test_python,test_group,test_requires,['pkg']):
cmds.append(
thing(channel,recipe)+" -t --append-file conda.recipe/%s/recipe_append--%s-%s-%s-%s.yaml"%("%(recipe)s",p,g,r,w)
)
cmds.append("conda build purge") # remove test/work intermediates (see same comment below)
# hack again for returning variable number of commands...
return " && ".join(cmds)
def create_recipe_append(recipe,test_python,test_group,test_requires,pkg_tests):
if not pkg_tests:
return
if yaml is None:
raise ValueError("Install pyyaml or equivalent; see extras_require['ecosystem_conda'].")
for (p,g,r,w) in test_matrix(test_python,test_group,test_requires,['pkg']):
environment = get_env(p,g,r,w)
deps = get_tox_deps(environment,hack_one=True) # note the hack_one, which is different from package_build
deps = [_join_the_club(d) for d in deps]
cmds = get_tox_cmds(environment)
py = get_tox_python(environment)
# deps and cmds are appended
#
# TODO: will overwrite recipe_append--... if someone
# already happens to use it...
#
# would maybe like to do something more like conda build
# conda.recipe -t existing_pkg --extra-command ... --extra-deps ...
with open("conda.recipe/%s/recipe_append--%s-%s-%s-%s.yaml"%(recipe,p,g,r,w),'w') as f:
f.write(yaml.dump(
{
'test':{
'requires':['python =%s'%py]+deps,
'commands':cmds,
# still undecided about which config files to use
'source_files': ['tox.ini']
}},default_flow_style=False))
def remove_recipe_append_and_clobber(recipe,pkg_tests,test_python,test_group,test_requires):
        try:
            os.remove(os.path.join("conda.recipe",recipe,"_pyctdev_recipe_clobber.yaml"))
        except OSError:
            pass
        if not pkg_tests:
            return
        for (p,g,r,w) in test_matrix(test_python,test_group,test_requires,['pkg']):
            try:
                os.remove(os.path.join("conda.recipe",recipe,"recipe_append--%s-%s-%s-%s.yaml"%(p,g,r,w)))
            except OSError:
                pass
return {'actions': [
# then test it...
# (if test commands overlap what's in recipe, will be some
# repetition...they ran above, and they will run again...)
create_recipe_append,
CmdAction(thing2),
],
'teardown': [remove_recipe_append_and_clobber],
'params': [_channel_param, recipe_param, test_python, test_group, test_requires, pkg_tests]}
def task_package_build():
"""Build and then test conda.recipe/ (or specified alternative).
Specify --no-pkg-tests to avoid running any tests other than those
defined explicitly in the recipe (i.e. to run only vanilla conda
build, without any modifications).
Specify a "test matrix" (kind of) via repeated --test-python,
--test-group, and --test-requires.
    Note that whatever channels you supply at build time must also be
    supplied by users of the package at install time if they are to get
    the same(ish) dependencies as were used at build time. (TODO: will be
    able to improve this with conda 4.4.)
"""
# TODO: conda.recipe path hardcoded/repeated
# hacks to get a quick version of
# https://github.com/conda/conda-build/issues/2648
pin_deps_as_env = {
'name':'pin_deps_as_env',
'long':'pin-deps-as-env',
'type':str,
'default':''}
force = {
'name':'force',
'long':'force',
'type':bool,
'default':False}
def create_base_recipe(package_name,force):
# TODO: need to replace this with checking for existing recipe and using that.
# and fall back to package name in normal setup.cfg
if os.path.exists("conda.recipe/meta.yaml") and not force:
print("conda.recipe/meta.yaml exists; not overwriting without --force")
return
package_name_supplied = True
if package_name == '':
package_name_supplied = False
try:
package_name = _get_setup_metadata2('name')
except KeyError:
raise ValueError("--package-name not supplied and not found in setup.cfg/setup.py")
# read from setup.cfg (not supporting doing it in setup.py)
try:
extras = str(read_conda_packages('setup.cfg',package_name))
except KeyError:
if package_name_supplied:
raise ValueError("You requested package name %s but no entry found in setup.cfg; omit --package-name or ensure you have defined conda package(s) in setup.cfg"%package_name)
extras = '[]'
        with open(os.path.join(os.path.dirname(__file__),"condatemplate.yaml")) as template_file:
            r = template_file.read()
# hack https://github.com/conda/conda-build/issues/2475
r = r.replace(r"{{ pname }}",package_name)
if not os.path.exists("conda.recipe"): # could do better/race
os.makedirs("conda.recipe")
with open("conda.recipe/meta.yaml",'w') as f:
f.write("{%% set pname = %s %%}\n"%package_name)
f.write("{%% set extras = %s %%}\n"%extras)
buildreqs = get_buildreqs()
buildeps = "["+ ",".join('"%s"'%_join_the_club(dep) for dep in buildreqs) + "]"
f.write("{%% set builddeps = %s %%}\n"%buildeps)
f.write(r)
def create_recipe_clobber(recipe,pin_deps_as_env,no_pin_deps,package_name):
if pin_deps_as_env == '' and no_pin_deps is True:
return
else:
extras = read_conda_packages('setup.cfg',package_name)
deps = _get_dependencies(['install_requires']+extras)
deps = [_join_the_club(d) for d in deps]
if pin_deps_as_env != '':
assert no_pin_deps is True
# TODO: unify with conda in env_export
env_name = pin_deps_as_env
import collections
from conda.cli.python_api import Commands, run_command
env_names = [(os.path.basename(e),e) for e in json.loads(run_command(Commands.INFO,"--json")[0])['envs']]
counts = collections.Counter([x[0] for x in env_names])
assert counts[env_name]==1 # would need more than name to be sure...
prefix = dict(env_names)[env_name]
packages = json.loads(run_command(Commands.LIST,"-p %s --json"%prefix)[0])
packagesd = {package['name']:package for package in packages}
# TODO: could add channel to the pin...
requirements_run = ["%s ==%s"%(MatchSpec(d).name,packagesd[MatchSpec(d).name]['version']) for d in deps]
else:
requirements_run = _pin(deps)
with open("conda.recipe/%s/_pyctdev_recipe_clobber.yaml"%recipe,'w') as f:
f.write(yaml.dump(
{
'requirements':{
'run': requirements_run
}
},default_flow_style=False))
# TODO: this should be requested by flag! like for pip
def thing0(channel):
buildreqs = get_buildreqs()
if len(buildreqs)>0:
deps = " ".join('"%s"'%_join_the_club(dep) for dep in buildreqs)
return "conda install -y %s %s"%(" ".join(['-c %s'%c for c in channel]),deps)
else:
return 'echo "no build reqs"'
def thing(channel,pin_deps_as_env,recipe,no_pin_deps):
cmd = "conda build %s conda.recipe/%s"%(" ".join(['-c %s'%c for c in channel]),
"%(recipe)s")
if pin_deps_as_env != '' or no_pin_deps is False:
cmd += " --clobber-file conda.recipe/%s/_pyctdev_recipe_clobber.yaml"%recipe
return cmd
def thing2(channel,pkg_tests,test_python,test_group,test_requires,recipe,pin_deps_as_env,no_pin_deps):
cmds = []
if pkg_tests:
# TODO: should test groups just be applied across others rather than product?
# It's about test isolation vs speed of running tests...
for (p,g,r,w) in test_matrix(test_python,test_group,test_requires,['pkg']):
cmds.append(
thing(channel,pin_deps_as_env,recipe,no_pin_deps)+" -t --append-file conda.recipe/%s/recipe_append--%s-%s-%s-%s.yaml"%("%(recipe)s",p,g,r,w)
)
cmds.append("conda build purge") # remove test/work intermediates (see same comment below)
# hack again for returning variable number of commands...
return " && ".join(cmds)
def create_recipe_append(recipe,test_python,test_group,test_requires,pkg_tests):
if not pkg_tests:
return
if yaml is None:
raise ValueError("Install pyyaml or equivalent; see extras_require['ecosystem_conda'].")
for (p,g,r,w) in test_matrix(test_python,test_group,test_requires,['pkg']):
environment = get_env(p,g,r,w)
deps = [_join_the_club(d) for d in get_tox_deps(environment)]
cmds = get_tox_cmds(environment)
py = get_tox_python(environment)
# deps and cmds are appended
#
# TODO: will overwrite recipe_append--... if someone
# already happens to use it...
#
# would maybe like to do something more like conda build
# conda.recipe -t existing_pkg --extra-command ... --extra-deps ...
with open("conda.recipe/%s/recipe_append--%s-%s-%s-%s.yaml"%(recipe,p,g,r,w),'w') as f:
f.write(yaml.dump(
{
'test':{
'requires':['python =%s'%py]+deps,
'commands':cmds,
# still undecided about which config files to use
'source_files': ['tox.ini']
}},default_flow_style=False))
def remove_recipe_append_and_clobber(recipe,pkg_tests,test_python,test_group,test_requires):
        try:
            os.remove(os.path.join("conda.recipe",recipe,"_pyctdev_recipe_clobber.yaml"))
        except OSError:
            pass
        if not pkg_tests:
            return
        for (p,g,r,w) in test_matrix(test_python,test_group,test_requires,['pkg']):
            try:
                os.remove(os.path.join("conda.recipe",recipe,"recipe_append--%s-%s-%s-%s.yaml"%(p,g,r,w)))
            except OSError:
                pass
return {'actions': [
        # 0. install build requirements (conda build doesn't support pyproject.toml/PEP 518)
CmdAction(thing0),
create_base_recipe,
create_recipe_clobber,
# first build the package...
CmdAction(thing),
"conda build purge", # remove test/work intermediates (disk space on travis...but could potentially annoy someone as it'll remove other test/work intermediates too...)
# then test it...
# (if test commands overlap what's in recipe, will be some
# repetition...they ran above, and they will run again...)
create_recipe_append,
CmdAction(thing2),
],
'teardown': [remove_recipe_append_and_clobber],
'params': [_channel_param, recipe_param, test_python, test_group, test_requires, pkg_tests, pin_deps_as_env,no_pin_deps,package_name, force]}
def task_package_upload():
"""Upload package built from conda.recipe/ (or specified alternative)."""
# TODO: need to upload only if package doesn't exist (as
# e.g. there are cron builds)
def thing(label):
# TODO: fix backticks hack/windows
return 'anaconda --token %(token)s upload --user %(user)s ' + ' '.join(['--label %s'%l for l in label]) + ' `conda build --output conda.recipe/%(recipe)s`'
label_param = {
'name':'label',
'long':'label',
'short':'l',
'type':list,
'default':[]}
# should be required, when I figure out params
token_param = {
'name':'token',
'long':'token',
'type':str,
'default':''}
# should be required, when I figure out params
user_param = {
'name':'user',
'long':'user',
'type':str,
'default':'pyviz'}
return {'actions': [CmdAction(thing)],
'params': [label_param,token_param,recipe_param,user_param]}
########## TESTING ##########
# TODO
########## DOCS ##########
# TODO
########## FOR DEVELOPERS ##########
# TODO: not sure this task buys much (but it allows calling create_env
# even if the env already exists, for updating).
def task_env_create():
"""Create named environment if it doesn't already exist
Environment will include pyctdev.
"""
python = {
'name':'python',
'long':'python',
'type':str,
'default':'3.6'}
# TODO: improve messages about missing deps
try:
from conda.cli.python_api import Commands, run_command # noqa: hack
uptodate = _env_exists
    except ImportError:
uptodate = False
def _morex(channel):
return CONDA_ROOT_EXE + " create -y %s"%(" ".join(['-c %s'%c for c in channel])) + " --name %(name)s python=%(python)s"
def _morexx():
        # when installing self into environment, get from appropriate channel
# (doing this is a hack anyway/depends how env stacking ends up going)
from . import __version__
from setuptools._vendor.packaging.version import Version
selfchan = "pyviz"
if Version(__version__).is_prerelease:
selfchan+="/label/dev"
if "PYCTDEV_SELF_CHANNEL" in os.environ:
selfchan=os.environ["PYCTDEV_SELF_CHANNEL"]
if selfchan!="":
selfchan = " -c " + selfchan
return CONDA_ROOT_EXE + " install -y --name %(name)s " + selfchan + " pyctdev"
return {
'params': [python,name,_channel_param],
'uptodate': [uptodate],
# TODO: Wouldn't need to check for env if conda create --force
# would overwrite/update existing env.
# TODO: note: pyctdev when testing itself will use previous pyctdev
# but not yet testing this command...
'actions': [CmdAction(_morex),CmdAction(_morexx)]}
# TODO: this is another doit param hack :(
# need to file issue. meanwhile probably decorate uptodate fns
def _env_exists(task,values):
name = None
if task.options is not None:
name = task.options['name']
else:
for p in task.params:
if p['name']=='name':
name = p['default']
if name is None:
return False
else:
from conda.cli.python_api import Commands, run_command
return name in [os.path.basename(e) for e in json.loads(run_command(Commands.INFO,"--json")[0])['envs']]
# TODO: doit - how to share parameters with dependencies? Lots of
# awkwardness here to work around that...
# conda installs are independent tasks for speed (so conda gets all
# deps to think about at once)
# TODO: should be one command with --options param
def task_develop_install():
"""python develop install, with specified optional groups of dependencies (installed by conda only).
Typically ``conda install "test dependencies" && pip install -e . --no-deps``.
Pass --options multiple times to specify other optional groups
(see project's setup.py for available options).
E.g.
``doit develop_install -o examples -o tests``
``doit develop_install -o all``
"""
return {'actions': [
CmdAction(_conda_build_deps),
CmdAction(_conda_install_with_options_hacked),
python_develop],
'params': [_options_param,_channel_param,no_pin_deps,_all_extras_param]}
def task_env_dependency_graph():
"""Write out dependency graph of named environment."""
def _x(env_name):
##### find the environment
# (todo copied from earlier in file!)
import collections
from conda.cli.python_api import Commands, run_command
env_names = [(os.path.basename(e),e) for e in json.loads(run_command(Commands.INFO,"--json")[0])['envs']]
counts = collections.Counter([x[0] for x in env_names])
assert counts[env_name]==1 # would need more than name to be sure...
prefix = dict(env_names)[env_name]
###### build graph from packages' metadata
nodes = set()
edges = set()
for pkgmetafile in glob.glob(os.path.join(prefix,'conda-meta','*.json')):
            with open(pkgmetafile) as pkgmetaf:
                pkgmeta = json.load(pkgmetaf)
pkgname = pkgmeta['name']
nodes.add(pkgname)
for d in pkgmeta.get('depends', []):
edges.add( (pkgname, MatchSpec(d).name) )
###### write out the graph
try:
import graphviz
except ImportError:
graphviz = None
if graphviz:
G = graphviz.Digraph(filename=env_name,format='svg') # can open in browser, can search text
for n in nodes:
G.node(n)
for e in edges:
G.edge(*e)
G.render()
print("wrote %s.svg"%env_name)
else:
# should replace this made up format with something else
with open(env_name+".txt",'w') as f:
f.write("***** packages *****\n")
for n in nodes:
f.write("%s\n"%n)
f.write("\n***** dependencies *****\n")
for e in edges:
f.write("%s -> %s\n"%e)
print("wrote %s.txt (install graphviz for svg)"%env_name)
return {'actions': [_x,], 'params':[env_name,]}
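# Hypothetical command-line sketch of the tasks above (flag spellings follow the
# param definitions in this file where visible; --channel and --env-name are
# assumptions):
#   doit ecosystem_setup --channel=pyviz
#   doit package_build --test-python=py36 --test-group=unit
#   doit package_upload --token=$ANACONDA_TOKEN --user=pyviz --label=dev
#   doit env_create --name=test-env --python=3.6
#   doit env_dependency_graph --env-name=test-env    # writes test-env.svg (or .txt)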
|
the-stack_0_19456
|
from typing import (
List,
Optional,
Tuple,
)
import base64
from collections import deque
import pathlib
from IPython import display as ipydisplay
import numpy as np
from PIL import Image
import torch
from vendor.atari_wrappers import make_atari, wrap_deepmind
from utils_types import (
GymImg,
GymObs,
TensorObs,
TensorStack4,
TensorStack5,
TorchDevice,
)
from utils_drl import Agent
# HTML_TEMPLATE is a template element for displaying an mp4 video
HTML_TEMPLATE = """<video alt="{alt}" autoplay loop controls style="height: 400px;">
<source src="data:video/mp4;base64,{data}" type="video/mp4" />
</video>"""
class MyEnv(object):
def __init__(self, device: TorchDevice) -> None:
env_raw = make_atari('BreakoutNoFrameskip-v4')
self.__env_train = wrap_deepmind(env_raw, episode_life=True)
env_raw = make_atari('BreakoutNoFrameskip-v4')
self.__env_eval = wrap_deepmind(env_raw, episode_life=True)
self.__env = self.__env_train
self.__device = device
def reset(
self,
render: bool = False,
) -> Tuple[List[TensorObs], float, List[GymImg]]:
"""reset resets and initializes the underlying gym environment."""
self.__env.reset()
init_reward = 0.
observations = []
frames = []
for _ in range(5): # no-op
obs, reward, done = self.step(0)
observations.append(obs)
init_reward += reward
if done:
return self.reset(render)
if render:
frames.append(self.get_frame())
return observations, init_reward, frames
def step(self, action: int) -> Tuple[TensorObs, int, bool]:
"""step forwards an action to the environment and returns the newest
        observation, the reward, and a bool value indicating whether the
episode is terminated."""
action = action + 1 if not action == 0 else 0
obs, reward, done, _ = self.__env.step(action)
return self.to_tensor(obs), reward, done
def get_frame(self) -> GymImg:
"""get_frame renders the current game frame."""
return Image.fromarray(self.__env.render(mode="rgb_array"))
@staticmethod
def to_tensor(obs: GymObs) -> TensorObs:
"""to_tensor converts an observation to a torch tensor."""
return torch.from_numpy(obs).view(1, 84, 84)
@staticmethod
def get_action_dim() -> int:
"""get_action_dim returns the reduced number of actions."""
return 3
@staticmethod
def get_action_meanings() -> List[str]:
"""get_action_meanings returns the actual meanings of the reduced
actions."""
return ["NOOP", "RIGHT", "LEFT"]
@staticmethod
def get_eval_lives() -> int:
"""get_eval_lives returns the number of lives to consume in an
evaluation round."""
return 5
@staticmethod
def make_state(obs_queue: deque) -> TensorStack4:
"""make_state makes up a state given an obs queue."""
return torch.cat(list(obs_queue)[1:]).unsqueeze(0)
@staticmethod
def make_folded_state(obs_queue: deque) -> TensorStack5:
"""make_folded_state makes up an n_state given an obs queue."""
return torch.cat(list(obs_queue)).unsqueeze(0)
@staticmethod
def show_video(path_to_mp4: str) -> None:
"""show_video creates an HTML element to display the given mp4 video in
IPython."""
mp4 = pathlib.Path(path_to_mp4)
video_b64 = base64.b64encode(mp4.read_bytes())
html = HTML_TEMPLATE.format(alt=mp4, data=video_b64.decode("ascii"))
ipydisplay.display(ipydisplay.HTML(data=html))
def evaluate(
self,
obs_queue: deque,
agent: Agent,
num_episode: int = 3,
render: bool = False,
) -> Tuple[
float,
List[GymImg],
]:
"""evaluate uses the given agent to run the game for a few episodes and
returns the average reward and the captured frames."""
self.__env = self.__env_eval
ep_rewards = []
frames = []
for _ in range(self.get_eval_lives() * num_episode):
observations, ep_reward, _frames = self.reset(render=render)
for obs in observations:
obs_queue.append(obs)
if render:
frames.extend(_frames)
done = False
while not done:
state = self.make_state(obs_queue).to(self.__device).float()
action = agent.run(state)
obs, reward, done = self.step(action)
ep_reward += reward
obs_queue.append(obs)
if render:
frames.append(self.get_frame())
ep_rewards.append(ep_reward)
self.__env = self.__env_train
return np.sum(ep_rewards) / num_episode, frames
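# Minimal usage sketch (hypothetical): drives one evaluation-style episode with
# random actions standing in for a trained Agent (an Agent would supply
# agent.run(state) instead). Assumes the vendored Atari wrappers are importable.
if __name__ == "__main__":
    import random
    device = torch.device("cpu")
    env = MyEnv(device)
    obs_queue: deque = deque(maxlen=5)
    observations, ep_reward, _ = env.reset()
    for obs in observations:
        obs_queue.append(obs)
    done = False
    while not done:
        state = MyEnv.make_state(obs_queue).to(device).float()
        action = random.randrange(MyEnv.get_action_dim())
        obs, reward, done = env.step(action)
        obs_queue.append(obs)
        ep_reward += reward
    print(f"episode reward: {ep_reward}")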
|
the-stack_0_19458
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import os
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..attack import Attack
DEFAULT_EPS_DICT_BY_NORM = {'Linf': .3, 'L2': 1., 'L1': 5.0}
class Square(Attack):
r"""
Square Attack in the paper 'Square Attack: a query-efficient black-box adversarial attack via random search'
[https://arxiv.org/abs/1912.00049]
[https://github.com/fra31/auto-attack]
Distance Measure : Linf, L2
Arguments:
model (nn.Module): model to attack.
norm (str): Lp-norm of the attack. ('Linf', 'L2' supported, DEFAULT: 'Linf')
        eps (float): maximum perturbation. (DEFAULT: None)
        n_queries (int): max number of queries (each restart). (DEFAULT: 5000)
        n_restarts (int): number of random restarts. (DEFAULT: 1)
        p_init (float): parameter to control size of squares. (DEFAULT: 0.8)
        loss (str): loss function optimized ('margin', 'ce' supported, DEFAULT: 'margin')
resc_schedule (bool): adapt schedule of p to n_queries (DEFAULT: True)
seed (int): random seed for the starting point. (DEFAULT: 0)
verbose (bool): print progress. (DEFAULT: False)
targeted (bool): targeted. (DEFAULT: False)
Shape:
- images: :math:`(N, C, H, W)` where `N = number of batches`, `C = number of channels`, `H = height` and `W = width`. It must have a range [0, 1].
- labels: :math:`(N)` where each value :math:`y_i` is :math:`0 \leq y_i \leq` `number of labels`.
- output: :math:`(N, C, H, W)`.
Examples::
        >>> attack = torchattacks.Square(model, norm='Linf', n_queries=5000, n_restarts=1, eps=None, p_init=.8, seed=0, verbose=False, targeted=False, loss='margin', resc_schedule=True)
>>> adv_images = attack(images, labels)
"""
def __init__(self, model, norm='Linf', eps=None, n_queries=5000, n_restarts=1,
p_init=.8, loss='margin', resc_schedule=True,
seed=0, verbose=False, targeted=False):
super(Square, self).__init__("Square", model)
self.norm = norm
self.n_queries = n_queries
self.eps = eps
self.p_init = p_init
self.n_restarts = n_restarts
self.seed = seed
self.verbose = verbose
self.targeted = targeted
self.loss = loss
self.rescale_schedule = resc_schedule
self._attack_mode = 'only_default'
def forward(self, images, labels):
r"""
Overridden.
"""
images = images.clone().detach().to(self.device)
labels = labels.clone().detach().to(self.device)
adv_images = self.perturb(images, labels)
return adv_images
def margin_and_loss(self, x, y):
"""
:param y: correct labels if untargeted else target labels
"""
logits = self.model(x)
xent = F.cross_entropy(logits, y, reduction='none')
u = torch.arange(x.shape[0])
y_corr = logits[u, y].clone()
logits[u, y] = -float('inf')
y_others = logits.max(dim=-1)[0]
if not self.targeted:
if self.loss == 'ce':
return y_corr - y_others, -1. * xent
elif self.loss == 'margin':
return y_corr - y_others, y_corr - y_others
else:
return y_others - y_corr, xent
def init_hyperparam(self, x):
assert self.norm in ['Linf', 'L2']
        assert self.eps is not None
assert self.loss in ['ce', 'margin']
if self.device is None:
self.device = x.device
self.orig_dim = list(x.shape[1:])
self.ndims = len(self.orig_dim)
if self.seed is None:
self.seed = time.time()
def random_target_classes(self, y_pred, n_classes):
y = torch.zeros_like(y_pred)
for counter in range(y_pred.shape[0]):
l = list(range(n_classes))
l.remove(y_pred[counter])
t = self.random_int(0, len(l))
y[counter] = l[t]
return y.long().to(self.device)
def check_shape(self, x):
return x if len(x.shape) == (self.ndims + 1) else x.unsqueeze(0)
def random_choice(self, shape):
t = 2 * torch.rand(shape).to(self.device) - 1
return torch.sign(t)
def random_int(self, low=0, high=1, shape=[1]):
t = low + (high - low) * torch.rand(shape).to(self.device)
return t.long()
def normalize(self, x):
if self.norm == 'Linf':
t = x.abs().view(x.shape[0], -1).max(1)[0]
return x / (t.view(-1, *([1] * self.ndims)) + 1e-12)
elif self.norm == 'L2':
t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
return x / (t.view(-1, *([1] * self.ndims)) + 1e-12)
def lp_norm(self, x):
if self.norm == 'L2':
t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
return t.view(-1, *([1] * self.ndims))
def eta_rectangles(self, x, y):
delta = torch.zeros([x, y]).to(self.device)
x_c, y_c = x // 2 + 1, y // 2 + 1
counter2 = [x_c - 1, y_c - 1]
for counter in range(0, max(x_c, y_c)):
delta[max(counter2[0], 0):min(counter2[0] + (2*counter + 1), x),
max(0, counter2[1]):min(counter2[1] + (2*counter + 1), y)
] += 1.0/(torch.Tensor([counter + 1]).view(1, 1).to(
self.device) ** 2)
counter2[0] -= 1
counter2[1] -= 1
delta /= (delta ** 2).sum(dim=(0,1), keepdim=True).sqrt()
return delta
def eta(self, s):
delta = torch.zeros([s, s]).to(self.device)
delta[:s // 2] = self.eta_rectangles(s // 2, s)
delta[s // 2:] = -1. * self.eta_rectangles(s - s // 2, s)
delta /= (delta ** 2).sum(dim=(0, 1), keepdim=True).sqrt()
if torch.rand([1]) > 0.5:
delta = delta.permute([1, 0])
return delta
def p_selection(self, it):
""" schedule to decrease the parameter p """
if self.rescale_schedule:
it = int(it / self.n_queries * 10000)
if 10 < it <= 50:
p = self.p_init / 2
elif 50 < it <= 200:
p = self.p_init / 4
elif 200 < it <= 500:
p = self.p_init / 8
elif 500 < it <= 1000:
p = self.p_init / 16
elif 1000 < it <= 2000:
p = self.p_init / 32
elif 2000 < it <= 4000:
p = self.p_init / 64
elif 4000 < it <= 6000:
p = self.p_init / 128
elif 6000 < it <= 8000:
p = self.p_init / 256
elif 8000 < it:
p = self.p_init / 512
else:
p = self.p_init
return p
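    # Worked example (illustrative): with the defaults p_init=0.8 and
    # n_queries=5000, iteration it=100 rescales to int(100/5000*10000)=200,
    # which lands in the 50<it<=200 bracket, so p=0.8/4=0.2; the square side
    # used below is then s=max(round(sqrt(0.2*n_features/c)), 1).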
def attack_single_run(self, x, y):
with torch.no_grad():
adv = x.clone()
c, h, w = x.shape[1:]
n_features = c * h * w
n_ex_total = x.shape[0]
if self.norm == 'Linf':
x_best = torch.clamp(x + self.eps * self.random_choice(
[x.shape[0], c, 1, w]), 0., 1.)
margin_min, loss_min = self.margin_and_loss(x_best, y)
n_queries = torch.ones(x.shape[0]).to(self.device)
s_init = int(math.sqrt(self.p_init * n_features / c))
for i_iter in range(self.n_queries):
idx_to_fool = (margin_min > 0.0).nonzero().squeeze()
x_curr = self.check_shape(x[idx_to_fool])
x_best_curr = self.check_shape(x_best[idx_to_fool])
y_curr = y[idx_to_fool]
if len(y_curr.shape) == 0:
y_curr = y_curr.unsqueeze(0)
margin_min_curr = margin_min[idx_to_fool]
loss_min_curr = loss_min[idx_to_fool]
p = self.p_selection(i_iter)
s = max(int(round(math.sqrt(p * n_features / c))), 1)
vh = self.random_int(0, h - s)
vw = self.random_int(0, w - s)
new_deltas = torch.zeros([c, h, w]).to(self.device)
new_deltas[:, vh:vh + s, vw:vw + s
] = 2. * self.eps * self.random_choice([c, 1, 1])
x_new = x_best_curr + new_deltas
x_new = torch.min(torch.max(x_new, x_curr - self.eps),
x_curr + self.eps)
x_new = torch.clamp(x_new, 0., 1.)
x_new = self.check_shape(x_new)
margin, loss = self.margin_and_loss(x_new, y_curr)
# update loss if new loss is better
idx_improved = (loss < loss_min_curr).float()
loss_min[idx_to_fool] = idx_improved * loss + (
1. - idx_improved) * loss_min_curr
# update margin and x_best if new loss is better
# or misclassification
idx_miscl = (margin <= 0.).float()
idx_improved = torch.max(idx_improved, idx_miscl)
margin_min[idx_to_fool] = idx_improved * margin + (
1. - idx_improved) * margin_min_curr
idx_improved = idx_improved.reshape([-1,
*[1]*len(x.shape[:-1])])
x_best[idx_to_fool] = idx_improved * x_new + (
1. - idx_improved) * x_best_curr
n_queries[idx_to_fool] += 1.
ind_succ = (margin_min <= 0.).nonzero().squeeze()
if self.verbose and ind_succ.numel() != 0:
print('{}'.format(i_iter + 1),
'- success rate={}/{} ({:.2%})'.format(
ind_succ.numel(), n_ex_total,
float(ind_succ.numel()) / n_ex_total),
'- avg # queries={:.1f}'.format(
n_queries[ind_succ].mean().item()),
'- med # queries={:.1f}'.format(
n_queries[ind_succ].median().item()),
'- loss={:.3f}'.format(loss_min.mean()))
if ind_succ.numel() == n_ex_total:
break
elif self.norm == 'L2':
delta_init = torch.zeros_like(x)
s = h // 5
sp_init = (h - s * 5) // 2
vh = sp_init + 0
for _ in range(h // s):
vw = sp_init + 0
for _ in range(w // s):
delta_init[:, :, vh:vh + s, vw:vw + s] += self.eta(
s).view(1, 1, s, s) * self.random_choice(
[x.shape[0], c, 1, 1])
vw += s
vh += s
x_best = torch.clamp(x + self.normalize(delta_init
) * self.eps, 0., 1.)
margin_min, loss_min = self.margin_and_loss(x_best, y)
n_queries = torch.ones(x.shape[0]).to(self.device)
s_init = int(math.sqrt(self.p_init * n_features / c))
for i_iter in range(self.n_queries):
idx_to_fool = (margin_min > 0.0).nonzero().squeeze()
x_curr = self.check_shape(x[idx_to_fool])
x_best_curr = self.check_shape(x_best[idx_to_fool])
y_curr = y[idx_to_fool]
if len(y_curr.shape) == 0:
y_curr = y_curr.unsqueeze(0)
margin_min_curr = margin_min[idx_to_fool]
loss_min_curr = loss_min[idx_to_fool]
delta_curr = x_best_curr - x_curr
p = self.p_selection(i_iter)
s = max(int(round(math.sqrt(p * n_features / c))), 3)
if s % 2 == 0:
s += 1
vh = self.random_int(0, h - s)
vw = self.random_int(0, w - s)
new_deltas_mask = torch.zeros_like(x_curr)
new_deltas_mask[:, :, vh:vh + s, vw:vw + s] = 1.0
norms_window_1 = (delta_curr[:, :, vh:vh + s, vw:vw + s
] ** 2).sum(dim=(-2, -1), keepdim=True).sqrt()
vh2 = self.random_int(0, h - s)
vw2 = self.random_int(0, w - s)
new_deltas_mask_2 = torch.zeros_like(x_curr)
new_deltas_mask_2[:, :, vh2:vh2 + s, vw2:vw2 + s] = 1.
norms_image = self.lp_norm(x_best_curr - x_curr)
mask_image = torch.max(new_deltas_mask, new_deltas_mask_2)
norms_windows = self.lp_norm(delta_curr * mask_image)
new_deltas = torch.ones([x_curr.shape[0], c, s, s]
).to(self.device)
new_deltas *= (self.eta(s).view(1, 1, s, s) *
self.random_choice([x_curr.shape[0], c, 1, 1]))
old_deltas = delta_curr[:, :, vh:vh + s, vw:vw + s] / (
1e-12 + norms_window_1)
new_deltas += old_deltas
new_deltas = new_deltas / (1e-12 + (new_deltas ** 2).sum(
dim=(-2, -1), keepdim=True).sqrt()) * (torch.max(
(self.eps * torch.ones_like(new_deltas)) ** 2 -
norms_image ** 2, torch.zeros_like(new_deltas)) /
c + norms_windows ** 2).sqrt()
delta_curr[:, :, vh2:vh2 + s, vw2:vw2 + s] = 0.
delta_curr[:, :, vh:vh + s, vw:vw + s] = new_deltas + 0
x_new = torch.clamp(x_curr + self.normalize(delta_curr
) * self.eps, 0. ,1.)
x_new = self.check_shape(x_new)
norms_image = self.lp_norm(x_new - x_curr)
margin, loss = self.margin_and_loss(x_new, y_curr)
# update loss if new loss is better
idx_improved = (loss < loss_min_curr).float()
loss_min[idx_to_fool] = idx_improved * loss + (
1. - idx_improved) * loss_min_curr
# update margin and x_best if new loss is better
# or misclassification
idx_miscl = (margin <= 0.).float()
idx_improved = torch.max(idx_improved, idx_miscl)
margin_min[idx_to_fool] = idx_improved * margin + (
1. - idx_improved) * margin_min_curr
idx_improved = idx_improved.reshape([-1,
*[1]*len(x.shape[:-1])])
x_best[idx_to_fool] = idx_improved * x_new + (
1. - idx_improved) * x_best_curr
n_queries[idx_to_fool] += 1.
ind_succ = (margin_min <= 0.).nonzero().squeeze()
if self.verbose and ind_succ.numel() != 0:
print('{}'.format(i_iter + 1),
'- success rate={}/{} ({:.2%})'.format(
ind_succ.numel(), n_ex_total, float(
ind_succ.numel()) / n_ex_total),
'- avg # queries={:.1f}'.format(
n_queries[ind_succ].mean().item()),
'- med # queries={:.1f}'.format(
n_queries[ind_succ].median().item()),
'- loss={:.3f}'.format(loss_min.mean()))
assert (x_new != x_new).sum() == 0
assert (x_best != x_best).sum() == 0
if ind_succ.numel() == n_ex_total:
break
return n_queries, x_best
def perturb(self, x, y=None):
"""
:param x: clean images
:param y: untargeted attack -> clean labels,
if None we use the predicted labels
targeted attack -> target labels, if None random classes,
different from the predicted ones, are sampled
"""
self.init_hyperparam(x)
adv = x.clone()
if y is None:
if not self.targeted:
with torch.no_grad():
output = self.model(x)
y_pred = output.max(1)[1]
y = y_pred.detach().clone().long().to(self.device)
else:
with torch.no_grad():
output = self.model(x)
n_classes = output.shape[-1]
y_pred = output.max(1)[1]
y = self.random_target_classes(y_pred, n_classes)
else:
y = y.detach().clone().long().to(self.device)
if not self.targeted:
acc = self.model(x).max(1)[1] == y
else:
acc = self.model(x).max(1)[1] != y
startt = time.time()
torch.random.manual_seed(self.seed)
torch.cuda.random.manual_seed(self.seed)
for counter in range(self.n_restarts):
ind_to_fool = acc.nonzero().squeeze()
if len(ind_to_fool.shape) == 0:
ind_to_fool = ind_to_fool.unsqueeze(0)
if ind_to_fool.numel() != 0:
x_to_fool = x[ind_to_fool].clone()
y_to_fool = y[ind_to_fool].clone()
_, adv_curr = self.attack_single_run(x_to_fool, y_to_fool)
output_curr = self.model(adv_curr)
if not self.targeted:
acc_curr = output_curr.max(1)[1] == y_to_fool
else:
acc_curr = output_curr.max(1)[1] != y_to_fool
ind_curr = (acc_curr == 0).nonzero().squeeze()
acc[ind_to_fool[ind_curr]] = 0
adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone()
if self.verbose:
print('restart {} - robust accuracy: {:.2%}'.format(
counter, acc.float().mean()),
'- cum. time: {:.1f} s'.format(
time.time() - startt))
return adv
|
the-stack_0_19460
|
""" Userbot module containing hash and encode/decode commands. """
from subprocess import PIPE
from subprocess import run as runapp
import pybase64
from userbot import CMD_HELP
from userbot.events import register, errors_handler
@register(outgoing=True, pattern="^.hash (.*)")
@errors_handler
async def gethash(hash_q):
""" For .hash command, find the md5, sha1, sha256, sha512 of the string. """
hashtxt_ = hash_q.pattern_match.group(1)
hashtxt = open("hashdis.txt", "w+")
hashtxt.write(hashtxt_)
hashtxt.close()
md5 = runapp(["md5sum", "hashdis.txt"], stdout=PIPE)
md5 = md5.stdout.decode()
sha1 = runapp(["sha1sum", "hashdis.txt"], stdout=PIPE)
sha1 = sha1.stdout.decode()
sha256 = runapp(["sha256sum", "hashdis.txt"], stdout=PIPE)
sha256 = sha256.stdout.decode()
sha512 = runapp(["sha512sum", "hashdis.txt"], stdout=PIPE)
runapp(["rm", "hashdis.txt"], stdout=PIPE)
sha512 = sha512.stdout.decode()
ans = ("Text: `" + hashtxt_ + "`\nMD5: `" + md5 + "`SHA1: `" + sha1 +
"`SHA256: `" + sha256 + "`SHA512: `" + sha512[:-1] + "`")
if len(ans) > 4096:
hashfile = open("hashes.txt", "w+")
hashfile.write(ans)
hashfile.close()
await hash_q.client.send_file(
hash_q.chat_id,
"hashes.txt",
reply_to=hash_q.id,
caption="`It's too big, sending a text file instead. `")
runapp(["rm", "hashes.txt"], stdout=PIPE)
else:
await hash_q.reply(ans)
@register(outgoing=True, pattern="^.hbase (en|de) (.*)")
@errors_handler
async def endecrypt(query):
""" For .base64 command, find the base64 encoding of the given string. """
if query.pattern_match.group(1) == "en":
lething = str(
pybase64.b64encode(bytes(query.pattern_match.group(2),
"utf-8")))[2:]
await query.reply("Shhh! It's Encoded: `" + lething[:-1] + "`")
else:
lething = str(
pybase64.b64decode(bytes(query.pattern_match.group(2), "utf-8"),
validate=True))[2:]
await query.reply("Decoded: `" + lething[:-1] + "`")
CMD_HELP.update({"base64": "Find the base64 encoding of the given string"})
CMD_HELP.update({
"hash":
"Find the md5, sha1, sha256, sha512 of the string when written into a txt file."
})
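# Usage sketch (illustrative): sending ".hash hello" replies with the md5, sha1,
# sha256 and sha512 of "hello"; ".hbase en hello" replies with the base64
# encoding, and ".hbase de aGVsbG8=" decodes it back to "hello".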
|
the-stack_0_19462
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb
data = np.genfromtxt(open("colours.txt","rb"),delimiter="\t")
hue = data[:,0]
count = data[:,1]
hsv = np.dstack((hue/180.0,np.ones(180),np.ones(180)))
rgb = hsv_to_rgb(hsv)
colours = ['#%02x%02x%02x' % tuple(int(v * 255) for v in c) for c in rgb[0]]
plt.bar(hue,count,color=colours,edgecolor=colours)
plt.xlim(0,180)
plt.xlabel('Hue')
plt.ylabel('Total Pixel Count')
plt.show()
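# Expected input (assumption inferred from the parsing above): colours.txt is a
# tab-separated file with 180 rows, one per OpenCV-style hue bin in [0, 180),
# where column 0 is the hue value and column 1 is the total pixel count,
# e.g. a row like "37\t1523" means hue 37 was seen in 1523 pixels.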
|
the-stack_0_19463
|
import typing
import inspect
try:
from pydantic import create_model
from pydantic import BaseModel
except ImportError:
def create_model(*args, **kwargs): # type: ignore
raise NotImplementedError()
BaseModel = None # type: ignore
Callable = typing.TypeVar("Callable", bound=typing.Callable)
def set_type_model(func: Callable) -> Callable:
"""
try generate request body model from type hint and default value
"""
sig = inspect.signature(func)
field_definitions = {}
for name, parameter in sig.parameters.items():
if (
parameter.annotation == parameter.empty
and parameter.default == parameter.empty
):
# raise ValueError(
# f"You must specify the type for the parameter {func.__name__}:{name}."
# )
return func # Maybe the type hint should be mandatory? I'm not sure.
if parameter.annotation == parameter.empty:
field_definitions[name] = parameter.default
elif parameter.default == parameter.empty:
field_definitions[name] = (parameter.annotation, ...)
else:
field_definitions[name] = (parameter.annotation, parameter.default)
if field_definitions:
body_model = create_model(func.__name__, **field_definitions)
setattr(func, "__body_model__", body_model)
return func
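# Minimal sketch of the effect (hypothetical example function; needs pydantic
# installed, v1-style .dict() shown):
#
#     @set_type_model
#     def create_user(name: str, age: int = 18):
#         ...
#
#     # create_user.__body_model__ is a pydantic model with a required
#     # "name: str" field and an optional "age: int = 18" field, so
#     # create_user.__body_model__(name="alice").dict() == {"name": "alice", "age": 18}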
TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<title>OpenAPI power by rpc.py</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
* {
font-family: Menlo, Consolas, "Source Code Pro", Inconsolata, Monaco, "Courier New",
'Segoe UI', Helvetica, Arial, sans-serif !important;
}
h1,
h2 {
font-family: 'Segoe UI', Helvetica, Arial, sans-serif !important;
}
body {
margin: 0;
padding: 0;
}
</style>
</head>
<body>
<redoc spec-url='get-openapi-docs'></redoc>
<script src="https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js"> </script>
</body>
</html>
"""
|
the-stack_0_19464
|
from abc import ABC
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError
from pathlib import Path
from typing import Optional, List
PathList = Optional[List[Path]]
class PathCommand(Command, ABC):
def ensure_path_list(self, option):
val = getattr(self, option)
if val is None:
return
if not isinstance(val, list) or not all(isinstance(o, Path) for o in val):
self.ensure_string_list(option)
val = [Path(s) for s in getattr(self, option)]
not_exist = [p for p in val if not p.exists()]
if any(not_exist):
raise DistutilsOptionError(f"Paths {', '.join(str(o.absolute()) for o in not_exist)} don't exist.")
setattr(self, option, val)
def ensure_dir_list(self, option):
self.ensure_path_list(option)
val = getattr(self, option)
if val is None:
return
not_dir = [p for p in val if not p.is_dir()]
if any(not_dir):
raise DistutilsOptionError(f"Paths {', '.join(str(o.absolute()) for o in not_dir)} are not directories.")
|
the-stack_0_19465
|
""" pretrain a word2vec on the corpus"""
import argparse
import json
import logging
import os
from os.path import join, exists
from time import time
from datetime import timedelta
from cytoolz import concatv
import gensim
from utils import count_data
try:
DATA_DIR = os.environ['DATA']
except KeyError:
print('please use environment variable to specify data directories')
class Sentences(object):
""" needed for gensim word2vec training"""
def __init__(self):
self._path = join(DATA_DIR, 'train')
self._n_data = count_data(self._path)
def __iter__(self):
for i in range(self._n_data):
with open(join(self._path, '{}.json'.format(i))) as f:
data = json.loads(f.read())
for s in concatv(data['article'], data['abstract']):
yield ['<s>'] + s.lower().split() + [r'<\s>']
def main(args):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
start = time()
save_dir = args.path
if not exists(save_dir):
os.makedirs(save_dir)
sentences = Sentences()
model = gensim.models.Word2Vec(
size=args.dim, min_count=5, workers=16, sg=1)
model.build_vocab(sentences)
print('vocab built in {}'.format(timedelta(seconds=time()-start)))
model.train(sentences,
total_examples=model.corpus_count, epochs=model.iter)
model.save(join(save_dir, 'word2vec.{}d.{}k.bin'.format(
args.dim, len(model.wv.vocab)//1000)))
model.wv.save_word2vec_format(join(
save_dir,
'word2vec.{}d.{}k.w2v'.format(args.dim, len(model.wv.vocab)//1000)
))
print('word2vec trained in {}'.format(timedelta(seconds=time()-start)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='train word2vec embedding used for model initialization'
)
parser.add_argument('--path', required=True, help='root of the model')
parser.add_argument('--dim', action='store', type=int, default=128)
args = parser.parse_args()
main(args)
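# Usage sketch (illustrative; the script filename and resulting vocab size are
# placeholders):
#   DATA=/path/to/data python train_word2vec.py --path=models --dim=128
# The saved model/vectors can be reloaded with gensim afterwards, e.g.
#   model = gensim.models.Word2Vec.load('models/word2vec.128d.30k.bin')
#   kv = gensim.models.KeyedVectors.load_word2vec_format('models/word2vec.128d.30k.w2v')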
|
the-stack_0_19466
|
from typing import Dict, List, Optional, Sequence, Any, Union
from functools import partial
import numpy as np
import logging
log = logging.getLogger(__name__)
import time
import matplotlib.pyplot as plt
import zhinst.utils
import qcodes as qc
from qcodes.instrument.base import Instrument
import qcodes.utils.validators as vals
from qcodes.instrument.parameter import ParameterWithSetpoints, Parameter
from qcodes.dataset.measurements import Measurement, res_type, DataSaver
from qcodes.instrument.specialized_parameters import ElapsedTimeParameter
class HF2LI(Instrument):
"""Qcodes driver for Zurich Instruments HF2LI lockin amplifier.
This driver is meant to emulate a single-channel lockin amplifier,
so one instance has a single demodulator, a single sigout channel,
and multiple auxout channels (for X, Y, R, Theta, or an arbitrary manual value).
Multiple instances can be run simultaneously as independent lockin amplifiers.
This instrument has a great deal of additional functionality that is
not currently supported by this driver.
Args:
name: Name of instrument.
device: Device name, e.g. "dev204", used to create zhinst API session.
demod: Index of the demodulator to use.
sigout: Index of the sigout channel to use as excitation source.
auxouts: Dict of the form {output: index},
where output is a key of HF2LI.OUTPUT_MAPPING, for example {"X": 0, "Y": 3}
to use the instrument as a lockin amplifier in X-Y mode with auxout channels 0 and 3.
num_sigout_mixer_channels: Number of mixer channels to enable on the sigouts. Default: 1.
"""
OUTPUT_MAPPING = {-1: 'manual', 0: 'X', 1: 'Y', 2: 'R', 3: 'Theta'}
def __init__(self, name: str, device: str, demod: int, sigout: int,
auxouts: Dict[str, int], num_sigout_mixer_channels: int=1, **kwargs) -> None:
super().__init__(name, **kwargs)
instr = zhinst.utils.create_api_session(device, 1, required_devtype='HF2LI') #initializes the instrument
self.daq, self.dev_id, self.props = instr
self.demod = demod
self.sigout = sigout
self.auxouts = auxouts
log.info(f'Successfully connected to {name}.')
for ch in self.auxouts:
self.add_parameter(
name=ch,
label=f'Scaled {ch} output value',
unit='V',
get_cmd=lambda channel=ch: self._get_output_value(channel),
get_parser=float,
docstring=f'Scaled and demodulated {ch} value.'
)
self.add_parameter(
name=f'gain_{ch}',
label=f'{ch} output gain',
unit='V/Vrms',
get_cmd=lambda channel=ch: self._get_gain(channel),
get_parser=float,
set_cmd=lambda gain, channel=ch: self._set_gain(gain, channel),
vals=vals.Numbers(),
docstring=f'Gain factor for {ch}.'
)
self.add_parameter(
name=f'offset_{ch}',
label=f'{ch} output offset',
unit='V',
get_cmd=lambda channel=ch: self._get_offset(channel),
get_parser=float,
set_cmd=lambda offset, channel=ch: self._set_offset(offset, channel),
vals=vals.Numbers(-2560, 2560),
docstring=f'Manual offset for {ch}, applied after scaling.'
)
self.add_parameter(
name=f'output_{ch}',
                label=f'{ch} output select',
get_cmd=lambda channel=ch: self._get_output_select(channel),
get_parser=str
)
# Making output select only gettable, since we are
# explicitly mapping auxouts to X, Y, R, Theta, etc.
self._set_output_select(ch)
self.add_parameter(
name='phase',
label='Phase',
unit='deg',
get_cmd=self._get_phase,
get_parser=float,
set_cmd=self._set_phase,
vals=vals.Numbers(-180,180)
)
self.add_parameter(
name='time_constant',
label='Time constant',
unit='s',
get_cmd=self._get_time_constant,
get_parser=float,
set_cmd=self._set_time_constant,
vals=vals.Numbers()
)
self.add_parameter(
name='frequency',
label='Frequency',
unit='Hz',
get_cmd=self._get_frequency,
set_cmd=self._set_frequency,
get_parser=float
)
self.add_parameter(
name='sigout_range',
label='Signal output range',
unit='V',
get_cmd=self._get_sigout_range,
get_parser=float,
set_cmd=self._set_sigout_range,
vals=vals.Enum(0.01, 0.1, 1, 10)
)
self.add_parameter(
name='sigout_offset',
label='Signal output offset',
unit='V',
get_cmd=self._get_sigout_offset,
get_parser=float,
set_cmd=self._set_sigout_offset,
vals=vals.Numbers(-1, 1),
docstring='Multiply by sigout_range to get actual offset voltage.'
)
single_values = (('x', 'Demodulated x', 'V'),
('y', 'Demodulated y', 'V') )
        for name, label, unit in single_values:
self.add_parameter( f'demod{self.demod}_{name}',
unit=unit,
label=label,
get_cmd = partial( self._single_get, name )
)
self.add_parameter(
name=f'demod{self.demod}_theta',
            label=f'Demodulated theta {self.demod}',
unit='deg',
get_cmd= self._get_theta,
get_parser=float
)
#### Parameters for sweeping
sweeper = self.daq.sweep()
sweeper.set("device", self.dev_id)
### different ones?!
sweeper.set("gridnode", f"oscs/{self.sigout}/freq") ### Set the sweeping parameter
self.sweeper = sweeper
# do an initial trigger for snapshot
sweeper_params = ( ( 'samplecount', '', 'Points' ),
( 'start', 'Hz', 'Start frequency' ),
('stop', 'Hz', 'Stop frequency' ),
('xmapping', '', 'X scale as log or linear'),
('bandwidthoverlap', '', 'Bandwidth Overlap') )
for namex, unit, label in sweeper_params :
self.add_parameter( f'sweeper_{namex}',
unit=unit,
label=label,
set_cmd = partial( self.sweeper.set, namex ),
get_cmd = partial( self._sweeper_get, namex )
)
self.add_parameter( 'trace_frequency',
unit='Hz',
label= 'Frequency',
snapshot_value=False,
get_cmd= lambda : self.sweeper_samples['frequency'],
vals=vals.Arrays(shape=(self.sweeper_samplecount,))
)
self.add_parameter( 'averaging',
unit='npts',
label= 'Averaging',
set_cmd = partial( self.sweeper.set, 'averaging/sample' ),
get_cmd = partial( self._sweeper_get, 'averaging/sample' )
)
self.auto_trigger = False
for p, units in ( ('r', 'dB'), ('x','dB'), ('y','dB'),('phase', 'rad') ) :
self.add_parameter( f'trace_{p}',
unit= units,
label= p,
parameter_class = ParameterWithSetpoints,
setpoints = ( self.trace_frequency,),
get_cmd= partial(self._get_sweep_param, p ),
vals=vals.Arrays(shape=(self.sweeper_samplecount,))
)
for i in range(6, num_sigout_mixer_channels):
self.add_parameter(
name=f'sigout_enable{i}',
label=f'Signal output mixer {i} enable',
get_cmd=lambda mixer_channel=i: self._get_sigout_enable(mixer_channel),
get_parser=float,
set_cmd=lambda amp, mixer_channel=i: self._set_sigout_enable(mixer_channel, amp),
vals=vals.Enum(0,1,2,3),
docstring="""\
0: Channel off (unconditionally)
1: Channel on (unconditionally)
2: Channel off (will be turned off on next change of sign from negative to positive)
3: Channel on (will be turned on on next change of sign from negative to positive)
"""
)
self.add_parameter(
name=f'sigout_amplitude{i}',
label=f'Signal output mixer {i} amplitude',
unit='Gain',
get_cmd=lambda mixer_channel=i: self._get_sigout_amplitude(mixer_channel),
get_parser=float,
set_cmd=lambda amp, mixer_channel=i: self._set_sigout_amplitude(mixer_channel, amp),
vals=vals.Numbers(-1, 1),
docstring='Multiply by sigout_range to get actual output voltage.'
)
def _sweeper_get( self, name ) :
""" wrap zi sweeper.get
"""
return self.sweeper.get( name )[name][0]
def _single_get(self, name):
path = f'/{self.dev_id}/demods/{self.demod}/sample/'
return self.daq.getSample(path)[name][0]
def _get_sweep_param(self, param, fr=True):
if self.auto_trigger :
self.trigger_sweep()
        if param == 'phase':
            values = self.sweeper_samples[param]
        else:
# detect which node we are sweeping with
amplitude = self._get_sigout_amplitude(self.sigout+6) / ( 2 * np.sqrt(2) ) # normalization factor for vpp 2x fudge
values = 20 * np.log10( self.sweeper_samples[param]/amplitude )
return values
def _get_theta(self):
path = f'/{self.dev_id}/demods/{self.demod}/sample/'
theta = np.arctan(self.daq.getSample(path)['y']/self.daq.getSample(path)['x'])*180/np.pi
return theta
def trigger_sweep(self):
sweeper = self.daq.sweep()
#sweeper = self.sweeper
        sweeper.set('scan', 0) ### Sequential sweep
sweeper.set("bandwidthcontrol", 2) ### Bandwidth control: Auto
sweeper.set('maxbandwidth', 100) ### Max demodulation bandwidth
path = f"/{self.dev_id}/demods/{self.demod}/sample"
sweeper.set("start", self.sweeper_start())
sweeper.set("stop", self.sweeper_stop())
sweeper.set("samplecount", self.sweeper_samplecount())
#sweeper.set()
sweeper.subscribe(path)
sweeper.execute()
### Wait until measurement is done
start_t = time.time()
timeout = 60 # [s]
while not sweeper.finished(): # Wait until the sweep is complete, with timeout.
time.sleep(1)
progress = sweeper.progress()
if (time.time() - start_t) > timeout:
print("\nSweep still not finished, forcing finish...")
sweeper.finish()
print("")
data = sweeper.read(True)
self.sweeper_samples = data[path][0][0]
sweeper.unsubscribe(path) ### Unsubscribe from the signal path
def _get_data(self, poll_length=0.1) -> float:
path = f'/{self.dev_id}/demods/{self.demod}/sample'
self.daq.unsubscribe("*")
poll_timeout = 500 # [ms]
poll_flags = 0
poll_return_flat_dict = True
self.daq.sync()
self.daq.subscribe(path)
data = self.daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict)
self.daq.unsubscribe("*")
return data
def readout(self):
path = f'/{self.dev_id}/demods/{self.demod}/sample'
data = self._get_data()
sample = data[path]
X = sample['x']
Y = sample['y']
clockbase = float(self.daq.getInt(f'/{self.dev_id}/clockbase'))
t = (sample['timestamp'] - sample['timestamp'][0]) / clockbase
return (X, Y, t)
def _get_phase(self) -> float:
path = f'/{self.dev_id}/demods/{self.demod}/phaseshift/'
return self.daq.getDouble(path)
def _set_phase(self, phase: float) -> None:
path = f'/{self.dev_id}/demods/{self.demod}/phaseshift/'
self.daq.setDouble(path, phase)
def _get_gain(self, channel: str) -> float:
path = f'/{self.dev_id}/auxouts/{self.auxouts[channel]}/scale/'
return self.daq.getDouble(path)
def _set_gain(self, gain: float, channel: str) -> None:
path = f'/{self.dev_id}/auxouts/{self.auxouts[channel]}/scale/'
self.daq.setDouble(path, gain)
def _get_offset(self, channel: str) -> float:
path = f'/{self.dev_id}/auxouts/{self.auxouts[channel]}/offset/'
return self.daq.getDouble(path)
def _set_offset(self, offset: float, channel: str) -> None:
path = f'/{self.dev_id}/auxouts/{self.auxouts[channel]}/offset/'
self.daq.setDouble(path, offset)
def _get_output_value(self, channel: str) -> float:
path = f'/{self.dev_id}/auxouts/{self.auxouts[channel]}/value/'
return self.daq.getDouble(path)
def _get_output_select(self, channel: str) -> str:
path = f'/{self.dev_id}/auxouts/{self.auxouts[channel]}/outputselect/'
idx = self.daq.getInt(path)
return self.OUTPUT_MAPPING[idx]
def _set_output_select(self, channel: str) -> None:
path = f'/{self.dev_id}/auxouts/{self.auxouts[channel]}/outputselect/'
keys = list(self.OUTPUT_MAPPING.keys())
idx = keys[list(self.OUTPUT_MAPPING.values()).index(channel)]
self.daq.setInt(path, idx)
def _get_time_constant(self) -> float:
path = f'/{self.dev_id}/demods/{self.demod}/timeconstant/'
return self.daq.getDouble(path)
def _set_time_constant(self, tc: float) -> None:
path = f'/{self.dev_id}/demods/{self.demod}/timeconstant/'
self.daq.setDouble(path, tc)
def _get_sigout_range(self) -> float:
path = f'/{self.dev_id}/sigouts/{self.sigout}/range/'
return self.daq.getDouble(path)
def _set_sigout_range(self, rng: float) -> None:
path = f'/{self.dev_id}/sigouts/{self.sigout}/range/'
self.daq.setDouble(path, rng)
    def _get_sigout_offset(self) -> float:
        path = f'/{self.dev_id}/sigouts/{self.sigout}/offset/'
        rng = self._get_sigout_range()
        return self.daq.getDouble(path)*rng
    def _set_sigout_offset(self, offset: float) -> None:
        path = f'/{self.dev_id}/sigouts/{self.sigout}/offset/'
        rng = self._get_sigout_range()
        return self.daq.setDouble(path, offset/rng)
    def _get_sigout_amplitude(self, mixer_channel: int) -> float:
        path = f'/{self.dev_id}/sigouts/{self.sigout}/amplitudes/{mixer_channel}/'
        rng = self._get_sigout_range()
        return self.daq.getDouble(path)*rng
    def _set_sigout_amplitude(self, mixer_channel: int, amp: float) -> None:
        path = f'/{self.dev_id}/sigouts/{self.sigout}/amplitudes/{mixer_channel}/'
        rng = self._get_sigout_range()
        return self.daq.setDouble(path, amp/rng)
def _get_sigout_enable(self, mixer_channel: int) -> int:
path = f'/{self.dev_id}/sigouts/{self.sigout}/enables/{mixer_channel}/'
return self.daq.getInt(path)
def _set_sigout_enable(self, mixer_channel: int, val: int) -> None:
path = f'/{self.dev_id}/sigouts/{self.sigout}/enables/{mixer_channel}/'
self.daq.setInt(path, val)
def _get_frequency(self) -> float:
path = f'/{self.dev_id}/demods/{self.demod}/freq/'
return self.daq.getDouble(path)
def _set_frequency(self, freq) -> float:
osc_index = 0
return self.daq.set([["/%s/oscs/%d/freq" % (self.dev_id, osc_index), freq]])
def sample(self) -> dict:
path = f'/{self.dev_id}/demods/{self.demod}/sample/'
return self.daq.getSample(path)
"""
def trigger_sweep(self):
sweeper = self.daq.sweep()
# sweeper.set("device", self.dev_id)
# ### different ones?!
# sweeper.set("gridnode", f"oscs/{0}/freq") ### Set the sweeping parameter
sweeper = self.sweeper
# sweeper.set("start", self.sweeper_start() )
# sweeper.set("stop", self.sweeper_stop() )
# sweeper.set('samplecount', self.sweeper_samplecount() )
# sweeper.set("xmapping", 1) ### Logarithmic sweep
sweeper.set('scan', 0) ### Sequenctial sweep
sweeper.set("bandwidthcontrol", 2) ### Bandwidth control: Auto
sweeper.set('maxbandwidth', 100) ### Max demodulation bandwidth
#sweeper.set('bandwidthoverlap', 0) ### No bandwidth overlap
# ### Number of averaging for each sweep point
# sweeper.set('averaging/sample', 20)
path = f"/{self.dev_id}/demods/{self.demod}/sample"
sweeper.subscribe(path)
### Start measurement
sweeper.execute()
### Wait until measurement is done
start_t = time.time()
timeout = 60 # [s]
#print("Will measure", self.sweeper_samplecount.get_latest(), "sweep pointss...")
while not sweeper.finished(): # Wait until the sweep is complete, with timeout.
time.sleep(1)
progress = sweeper.progress()
#print(f"Sweep progress: {progress[0]:.2%}.", end="\n")
if (time.time() - start_t) > timeout:
print("\nSweep still not finished, forcing finish...")
sweeper.finish()
print("")
data = sweeper.read(True)
self.samples = data[path][0][0]
sweeper.unsubscribe(path) ### Unsubscribe from the signal path
# sweeper.finish() ### Finish the nodule
# sweeper.clear() ### Delete the module
#frequency = samples['frequency']
#amplitude = samples['r']
#phase = samples['phase']
"""
|
the-stack_0_19467
|
# Copyright 2020, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
try:
from unittest import mock
except ImportError:
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from octavia.common import constants as o_constants
from octavia.common import data_models as o_data_models
from a10_octavia.common.config_options import A10_SERVICE_GROUP_OPTS
from a10_octavia.common.data_models import VThunder
import a10_octavia.controller.worker.tasks.service_group_tasks as task
from a10_octavia.controller.worker.tasks import utils
from a10_octavia.tests.common import a10constants
from a10_octavia.tests.unit.base import BaseTaskTestCase
VTHUNDER = VThunder()
POOL = o_data_models.Pool(id=a10constants.MOCK_POOL_ID)
AXAPI_ARGS = {'service_group': utils.meta(POOL, 'service_group', {})}
class TestHandlerServiceGroupTasks(BaseTaskTestCase):
def setUp(self):
super(TestHandlerServiceGroupTasks, self).setUp()
        importlib.reload(task)
self.client_mock = mock.Mock()
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
self.conf.register_opts(A10_SERVICE_GROUP_OPTS,
group=a10constants.SERVICE_GROUP_CONF_SECTION)
def tearDown(self):
super(TestHandlerServiceGroupTasks, self).tearDown()
self.conf.reset()
def test_revert_pool_create_task(self):
mock_pool = task.PoolCreate()
mock_pool.axapi_client = self.client_mock
mock_pool.revert(POOL, VTHUNDER)
self.client_mock.slb.service_group.delete.assert_called_with(POOL.id)
def test_create_lb_algorithm_source_ip_hash_only(self):
mock_pool = task.PoolCreate()
mock_pool.axapi_client = self.client_mock
mock_pool.CONF = self.conf
pool = o_data_models.Pool(id=a10constants.MOCK_POOL_ID,
protocol=o_constants.PROTOCOL_HTTP,
lb_algorithm=o_constants.LB_ALGORITHM_SOURCE_IP)
mock_pool.execute(pool, VTHUNDER)
self.client_mock.slb.service_group.create.assert_called_with(
a10constants.MOCK_POOL_ID,
protocol=mock.ANY,
lb_method=mock_pool.axapi_client.slb.service_group.SOURCE_IP_HASH_ONLY,
service_group_templates=mock.ANY,
axapi_args=AXAPI_ARGS)
|
the-stack_0_19468
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2019 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "12/12/2017"
import unittest
import shutil
import tempfile
import numpy
import six
from silx.gui.utils.testutils import TestCaseQt
from silx.gui.utils.testutils import SignalListener
from ..TextFormatter import TextFormatter
import h5py
class TestTextFormatter(TestCaseQt):
def test_copy(self):
formatter = TextFormatter()
copy = TextFormatter(formatter=formatter)
self.assertIsNot(formatter, copy)
copy.setFloatFormat("%.3f")
self.assertEqual(formatter.integerFormat(), copy.integerFormat())
self.assertNotEqual(formatter.floatFormat(), copy.floatFormat())
self.assertEqual(formatter.useQuoteForText(), copy.useQuoteForText())
self.assertEqual(formatter.imaginaryUnit(), copy.imaginaryUnit())
def test_event(self):
listener = SignalListener()
formatter = TextFormatter()
formatter.formatChanged.connect(listener)
formatter.setFloatFormat("%.3f")
formatter.setIntegerFormat("%03i")
formatter.setUseQuoteForText(False)
formatter.setImaginaryUnit("z")
self.assertEqual(listener.callCount(), 4)
def test_int(self):
formatter = TextFormatter()
formatter.setIntegerFormat("%05i")
result = formatter.toString(512)
self.assertEqual(result, "00512")
def test_float(self):
formatter = TextFormatter()
formatter.setFloatFormat("%.3f")
result = formatter.toString(1.3)
self.assertEqual(result, "1.300")
def test_complex(self):
formatter = TextFormatter()
formatter.setFloatFormat("%.1f")
formatter.setImaginaryUnit("i")
result = formatter.toString(1.0 + 5j)
result = result.replace(" ", "")
self.assertEqual(result, "1.0+5.0i")
def test_string(self):
formatter = TextFormatter()
formatter.setIntegerFormat("%.1f")
formatter.setImaginaryUnit("z")
result = formatter.toString("toto")
self.assertEqual(result, '"toto"')
def test_numpy_void(self):
formatter = TextFormatter()
result = formatter.toString(numpy.void(b"\xFF"))
self.assertEqual(result, 'b"\\xFF"')
def test_char_cp1252(self):
# degree character in cp1252
formatter = TextFormatter()
result = formatter.toString(numpy.bytes_(b"\xB0"))
self.assertEqual(result, u'"\u00B0"')
class TestTextFormatterWithH5py(TestCaseQt):
@classmethod
def setUpClass(cls):
super(TestTextFormatterWithH5py, cls).setUpClass()
cls.tmpDirectory = tempfile.mkdtemp()
cls.h5File = h5py.File("%s/formatter.h5" % cls.tmpDirectory, mode="w")
cls.formatter = TextFormatter()
@classmethod
def tearDownClass(cls):
super(TestTextFormatterWithH5py, cls).tearDownClass()
cls.h5File.close()
cls.h5File = None
shutil.rmtree(cls.tmpDirectory)
def create_dataset(self, data, dtype=None):
testName = "%s" % self.id()
dataset = self.h5File.create_dataset(testName, data=data, dtype=dtype)
return dataset
def testAscii(self):
d = self.create_dataset(data=b"abc")
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, '"abc"')
def testUnicode(self):
d = self.create_dataset(data=u"i\u2661cookies")
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(len(result), 11)
self.assertEqual(result, u'"i\u2661cookies"')
def testBadAscii(self):
d = self.create_dataset(data=b"\xF0\x9F\x92\x94")
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, 'b"\\xF0\\x9F\\x92\\x94"')
def testVoid(self):
d = self.create_dataset(data=numpy.void(b"abc\xF0"))
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, 'b"\\x61\\x62\\x63\\xF0"')
def testEnum(self):
dtype = h5py.special_dtype(enum=('i', {"RED": 0, "GREEN": 1, "BLUE": 42}))
d = numpy.array(42, dtype=dtype)
d = self.create_dataset(data=d)
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, 'BLUE(42)')
def testRef(self):
dtype = h5py.special_dtype(ref=h5py.Reference)
d = numpy.array(self.h5File.ref, dtype=dtype)
d = self.create_dataset(data=d)
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, 'REF')
def testArrayAscii(self):
d = self.create_dataset(data=[b"abc"])
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, '["abc"]')
def testArrayUnicode(self):
dtype = h5py.special_dtype(vlen=six.text_type)
d = numpy.array([u"i\u2661cookies"], dtype=dtype)
d = self.create_dataset(data=d)
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(len(result), 13)
self.assertEqual(result, u'["i\u2661cookies"]')
def testArrayBadAscii(self):
d = self.create_dataset(data=[b"\xF0\x9F\x92\x94"])
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, '[b"\\xF0\\x9F\\x92\\x94"]')
def testArrayVoid(self):
d = self.create_dataset(data=numpy.void([b"abc\xF0"]))
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, '[b"\\x61\\x62\\x63\\xF0"]')
def testArrayEnum(self):
dtype = h5py.special_dtype(enum=('i', {"RED": 0, "GREEN": 1, "BLUE": 42}))
d = numpy.array([42, 1, 100], dtype=dtype)
d = self.create_dataset(data=d)
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, '[BLUE(42) GREEN(1) 100]')
def testArrayRef(self):
dtype = h5py.special_dtype(ref=h5py.Reference)
d = numpy.array([self.h5File.ref, None], dtype=dtype)
d = self.create_dataset(data=d)
result = self.formatter.toString(d[()], dtype=d.dtype)
self.assertEqual(result, '[REF NULL_REF]')
def suite():
loadTests = unittest.defaultTestLoader.loadTestsFromTestCase
test_suite = unittest.TestSuite()
test_suite.addTest(loadTests(TestTextFormatter))
test_suite.addTest(loadTests(TestTextFormatterWithH5py))
return test_suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
the-stack_0_19469
|
"""
The read4 API is already defined for you.
@param buf4, a list of characters
@return an integer
def read4(buf4):
# Below is an example of how the read4 API can be called.
file = File("abcdefghijk") # File is "abcdefghijk", initially file pointer (fp) points to 'a'
buf4 = [' '] * 4 # Create buffer with enough space to store characters
read4(buf4) # read4 returns 4. Now buf = ['a','b','c','d'], fp points to 'e'
read4(buf4) # read4 returns 4. Now buf = ['e','f','g','h'], fp points to 'i'
read4(buf4) # read4 returns 3. Now buf = ['i','j','k',...], fp points to end of file
"""
class Solution:
def read(self, buf, n):
"""
:type buf: Destination buffer (List[str])
:type n: Number of characters to read (int)
:rtype: The number of actual characters read (int)
"""
idx = 0
while n > 0:
# read file to buf4
buf4 = [""]*4
l = read4(buf4)
# if no more char in file, return
if not l:
return idx
# write buf4 into buf directly
for i in range(min(l, n)):
buf[idx] = buf4[i]
idx += 1
n -= 1
return idx
# Time: O(N)
# Space: O(1) extra (buf4 is a fixed 4-char buffer; buf is provided by the caller)
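#
# Worked example of the buffering above (file contents are hypothetical, as in
# the read4 docstring): with a file holding "abcdefghijk", read(buf, 6) copies
# the whole first chunk ['a','b','c','d'], then only 2 chars of the second
# chunk ['e','f','g','h'], and returns 6 with buf[:6] == ['a','b','c','d','e','f'].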
|
the-stack_0_19470
|
from trabant import *
from trabant.gameapi import waitload
from trabant.mesh import mesh_union
from trabant.objects import *
from trabant.objgen import createcapsule
from types import MethodType
body = r'''
/XX\
<XXXXXXXXX\
´XXXXXXXXX>
'''
template_tank = None
def _add_canon(orientation,gfx,phys):
canon = createcapsule(0.4, 6, 2, 4)[0]
gfx.vertices,gfx.indices = mesh_union(gfx.vertices, gfx.indices, canon.vertices, canon.indices, quat().rotate_y(pi/2), vec3(-4,-2,0))
return orientation,gfx,phys
def init_tank_obj():
global template_tank
preprocess_tank = process_chain(orthoscale((1,1.6,6)), process_rot((0,0,-pi/2)), _add_canon)
template_tank = create_ascii_object(body, pos=vec3(1000,1000,1000), static=True, process=preprocess_tank)
def update_tank(self, vel):
vel = vel.with_z(0)
if vel.length() > 0.5:
direction = vel.normalize()
yaw = -atan2(direction.x, direction.y)
self.orientation(quat().rotate_z(yaw))
self.avel(vec3())
def create_tank_obj(pos, col, vel=None):
obj = create_clones(template_tank, [(pos,quat())])[0]
gameapi.waitload(obj.id)
obj.vel(vel)
obj.col(col)
obj.update_tank = MethodType(update_tank, obj)
return obj
def process_tank_floor(orientation, gfx, phys):
top = max(gfx.vertices, key=lambda v:v.z).z
bottom = top-30
for v in gfx.vertices:
v.z = max(v.z,bottom)
return orientation,gfx,phys
|
the-stack_0_19471
|
import time
import math
import random
import board
import adafruit_dotstar as dotstar
from os import listdir
from os.path import isfile, join, splitext
from PIL import Image, ImageOps
from datetime import datetime
panelWidth = 40
panelHeight = 10
dotCount = panelWidth * panelHeight
asciiDir = './ascii'
FILENAME = asciiDir + '/' + '48.png'
asciiDict = {}
dots = dotstar.DotStar(
board.SCK,
board.MOSI,
dotCount,
auto_write=False,
brightness=0.05
)
def loadImage(filename):
imageRaw = Image.open(filename).convert("RGB")
imageInvert = ImageOps.invert(imageRaw)
imagePixels = imageInvert.load()
imageWidth = imageInvert.size[0]
imageHeight = imageInvert.size[1]
print("%dx%d pixels" % imageInvert.size)
print("Allocating...")
imageColumns = [0 for x in range(imageWidth)]
for x in range(imageWidth):
imageColumns[x] = [[0, 0, 0, 0] for _ in range(imageHeight)]
print("Converting...")
for x in range(imageWidth): # For each column of image
for y in range(imageHeight): # For each pixel in column
value = imagePixels[x, y] # Read RGB pixel in image
imageColumns[x][y][0] = value[0] # Gamma-corrected R
imageColumns[x][y][1] = value[1] # Gamma-corrected G
imageColumns[x][y][2] = value[2] # Gamma-corrected B
imageColumns[x][y][3] = 1.0 # Brightness
#print ('X ' + str(x) + ' Y ' + str(y) + ' value: ' + str(value[0]))
return imageColumns
asciiFiles = [f for f in listdir(asciiDir) if isfile(join(asciiDir, f))]
for asciiFile in asciiFiles:
parts = splitext(asciiFile)
print('Loading ASCII #' + parts[0])
asciiDict[parts[0]] = loadImage(asciiDir + '/' + asciiFile)
# a random color 0 -> 192
def random_color():
return random.randrange(0, 7) * 32
#
# Draw a given image matrix at a given offset
#
def showImage(img, xOffset, yOffset):
for x in range(len(img)): # For each column of image
for y in range(len(img[0])): # For each pixel in column
b = 0.1
value = img[x][y] # Read RGB pixel in image
pixel = getPixel(xOffset + x,yOffset + y)
dots[pixel] = [int(value[0] * b), int(value[1] * b), int(value[2] * b)]
return xOffset + len(img)
def showText(text, xOffset, yOffset):
x = xOffset
for char in text:
asciiNum = ord(char)
asciiImage = asciiDict[str(asciiNum)]
x = showImage(asciiImage, x, yOffset) + 1
def getPixel(col, row):
baseCount = (row) * panelWidth
# If we are an even row, count left to right
if (row % 2) == 0:
pixel = baseCount + col
else:
pixel = baseCount + (panelWidth - col) -1
#print('col %d row %d baseCount %d pixel %d even %d' % (col, row, baseCount, pixel, row%2))
return pixel
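# Worked example of the serpentine mapping above (assuming the 40x10 panel
# configured at the top of this file): row 0 runs left to right, so
# getPixel(0, 0) == 0 and getPixel(39, 0) == 39; row 1 runs right to left, so
# getPixel(39, 1) == 40 and getPixel(0, 1) == 79. This matches a DotStar strip
# wired back and forth across the panel.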
def clearScreen():
for x in range(0, dotCount):
dots[x] = (0, 0, 0)
while True:
clearScreen()
now = datetime.now()
currentTime = now.strftime("%-I:%M")
showText(currentTime, 0, 0)
dots.show()
time.sleep(1.0)
|
the-stack_0_19474
|
import streamlit as st
#import sim_pseudo_code as spc
#import Team_Class as tc
import pandas as pd
import plotly.graph_objects as go
import numpy as np
#add in seed
@st.cache
def load_data():
win_probs = pd.read_csv('dash_data4.csv')
return win_probs
df = load_data()
data = df.reindex(columns=['sim_prob','rf_model','nn_model','elo_pred','seed_pred','Pred'])
df_t_seed = df.reindex(columns=['Team_Name_1','Team_Name_2','Seed1','Seed2'])
df_t_seed['Team_Name_1'] = df_t_seed.apply(lambda x: str(x.Team_Name_1) + ' ('+str(x.Seed1)+')', axis=1)
df_t_seed['Team_Name_2'] = df_t_seed.apply(lambda x: str(x.Team_Name_2) + ' ('+str(x.Seed2)+')', axis=1)
teams = df_t_seed.reindex(columns= ['Team_Name_1','Team_Name_2'])
data_inverse = 1- data.copy()
data['Team1'] = teams.Team_Name_1
data['Team2'] = teams.Team_Name_2
data_inverse['Team1'] = teams.Team_Name_2
data_inverse['Team2'] = teams.Team_Name_1
df_fin = pd.concat([data,data_inverse])
st.title('Win Probability By Model 2021')
t1 = st.selectbox('Team 1:', df_fin.Team1.unique())
t2 = st.selectbox('Team 2:', df_fin.Team2.unique())
d_out = df_fin[(df_fin['Team1']== t1) & (df_fin['Team2'] == t2)].round(2)
values = [d_out.iloc[0]['Pred'],d_out.iloc[0]['elo_pred'], d_out.iloc[0]['seed_pred'], d_out.iloc[0]['sim_prob'], d_out.iloc[0]['rf_model'], d_out.iloc[0]['nn_model']]
values2 = [1-d_out.iloc[0]['Pred'],1-d_out.iloc[0]['elo_pred'],1- d_out.iloc[0]['seed_pred'],1- d_out.iloc[0]['sim_prob'], 1-d_out.iloc[0]['rf_model'], 1-d_out.iloc[0]['nn_model']]
#st.dataframe(d_out)
def create_fig(d_out,values):
fig = go.Figure()
fig.add_trace(go.Bar(
y=['AGGREGATE', 'elo', 'seed','sim','rf_model','nn_model'],
#x=[d_out['aggregate'][0],d_out['elo_pred'][0], d_out['seed_pred'][0], d_out['sim_prob'][0], d_out['rf_model'][0], d_out['nn_model'][0]],
x =values,
name=d_out.iloc[0]['Team1'],
orientation='h',
text=values,
textposition='auto',
marker=dict(
color='rgba(246, 78, 139, 0.6)',
line=dict(color='rgba(246, 78, 139, 1.0)', width=3)
)
))
fig.add_trace(go.Bar(
y=['AGGREGATE', 'elo', 'seed','sim','rf_model','nn_model'],
x=values2,
name=d_out.iloc[0]['Team2'],
orientation='h',
marker=dict(
color='rgba(58, 71, 80, 0.6)',
line=dict(color='rgba(58, 71, 80, 1.0)', width=3)
)
))
fig.update_layout(barmode='stack')
return fig
f_out = create_fig(d_out,values)
st.plotly_chart(f_out)
#st.dataframe(d_out)
|
the-stack_0_19475
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Abstract classes used by pyglet.font implementations.
These classes should not be constructed directly. Instead, use the functions
in `pyglet.font` to obtain platform-specific instances. You can use these
classes as a documented interface to the concrete classes.
"""
import unicodedata
from pyglet.gl import *
from pyglet import image
_other_grapheme_extend = list(map(chr, [0x09be, 0x09d7, 0x0be3, 0x0b57, 0x0bbe, 0x0bd7, 0x0cc2,
0x0cd5, 0x0cd6, 0x0d3e, 0x0d57, 0x0dcf, 0x0ddf, 0x200c,
0x200d, 0xff9e, 0xff9f])) # skip codepoints above U+10000
_logical_order_exception = list(map(chr, list(range(0xe40, 0xe45)) + list(range(0xec0, 0xec4))))
_grapheme_extend = lambda c, cc: cc in ('Me', 'Mn') or c in _other_grapheme_extend
_CR = u'\u000d'
_LF = u'\u000a'
_control = lambda c, cc: cc in ('Zl', 'Zp', 'Cc', 'Cf') and not \
c in list(map(chr, [0x000d, 0x000a, 0x200c, 0x200d]))
_extend = lambda c, cc: _grapheme_extend(c, cc) or \
c in list(map(chr, [0xe30, 0xe32, 0xe33, 0xe45, 0xeb0, 0xeb2, 0xeb3]))
_prepend = lambda c, cc: c in _logical_order_exception
_spacing_mark = lambda c, cc: cc == 'Mc' and c not in _other_grapheme_extend
def _grapheme_break(left, right):
# GB1
if left is None:
return True
# GB2 not required, see end of get_grapheme_clusters
# GB3
if left == _CR and right == _LF:
return False
left_cc = unicodedata.category(left)
# GB4
if _control(left, left_cc):
return True
right_cc = unicodedata.category(right)
# GB5
if _control(right, right_cc):
return True
# GB6, GB7, GB8 not implemented
# GB9
if _extend(right, right_cc):
return False
# GB9a
if _spacing_mark(right, right_cc):
return False
# GB9b
if _prepend(left, left_cc):
return False
# GB10
return True
def get_grapheme_clusters(text):
"""Implements Table 2 of UAX #29: Grapheme Cluster Boundaries.
Does not currently implement Hangul syllable rules.
:Parameters:
`text` : unicode
String to cluster.
.. versionadded:: 1.1.2
:rtype: List of `unicode`
:return: List of Unicode grapheme clusters
"""
clusters = []
cluster = ''
left = None
for right in text:
if cluster and _grapheme_break(left, right):
clusters.append(cluster)
cluster = ''
elif cluster:
# Add a zero-width space to keep len(clusters) == len(text)
clusters.append(u'\u200b')
cluster += right
left = right
# GB2
if cluster:
clusters.append(cluster)
return clusters
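# Minimal illustration of the clustering above (assumes standard unicodedata
# tables; purely illustrative, not part of pyglet's documented API): in
# u'e\u0301x' the combining acute accent stays in the same cluster as its base
# 'e', and a zero-width-space placeholder keeps len(clusters) == len(text):
#     get_grapheme_clusters(u'e\u0301x')  # -> [u'\u200b', u'e\u0301', u'x']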
class Glyph(image.TextureRegion):
"""A single glyph located within a larger texture.
Glyphs are drawn most efficiently using the higher level APIs, for example
`GlyphString`.
:Ivariables:
`advance` : int
The horizontal advance of this glyph, in pixels.
`vertices` : (int, int, int, int)
The vertices of this glyph, with (0,0) originating at the
left-side bearing at the baseline.
"""
baseline = 0
lsb = 0
advance = 0
vertices = (0, 0, 0, 0)
def set_bearings(self, baseline, left_side_bearing, advance, x_offset=0, y_offset=0):
"""Set metrics for this glyph.
:Parameters:
`baseline` : int
Distance from the bottom of the glyph to its baseline;
typically negative.
`left_side_bearing` : int
Distance to add to the left edge of the glyph.
`advance` : int
Distance to move the horizontal advance to the next glyph.
`x_offset` : int
Distance to move the glyph horizontally from its default position.
`y_offset` : int
Distance to move the glyph vertically from its default position.
"""
self.baseline = baseline
self.lsb = left_side_bearing
self.advance = advance
self.vertices = (
left_side_bearing + x_offset,
-baseline + y_offset,
left_side_bearing + self.width + x_offset,
-baseline + self.height + y_offset)
def draw(self):
"""Debug method.
Use the higher level APIs for performance and kerning.
"""
glBindTexture(GL_TEXTURE_2D, self.owner.id)
glBegin(GL_QUADS)
self.draw_quad_vertices()
glEnd()
def draw_quad_vertices(self):
"""Debug method.
Use the higher level APIs for performance and kerning.
"""
glTexCoord3f(*self.tex_coords[:3])
glVertex2f(self.vertices[0], self.vertices[1])
glTexCoord3f(*self.tex_coords[3:6])
glVertex2f(self.vertices[2], self.vertices[1])
glTexCoord3f(*self.tex_coords[6:9])
glVertex2f(self.vertices[2], self.vertices[3])
glTexCoord3f(*self.tex_coords[9:12])
glVertex2f(self.vertices[0], self.vertices[3])
def get_kerning_pair(self, right_glyph):
"""Not implemented.
"""
return 0
class GlyphTextureAtlas(image.Texture):
"""A texture within which glyphs can be drawn.
"""
region_class = Glyph
x = 0
y = 0
line_height = 0
def apply_blend_state(self):
"""Set the OpenGL blend state for the glyphs in this texture.
"""
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
def fit(self, image):
"""Place `image` within this texture.
:Parameters:
`image` : `pyglet.image.AbstractImage`
Image to place within the texture.
:rtype: `Glyph`
:return: The glyph representing the image from this texture, or None
if the image doesn't fit.
"""
if image.width > self.width or image.height > self.height:
return None
if self.x + image.width > self.width:
self.x = 0
self.y += self.line_height + 1
self.line_height = 0
if self.y + image.height > self.height:
return None
self.line_height = max(self.line_height, image.height)
region = self.get_region(
self.x, self.y, image.width, image.height)
if image.width > 0:
region.blit_into(image, 0, 0, 0)
self.x += image.width + 1
return region
class GlyphRenderer:
"""Abstract class for creating glyph images.
"""
def __init__(self, font):
pass
def render(self, text):
raise NotImplementedError('Subclass must override')
class FontException(Exception):
"""Generic exception related to errors from the font module. Typically
these relate to invalid font data."""
pass
class Font:
"""Abstract font class able to produce glyphs.
To construct a font, use :py:func:`pyglet.font.load`, which will instantiate the
platform-specific font class.
Internally, this class is used by the platform classes to manage the set
of textures into which glyphs are written.
:Ivariables:
`ascent` : int
Maximum ascent above the baseline, in pixels.
`descent` : int
Maximum descent below the baseline, in pixels. Usually negative.
"""
texture_width = 256
texture_height = 256
texture_internalformat = GL_ALPHA
texture_min_filter = GL_LINEAR
texture_mag_filter = GL_LINEAR
# These should also be set by subclass when known
ascent = 0
descent = 0
glyph_renderer_class = GlyphRenderer
texture_class = GlyphTextureAtlas
def __init__(self):
self.textures = []
self.glyphs = {}
@property
def name(self):
"""Return the Family Name of the font as a string."""
raise NotImplementedError
@classmethod
def add_font_data(cls, data):
"""Add font data to the font loader.
This is a class method and affects all fonts loaded. Data must be
some byte string of data, for example, the contents of a TrueType font
file. Subclasses can override this method to add the font data into
the font registry.
There is no way to instantiate a font given the data directly, you
must use :py:func:`pyglet.font.load` specifying the font name.
"""
pass
@classmethod
def have_font(cls, name):
"""Determine if a font with the given name is installed.
:Parameters:
`name` : str
Name of a font to search for
:rtype: bool
"""
return True
def create_glyph(self, image):
"""Create a glyph using the given image.
This is used internally by `Font` subclasses to add glyph data
to the font. Glyphs are packed within large textures maintained by
`Font`. This method inserts the image into a font texture and returns
a glyph reference; it is up to the subclass to add metadata to the
glyph.
Applications should not use this method directly.
:Parameters:
`image` : `pyglet.image.AbstractImage`
The image to write to the font texture.
:rtype: `Glyph`
"""
glyph = None
self._adapt_texture_size(image)
for texture in self.textures:
glyph = texture.fit(image)
if glyph:
break
if not glyph:
texture = self.texture_class.create_for_size(GL_TEXTURE_2D,
self.texture_width,
self.texture_height,
self.texture_internalformat,
self.texture_min_filter,
self.texture_mag_filter)
self.textures.insert(0, texture)
glyph = texture.fit(image)
return glyph
def _adapt_texture_size(self, image):
if image.width > self.texture_width or image.height > self.texture_height:
largest_dimension = max(image.width, image.height)
self.texture_height = self.texture_width = largest_dimension * 4
def get_glyphs(self, text):
"""Create and return a list of Glyphs for `text`.
If any characters do not have a known glyph representation in this
font, a substitution will be made.
:Parameters:
`text` : str or unicode
Text to render.
:rtype: list of `Glyph`
"""
glyph_renderer = None
glyphs = [] # glyphs that are committed.
for c in get_grapheme_clusters(str(text)):
# Get the glyph for 'c'. Hide tabs (Windows and Linux render
# boxes)
if c == '\t':
c = ' '
if c not in self.glyphs:
if not glyph_renderer:
glyph_renderer = self.glyph_renderer_class(self)
self.glyphs[c] = glyph_renderer.render(c)
glyphs.append(self.glyphs[c])
return glyphs
def get_glyphs_for_width(self, text, width):
"""Return a list of glyphs for `text` that fit within the given width.
If the entire text is larger than 'width', as much as possible will be
used while breaking after a space or zero-width space character. If a
newline is encountered in text, only text up to that newline will be
used. If no break opportunities (newlines or spaces) occur within
`width`, the text up to the first break opportunity will be used (this
will exceed `width`). If there are no break opportunities, the entire
text will be used.
You can assume that each character of the text is represented by
exactly one glyph; so the amount of text "used up" can be determined
by examining the length of the returned glyph list.
:Parameters:
`text` : str or unicode
Text to render.
`width` : int
Maximum width of returned glyphs.
:rtype: list of `Glyph`
:see: `GlyphString`
"""
glyph_renderer = None
glyph_buffer = [] # next glyphs to be added, as soon as a BP is found
glyphs = [] # glyphs that are committed.
for c in text:
if c == '\n':
glyphs += glyph_buffer
break
# Get the glyph for 'c'
if c not in self.glyphs:
if not glyph_renderer:
glyph_renderer = self.glyph_renderer_class(self)
self.glyphs[c] = glyph_renderer.render(c)
glyph = self.glyphs[c]
# Add to holding buffer and measure
glyph_buffer.append(glyph)
width -= glyph.advance
# If over width and have some committed glyphs, finish.
if width <= 0 and len(glyphs) > 0:
break
# If a valid breakpoint, commit holding buffer
if c in u'\u0020\u200b':
glyphs += glyph_buffer
glyph_buffer = []
# If nothing was committed, commit everything (no breakpoints found).
if len(glyphs) == 0:
glyphs = glyph_buffer
return glyphs
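# Sketch of typical use (illustrative only; the font name and sizes are
# assumptions, and fonts are normally obtained via pyglet.font.load rather
# than by constructing these abstract classes directly):
#
#     import pyglet.font
#     font = pyglet.font.load('Times New Roman', 16)
#     glyphs = font.get_glyphs('Hello, world')                 # one Glyph per cluster
#     fitted = font.get_glyphs_for_width('Hello, world', 60)   # glyphs up to 60 px
#
# Both calls go through create_glyph() internally, packing rendered glyph
# images into the GlyphTextureAtlas instances kept in font.textures.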
|
the-stack_0_19476
|
"""
Facebook Prophet
----------------
"""
from typing import Optional, Union, List
import re
import logging
import prophet
import numpy as np
import pandas as pd
from darts.timeseries import TimeSeries
from darts.models.forecasting.forecasting_model import DualCovariatesForecastingModel
from darts.logging import get_logger, execute_and_suppress_output, raise_if
logger = get_logger(__name__)
logger.level = logging.WARNING # set to warning to suppress prophet logs
class Prophet(DualCovariatesForecastingModel):
def __init__(
self,
add_seasonalities: Optional[Union[dict, List[dict]]] = None,
country_holidays: Optional[str] = None,
**prophet_kwargs,
):
"""Facebook Prophet
This class provides a basic wrapper around `Facebook Prophet <https://github.com/facebook/prophet>`_.
It supports adding country holidays as well as custom seasonalities and adds support for stochastic
forecasting and future covariates.
Parameters
----------
add_seasonalities
Optionally, a dict or list of dicts with custom seasonality/ies to add to the model.
Each dict takes the following mandatory and optional data:
.. highlight:: python
.. code-block:: python
dict({
'name': str # (name of the seasonality component),
'seasonal_periods': int # (nr of steps composing a season),
'fourier_order': int # (number of Fourier components to use),
'prior_scale': Optional[float] # (a prior scale for this component),
'mode': Optional[str] # ('additive' or 'multiplicative')
})
..
An example for `seasonal_periods`: If you have hourly data (frequency='H') and your seasonal cycle repeats
after 48 hours then set `seasonal_periods=48`.
Apart from `seasonal_periods`, this is very similar to how you would call Facebook Prophet's
`add_seasonality()` method.
Alternatively, you can add seasonalities after model creation and before fitting with
:meth:`add_seasonality() <Prophet.add_seasonality()>`.
country_holidays
An optional country code, for which holidays can be taken into account by Prophet.
See: https://github.com/dr-prodigy/python-holidays
In addition to those countries, Prophet includes holidays for these
countries: Brazil (BR), Indonesia (ID), India (IN), Malaysia (MY), Vietnam (VN),
Thailand (TH), Philippines (PH), Turkey (TU), Pakistan (PK), Bangladesh (BD),
Egypt (EG), China (CN), and Russia (RU).
prophet_kwargs
Some optional keyword arguments for Prophet.
For information about the parameters see:
`The Prophet source code <https://github.com/facebook/prophet/blob/master/python/prophet/forecaster.py>`_.
"""
super().__init__()
self._auto_seasonalities = self._extract_auto_seasonality(prophet_kwargs)
self._add_seasonalities = dict()
add_seasonality_calls = (
add_seasonalities
if isinstance(add_seasonalities, list)
else [add_seasonalities]
)
for call in add_seasonality_calls:
self._store_add_seasonality_call(seasonality_call=call)
self.country_holidays = country_holidays
self.prophet_kwargs = prophet_kwargs
self.model = None
def __str__(self):
return "Prophet"
def _fit(self, series: TimeSeries, future_covariates: Optional[TimeSeries] = None):
super()._fit(series, future_covariates)
series = self.training_series
fit_df = pd.DataFrame(
data={"ds": series.time_index, "y": series.univariate_values()}
)
self.model = prophet.Prophet(**self.prophet_kwargs)
# add user defined seasonalities (from model creation and/or pre-fit self.add_seasonalities())
interval_length = self._freq_to_days(series.freq_str)
for seasonality_name, attributes in self._add_seasonalities.items():
self.model.add_seasonality(
name=seasonality_name,
period=attributes["seasonal_periods"] * interval_length,
fourier_order=attributes["fourier_order"],
)
# add covariates
if future_covariates is not None:
fit_df = fit_df.merge(
future_covariates.pd_dataframe(),
left_on="ds",
right_index=True,
how="left",
)
for covariate in future_covariates.columns:
self.model.add_regressor(covariate)
# add built-in country holidays
if self.country_holidays is not None:
self.model.add_country_holidays(self.country_holidays)
execute_and_suppress_output(self.model.fit, logger, logging.WARNING, fit_df)
return self
def _predict(
self,
n: int,
future_covariates: Optional[TimeSeries] = None,
num_samples: int = 1,
) -> TimeSeries:
super()._predict(n, future_covariates, num_samples)
predict_df = self._generate_predict_df(n=n, future_covariates=future_covariates)
if num_samples == 1:
forecast = self.model.predict(predict_df)["yhat"].values
else:
forecast = np.expand_dims(
self._stochastic_samples(predict_df, n_samples=num_samples), axis=1
)
return self._build_forecast_series(forecast)
def _generate_predict_df(
self, n: int, future_covariates: Optional[TimeSeries] = None
) -> pd.DataFrame:
"""Returns a pandas DataFrame in the format required for Prophet.predict() with `n` dates after the end of
the fitted TimeSeries"""
predict_df = pd.DataFrame(data={"ds": self._generate_new_dates(n)})
if future_covariates is not None:
predict_df = predict_df.merge(
future_covariates.pd_dataframe(),
left_on="ds",
right_index=True,
how="left",
)
return predict_df
def _is_probabilistic(self) -> bool:
return True
def _stochastic_samples(self, predict_df, n_samples) -> np.ndarray:
"""Returns stochastic forecast of `n_samples` samples.
This method is a replicate of Prophet.predict() which suspends simplification of stochastic samples to
deterministic target values."""
# save default number of uncertainty_samples and set user-defined n_samples
n_samples_default = self.model.uncertainty_samples
self.model.uncertainty_samples = n_samples
if self.model.history is None:
raise ValueError("Model has not been fit.")
if predict_df is None:
predict_df = self.model.history.copy()
else:
if predict_df.shape[0] == 0:
raise ValueError("Dataframe has no rows.")
predict_df = self.model.setup_dataframe(predict_df.copy())
predict_df["trend"] = self.model.predict_trend(predict_df)
forecast = self.model.sample_posterior_predictive(predict_df)
# reset default number of uncertainty_samples
self.model.uncertainty_samples = n_samples_default
return forecast["yhat"]
def predict_raw(
self, n: int, future_covariates: Optional[TimeSeries] = None
) -> pd.DataFrame:
"""Returns the output of the base Facebook Prophet model in form of a pandas DataFrame. Note however,
that the output of this method is not supported for further processing with the Darts API.
Methods of the base Prophet model can be accessed with self.model.method() (i.e. self.model.plot_components())
"""
super().predict(n, future_covariates, num_samples=1)
predict_df = self._generate_predict_df(n=n, future_covariates=future_covariates)
return self.model.predict(predict_df)
def add_seasonality(
self,
name: str,
seasonal_periods: int,
fourier_order: int,
prior_scale: Optional[float] = None,
mode: Optional[str] = None,
) -> None:
"""Adds a custom seasonality to the model that reapeats after every n `seasonal_periods` timesteps.
An example for `seasonal_periods`: If you have hourly data (frequency='H') and your seasonal cycle repeats
after 48 hours -> `seasonal_periods=48`.
Apart from `seasonal_periods`, this is very similar to how you would call Facebook Prophet's
`add_seasonality()` method. For information about the parameters see:
`The Prophet source code <https://github.com/facebook/prophet/blob/master/python/prophet/forecaster.py>`_.
Parameters
----------
name
name of the seasonality component
seasonal_periods
number of timesteps after which the seasonal cycle repeats
fourier_order
number of Fourier components to use
prior_scale
optionally, a prior scale for this component
mode
optionally, 'additive' or 'multiplicative'
"""
function_call = {
"name": name,
"seasonal_periods": seasonal_periods,
"fourier_order": fourier_order,
"prior_scale": prior_scale,
"mode": mode,
}
self._store_add_seasonality_call(seasonality_call=function_call)
def _store_add_seasonality_call(
self, seasonality_call: Optional[dict] = None
) -> None:
"""Checks the validity of an add_seasonality() call and stores valid calls.
As the actual model is only created at fitting time, and seasonalities are added pre-fit,
the add_seasonality calls must be stored and checked on Darts' side.
Raises
----------
ValueError
if `seasonality_call` has missing or empty mandatory keys/arguments
if `seasonality_call` with `name` already exists.
if `seasonality_call` has invalid keys/arguments
if `seasonality_call` has invalid dtypes
"""
if seasonality_call is None:
return
seasonality_properties = {
"name": {"default": None, "dtype": str},
"seasonal_periods": {"default": None, "dtype": int},
"fourier_order": {"default": None, "dtype": int},
"prior_scale": {"default": None, "dtype": float},
"mode": {"default": None, "dtype": str},
}
seasonality_default = {
kw: seasonality_properties[kw]["default"] for kw in seasonality_properties
}
mandatory_keywords = ["name", "seasonal_periods", "fourier_order"]
add_seasonality_call = dict(seasonality_default, **seasonality_call)
missing_kws = [
kw for kw in mandatory_keywords if add_seasonality_call[kw] is None
]
raise_if(
len(missing_kws) > 0,
f'Seasonality `{add_seasonality_call["name"]}` has missing mandatory keywords or empty arguments: '
f"{missing_kws}.",
logger,
)
seasonality_name = add_seasonality_call["name"]
raise_if(
seasonality_name in self._auto_seasonalities
or seasonality_name in self._add_seasonalities,
f"Adding seasonality with `name={seasonality_name}` failed. A seasonality with this name already "
f"exists.",
)
invalid_kws = [
kw for kw in add_seasonality_call if kw not in seasonality_default
]
raise_if(
len(invalid_kws) > 0,
f'Seasonality `{add_seasonality_call["name"]}` has invalid keywords: {invalid_kws}. Only the '
f"following arguments are supported: {list(seasonality_default)}",
logger,
)
invalid_types = [
kw
for kw, value in add_seasonality_call.items()
if not isinstance(value, seasonality_properties[kw]["dtype"])
and value is not None
]
raise_if(
len(invalid_types) > 0,
f'Seasonality `{add_seasonality_call["name"]}` has invalid value dtypes: {invalid_types} must be '
f'of type {[seasonality_properties[kw]["dtype"] for kw in invalid_types]}.',
logger,
)
self._add_seasonalities[seasonality_name] = add_seasonality_call
@staticmethod
def _extract_auto_seasonality(prophet_kwargs: dict) -> list:
"""Returns the automatically added seasonalities by Prophet's base model based on kwargs of model creation"""
auto_seasonalities = []
for auto_seasonality in ["daily", "weekly", "yearly"]:
s_name = auto_seasonality + "_seasonality"
if not (s_name in prophet_kwargs and not prophet_kwargs[s_name]):
auto_seasonalities.append(auto_seasonality)
return auto_seasonalities
@staticmethod
def _freq_to_days(freq: str) -> float:
"""Converts a frequency to number of days required by Facebook Prophet
Parameters
----------
freq
frequency string of the underlying TimeSeries's time index (pd.DateTimeIndex.freq_str)
"""
# this regex extracts all digits from `freq`: exp: '30S' -> 30
freq_times = re.findall(r"\d+", freq)
freq_times = 1 if not freq_times else int(freq_times[0])
# this regex extracts all characters and '-' from `freq` and then extracts left string from '-'
# exp: 'W-SUN' -> 'W', '30S' -> 'S'
freq = "".join(re.split("[^a-zA-Z-]*", freq)).split("-")[0]
seconds_per_day = 86400
if freq in ["A", "BA", "Y", "BY", "RE"] or freq.startswith(
("A", "BA", "Y", "BY", "RE")
): # year
days = 365.25
elif freq in ["Q", "BQ", "REQ"] or freq.startswith(
("Q", "BQ", "REQ")
): # quarter
days = 3 * 30.4375
elif freq in ["M", "BM", "CBM", "SM"] or freq.startswith(
("M", "BM", "BS", "CBM", "SM")
): # month
days = 30.4375
elif freq in ["W"]: # week
days = 7.0
elif freq in ["B", "C"]: # business day
days = 1 * 7 / 5
elif freq in ["D"]: # day
days = 1.0
elif freq in ["H", "BH", "CBH"]: # hour
days = 1 / 24
elif freq in ["T", "min"]: # minute
days = 1 / (24 * 60)
elif freq in ["S"]: # second
days = 1 / seconds_per_day
elif freq in ["L", "ms"]: # millisecond
days = 1 / (seconds_per_day * 10 ** 3)
elif freq in ["U", "us"]: # microsecond
days = 1 / (seconds_per_day * 10 ** 6)
elif freq in ["N"]: # nanosecond
days = 1 / (seconds_per_day * 10 ** 9)
else:
raise ValueError(
"freq {} not understood. Please report if you think this is in error.".format(
freq
)
)
return freq_times * days
def _supports_range_index(self) -> bool:
"""Prophet does not support integer range index."""
raise_if(
True,
"Prophet does not support integer range index. The index of the TimeSeries must be of type "
"pandas.DatetimeIndex",
logger,
)
return False
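# Sketch of typical use (illustrative only -- the DataFrame, column names and
# seasonality settings below are assumptions, not part of this module):
#
#     from darts import TimeSeries
#     series = TimeSeries.from_dataframe(df, time_col="ds", value_cols="y")
#     model = Prophet(
#         add_seasonalities={
#             "name": "daily_on_hourly_data",
#             "seasonal_periods": 24,   # 24 hourly steps per seasonal cycle
#             "fourier_order": 5,
#         },
#         country_holidays="US",
#     )
#     model.fit(series)
#     forecast = model.predict(n=24, num_samples=100)  # stochastic forecast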
|
the-stack_0_19477
|
# encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from src.crawler.media import ettoday
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="ETtoday新聞雲_1",
link="https://www.ettoday.net/news/20210720/2035771.htm",
expected_output=NewsStruct(
title="東元股東會前夕再出招 黃育仁旗下兩公司光菱、東友明停牌",
content="\n▲菱光董座黃育仁。(圖/記者陳心怡攝)\n記者陳心怡/台北報導\n光菱 (8032) 與東友 (5438) 今 (20) 日傍晚同步公告,因有重大事項待公布,經櫃買中心同意明 (21) 日暫停交易。由於兩家公司大股東均為菱光,也是東元集團會長黃茂雄兒子黃育仁主導的公司,兩家公司是否進行換股、策略合作,引發外界關注,同時也被視為可能是黃育仁的反撲。\n東元電機(1504)股東會23日登場前,東元黃家黃茂雄、黃育仁父子之爭越演越烈。黃育仁旗下的3家公司,包括上市的菱光科技,上櫃的東友科技和光菱電子,黃茂雄先前結合創投達勝伍成立鈺叡,要收購菱光過半股權;7月初,黃茂雄主導成立的安富國際投宣布要以6.14億元,公開收購東友30%股權。\n不過黃育仁近期頻出招,更讓兩家公司明天同時停牌,外界預料黃育仁可能再出招,確保在兩家公司的掌控能力,對抗東元勢力。",
keywords=["東元", "黃育仁", "黃茂雄"],
category="財經",
media="ETtoday新聞雲",
datetime="2021-07-20T20:21:00+08:00",
link="https://www.ettoday.net/news/20210720/2035771.htm",
),
)
TEST_DATA_2 = TEST_DATA(
name="ETtoday財經雲_1",
link="https://finance.ettoday.net/news/1929335",
expected_output=NewsStruct(
title="外資上周賣超938億元 聯電、台積電淪提款機",
content="\n▲聯電、台積電上週成為外資提款機。(圖/路透社)\n記者陳心怡/台北報導\n根據證交所公布上周外資買賣超狀況,上周外資賣超938.45億元,今年以來累計賣超為1,931億元,外資總持有股票市值為22兆2,981.33億元新台幣,占全體上市股票市值的45.88%,較2月19日的23兆4,521.50億元新台幣,減少11,540.17億元。\n就個股比較部分,外資外資賣超前三名則為聯電(2303)16萬5910張,台積電(2330)8萬8182張,新光金(2888)5萬3488張。\n賣超四到十名分別為,佳格(1227)3萬4063張、長榮航(2618)3萬3858張、和碩(4938)2萬8592張、兆豐金(2886)2萬5131張、鴻海(2317)2萬4052張、精英(2331)1萬8523張、旺宏(2337)1萬8322張。\n另外,外資買超前三名分別是華航(2610)、友達(2409)、華邦電(2344),華航買超6萬7486張,友達買超5萬5519張,華邦電則是買超5萬4734張。\n買超四到十名分別為,中信金(2891)4萬1761張、陽明(2609)2萬9584張、遠東新(1402)2萬2187張、榮成(1909)1萬7634張、永豐金(2890)1萬5470張、華南金(2880)1萬2961張、台塑(1301)1萬2933張。",
keywords=[],
category="財經",
media="ETtoday財經雲",
datetime="2021-03-02T15:52:00+08:00",
link="https://finance.ettoday.net/news/1929335",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
logger.warning("Init News Crawler ...")
return ettoday.ETtodayNewsCrawler()
@pytest.mark.parametrize(
argnames="name, link, expected_output",
argvalues=[tuple(t) for t in TEST_DATA_LIST],
ids=[
f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
for t in TEST_DATA_LIST
],
)
def test_get_info(
newsCrawler,
name,
link,
expected_output,
):
output = newsCrawler.getInfo(link=link)
assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
|
the-stack_0_19479
|
import asyncio
import os
import sys
from functools import wraps
from pathlib import Path
from typing import Optional
import click
import tomlkit
from ptpython.repl import embed
from tortoise import Tortoise
from tortoise_cli import __version__, utils
def coro(f):
@wraps(f)
def wrapper(*args, **kwargs):
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(f(*args, **kwargs))
finally:
if f.__name__ != "cli":
loop.run_until_complete(Tortoise.close_connections())
return wrapper
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option(__version__, "-V", "--version")
@click.option(
"-c",
"--config",
help="TortoiseORM config dictionary path, like settings.TORTOISE_ORM",
)
@click.pass_context
@coro
async def cli(ctx: click.Context, config: Optional[str]):
if not config:
config = os.getenv("TORTOISE_ORM")
if not config:
file = "pyproject.toml"
if Path(file).exists():
with open(file, "r") as f:
content = f.read()
doc = tomlkit.parse(content)
config = doc.get("tool", {}).get("aerich", {}).get("tortoise_orm") # type:ignore
if not config:
raise click.UsageError(
"You must specify TORTOISE_ORM in option or env, or config file pyproject.toml from config of aerich",
ctx=ctx,
)
tortoise_config = utils.get_tortoise_config(ctx, config)
await Tortoise.init(config=tortoise_config)
await Tortoise.generate_schemas(safe=True)
@cli.command(help="Start a interactive shell.")
@click.pass_context
@coro
async def shell(ctx: click.Context):
try:
await embed(
globals=globals(),
title="Tortoise Shell",
vi_mode=True,
return_asyncio_coroutine=True,
patch_stdout=True,
)
except (EOFError, ValueError):
pass
def main():
sys.path.insert(0, ".")
cli()
if __name__ == "__main__":
main()
|
the-stack_0_19480
|
# coding=utf-8
import pygame.font
class Button():
def __init__(self, ai_settings, screen, msg):
"""初始化按钮的属性"""
self.screen = screen
self.screen_rect = screen.get_rect()
# Set the button's dimensions and other properties
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
# Create the button's rect object and center it on the screen
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
# The button label only needs to be created once
self.prep_msg(msg)
def prep_msg(self, msg):
"""将msg渲染为图像, 并使其在按钮上居中"""
self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
# Draw a color-filled button, then draw the text on top of it
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect)
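# Sketch of typical use inside a game loop (ai_settings, screen and the event
# handling around it are assumptions about the surrounding game code):
#
#     play_button = Button(ai_settings, screen, "Play")
#     ...
#     play_button.draw_button()   # once per frame, before pygame.display.flip()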
|
the-stack_0_19481
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
from aws_cdk import (
core,
aws_lambda as _lambda,
aws_logs as logs,
aws_iam as iam,
aws_iot as iot
)
class IoTStack(core.Construct):
def __init__(self, scope: core.Construct, id: str, project_prefix_obj, time_zone_obj, dynamoTable_obj, **kwargs):
super().__init__(scope, id, **kwargs)
prefix_project_string = str(project_prefix_obj) + "-"
time_zone_string = time_zone_obj
update_dynamo_table_lambda = _lambda.Function(self, "IoTUpdatePatientTable",
runtime=_lambda.Runtime.PYTHON_3_8,
handler="lambda_function.lambda_handler",
function_name=prefix_project_string + 'iot-update-patient-table',
environment={"TableName": dynamoTable_obj.table_name,
"time_zone": time_zone_string},
timeout=core.Duration.minutes(2),
code=_lambda.Code.asset('iot_resources/_lambda')
)
# grant permission to dynamodb
dynamoTable_obj.grant_read_write_data(update_dynamo_table_lambda)
# Creating the IoT Topic
iot_topic_sub_rule_action_property = iot.CfnTopicRule.LambdaActionProperty(
function_arn=update_dynamo_table_lambda.function_arn)
iot_topic_sub_rule_action = iot.CfnTopicRule.ActionProperty(lambda_=iot_topic_sub_rule_action_property)
iot_topic_sub_rule_payload = iot.CfnTopicRule.TopicRulePayloadProperty(actions=[iot_topic_sub_rule_action],
sql="SELECT * FROM 'esp32/pub'",
error_action=iot_topic_sub_rule_action,
rule_disabled=False, aws_iot_sql_version='2016-03-23')
iot_topic_sub = iot.CfnTopicRule(self, 'iotTopic', topic_rule_payload=iot_topic_sub_rule_payload)
|
the-stack_0_19482
|
from Utils.util import *
from Utils.errors import *
##North-West algorithm
def nw_algorithm_path_calculation(cost,toSend,toReceive):
if (isMatrix(cost)):
path = []
toReceiveCounter = 0
toSendCounter = 0
while (toSendCounter!=len(cost[0]) and toReceiveCounter!=len(cost)):
#If there is less to send than to receive, we send everything and go to the next sender while staying with the same receiver with less to receive (former receiver value minus sender value)
if (toSend[toSendCounter] < toReceive[toReceiveCounter]):
path.append({
'from':toSendCounter,
'to':toReceiveCounter,
'amount':toSend[toSendCounter]
})
toReceive[toReceiveCounter] -= toSend[toSendCounter]
toSend[toSendCounter] = 0
toSendCounter += 1
#If there is as much to send than to receive, we send everything and can change both the sender and the receiver
elif (toSend[toSendCounter] == toReceive[toReceiveCounter]):
path.append({
'from':toSendCounter,
'to':toReceiveCounter,
'amount':toSend[toSendCounter]
})
toReceive[toReceiveCounter] = 0
toSend[toSendCounter] = 0
toSendCounter += 1
toReceiveCounter += 1
#If there is more to send than to receive, we send everything and go to the next receiver while staying with the same sender with less to send (former sender value minus receiver value)
else:
path.append({
'from':toSendCounter,
'to':toReceiveCounter,
'amount':toReceive[toReceiveCounter]
})
toSend[toSendCounter] -= toReceive[toReceiveCounter]
toReceive[toReceiveCounter] = 0
toReceiveCounter += 1
return path
else:
raise InputError('NotAMatrix','The provided input is not a correct matrix')
def nw_algorithm_cost_calculation(cost,path):
#Once we have found the path, we just have to evaluate it
val = 0
for item in path:
val += cost[item['to']][item['from']]*item['amount']
return val
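#Small worked example of the North-West corner rule above (numbers chosen for
#illustration only): with cost=[[4,6],[5,3]] (rows=receivers, columns=senders),
#toSend=[30,20] and toReceive=[25,25], the computed path is
# sender 0 -> receiver 0 : 25, sender 0 -> receiver 1 : 5, sender 1 -> receiver 1 : 20
#and the total cost is 4*25 + 5*5 + 3*20 = 185.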
##Balas-Hammer Algorithm
#This method evaluates, for each row (receiver) and each column (sender), the regret difference: the gap between the two smallest costs
def bh_algorithm_evaluation_of_regrets_difference(cost,toSend,toReceive):
toSendRegretsDifference = []
toReceiveRegretsDifference = []
#First, we evaluate the regret (difference between the two smallest costs) of each row (i.e. for each receiver)
for i in range(len(cost)):
# if (listOfMaxRegret != None and i in listOfMaxRegret['toReceive'] and toReceive[i] == 0):
if (toReceive[i] == 0):
toReceiveRegretsDifference.append(-1)
else:
row_copy = deepcopy(cost[i])
row_copy.sort()
if (row_copy[1] == float('inf')):
row_copy[1] = row_copy[0]
row_copy[0] = 0
toReceiveRegretsDifference.append(row_copy[1]-row_copy[0])
for j in range(len(cost[0])):
#if (listOfMaxRegret != None and j in listOfMaxRegret['toSend']):
if (toSend[j] == 0):
toSendRegretsDifference.append(-1)
else:
column = []
for i in range(len(cost)):
column.append(cost[i][j])
column.sort()
if (column[1] == float('inf')):
column[1] = column[0]
column[0] = 0
toSendRegretsDifference.append(column[1]-column[0])
return {
'toSendRegretsDifference':toSendRegretsDifference,
'toReceiveRegretsDifference':toReceiveRegretsDifference
}
#Method selecting the maximum regrets from the lists of regrets generated by the "bh_algorithm_evaluation_of_regrets_differences" function
def select_maximum_regret_from_regrets_difference(regretsDifferences):
max = 0
index = 0
isToSend = True
for sindex in range(len(regretsDifferences['toSendRegretsDifference'])):
value = regretsDifferences['toSendRegretsDifference'][sindex]
if (value > max):
index = sindex
max = value
for rindex in range(len(regretsDifferences['toReceiveRegretsDifference'])):
value = regretsDifferences['toReceiveRegretsDifference'][rindex]
if (value > max):
index = rindex
max = value
isToSend = False
#When the max is evaluated, we no longer need its value, therefore setting it to -1
# if (isToSend):
# regretsDifferences['toSendRegretsDifference'][index] = 0
# else:
# regretsDifferences['toReceiveRegretsDifference'][index] = 0
return {
'max' : max,
'isToSend' : isToSend,
'index' : index
}
#init a matrix full of zeros from the size of cost
def init_result_matrix(cost):
return np.zeros((len(cost),len(cost[0])), dtype=int)
#Method used to apply the maximum regret found on the correct column or line
def transport_using_max_regret(maxRegret,cost,toSend,toReceive,resultMatrix):
if (isMatrix(cost)):
indexMaxRegret = maxRegret['index']
#If the maximum regret was found in the toSend array
if (maxRegret['isToSend']):
min = cost[0][indexMaxRegret]
indexMin = 0
for index in range(len(cost)):
if (min > cost[index][indexMaxRegret]):
min = cost[index][indexMaxRegret]
indexMin = index
#We change the toReceive value, subtracting the due merchandise
if (toReceive[indexMin] >= toSend[indexMaxRegret]):
toReceive[indexMin] -= toSend[indexMaxRegret]
#Once we have found the receiver to send the merchandise to, we establish this using the resultMatrix (same size as cost matrix, filled with zeros)
resultMatrix[indexMin][indexMaxRegret] = toSend[indexMaxRegret]
#We update toSend, setting to 0 as the merchandise has been sent
toSend[indexMaxRegret] = 0
#And we set all the values of that column of the cost matrix to inf
for index in range(len(cost)):
cost[index][indexMaxRegret] = float("inf")
else:
toSend[indexMaxRegret] -= toReceive[indexMin]
resultMatrix[indexMin][indexMaxRegret] = toReceive[indexMin]
toReceive[indexMin] = 0
#And we set all the values of that row of the cost matrix to inf
for index in range(len(cost[indexMin])):
cost[indexMin][index] = float("inf")
else:
min = cost[indexMaxRegret][0]
indexMin = 0
for index in range(len(cost[indexMaxRegret])):
if (min > cost[indexMaxRegret][index]):
min = cost[indexMaxRegret][index]
indexMin = index
#We change the toSend value, subtracting the due merchandise
if (toSend[indexMin] >= toReceive[indexMaxRegret]):
toSend[indexMin] -= toReceive[indexMaxRegret]
#Once we have found the receiver to send the merchandise to, we establish this using the resultMatrix (same size as cost matrix, filled with zeros)
resultMatrix[indexMaxRegret][indexMin] = toReceive[indexMaxRegret]
#We update toReceive, setting it to 0 as the merchandise has been received
toReceive[indexMaxRegret] = 0
#And we set all the values of that row of the cost matrix to inf
for index in range(len(cost[indexMaxRegret])):
cost[indexMaxRegret][index] = float("inf")
else:
toReceive[indexMaxRegret] -= toSend[indexMin]
resultMatrix[indexMaxRegret][indexMin] = toSend[indexMin]
toSend[indexMin] = 0
#And we set all the values of that column of the cost matrix to inf
for index in range(len(cost)):
cost[index][indexMin] = float("inf")
else:
raise InputError('NotAMatrix','The provided input is not a matrix')
#This method evaluates the total cost of the transport algorithm
def cost_calculation(cost,resMatrix):
res = 0
for i in range(len(cost)):
for j in range(len(cost[0])):
res += cost[i][j]*resMatrix[i][j]
return res
def transport(cost,toSend,toReceive):
#At first, we evaluate the regrets
regrets = bh_algorithm_evaluation_of_regrets_difference(cost,toSend,toReceive)
#We initialize the result matrix
res_matrix = init_result_matrix(cost)
#We set up a deepcopy of the cost values, as they will be set to infinity to block the use of any column/row that has already been emptied or fulfilled
cost_copy = deepcopy(cost)
#While not every formerly evaluated regret is set to -1 (i.e. not all merchandise has been fully sent and received), we select the one that costs us the least amount of regret
while ((isMatrixFilledWithValue(cost_copy,float('inf')) == False)):
#We select the maximum of them all to know which column/line we will work on
max_regret = select_maximum_regret_from_regrets_difference(regrets)
#Depending on whether the toSend attribute is true, we evaluate if the regret we found was for a stock to be emptied or for a demand to be fulfilled, and apply the correct algorithm for the situation
transport_using_max_regret(max_regret,cost_copy,toSend,toReceive, res_matrix)
regrets = bh_algorithm_evaluation_of_regrets_difference(cost_copy,toSend,toReceive)
print(cost_copy)
print("Result matrix :")
print(res_matrix)
print("Total cost : ",cost_calculation(cost,res_matrix))
return {
"resMatrix":res_matrix,
"totalCost":cost_calculation(cost,res_matrix)
}
##EXECUTION
###Test variables
test_matrix_cost = [[21,11,84,49,13],
[27,52,43,29,42],
[11,47,14,80,93],
[52,94,76,74,54]]
test_toSend_quantities = [800,439,50,790,1470]
test_toReceive_quantities = [896,
782,
943,
928]
###North-West Algorithm
path = nw_algorithm_path_calculation(deepcopy(test_matrix_cost),deepcopy(test_toSend_quantities),deepcopy(test_toReceive_quantities))
cost = nw_algorithm_cost_calculation(deepcopy(test_matrix_cost),path)
#print(path,"\ncost = ",cost)
###Balas Hammer Algorithm
#At first, we evaluate the regrets
#print(bh_algorithm_evaluation_of_regrets_difference(test_matrix_cost,test_toSend_quantities,test_toReceive_quantities))
regrets = bh_algorithm_evaluation_of_regrets_difference(test_matrix_cost,test_toSend_quantities,test_toReceive_quantities)
#Then we select the maximum of them all to know which column/line we will work on
max_regret = select_maximum_regret_from_regrets_difference(regrets)
#We initialize the result matrix
res_matrix = init_result_matrix(test_matrix_cost)
#print(res_matrix)
transport(test_matrix_cost,test_toSend_quantities,test_toReceive_quantities)
|
the-stack_0_19483
|
from jumpscale import j
"""
Provides the Params object and the ParamsFactory that is used in the Q-Tree
"""
class ParamsFactory:
"""
This factory can create new Params objects
"""
def __init__(self):
self.__jslocation__ = "j.data.params"
def get(self, dictObject={}):
"""
Create and return a new Params object
@param dictObject when dict given then dict will be converted into params
@return: a new Params object
@rtype: Params
"""
return Params(dictObject)
def isParams(self, p):
"""
Return if the argument object is an instance of Params
@param p: object to check
@type p: object
@return: Whether or not `p` is a Params instance
@rtype: boolean
"""
return isinstance(p, Params)
class Params:
def __init__(self, dictObject=None):
if dictObject is not None:
self.__dict__ = dictObject
def merge(self, otherParams):
self.__dict__.update(otherParams.__dict__)
def get(self, key, defaultvalue=None):
return self.__dict__.get(key, defaultvalue)
def __contains__(self, key):
return key in self.__dict__
def __getitem__(self, key):
return self.__dict__[key]
def expandParamsAsDict(self, **kwargs):
"""
adds paramsExtra, tags & params from requestContext if it exists
return as dict
for each item given as named argument check it is already in dict and if not add
e.g. args=self.expandParamsAsDict(id=1,name="test")
will return a dict with id & name and these values unless if they were set in the params already
can further use it as follows:
params.result=infomgr.getInfoWithHeaders(**args)
full example:
#############
args=params.expandParamsAsDict(maxvalues=100,id=None,start="-3d",stop=None)
args["start"]=j.data.time.getEpochAgo(args["start"])
args["stop"]=j.data.time.getEpochFuture(args["stop"])
params.result=j.apps.system.infomgr.extensions.infomgr.addInfo(**args)
"""
params = self
params2 = params.getDict()
if "paramsExtra" in params and params.paramsExtra is not None:
params2.update(params.paramsExtra)
if "requestContext" in params and params.requestContext is not None:
params2.update(params.requestContext.params)
if "tags" in params and params2["tags"] != "":
params2.update(params2["tags"].getDict())
for item in ["requestContext", "tags", "paramsExtra"]:
if item in params:
params2.pop(item)
if len(kwargs) == 0:
return params2
result = {}
for key in list(kwargs.keys()):
if key in params2:
result[key] = params2[key]
return result
def expandParams(self, **kwargs):
"""
adds paramsExtra, tags & params from requestContext if it exists
returns params but not needed because params just get modified to have all these extra arguments/params as properties
set default as params to this method e.g.
expandParams(id=10,hight=100)
"""
def getArgs(d):
r = {}
reserved = ["name", "doc", "macro",
"macrostr", "cmdstr", "page", "tags"]
for key in list(d.keys()):
if key in reserved:
r["arg_%s" % key] = d[key]
else:
r[key] = d[key]
return r
if "paramsExtra" in self and self.paramsExtra is not None:
self.setDict(getArgs(self.paramsExtra))
# self.pop("paramsExtra")
if "requestContext" in self and self.requestContext is not None:
self.setDict(getArgs(self.requestContext.params))
# self.pop("requestContext")
if "tags" in self and self.tags != "":
self.setDict(getArgs(self.tags.getDict()))
# self.pop("tags")
for argname in list(kwargs.keys()):
if argname not in self.__dict__:
self.__dict__[argname] = kwargs[argname]
return self
def getTag(self, name, default=None):
tags = getattr(self, 'tags', None)
if not tags:
return default
tags = tags.getDict()
tag = tags.get(name)
if tag and j.data.text.toStr(tag).startswith('$$'):
return default
if not tag:
return default
return tag
def pop(self, key):
if key in self:
self.__dict__.pop(key)
def has_key(self, key):
return key in self.__dict__
def getDict(self):
return self.__dict__
def setDict(self, dictObject):
self.__dict__.update(dictObject)
def extend(self, params):
"""
Update this Params object with the contents of the argument Params
object
@param params: the Params or dict object to update from
@type params: dict or Params
@raise TypeError: if the argument is not a dict or Params object
"""
if isinstance(params, Params):
d = params.__dict__
elif isinstance(params, dict):
d = params
else:
raise TypeError("Argument params is of an unknown type %s" %
type(params))
self.__dict__.update(d)
# def __dir__(self):
# return sorted(dir(super(Params, self)) + self.__dict__.keys())
def __repr__(self):
parts = ["PARAMS:"]
for key, value in list(self.__dict__.items()):
parts.append(" %s:%s" % (key, value))
return "\n".join(parts)
def __str__(self):
return self.__repr__()
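# Minimal usage sketch (not part of the original module): Params is a thin
# attribute bag around a dict; merge/get/__contains__ all operate on __dict__.
if __name__ == "__main__":
    p = Params({"id": 1, "name": "test"})
    p.merge(Params({"name": "demo", "count": 3}))
    print(p.get("count"), "count" in p, p["name"])  # -> 3 True demo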
|
the-stack_0_19484
|
import random
top_of_range = input("Type a number: ")
if top_of_range.isdigit():
top_of_range = int(top_of_range)
if top_of_range <= 0:
print("Please type a number larger than 0 next time...")
quit()
else:
print("Please type a number next time...")
quit()
random_number = random.randint(0, top_of_range)
guesses = 0
while True:
guesses += 1
user_guess = input("Make a guess: ")
    if user_guess.isdigit():
user_guess = int(user_guess)
else:
print("Please type a number next time...")
continue
if user_guess == random_number:
print("You got it!")
break
elif user_guess > random_number:
print("You were above the number!")
else:
print("You were below the number!")
print("You got it in ", guesses, "guesses")
|
the-stack_0_19485
|
"""Some basic shell utilities, used for ExternalCodeComp mostly."""
import os
import signal
import subprocess
import sys
import time
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEV_NULL = 'nul:' if sys.platform == 'win32' else '/dev/null'
class CalledProcessError(subprocess.CalledProcessError):
"""
:class:`subprocess.CalledProcessError` plus `errormsg` attribute.
Attributes
----------
errormsg : str
Error message saved for string access.
"""
def __init__(self, returncode, cmd, errormsg):
"""
Initialize.
Parameters
----------
returncode : int
Error code for this error.
cmd : str or list
If a string, then this is the command line to execute, and the
:class:`subprocess.Popen` ``shell`` argument is set True.
Otherwise, this is a list of arguments; the first is the command
to execute.
errormsg : str
Error message for this error.
"""
super(CalledProcessError, self).__init__(returncode, cmd)
self.errormsg = errormsg
def __str__(self):
"""
Return string of error message.
Returns
-------
str
Error message.
"""
return 'Command %r returned non-zero exit status %d: %s' % (self.cmd, self.returncode,
self.errormsg)
class ShellProc(subprocess.Popen):
"""
A slight modification to :class:`subprocess.Popen`.
If `args` is a string, then the ``shell`` argument is set True.
Updates a copy of ``os.environ`` with `env` and opens files for any
stream which is a :class:`str`.
Attributes
----------
_stdin_arg : str, file, or int
Save handle to make closing easier.
_stdout_arg : str, file, or int
Save handle to make closing easier.
_stderr_arg : str, file, or int
Save handle to make closing easier.
_inp : str, file, or int
Save handle to make closing easier.
_out : str, file, or int
Save handle to make closing easier.
_err : str, file, or int
Save handle to make closing easier.
"""
def __init__(self, args, stdin=None, stdout=None, stderr=None, env=None,
universal_newlines=False):
"""
Initialize.
Parameters
----------
args : str or list
If a string, then this is the command line to execute and the
:class:`subprocess.Popen` ``shell`` argument is set True.
Otherwise, this is a list of arguments; the first is the command
to execute.
stdin : str, file, or int
Specify handling of stdin stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
stdout : str, file, or int
Specify handling of stdout stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
stderr : str, file, or int
Specify handling of stderr stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
env : dict
Environment variables for the command.
universal_newlines : bool
Set to True to turn on universal newlines.
"""
environ = os.environ.copy()
if env:
environ.update(env)
self._stdin_arg = stdin
self._stdout_arg = stdout
self._stderr_arg = stderr
if isinstance(stdin, str):
self._inp = open(stdin, 'r')
else:
self._inp = stdin
if isinstance(stdout, str):
self._out = open(stdout, 'w')
else:
self._out = stdout
if isinstance(stderr, str):
self._err = open(stderr, 'w')
else:
self._err = stderr
shell = isinstance(args, str)
try:
if sys.platform == 'win32':
subprocess.Popen.__init__(self, args, stdin=self._inp,
stdout=self._out, stderr=self._err,
shell=shell, env=environ,
universal_newlines=universal_newlines)
else:
subprocess.Popen.__init__(self, args, stdin=self._inp,
stdout=self._out, stderr=self._err,
shell=shell, env=environ,
universal_newlines=universal_newlines,
# setsid to put this and any children in
# same process group so we can kill them
# all if necessary
preexec_fn=os.setsid)
except Exception:
self.close_files()
raise
def close_files(self):
"""
Close files that were implicitly opened.
"""
if isinstance(self._stdin_arg, str):
self._inp.close()
if isinstance(self._stdout_arg, str):
self._out.close()
if isinstance(self._stderr_arg, str):
self._err.close()
def terminate(self, timeout=None):
"""
Stop child process.
If `timeout` is specified, then :meth:`wait` will be called to wait for the process
to terminate.
Parameters
----------
timeout : float (seconds)
Maximum time to wait for the process to stop.
A value of zero implies an infinite maximum wait.
Returns
-------
int
Return Code
str
Error Message
"""
if sys.platform == 'win32':
subprocess.Popen("TASKKILL /F /PID {pid} /T".format(pid=self.pid))
else:
os.killpg(os.getpgid(self.pid), signal.SIGTERM)
if timeout is not None:
return self.wait(timeout=timeout)
def wait(self, poll_delay=0., timeout=0.):
"""
Poll for command completion or timeout.
Closes any files implicitly opened.
Parameters
----------
poll_delay : float (seconds)
Time to delay between polling for command completion.
A value of zero uses an internal default.
timeout : float (seconds)
Maximum time to wait for command completion.
A value of zero implies an infinite maximum wait.
Returns
-------
int
Return Code
str
Error Message
"""
return_code = None
try:
if poll_delay <= 0:
poll_delay = max(0.1, timeout / 100.)
poll_delay = min(10., poll_delay)
npolls = int(timeout / poll_delay) + 1
time.sleep(poll_delay)
return_code = self.poll()
while return_code is None:
npolls -= 1
if (timeout > 0) and (npolls < 0):
self.terminate()
break
time.sleep(poll_delay)
return_code = self.poll()
finally:
self.close_files()
# self.returncode set by self.poll().
if return_code is not None:
self.errormsg = self.error_message(return_code)
else:
self.errormsg = 'Timed out'
return (return_code, self.errormsg)
def error_message(self, return_code):
"""
Return error message for `return_code`.
The error messages are derived from the operating system definitions.
Some programs don't necessarily return exit codes conforming to these
definitions.
Parameters
----------
return_code : int
Return code from :meth:`poll`.
Returns
-------
str
Error Message string.
"""
error_msg = ''
if return_code:
if return_code > 0:
                try:
                    error_msg = os.strerror(return_code)
                except OverflowError:
                    error_msg = "Process exited with unknown return code {}".format(return_code)
elif sys.platform != 'win32':
sig = -return_code
if sig < signal.NSIG:
for item in signal.__dict__.keys():
if item.startswith('SIG'):
if getattr(signal, item) == sig:
error_msg = ': %s' % item
break
return error_msg
def call(args, stdin=None, stdout=None, stderr=None, env=None,
poll_delay=0., timeout=0.):
"""
Run command with arguments.
Parameters
----------
args : str or list
If a string, then this is the command line to execute and the
:class:`subprocess.Popen` ``shell`` argument is set True.
Otherwise, this is a list of arguments; the first is the command
to execute.
stdin : str, file, or int
Specify handling of stdin stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
stdout : str, file, or int
Specify handling of stdout stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
stderr : str, file, or int
Specify handling of stderr stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
env : dict
Environment variables for the command.
poll_delay : float (seconds)
Time to delay between polling for command completion.
A value of zero uses an internal default.
timeout : float (seconds)
Maximum time to wait for command completion.
A value of zero implies an infinite maximum wait.
Returns
-------
int
Return Code
str
Error Message
"""
process = ShellProc(args, stdin, stdout, stderr, env)
return process.wait(poll_delay, timeout)
def check_call(args, stdin=None, stdout=None, stderr=None, env=None,
poll_delay=0., timeout=0.):
"""
Run command with arguments.
Raises :class:`CalledProcessError` if process returns an error code.
Parameters
----------
args : str or list
If a string, then this is the command line to execute, and the
:class:`subprocess.Popen` ``shell`` argument is set True.
Otherwise, this is a list of arguments; the first is the command
to execute.
stdin : str, file, or int
Specify handling of stdin stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
stdout : str, file, or int
Specify handling of stdout stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
stderr : str, file, or int
Specify handling of stderr stream. If a string, a file
of that name is opened. Otherwise, see the :mod:`subprocess`
documentation.
env : dict
Environment variables for the command.
poll_delay : float (seconds)
Time to delay between polling for command completion.
A value of zero uses an internal default.
timeout : float (seconds)
Maximum time to wait for command completion.
A value of zero implies an infinite maximum wait.
"""
process = ShellProc(args, stdin, stdout, stderr, env)
return_code, error_msg = process.wait(poll_delay, timeout)
if return_code:
raise CalledProcessError(return_code, args, error_msg)
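# Illustrative usage sketch (an assumption, not part of the original module):
# run a shell command, redirect stdout to a file, poll every 0.5 s and give up
# after 10 s. The command string and file name are placeholders.
if __name__ == '__main__':  # pragma: no cover
    return_code, error_msg = call('echo hello', stdout='echo_out.txt',
                                  poll_delay=0.5, timeout=10.)
    print(return_code, error_msg)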
|
the-stack_0_19486
|
from sermar.models import Locality, Collection, Occurrence
from rest_framework import serializers
class LocalitySerializer(serializers.ModelSerializer):
"""
JSON serializer for Locality class in sermar.models.py.
This class controls data exposed through the API endpoint at
https://paleocore.org/sermar/api/localities
"""
class Meta:
model = Locality
fields = ['name', 'roost_id', 'in_park', 'owls', 'owl_species', 'pellets', 'pellet_species',
'bones', 'sample_size', 'landmark', 'roost_type', 'analysis',
'accumulating_agent', 'protected_area', 'geom']
class CollectionSerializer(serializers.ModelSerializer):
"""
JSON serializer for Collection class in sermar.models.py.
This class controls data exposed through the API endpoint at
https://paleocore.org/sermar/api/collections
"""
    # Foreign key fields need special treatment. Two approaches essentially produce the same
    # result -- a string representation of the related object -- but with different methods.
    # For the locality field we retrieve the string representation of the related Locality object
    # using the StringRelatedField class of the serializer library.
    # Alternatively (see the commented-out taxon_rank line below), a CharField with a source
    # argument can call a specific method of the related object, which yields a string
    # representation that may differ from the one generated by the model's __str__ method.
locality = serializers.StringRelatedField()
#taxon_rank = serializers.CharField(source='taxon_rank_label')
class Meta:
model = Collection
fields = ['name', 'collection_code', 'roost_id', 'locality', 'date', 'specimen_loc',
'disposition', 'spec_start', 'spec_end', 'sample_size', 'source',
'status', 'comments', 'bags', 'weight', 'accumulating_agent']
class SpecimenSerializer(serializers.ModelSerializer):
"""
JSON serializer for Specimen/Occurrence class in sermar.models.py
This class controls data accessible through API endpoint at
https://paleocore.org/sermar/api/specimens
"""
|
the-stack_0_19488
|
class Solution:
"""
@param nums: An integer array sorted in ascending order
@param target: An integer
@return: An integer
"""
def lastPosition(self, nums, target):
# write your code here
if nums is None or len(nums) == 0:
return -1
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = start + (end - start) // 2
            if nums[mid] == target:
                # keep moving right so the last occurrence is found
                start = mid
elif nums[mid] < target:
start = mid
else:
end = mid
if nums[end] == target:
return end
if nums[start] == target:
return start
return -1
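# Quick illustrative check (not part of the original solution): with duplicate
# targets the index of the last occurrence is returned.
if __name__ == "__main__":
    print(Solution().lastPosition([1, 2, 2, 4, 5, 5], 5))  # expected: 5
    print(Solution().lastPosition([1, 2, 2, 4, 5, 5], 3))  # expected: -1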
|
the-stack_0_19491
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.nn as nn
from mindspore.common.initializer import TruncatedNormal
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
"""weight initial for conv layer"""
weight = weight_variable()
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
weight_init=weight,
has_bias=False,
pad_mode="valid",
)
def fc_with_initialize(input_channels, out_channels):
"""weight initial for fc layer"""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
class LeNet5(nn.Cell):
def __init__(self, num_class=10, channel=3):
super(LeNet5, self).__init__()
self.num_class = num_class
self.conv1 = conv(channel, 6, 5)
self.conv2 = conv(6, 16, 5)
self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
self.fc2 = fc_with_initialize(120, 84)
self.fc3 = fc_with_initialize(84, self.num_class)
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.flatten = nn.Flatten()
def construct(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
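if __name__ == "__main__":
    # Minimal smoke test (an assumption, not part of the original file): build
    # the network and push one random 3x32x32 image through it; with 'valid'
    # 5x5 convolutions and two 2x2 poolings this yields the 16*5*5 flatten size
    # expected by fc1.
    import numpy as np
    from mindspore import Tensor
    net = LeNet5(num_class=10, channel=3)
    dummy = Tensor(np.random.randn(1, 3, 32, 32).astype(np.float32))
    print(net(dummy).shape)  # expected: (1, 10)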
|
the-stack_0_19493
|
from __future__ import absolute_import, print_function, division, unicode_literals
import imp
import sys
import os
import hashlib
import json
import numpy as np
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from six import exec_, iteritems
from sklearn import datasets
from sklearn import model_selection
from xcessiv import app, exceptions
def hash_file(path, block_size=65536):
"""Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block
"""
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def hash_string(string):
"""Hashes an input string using SHA256"""
    return hashlib.sha256(string.encode('UTF-8')).hexdigest()
def import_object_from_path(path, object):
"""Used to import an object from an absolute path.
This function takes an absolute path and imports it as a Python module.
It then returns the object with name `object` from the imported module.
Args:
path (string): Absolute file path of .py file to import
object (string): Name of object to extract from imported module
"""
with open(path) as f:
return import_object_from_string_code(f.read(), object)
def import_object_from_string_code(code, object):
"""Used to import an object from arbitrary passed code.
Passed in code is treated as a module and is imported and added
to `sys.modules` with its SHA256 hash as key.
Args:
code (string): Python code to import as module
object (string): Name of object to extract from imported module
"""
sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()
module = imp.new_module(sha256)
try:
exec_(code, module.__dict__)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
sys.modules[sha256] = module
try:
return getattr(module, object)
except AttributeError:
raise exceptions.UserError("{} not found in code".format(object))
def import_string_code_as_module(code):
"""Used to run arbitrary passed code as a module
Args:
code (string): Python code to import as module
Returns:
module: Python module
"""
sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()
module = imp.new_module(sha256)
try:
exec_(code, module.__dict__)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
sys.modules[sha256] = module
return module
def verify_dataset(X, y):
"""Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning shape and basic statistics of
returned data. This will also provide quick and dirty check on
capability of host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
        exceptions.UserError: `X_shape` must be of length 2 and `y_shape` must be of
            length 1. `X` must have the same number of elements as `y`
            i.e. X_shape[0] == y_shape[0]. If any of these conditions is not met,
            a UserError is raised.
"""
X_shape, y_shape = np.array(X).shape, np.array(y).shape
if len(X_shape) != 2:
raise exceptions.UserError("X must be 2-dimensional array")
if len(y_shape) != 1:
raise exceptions.UserError("y must be 1-dimensional array")
if X_shape[0] != y_shape[0]:
raise exceptions.UserError("X must have same number of elements as y")
return dict(
features_shape=X_shape,
labels_shape=y_shape
)
def is_valid_json(x):
"""Returns true if x can be JSON serialized
Args:
x: Object to test
"""
try:
json.dumps(x)
return True
except TypeError:
return False
def make_serializable(json):
"""This function ensures that the dictionary is JSON serializable. If not,
keys with non-serializable values are removed from the return value.
Args:
json (dict): Dictionary to convert to serializable
Returns:
new_dict (dict): New dictionary with non JSON serializable values removed
"""
new_dict = dict()
for key, value in iteritems(json):
if is_valid_json(value):
new_dict[key] = value
return new_dict
def get_sample_dataset(dataset_properties):
"""Returns sample dataset
Args:
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
X (array-like): Features array
y (array-like): Labels array
splits (iterator): This is an iterator that returns train test splits for
cross-validation purposes on ``X`` and ``y``.
"""
kwargs = dataset_properties.copy()
data_type = kwargs.pop('type')
if data_type == 'multiclass':
try:
X, y = datasets.make_classification(random_state=8, **kwargs)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
except Exception as e:
raise exceptions.UserError(repr(e))
elif data_type == 'iris':
X, y = datasets.load_iris(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'mnist':
X, y = datasets.load_digits(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'breast_cancer':
X, y = datasets.load_breast_cancer(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'boston':
X, y = datasets.load_boston(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
elif data_type == 'diabetes':
X, y = datasets.load_diabetes(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
else:
raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type']))
return X, y, splits
def verify_estimator_class(est, meta_feature_generator, metric_generators, dataset_properties):
"""Verify if estimator object is valid for use i.e. scikit-learn format
Verifies if an estimator is fit for use by testing for existence of methods
such as `get_params` and `set_params`. Must also be able to properly fit on
and predict a sample iris dataset.
Args:
est: Estimator object with `fit`, `predict`/`predict_proba`,
`get_params`, and `set_params` methods.
meta_feature_generator (str, unicode): Name of the method used by the estimator
to generate meta-features on a set of data.
metric_generators (dict): Dictionary of key value pairs where the key
signifies the name of the metric calculated and the value is a list
of strings, when concatenated, form Python code containing the
function used to calculate the metric from true values and the
meta-features generated.
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
performance_dict (mapping): Mapping from performance metric
name to performance metric value e.g. "Accuracy": 0.963
hyperparameters (mapping): Mapping from the estimator's hyperparameters to
their default values e.g. "n_estimators": 10
"""
X, y, splits = get_sample_dataset(dataset_properties)
if not hasattr(est, "get_params"):
raise exceptions.UserError('Estimator does not have get_params method')
if not hasattr(est, "set_params"):
raise exceptions.UserError('Estimator does not have set_params method')
if not hasattr(est, meta_feature_generator):
raise exceptions.UserError('Estimator does not have meta-feature generator'
' {}'.format(meta_feature_generator))
performance_dict = dict()
true_labels = []
preds = []
try:
for train_index, test_index in splits:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
est.fit(X_train, y_train)
true_labels.append(y_test)
preds.append(getattr(est, meta_feature_generator)(X_test))
true_labels = np.concatenate(true_labels)
preds = np.concatenate(preds, axis=0)
except Exception as e:
raise exceptions.UserError(repr(e))
if preds.shape[0] != true_labels.shape[0]:
raise exceptions.UserError('Estimator\'s meta-feature generator '
'does not produce valid shape')
for key in metric_generators:
metric_generator = import_object_from_string_code(
metric_generators[key],
'metric_generator'
)
try:
performance_dict[key] = metric_generator(true_labels, preds)
except Exception as e:
raise exceptions.UserError(repr(e))
return performance_dict, make_serializable(est.get_params())
def get_path_from_query_string(req):
"""Gets path from query string
Args:
req (flask.request): Request object from Flask
Returns:
path (str): Value of "path" parameter from query string
Raises:
exceptions.UserError: If "path" is not found in query string
"""
if req.args.get('path') is None:
raise exceptions.UserError('Path not found in query string')
return req.args.get('path')
class DBContextManager():
"""Use this context manager to automatically start and close a database session
Examples:
>>> with DBContextManager('ProjectFolder') as session:
>>> # Do stuff with session
"""
def __init__(self, path):
"""Initialize context manager
Args:
path (str, unicode): Path to project folder
"""
self.path = os.path.join(path, app.config['XCESSIV_NOTEBOOK_NAME'])
def __enter__(self):
if not os.path.exists(self.path):
raise exceptions.UserError('{} does not exist'.format(self.path))
sqlite_url = 'sqlite:///{}'.format(self.path)
engine = create_engine(sqlite_url)
self.session = Session(bind=engine)
return self.session
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, 'session'):
if exc_type is not None:
self.session.rollback()
self.session.close()
return False # re-raise any exception
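# Illustrative example (not part of the original module): import a callable
# from a string of code with import_object_from_string_code and use it.
if __name__ == '__main__':
    source = "def add(a, b):\n    return a + b\n"
    add = import_object_from_string_code(source, 'add')
    print(add(2, 3))  # -> 5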
|
the-stack_0_19494
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Visualize detections made by tensorflow.
License_info:
# ==============================================================================
# ISC License (ISC)
# Copyright 2021 Christian Doppler Laboratory for Embedded Machine Learning
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# The following is a slightly modified version from the following script
# Source:
"""
# Futures
# from __future__ import print_function
# Built-in/Generic Imports
import glob
import json
import os
import argparse
import time
import re
import pickle
# Libs
from tqdm import tqdm
from xmltodict import unparse
import xml.etree.ElementTree as ET
import sys
import numpy as np
import pandas as pd
from multiprocessing import Pool
import matplotlib
# If you get _tkinter.TclError: no display name and no $DISPLAY environment variable use
# matplotlib.use('Agg') instead
matplotlib.use('TkAgg')
from six import BytesIO
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
# Own modules
import bbox_utils as bbox
import image_utils as im
from datetime import datetime
__author__ = 'Alexander Wendt'
__copyright__ = 'Copyright 2021, Christian Doppler Laboratory for ' \
'Embedded Machine Learning'
__credits__ = ['']
__license__ = 'ISC'
__version__ = '0.1.0'
__maintainer__ = 'Alexander Wendt'
__email__ = '[email protected]'
__status__ = 'Experimental'
parser = argparse.ArgumentParser(description='Google Tensorflow Detection API 2.0 Inferrer')
# parser.add_argument("-p", '--model_path', default='pre-trained-models/efficientdet_d5_coco17_tpu-32/saved_model/',
# help='Saved model path', required=False)
parser.add_argument("-i", '--image_dir', default='images/inference',
help='Image directory', required=False)
parser.add_argument("-l", '--labelmap', default='annotations/mscoco_label_map.pbtxt.txt',
help='Labelmap path', required=False)
parser.add_argument("-d", '--detections_file', default='detections.csv',
help='TF2 Object Detection API saved inference file as csv.', required=False)
parser.add_argument("-s", '--min_score', default=0.5, type=float,
help='Max score of detection box to save the image.', required=False)
# parser.add_argument("-r", '--run_detection', default=False,
# help='Run detection or load saved detection model', required=False, type=bool)
# parser.add_argument("-x", '--xml_dir', default=None,
# help='Source of XML files. '
# 'If run_detection is True, xml files are saved here. '
# 'If run detection is False, XML files are loaded from here. '
# 'If run_detection is True and value is None, no XMLs are saved.', required=False, type=str)
# parser.add_argument("-vis", '--run_visualization', default=False,
# help='Run image visualization', required=False, type=bool)
parser.add_argument("-o", '--output_dir', default="detection_images", help='Result directory for images. ',
required=False)
# parser.add_argument("-lat", '--latency_out', default="latency.csv", help='Output path for latencies file, which is '
# 'appended or created new. ',
# required=False)
# parser.add_argument("-m", '--model_name', default="Model", type=str,
# help='Model name for collecting model data.', required=False)
# parser.add_argument("-hw", '--hardware_name', default="Hardware", type=str,
# help='Hardware name collecting statistical data.', required=False)
args = parser.parse_args()
def load_model(model_path):
'''
Load tensorflow model
:param model_path:
:return:
'''
print("Start model loading from path ", model_path)
tf.keras.backend.clear_session()
start_time = time.time()
detect_fn = tf.saved_model.load(model_path)
end_time = time.time()
elapsed_time = end_time - start_time
print('Finished. Elapsed time: {:.0f}s'.format(elapsed_time))
return detect_fn
def create_single_imagedict(source, image_name):
    '''
    Build a dict mapping the image name to its numpy array and expanded input tensor.
    '''
image_dict = {}
image_path = os.path.join(source, image_name)
image_np = bbox.load_image_into_numpy_array(image_path)
input_tensor = np.expand_dims(image_np, 0)
image_dict[image_name] = (image_np, input_tensor)
return image_dict
def detect_image(detect_fn, image_path, min_score):
'''
:param detect_fn:
:param image_dict:
:return:
'''
elapsed = []
detection_dict = dict()
# print("Start detection")
# for image_name in image_list:
# Load image
# image_path = os.path.join(image_dir, image_name)
# Convert image to array
print("Process ", image_path)
image_np = im.load_image_into_numpy_array(image_path)
# Make image tensor of it
input_tensor = np.expand_dims(image_np, 0)
# Infer
start_time = time.time()
detections = detect_fn(input_tensor)
end_time = time.time()
latency = end_time - start_time
# elapsed.append(latency)
image_filename = os.path.basename(image_path)
print("Inference time {} : {}s".format(image_filename, latency))
# Process detections
boxes = detections['detection_boxes'][0].numpy()
classes = detections['detection_classes'][0].numpy().astype(np.int32)
scores = detections['detection_scores'][0].numpy()
return image_filename, image_np, boxes, classes, scores, latency
def convert_reduced_detections_to_df(image_filename, image_np, boxes, classes, scores, min_score=0.8):
image_width = image_np.shape[1]
image_height = image_np.shape[0]
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax', 'score']
xml_df = pd.DataFrame(columns=column_name)
for i in range(scores.shape[0]):
if min_score <= scores[i]:
ymin, xmin, ymax, xmax = tuple(boxes[i].tolist())
content = [image_filename, image_width, image_height,
classes[i], xmin, ymin, xmax, ymax, scores[i]]
xml_df = xml_df.append(pd.DataFrame([content], columns=column_name))
return xml_df
def plot_image(image, title=None):
ax = plt.subplot(111)
ax.tick_params(labelbottom=False, labelleft=False)
if title:
plt.title(title, fontsize=40)
plt.imshow(image)
plt.axis('off')
plt.tight_layout()
return plt.gcf()
def infer_images(detections_file, image_dir, labelmap, min_score, output_dir):
    '''
    Load saved detections from a csv file, draw them on the corresponding images and
    save the annotated images to output_dir.
    '''
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
print("Created ", output_dir)
# Load inference images
print("Loading images from ", image_dir)
image_list = im.get_images_name(image_dir)
# Load label path
print("Loading labelmap from ", labelmap)
category_index = label_map_util.create_category_index_from_labelmap(os.path.abspath(labelmap))
# Load stored XML Files
print("Loading saved Detections files from ealier inferences from ", detections_file)
data = pd.read_csv(detections_file, sep=';').set_index('filename')
# Define scores and latencies
# detection_scores = pd.DataFrame(columns=['filename', 'width', 'height', 'class', 'xmin',
#
# 'ymin', 'xmax', 'ymax', 'score'])
# Process each image
for image_name in image_list:
print("Load xml data for ", image_name)
if isinstance(data.loc[image_name], pd.Series):
subdata = pd.DataFrame([data.loc[image_name]])
else:
subdata = data.loc[image_name]
#classes = np.array(data.loc[image_name].shape[0])
boxes = np.zeros([subdata.shape[0], 4])
classes = np.zeros([subdata.shape[0]]).astype('int')
if 'score' in subdata.columns and subdata['score'][0] is not None:
scores = np.zeros([subdata.shape[0]])
else:
scores = None
for i in range(subdata.shape[0]):
boxes[i][0] = subdata['ymin'][i]
boxes[i][1] = subdata['xmin'][i]
boxes[i][2] = subdata['ymax'][i]
boxes[i][3] = subdata['xmax'][i]
if 'score' in data.columns and subdata['score'][i] is not None:
scores[i] = subdata['score'][i]
#class_index = [category_index[j + 1].get('id') for j in range(len(category_index)) if
# category_index[j + 1].get('name') == subdata['class'][i]][0]
classes[i] = subdata['class'][i]
#if classes.size == 1: # If only one detection
# classes = classes.reshape(-1)
# np.vstack(classes, 1)
#scores = np.array(data.loc[image_name]['score'])
#if scores.size == 1: # If only one detection
# scores = scores.reshape(-1)
# np.vstack(scores, 0)
#boxes = np.array(data.loc[image_name][['ymin', 'xmin', 'ymax', 'xmax']])
#if boxes.size==4:
# boxes = boxes.reshape(1, -1)
# np.vstack(boxes, [0, 0, 0, 0])
image_filename = image_name
image_np = im.load_image_into_numpy_array(os.path.join(image_dir, image_name))
# If output directory is provided, visualize and save image
image = bbox.visualize_image(image_name, image_np, scores, boxes, classes, category_index, min_score=min_score)
plt.gcf()
new_image_path = os.path.join(output_dir, os.path.splitext(image_filename)[0] + "_det" + ".png")
print("Save image {} to {}".format(image_filename, new_image_path))
fig = plot_image(image)
plt.savefig(new_image_path)
if __name__ == "__main__":
infer_images(args.detections_file, args.image_dir, args.labelmap, args.min_score, args.output_dir)
print("=== Program end ===")
|
the-stack_0_19496
|
result = 0
hp = "hp"
damage = "damage"
armour = "armour"
cost = "cost"
boss = {
hp: 103,
damage: 9,
armour: 2
}
def calcDamage(d, a):
return max(d-a, 1)
player = {
hp: 100
}
weapons = [
{cost: 8, damage: 4, armour: 0},
{cost: 10, damage: 5, armour: 0},
{cost: 25, damage: 6, armour: 0},
{cost: 40, damage: 7, armour: 0},
{cost: 74, damage: 8, armour: 0}
]
mails = [
{cost: 0, damage: 0, armour: 0},
{cost: 13, damage: 0, armour: 1},
{cost: 31, damage: 0, armour: 2},
{cost: 53, damage: 0, armour: 3},
{cost: 75, damage: 0, armour: 4},
{cost: 102, damage: 0, armour: 5}
]
rings = [
{cost: 0, damage: 0, armour: 0, "id": 0},
{cost: 20, damage: 0, armour: 1},
{cost: 25, damage: 1, armour: 0},
{cost: 40, damage: 0, armour: 2},
{cost: 50, damage: 2, armour: 0},
{cost: 80, damage: 0, armour: 3},
{cost: 100, damage: 3, armour: 0}
]
stats = []
for w in weapons:
for m in mails:
        for (i, r1) in enumerate(rings[:-1]):
            # the first ring entry is a dummy "no ring"; allow it to fill both
            # slots, but never pair the same real ring with itself
            j = i + 1 if i else i
for r2 in rings[j:]:
stats.append({
p: sum([o[p] for o in [w,m,r1,r2]]) for p in [cost, damage, armour]
})
stats.sort(reverse=True, key=lambda x: x[cost])
def combat(stat):
bossHP = boss[hp]
playerHP = player[hp]
turn = 0
while bossHP > 0 and playerHP > 0:
if turn == 0:
bossHP -= calcDamage(stat[damage], boss[armour])
else:
playerHP -= calcDamage(boss[damage], stat[armour])
turn = 1 - turn
return bossHP <= 0
for stat in stats:
victory = combat(stat)
if not victory:
result = stat[cost]
break
with open("output2.txt", "w") as output:
output.write(str(result))
print(str(result))
|
the-stack_0_19497
|
# -*- coding: utf-8 -*-
'''
Created on 2015/01/25
@author: samuraitaiga
'''
import logging
import os
from mt4 import get_mt4
from mt4 import DEFAULT_MT4_NAME
from __builtin__ import str
class BackTest(object):
"""
Attributes:
ea_name(string): ea name
param(dict): ea parameter
symbol(string): currency symbol. e.g.: USDJPY
from_date(datetime.datetime): backtest from date
to_date(datetime.datetime): backtest to date
model(int): backtest model
0: Every tick
1: Control points
2: Open prices only
spread(int): spread
optimization(bool): optimization flag. optimization is enabled if True
replace_report(bool): replace report flag. replace report is enabled if True
"""
    def __init__(self, ea_name, param, symbol, period, from_date, to_date, model=0, spread=5, replace_report=True):
self.ea_name = ea_name
self.param = param
self.symbol = symbol
self.period = period
self.from_date = from_date
self.to_date = to_date
self.model = model
self.spread = spread
        self.replace_report = replace_report
def _prepare(self, alias=DEFAULT_MT4_NAME):
"""
Notes:
create backtest config file and parameter file
"""
self._create_conf(alias=alias)
self._create_param(alias=alias)
def _create_conf(self, alias=DEFAULT_MT4_NAME):
"""
Notes:
create config file(.conf) which is used parameter of terminal.exe
in %APPDATA%\\MetaQuotes\\Terminal\\<UUID>\\tester
file contents goes to
TestExpert=SampleEA
TestExpertParameters=SampleEA.set
TestSymbol=USDJPY
TestPeriod=M5
TestModel=0
TestSpread=5
TestOptimization=true
TestDateEnable=true
TestFromDate=2014.09.01
TestToDate=2015.01.05
TestReport=SampleEA
TestReplaceReport=false
TestShutdownTerminal=true
"""
mt4 = get_mt4(alias=alias)
conf_file = os.path.join(mt4.appdata_path, 'tester', '%s.conf' % self.ea_name)
# shutdown_terminal must be True.
# If false, popen don't end and backtest report analyze don't start.
shutdown_terminal = True
with open(conf_file, 'w') as fp:
fp.write('TestExpert=%s\n' % self.ea_name)
fp.write('TestExpertParameters=%s.set\n' % self.ea_name)
fp.write('TestSymbol=%s\n' % self.symbol)
fp.write('TestModel=%s\n' % self.model)
fp.write('TestPeriod=%s\n' % self.period)
fp.write('TestSpread=%s\n' % self.spread)
fp.write('TestOptimization=%s\n' % str(self.optimization).lower())
fp.write('TestDateEnable=true\n')
fp.write('TestFromDate=%s\n' % self.from_date.strftime('%Y.%m.%d'))
fp.write('TestToDate=%s\n' % self.to_date.strftime('%Y.%m.%d'))
fp.write('TestReport=%s\n' % self.ea_name)
fp.write('TestReplaceReport=%s\n' % str(self.replace_report).lower())
fp.write('TestShutdownTerminal=%s\n' % str(shutdown_terminal).lower())
def _create_param(self, alias=DEFAULT_MT4_NAME):
"""
Notes:
create ea parameter file(.set) in %APPDATA%\\MetaQuotes\\Terminal\\<UUID>\\tester
Args:
ea_name(string): ea name
"""
mt4 = get_mt4(alias=alias)
param_file = os.path.join(mt4.appdata_path, 'tester', '%s.set' % self.ea_name)
with open(param_file, 'w') as fp:
for k in self.param:
values = self.param[k].copy()
value = values.pop('value')
fp.write('%s=%s\n' % (k, value))
if self.optimization:
if values.has_key('max') and values.has_key('interval'):
fp.write('%s,F=1\n' % k)
fp.write('%s,1=%s\n' % (k, value))
interval = values.pop('interval')
fp.write('%s,2=%s\n' % (k,interval))
maximum = values.pop('max')
fp.write('%s,3=%s\n' % (k,maximum))
else:
# if this value won't be optimized, write unused dummy data for same format.
fp.write('%s,F=0\n' % k)
fp.write('%s,1=0\n' % k)
fp.write('%s,2=0\n' % k)
fp.write('%s,3=0\n' % k)
else:
if type(value) == str:
# this ea arg is string. then don't write F,1,2,3 section in config
pass
else:
# write unused dummy data for same format.
fp.write('%s,F=0\n' % k)
fp.write('%s,1=0\n' % k)
fp.write('%s,2=0\n' % k)
fp.write('%s,3=0\n' % k)
def _get_conf_abs_path(self, alias=DEFAULT_MT4_NAME):
mt4 = get_mt4(alias=alias)
conf_file = os.path.join(mt4.appdata_path, 'tester', '%s.conf' % self.ea_name)
return conf_file
def run(self, alias=DEFAULT_MT4_NAME):
"""
Notes:
run backtest
"""
from report import BacktestReport
self.optimization = False
self._prepare(alias=alias)
bt_conf = self._get_conf_abs_path(alias=alias)
mt4 = get_mt4(alias=alias)
mt4.run(self.ea_name, conf=bt_conf)
ret = BacktestReport(self)
return ret
def optimize(self, alias=DEFAULT_MT4_NAME):
"""
"""
from report import OptimizationReport
self.optimization = True
self._prepare(alias=alias)
bt_conf = self._get_conf_abs_path(alias=alias)
mt4 = get_mt4(alias=alias)
mt4.run(self.ea_name, conf=bt_conf)
ret = OptimizationReport(self)
return ret
def load_from_file(dsl_file):
pass
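# Illustrative usage sketch (an assumption, not part of the original module):
# the EA name, parameter dict, symbol and dates below are placeholders taken
# from the example values shown in the docstrings above.
if __name__ == '__main__':
    from datetime import datetime
    param = {'lots': {'value': 0.1}}
    backtest = BackTest('SampleEA', param, 'USDJPY', 'M5',
                        datetime(2014, 9, 1), datetime(2015, 1, 5))
    report = backtest.run()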
|
the-stack_0_19503
|
import json
from datetime import datetime
from inflection import tableize
import inspect
from ..query import QueryBuilder
from ..collection import Collection
from ..connections import ConnectionFactory, ConnectionResolver
from ..query.grammars import MySQLGrammar
from ..observers import ObservesEvents
from ..scopes import BaseScope, SoftDeleteScope, SoftDeletesMixin, TimeStampsMixin
"""This is a magic class that will help using models like User.first() instead of having to instatiate a class like
User().first()
"""
class ModelMeta(type):
def __getattr__(self, attribute, *args, **kwargs):
"""This method is called between a Model and accessing a property. This is a quick and easy
way to instantiate a class before the first method is called. This is to avoid needing
to do this:
User().where(..)
and instead, with this class inherited as a meta class, we can do this:
User.where(...)
This class (potentially magically) instantiates the class even though we really didn't instantiate it.
Args:
attribute (string): The name of the attribute
Returns:
Model|mixed: An instantiated model's attribute
"""
instantiated = self()
return getattr(instantiated, attribute)
class BoolCast:
"""Casts a value to a boolean"""
def get(self, value):
return bool(value)
class JsonCast:
"""Casts a value to JSON"""
def get(self, value):
return json.dumps(value)
class Model(TimeStampsMixin, ObservesEvents, metaclass=ModelMeta):
"""The ORM Model class
Base Classes:
TimeStampsMixin (TimeStampsMixin): Adds scopes to add timestamps when something is inserted
metaclass (ModelMeta, optional): Helps instantiate a class when it hasn't been instantiated. Defaults to ModelMeta.
"""
__fillable__ = ["*"]
__guarded__ = ["*"]
__dry__ = False
__table__ = None
__connection__ = "default"
__resolved_connection__ = None
__observers__ = []
_booted = False
_scopes = {}
__primary_key__ = "id"
__casts__ = {}
__dates__ = []
__hidden__ = []
__timestamps__ = True
__with__ = ()
date_created_at = "created_at"
date_updated_at = "updated_at"
"""Pass through will pass any method calls to the model directly through to the query builder.
Anytime one of these methods are called on the model it will actually be called on the query builder class.
"""
__passthrough__ = [
"all",
"first",
"last",
"find_or_fail",
"first_or_fail",
"find_or_404",
"get",
"has",
"delete",
"limit",
"order_by",
"select",
"set_global_scope",
"simple_paginate",
"paginate",
"where_has",
"chunk",
"where_in",
"where",
"with_",
"update",
]
__cast_map__ = {}
__internal_cast_map__ = {"bool": BoolCast, "json": JsonCast}
def __init__(self):
self.__attributes__ = {}
self.__dirty_attributes__ = {}
if not hasattr(self, "__appends__"):
self.__appends__ = []
self._relationships = {}
self._global_scopes = {}
self.get_builder()
self.boot()
@classmethod
def get_primary_key(self):
"""Gets the primary key column
Returns:
mixed
"""
return self.__primary_key__
def get_primary_key_value(self):
"""Gets the primary key value.
Raises:
AttributeError: Raises attribute error if the model does not have an
attribute with the primary key.
Returns:
str|int
"""
try:
return getattr(self, self.get_primary_key())
except AttributeError:
name = self.__class__.__name__
raise AttributeError(
f"class '{name}' has no attribute {self.get_primary_key()}. Did you set the primary key correctly on the model using the __primary_key__ attribute?"
)
def query(self):
return self.builder
def get_builder(self):
from config.database import DB
connection = self.__connection__
if connection == "default":
connection = DB.get_connection_details().get("default")
connection_driver = (
DB.get_connection_details().get(connection, {}).get("driver")
)
self.__resolved_connection__ = DB.connection_factory.make(connection_driver)
self.builder = QueryBuilder(
# grammar=self.__resolved_connection__.get_default_query_grammar(),
connection=self.__connection__,
# connection_class=self.__resolved_connection__,
table=self.get_table_name(),
connection_details=self.get_connection_details(),
model=self,
# connection_driver=connection_driver,
scopes=self._scopes,
dry=self.__dry__,
)
return self.builder
def get_connection_details(self):
from config.database import ConnectionResolver
return ConnectionResolver().get_connection_details()
def boot(self):
if not self._booted:
self.observe_events(self, "booting")
for base_class in inspect.getmro(self.__class__):
class_name = base_class.__name__
if class_name.endswith("Mixin"):
getattr(base_class(), "boot_" + class_name)(self.builder)
self._booted = True
self.observe_events(self, "booted")
@classmethod
def get_table_name(cls):
"""Gets the table name.
Returns:
str
"""
return cls.__table__ or tableize(cls.__name__)
@classmethod
def find(cls, record_id, query=False):
"""Finds a row by the primary key ID.
Arguments:
record_id {int} -- The ID of the primary key to fetch.
Returns:
Model
"""
if isinstance(record_id, (list, tuple)):
builder = cls().where_in(cls.get_primary_key(), record_id)
else:
builder = cls().where(cls.get_primary_key(), record_id)
if query:
return builder.to_sql()
else:
return builder.get()
def first_or_new(self):
pass
def first_or_create(self):
pass
def is_loaded(self):
return bool(self.__attributes__)
def is_created(self):
return self.get_primary_key() in self.__attributes__
def add_relation(self, relations):
self._relationships.update(relations)
return self
@classmethod
def hydrate(cls, result, relations=None):
"""Takes a result and loads it into a model
Args:
result ([type]): [description]
relations (dict, optional): [description]. Defaults to {}.
Returns:
[type]: [description]
"""
relations = relations or {}
if result is None:
return None
if isinstance(result, (list, tuple)):
response = []
for element in result:
response.append(cls.hydrate(element))
return cls.new_collection(response)
elif isinstance(result, dict):
model = cls()
dic = {}
for key, value in result.items():
if key in model.get_dates() and value:
value = model.get_new_date(value)
dic.update({key: value})
model.observe_events(model, "hydrating")
model.__attributes__.update(dic or {})
model.add_relation(relations)
model.observe_events(model, "hydrated")
return model
elif hasattr(result, "serialize"):
model = cls()
model.__attributes__.update(result.serialize())
return model
else:
model = cls()
model.observe_events(model, "hydrating")
model.__attributes__.update(dict(result))
model.observe_events(model, "hydrated")
return model
def fill(self, attributes):
self.__attributes__.update(attributes)
return self
@classmethod
def new_collection(cls, data):
"""Takes a result and puts it into a new collection.
        This is designed to be overridden by the user.
Args:
data (list|dict): Could be any data type but will be loaded directly into a collection.
Returns:
Collection
"""
return Collection(data)
@classmethod
def create(cls, dictionary=None, query=False, **kwargs):
"""Creates new records based off of a dictionary as well as data set on the model
such as fillable values.
Args:
dictionary (dict, optional): [description]. Defaults to {}.
query (bool, optional): [description]. Defaults to False.
Returns:
self: A hydrated version of a model
"""
if not dictionary:
dictionary = kwargs
if cls.__fillable__ != ["*"]:
dictionary = {x: dictionary[x] for x in cls.__fillable__}
if cls.__guarded__ != ["*"]:
for x in cls.__guarded__:
dictionary.pop(x)
if query:
return cls.builder.create(dictionary, query=True).to_sql()
return cls.builder.create(dictionary)
def serialize(self, serialized_dictionary=None):
"""Takes the data as a model and converts it into a dictionary
Args:
serialized_dictionary (dict, optional): A dictionary to start from.
                If not specified then the model's attributes will be used. Defaults to {}.
Returns:
dict
"""
if not serialized_dictionary:
serialized_dictionary = self.__attributes__
for key in self.__hidden__:
if key in serialized_dictionary:
serialized_dictionary.pop(key)
for date_column in self.get_dates():
if (
date_column in serialized_dictionary
and serialized_dictionary[date_column]
):
serialized_dictionary[date_column] = self.get_new_serialized_date(
serialized_dictionary[date_column]
)
serialized_dictionary.update(self.__dirty_attributes__)
# The builder is inside the attributes but should not be serialized
if "builder" in serialized_dictionary:
serialized_dictionary.pop("builder")
# Serialize relationships as well
serialized_dictionary.update(self.relations_to_dict())
for append in self.__appends__:
serialized_dictionary.update({append: getattr(self, append)})
for key, value in serialized_dictionary.items():
if isinstance(value, datetime):
value = self.get_new_serialized_date(value)
serialized_dictionary.update({key: value})
return serialized_dictionary
def to_json(self):
"""Converts a model to JSON
Returns:
string
"""
return json.dumps(self.serialize())
def update_or_create(self):
pass
def relations_to_dict(self):
"""Converts a models relationships to a dictionary
Returns:
[type]: [description]
"""
new_dic = {}
for key, value in self._relationships.items():
if value == {}:
new_dic.update({key: {}})
else:
if value is None:
new_dic.update({key: {}})
continue
new_dic.update({key: value.serialize()})
return new_dic
def touch(self, date=None, query=True):
"""Updates the current timestamps on the model"""
if not self.__timestamps__:
return False
self._update_timestamps(date=date)
return self.save(query=query)
def _update_timestamps(self, date=None):
"""Sets the updated at date to the current time or a specified date
Args:
date (datetime.datetime, optional): a date. If none is specified then it will use the current date Defaults to None.
"""
self.updated_at = date or self._current_timestamp()
def _current_timestamp(self):
return datetime.now()
@staticmethod
def set_connection_resolver(self):
pass
def __getattr__(self, attribute):
"""Magic method that is called when an attribute does not exist on the model.
Args:
attribute (string): the name of the attribute being accessed or called.
Returns:
mixed: Could be anything that a method can return.
"""
if attribute in self.__passthrough__:
def method(*args, **kwargs):
return getattr(self.builder, attribute)(*args, **kwargs)
return method
new_name_accessor = "get_" + attribute + "_attribute"
if (new_name_accessor) in self.__class__.__dict__:
return self.__class__.__dict__.get(new_name_accessor)(self)
if (
"__attributes__" in self.__dict__
and attribute in self.__dict__["__attributes__"]
):
if attribute in self.get_dates():
return (
self.get_new_date(self.get_value(attribute))
if self.get_value(attribute)
else None
)
return self.get_value(attribute)
if (
"__dirty_attributes__" in self.__dict__
and attribute in self.__dict__["__dirty_attributes__"]
):
return self.get_dirty_value(attribute)
if attribute in self.__dict__.get("_relationships", {}):
return self.__dict__["_relationships"][attribute]
if attribute not in self.__dict__:
name = self.__class__.__name__
raise AttributeError(f"class model '{name}' has no attribute {attribute}")
return None
def __setattr__(self, attribute, value):
if hasattr(self, "set_" + attribute + "_attribute"):
method = getattr(self, "set_" + attribute + "_attribute")
value = method(value)
try:
if not attribute.startswith("_"):
self.__dict__["__dirty_attributes__"].update({attribute: value})
else:
self.__dict__[attribute] = value
except KeyError:
pass
def get_raw_attribute(self, attribute):
"""Gets an attribute without having to call the models magic methods. Gets around infinite recursion loops.
Args:
attribute (string): The attribute to fetch
Returns:
mixed: Any value an attribute can be.
"""
return self.__attributes__.get(attribute)
def save(self, query=False):
builder = self.builder
self.__dirty_attributes__.pop("builder")
self.observe_events(self, "saving")
if not query:
result = builder.update(self.__dirty_attributes__)
self.observe_events(self, "saved")
return result
return builder.update(self.__dirty_attributes__, dry=True).to_sql()
def get_value(self, attribute):
if attribute in self.__casts__:
return self._cast_attribute(attribute)
return self.__attributes__[attribute]
def get_dirty_value(self, attribute):
if attribute in self.__casts__:
return self._cast_attribute(attribute)
return self.__dirty_attributes__[attribute]
def all_attributes(self):
attributes = self.__attributes__
attributes.update(self.get_dirty_attributes())
for key, value in attributes.items():
if key in self.__casts__:
attributes.update({key: self._cast_attribute(key)})
return attributes
def get_dirty_attributes(self):
if "builder" in self.__dirty_attributes__:
self.__dirty_attributes__.pop("builder")
return self.__dirty_attributes__ or {}
def get_cast_map(self):
cast_map = self.__internal_cast_map__
cast_map.update(self.__cast_map__)
return cast_map
def _cast_attribute(self, attribute):
cast_method = self.__casts__[attribute]
cast_map = self.get_cast_map()
if isinstance(cast_method, str):
return cast_map[cast_method]().get(attribute)
return cast_method(attribute)
@classmethod
def load(cls, *loads):
cls.boot()
cls._loads += loads
return cls.builder
def __getitem__(self, attribute):
return getattr(self, attribute)
def get_dates(self):
"""
Get the attributes that should be converted to dates.
:rtype: list
"""
defaults = [self.date_created_at, self.date_updated_at]
return self.__dates__ + defaults
def get_new_date(self, datetime=None):
"""
Get the attributes that should be converted to dates.
:rtype: list
"""
import pendulum
if not datetime:
return pendulum.now()
if isinstance(datetime, str):
return pendulum.parse(datetime)
return pendulum.instance(datetime)
def get_new_serialized_date(self, datetime):
"""
Get the attributes that should be converted to dates.
:rtype: list
"""
return self.get_new_date(datetime).isoformat()
def set_appends(self, appends):
"""
Get the attributes that should be converted to dates.
:rtype: list
"""
self.__appends__ += appends
return self
def save_many(self, relation, relating_records):
related = getattr(self.__class__, relation)
for related_record in relating_records:
setattr(
related_record,
related.foreign_key,
self.__attributes__[related.local_key],
)
if not related_record.is_created():
related_record.create(related_record.all_attributes())
else:
related_record.save()
def related(self, relation):
related = getattr(self.__class__, relation)
return related.where(related.foreign_key, self.get_primary_key_value())
def get_related(self, relation):
related = getattr(self.__class__, relation)
return related
def attach(self, relation, related_record):
related = getattr(self.__class__, relation)
setattr(
related_record, related.foreign_key, self.__attributes__[related.local_key]
)
if not related_record.is_created():
related_record.create(related_record.all_attributes())
else:
related_record.save()
@classmethod
def on(cls, connection):
cls.__connection__ = connection
return cls
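# Illustrative sketch (an assumption, not part of the original module): a
# typical model subclass only declares table-level configuration and inherits
# all query-building behaviour from this base class.
class ExampleUser(Model):
    """Hypothetical model used purely for illustration."""
    __table__ = "users"
    __fillable__ = ["name", "email"]
    __hidden__ = ["password"]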
|
the-stack_0_19505
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.optimize as opt
import cmath
import sys
import warnings
from statistics import jackknifeMean
from statistics import jackknifeCreutz
from statistics import autocorrTime
warnings.simplefilter(action='ignore', category=FutureWarning)
L = int(sys.argv[1]) # lattice size
print("L: %d" % (L))
Lz = int(sys.argv[2])
print("Lz: %d" % (Lz))
beta = float(sys.argv[3])
print("beta: %f" % (beta))
eps3 = float(sys.argv[4])
print("eps3: %f" % (eps3))
m_fermion = float(sys.argv[5])
print("m_fermion: %f" % (m_fermion))
m_sign = "p"
if m_fermion < 0.0:
m_sign = "m"
if (Lz == 1):
id = "%d_%d_%s%d" % (L, round(beta * 1000), m_sign, round(abs(m_fermion) * 1000))
path = "../jobs/2D/%s" % id
else:
id = "%d_%d_%d_%d_%s%d" % (L, Lz, round(beta * 1000), round(eps3 * 1000), \
m_sign, round(abs(m_fermion) * 1000))
path = "../jobs/3D/%s" % id
print("id: %s" % (id))
first_id = int(sys.argv[6])
print("first_id: %d" % (first_id))
last_id = int(sys.argv[7])
print("last_id: %d" % (last_id))
id_inc = int(sys.argv[8])
print("id_inc: %d" % (id_inc))
t_max = float(sys.argv[9])
print("t_max: %f" % (t_max))
dt = float(sys.argv[10])
print("dt: %f" % (dt))
# r_min = int(sys.argv[11]) - 2
# print("r_min: %d" % (r_min + 2))
#
# r_max = int(sys.argv[12]) - 2
# print("r_max: %d" % (r_max + 2))
# maximum wilson loop size
loopMax = L // 2  # integer division so loop sizes stay ints
# number of wilson loop values
nLoops = loopMax * 3 - 2
# number of trajectories
n_traj = (last_id - first_id) // id_inc + 1
print("n_traj = %d" % (n_traj))
# number of wilson flow values
n_wf = int(t_max / dt + 1)
# first index is wilson flow time
# second index is wilson loop size
# third index is trajectory id
wilsonLoops = np.zeros((n_wf, nLoops, n_traj))
def readWilsonLoops(file, n):
lines = file.readlines()
for (l, line) in enumerate(lines):
tokens = line.split()
for (t, token) in enumerate(tokens):
if (t == 0):
continue
wilsonLoops[l, t - 1, n] = float(token)
return
print("\nReading data files...")
nWilsonLoops = 0; # number of field strength values
for i in range(first_id, last_id + 1, id_inc):
file = open("%s/wf/wilson_loops.%d" % (path, i), "r")
readWilsonLoops(file, nWilsonLoops)
nWilsonLoops += 1
file.close()
# write wilson loop statistics to data file
file = open("%s/wilson_loops.dat" % (path), "w")
for i in range(0, n_wf):
file.write("%.3f" % (i * dt))
for w in range(0, nLoops):
value = jackknifeMean(wilsonLoops[i,w,:])
tau = autocorrTime(wilsonLoops[i,w,:])
file.write(" %.12e %.12e %.12e" % (value[0], value[1], tau))
file.write("\n")
file.close()
print("\nCalculating Creutz Ratios (smeared/raw)...")
# wilson loops are in this order
# (1,1), (1,2)
# (2,1), (2,2), (2,3)
# (3,2), (3,3), (3,4) ...
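# For example, under the ordering above the ratio at separation index r uses
# the four loops at flat indices 3r, 3r+1, 3r+2, 3r+3; for r = 1 these are
# W(2,2), W(2,3), W(3,2), W(3,3).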
# write creutz ratios to file
file = open("%s/creutz.dat" % (path), "w")
for i in range(0, n_wf):
file.write("%.3f" % (i * dt))
for r in range(0, loopMax - 1):
# measure smeared (with wilson flow)
w00 = wilsonLoops[i, r * 3]
w01 = wilsonLoops[i, r * 3 + 1]
w10 = wilsonLoops[i, r * 3 + 2]
w11 = wilsonLoops[i, r * 3 + 3]
value = jackknifeCreutz(w00, w01, w10, w11)
file.write(" %.12e %.12e" % (value[0], value[1]))
file.write("\n")
file.close()
|
the-stack_0_19506
|
from emoji import emojize
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode
from foodshare.bdd.database_communication import get_user_from_chat_id
from .digit_list import digit_buttons
# Hour keyboard
user_buttons = digit_buttons.copy()
user_buttons.append(
[
InlineKeyboardButton(emojize(':keycap_0: '), callback_data=str(0)),
InlineKeyboardButton(
emojize(':left_arrow:'), callback_data='left_arrow'
),
]
)
user_buttons.append(
[
InlineKeyboardButton(
emojize(':reverse_button:'), callback_data='backward_page'
),
InlineKeyboardButton(
emojize(':play_button:'), callback_data='forward_page'
),
]
)
user_buttons.append(
[InlineKeyboardButton(emojize('Back'), callback_data='back')]
)
user_keyboard = InlineKeyboardMarkup(user_buttons)
confirm_buttons = user_buttons.copy()
confirm_buttons.append(
[InlineKeyboardButton('Confirm', callback_data='confirm')]
)
confirm_keyboard = InlineKeyboardMarkup(confirm_buttons)
pos = [0, 5, 11, 17]
def process_user_selection(update, context):
ud = context.user_data
money = ud['money_or_meal']
callback_data = update.callback_query.data
chat_id = update.effective_chat.id
user = get_user_from_chat_id(chat_id)
members = [
member for member in user.community.members if member is not user
]
# initialize some variables in `context.user_data` when the keyboard is
# first called
if '_number' not in ud:
number = ''
ud['_number'] = ''
else:
number = ud.get('_number')
if '_page' not in ud:
page = 0
ud['_page'] = 0
else:
page = ud.get('_page')
# process the keyboard callback data
if callback_data == 'forward_page':
page += 1
elif callback_data == 'backward_page':
page -= 1
elif callback_data == 'back':
ud.pop('_number')
ud.pop('_page')
return False, True, -1
elif callback_data == 'confirm':
user_chosed = members[int(number) - 1]
ud.pop('_number')
ud.pop('_page')
return True, False, user_chosed
else:
number = callback_data
# store number for next callback
ud['_number'] = number
ud['_page'] = page
if number == '':
message = 'Please select a user to make the transaction with'
else:
user_chosed = members[int(number) - 1]
        message = (
            f'You chose *{user_chosed.name}.* \n'
            f'Press confirm to continue. \n'
        )
# choose keyboard with a confirm button if a valid number was selected
keyboard = construct_keyboard(user, money, number != '', page)
update.callback_query.edit_message_text(
text=message, reply_markup=keyboard, parse_mode=ParseMode.MARKDOWN,
)
return False, False, -1
def construct_keyboard(user, money, confirm, page=0, number_per_page=5):
members = [
member for member in user.community.members if member is not user
]
number_of_pages = len(members) // number_per_page + (
len(members) % number_per_page > 0
)
page = page % (number_of_pages)
members_to_show = members[
number_per_page * page : number_per_page * page + number_per_page
]
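    # Worked example (hypothetical numbers): with 7 members and
    # number_per_page=5 there are 2 pages; page 0 shows members[0:5] and
    # page 1 shows members[5:7].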
    i = number_per_page * page + 1  # 1-based index of the first member shown on this page
buttons = []
for j, member in enumerate(members_to_show):
balance = (
str(member.money_balance) + '€'
if money
else str(member.meal_balance) + ' meals'
)
        member_message = f'{member.name}, balance : {balance}'
buttons.append(
[InlineKeyboardButton(member_message, callback_data=str(i + j))]
)
buttons.append(
[
InlineKeyboardButton(
emojize(':reverse_button:'), callback_data='backward_page'
),
InlineKeyboardButton(emojize('Back'), callback_data='back'),
InlineKeyboardButton(
emojize(':play_button:'), callback_data='forward_page'
),
]
)
if confirm:
buttons.append(
[InlineKeyboardButton('Confirm', callback_data='confirm')]
)
return InlineKeyboardMarkup(buttons)
|
the-stack_0_19509
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import math
import warnings
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from tlz import partition_all
import dask
import dask.dataframe.optimize
from dask import dataframe as dd
from dask.base import normalize_token, tokenize
from dask.dataframe.core import (
Scalar,
finalize,
handle_out,
make_meta as dask_make_meta,
map_partitions,
)
from dask.dataframe.utils import raise_on_meta_error
from dask.highlevelgraph import HighLevelGraph
from dask.utils import M, OperatorMethodMixin, apply, derived_from, funcname
import cudf
from cudf import _lib as libcudf
from dask_cudf import sorting
from dask_cudf.accessors import ListMethods
DASK_VERSION = LooseVersion(dask.__version__)
class _Frame(dd.core._Frame, OperatorMethodMixin):
""" Superclass for DataFrame and Series
Parameters
----------
dsk : dict
The dask graph to compute this DataFrame
name : str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta : cudf.DataFrame, cudf.Series, or cudf.Index
An empty cudf object with names, dtypes, and indices matching the
expected output.
divisions : tuple of index values
Values along which we partition our blocks on the index
"""
__dask_scheduler__ = staticmethod(dask.get)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self._name, self._meta, self.divisions)
def __init__(self, dsk, name, meta, divisions):
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = dask_make_meta(meta)
if not isinstance(meta, self._partition_type):
raise TypeError(
f"Expected meta to specify type "
f"{self._partition_type.__name__}, got type "
f"{type(meta).__name__}"
)
self._meta = meta
self.divisions = tuple(divisions)
def __getstate__(self):
return (self.dask, self._name, self._meta, self.divisions)
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def __repr__(self):
s = "<dask_cudf.%s | %d tasks | %d npartitions>"
return s % (type(self).__name__, len(self.dask), self.npartitions)
def to_dask_dataframe(self, **kwargs):
"""Create a dask.dataframe object from a dask_cudf object"""
nullable_pd_dtype = kwargs.get("nullable_pd_dtype", False)
return self.map_partitions(
M.to_pandas, nullable_pd_dtype=nullable_pd_dtype
)
concat = dd.concat
normalize_token.register(_Frame, lambda a: a._name)
class DataFrame(_Frame, dd.core.DataFrame):
_partition_type = cudf.DataFrame
def _assign_column(self, k, v):
def assigner(df, k, v):
out = df.copy()
out[k] = v
return out
meta = assigner(self._meta, k, dask_make_meta(v))
return self.map_partitions(assigner, k, v, meta=meta)
def apply_rows(self, func, incols, outcols, kwargs=None, cache_key=None):
import uuid
if kwargs is None:
kwargs = {}
if cache_key is None:
cache_key = uuid.uuid4()
def do_apply_rows(df, func, incols, outcols, kwargs):
return df.apply_rows(
func, incols, outcols, kwargs, cache_key=cache_key
)
meta = do_apply_rows(self._meta, func, incols, outcols, kwargs)
return self.map_partitions(
do_apply_rows, func, incols, outcols, kwargs, meta=meta
)
def merge(self, other, **kwargs):
if kwargs.pop("shuffle", "tasks") != "tasks":
raise ValueError(
"Dask-cudf only supports task based shuffling, got %s"
% kwargs["shuffle"]
)
on = kwargs.pop("on", None)
if isinstance(on, tuple):
on = list(on)
return super().merge(other, on=on, shuffle="tasks", **kwargs)
def join(self, other, **kwargs):
if kwargs.pop("shuffle", "tasks") != "tasks":
raise ValueError(
"Dask-cudf only supports task based shuffling, got %s"
% kwargs["shuffle"]
)
# CuDF doesn't support "right" join yet
how = kwargs.pop("how", "left")
if how == "right":
return other.join(other=self, how="left", **kwargs)
on = kwargs.pop("on", None)
if isinstance(on, tuple):
on = list(on)
return super().join(other, how=how, on=on, shuffle="tasks", **kwargs)
def set_index(self, other, sorted=False, divisions=None, **kwargs):
if kwargs.pop("shuffle", "tasks") != "tasks":
raise ValueError(
"Dask-cudf only supports task based shuffling, got %s"
% kwargs["shuffle"]
)
pre_sorted = sorted
del sorted
if (
divisions == "quantile"
or isinstance(divisions, (cudf.DataFrame, cudf.Series))
or (
isinstance(other, str)
and cudf.utils.dtypes.is_string_dtype(self[other].dtype)
)
):
# Let upstream-dask handle "pre-sorted" case
if pre_sorted:
return dd.shuffle.set_sorted_index(
self, other, divisions=divisions, **kwargs
)
by = other
if not isinstance(other, list):
by = [by]
if len(by) > 1:
raise ValueError("Dask does not support MultiIndex (yet).")
if divisions == "quantile":
divisions = None
# Use dask_cudf's sort_values
# TODO: Handle `sorted=True`
df = self.sort_values(
by,
max_branch=kwargs.get("max_branch", None),
divisions=divisions,
set_divisions=True,
ignore_index=True,
)
# Ignore divisions if its a dataframe
if isinstance(divisions, cudf.DataFrame):
divisions = None
# Set index and repartition
df2 = df.map_partitions(
sorting.set_index_post,
index_name=other,
drop=kwargs.get("drop", True),
column_dtype=df.columns.dtype,
)
npartitions = kwargs.get("npartitions", self.npartitions)
partition_size = kwargs.get("partition_size", None)
if partition_size:
return df2.repartition(partition_size=partition_size)
if not divisions and df2.npartitions != npartitions:
return df2.repartition(npartitions=npartitions)
if divisions and df2.npartitions != len(divisions) - 1:
return df2.repartition(divisions=divisions)
return df2
return super().set_index(
other,
sorted=pre_sorted,
shuffle="tasks",
divisions=divisions,
**kwargs,
)
def sort_values(
self,
by,
ignore_index=False,
max_branch=None,
divisions=None,
set_divisions=False,
**kwargs,
):
if kwargs:
raise ValueError(
f"Unsupported input arguments passed : {list(kwargs.keys())}"
)
if self.npartitions == 1:
df = self.map_partitions(M.sort_values, by)
else:
df = sorting.sort_values(
self,
by,
max_branch=max_branch,
divisions=divisions,
set_divisions=set_divisions,
ignore_index=ignore_index,
)
if ignore_index:
return df.reset_index(drop=True)
return df
def to_parquet(self, path, *args, **kwargs):
""" Calls dask.dataframe.io.to_parquet with CudfEngine backend """
from dask_cudf.io import to_parquet
return to_parquet(self, path, *args, **kwargs)
def to_orc(self, path, **kwargs):
""" Calls dask_cudf.io.to_orc """
from dask_cudf.io import to_orc
return to_orc(self, path, **kwargs)
@derived_from(pd.DataFrame)
def var(
self,
axis=None,
skipna=True,
ddof=1,
split_every=False,
dtype=None,
out=None,
naive=False,
):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(
M.var,
self,
meta=meta,
token=self._token_prefix + "var",
axis=axis,
skipna=skipna,
ddof=ddof,
)
return handle_out(out, result)
elif naive:
return _naive_var(self, meta, skipna, ddof, split_every, out)
else:
return _parallel_var(self, meta, skipna, split_every, out)
def repartition(self, *args, **kwargs):
""" Wraps dask.dataframe DataFrame.repartition method.
Uses DataFrame.shuffle if `columns=` is specified.
"""
# TODO: Remove this function in future(0.17 release)
columns = kwargs.pop("columns", None)
if columns:
warnings.warn(
"The column argument will be removed from repartition in "
" future versions of dask_cudf. Use DataFrame.shuffle().",
DeprecationWarning,
)
warnings.warn(
"Rearranging data by column hash. Divisions will lost. "
"Set ignore_index=False to preserve Index values."
)
ignore_index = kwargs.pop("ignore_index", True)
return self.shuffle(
on=columns, ignore_index=ignore_index, **kwargs
)
return super().repartition(*args, **kwargs)
def shuffle(self, *args, **kwargs):
""" Wraps dask.dataframe DataFrame.shuffle method
"""
shuffle_arg = kwargs.pop("shuffle", None)
if shuffle_arg and shuffle_arg != "tasks":
raise ValueError("dask_cudf does not support disk-based shuffle.")
return super().shuffle(*args, shuffle="tasks", **kwargs)
def groupby(self, by=None, **kwargs):
from .groupby import CudfDataFrameGroupBy
return CudfDataFrameGroupBy(self, by=by, **kwargs)
def sum_of_squares(x):
x = x.astype("f8")._column
outcol = libcudf.reduce.reduce("sum_of_squares", x)
return cudf.Series(outcol)
def var_aggregate(x2, x, n, ddof):
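    # Moment formula: with x2 = sum(x**2), x = sum(x) over n observations, the
    # result is (x2/n - (x/n)**2) * n / (n - ddof); e.g. samples [1, 2, 3] with
    # ddof=1 give (14/3 - 4) * 3/2 = 1.0.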
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
result = (x2 / n) - (x / n) ** 2
if ddof != 0:
result = result * n / (n - ddof)
return result
except ZeroDivisionError:
return np.float64(np.nan)
def nlargest_agg(x, **kwargs):
return cudf.concat(x).nlargest(**kwargs)
def nsmallest_agg(x, **kwargs):
return cudf.concat(x).nsmallest(**kwargs)
class Series(_Frame, dd.core.Series):
_partition_type = cudf.Series
def count(self, split_every=False):
return reduction(
[self],
chunk=M.count,
aggregate=np.sum,
split_every=split_every,
meta="i8",
)
def mean(self, split_every=False):
sum = self.sum(split_every=split_every)
n = self.count(split_every=split_every)
return sum / n
@derived_from(pd.DataFrame)
def var(
self,
axis=None,
skipna=True,
ddof=1,
split_every=False,
dtype=None,
out=None,
naive=False,
):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(
M.var,
self,
meta=meta,
token=self._token_prefix + "var",
axis=axis,
skipna=skipna,
ddof=ddof,
)
return handle_out(out, result)
elif naive:
return _naive_var(self, meta, skipna, ddof, split_every, out)
else:
return _parallel_var(self, meta, skipna, split_every, out)
def groupby(self, *args, **kwargs):
from .groupby import CudfSeriesGroupBy
return CudfSeriesGroupBy(self, *args, **kwargs)
@property
def list(self):
return ListMethods(self)
class Index(Series, dd.core.Index):
_partition_type = cudf.Index
def _naive_var(ddf, meta, skipna, ddof, split_every, out):
num = ddf._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = ddf._token_prefix + "var"
result = map_partitions(
var_aggregate, x2, x, n, token=name, meta=meta, ddof=ddof
)
if isinstance(ddf, DataFrame):
result.divisions = (min(ddf.columns), max(ddf.columns))
return handle_out(out, result)
def _parallel_var(ddf, meta, skipna, split_every, out):
def _local_var(x, skipna):
if skipna:
n = x.count(skipna=skipna)
avg = x.mean(skipna=skipna)
else:
# Not skipping nulls, so might as well
# avoid the full `count` operation
n = len(x)
avg = x.sum(skipna=skipna) / n
m2 = ((x - avg) ** 2).sum(skipna=skipna)
return n, avg, m2
def _aggregate_var(parts):
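        # Pairwise merge of per-partition (count, mean, M2) triples in the
        # style of Chan et al.'s parallel variance update; _finalize_var then
        # returns M2 / (n - 1), the sample variance over all partitions.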
n, avg, m2 = parts[0]
for i in range(1, len(parts)):
n_a, avg_a, m2_a = n, avg, m2
n_b, avg_b, m2_b = parts[i]
n = n_a + n_b
avg = (n_a * avg_a + n_b * avg_b) / n
delta = avg_b - avg_a
m2 = m2_a + m2_b + delta ** 2 * n_a * n_b / n
return n, avg, m2
def _finalize_var(vals):
n, _, m2 = vals
return m2 / (n - 1)
# Build graph
nparts = ddf.npartitions
if not split_every:
split_every = nparts
name = "var-" + tokenize(skipna, split_every, out)
local_name = "local-" + name
num = ddf._get_numeric_data()
dsk = {
(local_name, n, 0): (_local_var, (num._name, n), skipna)
for n in range(nparts)
}
# Use reduction tree
widths = [nparts]
while nparts > 1:
nparts = math.ceil(nparts / split_every)
widths.append(nparts)
height = len(widths)
for depth in range(1, height):
for group in range(widths[depth]):
p_max = widths[depth - 1]
lstart = split_every * group
lstop = min(lstart + split_every, p_max)
node_list = [
(local_name, p, depth - 1) for p in range(lstart, lstop)
]
dsk[(local_name, group, depth)] = (_aggregate_var, node_list)
if height == 1:
group = depth = 0
dsk[(name, 0)] = (_finalize_var, (local_name, group, depth))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[num, ddf])
result = dd.core.new_dd_object(graph, name, meta, (None, None))
if isinstance(ddf, DataFrame):
result.divisions = (min(ddf.columns), max(ddf.columns))
return handle_out(out, result)
def _extract_meta(x):
"""
Extract internal cache data (``_meta``) from dask_cudf objects
"""
if isinstance(x, (Scalar, _Frame)):
return x._meta
elif isinstance(x, list):
return [_extract_meta(_x) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x) for _x in x])
elif isinstance(x, dict):
return {k: _extract_meta(v) for k, v in x.items()}
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args), **_extract_meta(kwargs))
def align_partitions(args):
"""Align partitions between dask_cudf objects.
Note that if all divisions are unknown, but have equal npartitions, then
they will be passed through unchanged."""
dfs = [df for df in args if isinstance(df, _Frame)]
if not dfs:
return args
divisions = dfs[0].divisions
if not all(df.divisions == divisions for df in dfs):
raise NotImplementedError("Aligning mismatched partitions")
return args
def reduction(
args,
chunk=None,
aggregate=None,
combine=None,
meta=None,
token=None,
chunk_kwargs=None,
aggregate_kwargs=None,
combine_kwargs=None,
split_every=None,
**kwargs,
):
"""Generic tree reduction operation.
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function list-of-blocks -> block
Function to operate on the list of results of chunk
combine : function list-of-blocks -> block, optional
Function to operate on intermediate lists of results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
npartitions = set(
arg.npartitions for arg in args if isinstance(arg, _Frame)
)
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(
token or (chunk, aggregate),
meta,
args,
chunk_kwargs,
aggregate_kwargs,
combine_kwargs,
split_every,
)
# Chunk
a = "{0}-chunk-{1}".format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {
(a, 0, i): (chunk, key)
for i, key in enumerate(args[0].__dask_keys__())
}
else:
dsk = {
(a, 0, i): (
apply,
chunk,
[(x._name, i) if isinstance(x, _Frame) else x for x in args],
chunk_kwargs,
)
for i in range(args[0].npartitions)
}
# Combine
b = "{0}-combine-{1}".format(token or funcname(combine), token_key)
k = npartitions
depth = 0
while k > split_every:
for part_i, inds in enumerate(partition_all(split_every, range(k))):
conc = (list, [(a, depth, i) for i in inds])
dsk[(b, depth + 1, part_i)] = (
(apply, combine, [conc], combine_kwargs)
if combine_kwargs
else (combine, conc)
)
k = part_i + 1
a = b
depth += 1
# Aggregate
b = "{0}-agg-{1}".format(token or funcname(aggregate), token_key)
conc = (list, [(a, depth, i) for i in range(k)])
if aggregate_kwargs:
dsk[(b, 0)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, 0)] = (aggregate, conc)
if meta is None:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [[meta_chunk]], aggregate_kwargs)
meta = dask_make_meta(meta)
graph = HighLevelGraph.from_collections(b, dsk, dependencies=args)
return dd.core.new_dd_object(graph, b, meta, (None, None))
def from_cudf(data, npartitions=None, chunksize=None, sort=True, name=None):
if isinstance(getattr(data, "index", None), cudf.MultiIndex):
raise NotImplementedError(
"dask_cudf does not support MultiIndex Dataframes."
)
name = name or ("from_cudf-" + tokenize(data, npartitions or chunksize))
return dd.from_pandas(
data,
npartitions=npartitions,
chunksize=chunksize,
sort=sort,
name=name,
)
from_cudf.__doc__ = (
"Wraps main-line Dask from_pandas...\n" + dd.from_pandas.__doc__
)
def from_dask_dataframe(df):
return df.map_partitions(cudf.from_pandas)
for name in [
"add",
"sub",
"mul",
"truediv",
"floordiv",
"mod",
"pow",
"radd",
"rsub",
"rmul",
"rtruediv",
"rfloordiv",
"rmod",
"rpow",
]:
meth = getattr(cudf.DataFrame, name)
kwargs = {"original": cudf.DataFrame} if DASK_VERSION >= "2.11.1" else {}
DataFrame._bind_operator_method(name, meth, **kwargs)
meth = getattr(cudf.Series, name)
kwargs = {"original": cudf.Series} if DASK_VERSION >= "2.11.1" else {}
Series._bind_operator_method(name, meth, **kwargs)
for name in ["lt", "gt", "le", "ge", "ne", "eq"]:
meth = getattr(cudf.Series, name)
kwargs = {"original": cudf.Series} if DASK_VERSION >= "2.11.1" else {}
Series._bind_comparison_method(name, meth, **kwargs)
|
the-stack_0_19510
|
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for third_party.py.mesh_tensorflow.experimental.input_reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mesh_tensorflow as mtf
import mesh_tensorflow.experimental.input_reader as input_reader
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.protobuf.tpu import topology_pb2
from tensorflow.python.tpu import device_assignment
from tensorflow.python.tpu import tpu
class MtfInputReaderTest(parameterized.TestCase, tf.test.TestCase):
def initialize_system(self, sess):
"""Run tpu.initialize_system and return the number of TPU devices."""
topology_object = topology_pb2.TopologyProto()
topology = sess.run(tf.tpu.initialize_system())
topology_object.ParseFromString(topology)
num_cores = topology_object.num_tasks * (
topology_object.num_tpu_devices_per_task)
return topology, num_cores
@parameterized.parameters((True,), (False,))
def test_get_laidout_tensors(self, is_eval_mode):
mesh_shape = "mesh_x:2, mesh_y:1"
layout = "batch:mesh_x, io:mesh_y"
batch_io_dim = 4
with tf.Session() as sess:
topology, num_cores = self.initialize_system(sess)
# Get a device_assignment object for mtf.
d_assignment = device_assignment.device_assignment(
topology,
computation_shape=[1,] * mtf.utils.topology_rank(topology),
num_replicas=num_cores)
# Hacked dataset creator: creates different datasets for the first and
# second call, in order to test SimdMeshImplInputReader.
self.sub_batch_created_times = 0
def stateful_ds_creator():
whole_batch = tf.eye(batch_io_dim, dtype=tf.float32)
sub_batch = tf.slice(whole_batch,
[self.sub_batch_created_times * 2, 0],
[2, 4])
self.sub_batch_created_times += 1
return tf.data.Dataset.from_tensors(sub_batch).repeat().unbatch()
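      # With batch_io_dim = 4, the first call yields rows 0-1 of the 4x4
      # identity matrix and the second call rows 2-3, which is what the
      # non-eval-mode assertions below rely on.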
batch_dim = mtf.Dimension("batch", batch_io_dim)
io_dim = mtf.Dimension("io", batch_io_dim)
mtf_input_shapes = [mtf.Shape([batch_dim, io_dim])]
# Get mesh_impl.
mesh_shape = mtf.convert_to_shape(mesh_shape)
layout_rules = mtf.convert_to_layout_rules(layout)
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
mesh_shape, layout_rules, None, d_assignment)
simd_input_reader = input_reader.SimdMeshImplInputReader(
mesh_impl, stateful_ds_creator, mtf_input_shapes,
external_worker=False,
is_eval_mode=is_eval_mode)
def model_fn(features):
return features
replicated_computation = tpu.replicate(
computation=model_fn,
inputs=[[]] * num_cores,
infeed_queue=simd_input_reader.infeed_queue,
device_assignment=d_assignment)
simd_input_reader.start_infeed_thread(sess, 1)
results = sess.run(replicated_computation)
print("results: {}".format(results))
core_0_data = results[0][0]
core_1_data = results[1][0]
print("core_0_data: {}".format(core_0_data))
print("core_1_data: {}".format(core_1_data))
if is_eval_mode:
# If there is only one dataset object, then the stateful_ds_creator()
# should be called only once.
self.assertAllClose(
np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32),
core_0_data)
self.assertAllClose(
np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32),
core_1_data)
else:
# If there are two dataset objects, then the stateful_ds_creator()
# should be called twice.
self.assertAllClose(
np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32),
core_0_data)
self.assertAllClose(
np.array([[0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32),
core_1_data)
sess.run(tf.tpu.shutdown_system())
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_19512
|
"""santosh_plat_5499_m_2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "santosh-plat-5499-m"
admin.site.site_title = "santosh-plat-5499-m Admin Portal"
admin.site.index_title = "santosh-plat-5499-m Admin"
# swagger
api_info = openapi.Info(
title="santosh-plat-5499-m API",
default_version="v1",
description="API documentation for santosh-plat-5499-m App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
the-stack_0_19515
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
from itertools import cycle
from random import choice
author = 'Santiago Sequeda & Mayra Riascos & Edgar Rangel'
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'malversacion'
players_per_group = 5
num_rounds = 10
dotacion = c(3000)
multiplicador = 2
class Subsession(BaseSubsession):
def creating_session(self):
tipos_grupo = cycle([1,2,3])
for grupo in self.get_groups():
grupo.id_grupo = next(tipos_grupo)
class Group(BaseGroup):
    id_grupo = models.IntegerField(doc="""Identifier for the type of group its members belong to.
                                        1 - President chosen at random
                                        2 - President chosen by competition
                                        3 - President chosen by vote""")
    orden_llegada = models.StringField(doc="""String of characters recording the players' order of
                                            arrival in the different rounds,
                                            e.g. 'id_jugador_xid_jugador_y' or '231...' """)
BolsaPublica = models.CurrencyField(min=0,max=Constants.dotacion)
CuentaPrivadaPresidente = models.CurrencyField(min=0,max=Constants.dotacion)
contador = models.IntegerField()
def inicializar_orden_llegada(self):
self.orden_llegada = ""
def contador_jugadores(self):
contador = 5
for id in self.orden_llegada:
if int(id) in [1,2,3,4,5]:
contador = contador - 1
return contador
def get_jugadores_aceptaron(self):
jugadores = []
for j in self.get_players():
if (j.in_round(1).consentimiento):
jugadores.append(j)
return jugadores
def set_presidente(self,presidente):
presidente.es_presidente = True
for otros in presidente.get_others_in_group():
otros.es_presidente = False
def set_Presidente_Azar(self):
jugadores = self.get_jugadores_aceptaron()
presidente = choice(jugadores)
self.set_presidente(presidente)
def set_presidente_competencia(self):
jugadores = self.get_jugadores_aceptaron()
puntajes = [j.puntaje for j in jugadores]
for jugador in jugadores:
if (jugador.puntaje == max(puntajes)):
presidente = jugador
self.set_presidente(presidente)
def agregar_jugador(self, jugador):
extra = self.contador_jugadores()
jugador.puntaje = jugador.puntaje + extra
self.orden_llegada = self.orden_llegada + str(jugador.id_in_group)
def set_presidente_votacion(self):
jugadores = self.get_jugadores_aceptaron()
votos = [p.voto for p in jugadores]
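        # A candidate needs a strict majority: at least int(len(jugadores)/2) + 1
        # votes, e.g. 3 votes when 5 players accepted.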
contador = 0
for i in jugadores:
            if votos.count('Jugador ' + str(i.id_in_group)) >= int(len(jugadores) / 2) + 1:
presidente = i
break
else:
contador = contador + 1
if contador == len(jugadores):
return False
else:
self.set_presidente(presidente)
return True
def set_presidente_votacion_azar(self):
jugadores = self.get_jugadores_aceptaron()
votos = [p.voto for p in jugadores]
numero_votos = [votos.count('Jugador ' + str(j.id_in_group)) for j in jugadores]
posibles_presidentes =[]
for i,cantidad in enumerate(numero_votos):
if cantidad == max(numero_votos):
posibles_presidentes.append(i+1)
id_presidente = choice(posibles_presidentes)
presidente = self.get_player_by_id(id_presidente)
self.set_presidente(presidente)
def calcularGananciasJugadores(self):
jugadores = self.get_jugadores_aceptaron()
rentabilidad = (self.BolsaPublica * Constants.multiplicador)/len(jugadores)
for j in jugadores:
if j.es_presidente == True:
j.cuenta = rentabilidad + self.CuentaPrivadaPresidente
else:
j.cuenta = rentabilidad
j.payoff = j.cuenta
class Player(BasePlayer):
propuesta = models.LongStringField(max_length=140)
cuenta = models.CurrencyField()
es_presidente = models.BooleanField()
puntaje = models.IntegerField()
voto = models.StringField()
opinion = models.BooleanField(choices=[[True, 'Si' ], [False, 'No']])
nombre= models.StringField()
celular= models.IntegerField()
correo= models.StringField()
genero = models.StringField(choices=['Femenino','Masculino'])
edad = models.IntegerField()
semestre = models.IntegerField()
participacion = models.BooleanField(choices=[[True, 'Si' ], [False, 'No']])
estudiante = models.BooleanField(choices=[[True, 'Si' ], [False, 'No']])
carrera= models.StringField(blank=True)
universidad= models.StringField(blank=True)
consentimiento = models.BooleanField(choices=[[True, 'Si autorizo'], [False, 'No autorizo']])
profesion = models.StringField(blank=True)
|
the-stack_0_19516
|
import os
import numpy as np
import pybullet as p
from .robot import Robot
class Panda(Robot):
def __init__(self, controllable_joints='right'):
right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints
left_arm_joint_indices = right_arm_joint_indices # Controllable arm joints
wheel_joint_indices = []
right_end_effector = 11 # Used to get the pose of the end effector
left_end_effector = right_end_effector # Used to get the pose of the end effector
right_gripper_indices = [9, 10] # Gripper actuated joints
left_gripper_indices = right_gripper_indices # Gripper actuated joints
right_tool_joint = 11 # Joint that tools are attached to
left_tool_joint = right_tool_joint # Joint that tools are attached to
right_gripper_collision_indices = [7, 8, 9, 10, 11] # Used to disable collision between gripper and tools
left_gripper_collision_indices = right_gripper_collision_indices # Used to disable collision between gripper and tools
gripper_pos = {'scratch_itch': [0.02]*2, # Gripper open position for holding tools, in [0, 0.04]
'feeding': [0.001]*2,
'drinking': [0.035]*2,
'bed_bathing': [0.02]*2,
'dressing': [0.001]*2,
'arm_manipulation': [0.02]*2}
tool_pos_offset = {'scratch_itch': [0, 0, 0], # Position offset between tool and robot tool joint
'feeding': [-0.11, 0.0175, 0],
'drinking': [0.05, 0, 0.01],
'bed_bathing': [0, 0, 0],
'arm_manipulation': [0.075, 0, 0.12]}
tool_orient_offset = {'scratch_itch': [0, -np.pi/2.0, 0], # RPY orientation offset between tool and robot tool joint
'feeding': [-0.1, -np.pi/2.0, np.pi],
'drinking': [0, -np.pi/2.0, np.pi/2.0],
'bed_bathing': [0, -np.pi/2.0, 0],
'arm_manipulation': [np.pi/2.0, -np.pi/2.0, 0]}
pos = [-0.4, -0.35, 0.2]
toc_base_pos_offset = {'scratch_itch': pos, # Robot base offset before TOC base pose optimization
'feeding': pos,
'drinking': pos,
'bed_bathing': [-0.05, 1.05, 0.67],
'dressing': [0.35, -0.35, 0.2],
'arm_manipulation': [-0.25, 1.15, 0.67]}
toc_ee_orient_rpy = {'scratch_itch': [0, np.pi/2.0, 0], # Initial end effector orientation
'feeding': [-np.pi/2.0, 0, -np.pi/2.0],
'drinking': [0, np.pi/2.0, 0],
'bed_bathing': [0, np.pi/2.0, 0],
'dressing': [[0, -np.pi/2.0, 0]],
'arm_manipulation': [0, np.pi/2.0, 0]}
wheelchair_mounted = True
super(Panda, self).__init__(controllable_joints, right_arm_joint_indices, left_arm_joint_indices, wheel_joint_indices, right_end_effector, left_end_effector, right_gripper_indices, left_gripper_indices, gripper_pos, right_tool_joint, left_tool_joint, tool_pos_offset, tool_orient_offset, right_gripper_collision_indices, left_gripper_collision_indices, toc_base_pos_offset, toc_ee_orient_rpy, wheelchair_mounted, half_range=False)
def init(self, directory, id, np_random, fixed_base=True):
self.body = p.loadURDF(os.path.join(directory, 'panda', 'panda.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)
super(Panda, self).init(self.body, id, np_random)
|
the-stack_0_19517
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import numpy as np
import tensorflow.compat.v1 as tf
from absl import app
from absl import flags
from cta.cta_remixmatch import CTAReMixMatch
from libml import data, utils
FLAGS = flags.FLAGS
class AB_FixMatch_Momentum(CTAReMixMatch):
def model(self, batch, lr, wd, wu, confidence, uratio, use_nesterov, momentum, ema=0.999, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # Training labeled
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x') # Eval images
y_in = tf.placeholder(tf.float32, [batch * uratio, 2] + hwc, 'y') # Training unlabeled (weak, strong)
l_in = tf.placeholder(tf.int32, [batch], 'labels') # Labels
lrate = tf.clip_by_value(tf.to_float(self.step) / (FLAGS.train_kimg << 10), 0, 1)
lr *= tf.cos(lrate * (7 * np.pi) / (2 * 8))
tf.summary.scalar('monitors/lr', lr)
# Compute logits for xt_in and y_in
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
skip_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
x = utils.interleave(tf.concat([xt_in, y_in[:, 0], y_in[:, 1]], 0), 2 * uratio + 1)
logits = utils.para_cat(lambda x: classifier(x, training=True), x)
        logits = utils.de_interleave(logits, 2 * uratio + 1)
post_ops = [v for v in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if v not in skip_ops]
logits_x = logits[:batch]
logits_weak, logits_strong = tf.split(logits[batch:], 2)
del logits, skip_ops
# Labeled cross-entropy
loss_xe = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=l_in, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
tf.summary.scalar('losses/xe', loss_xe)
# Pseudo-label cross entropy for unlabeled data
pseudo_labels = tf.stop_gradient(tf.nn.softmax(logits_weak))
loss_xeu = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(pseudo_labels, axis=1),
logits=logits_strong)
pseudo_mask = tf.to_float(tf.reduce_max(pseudo_labels, axis=1) >= confidence)
tf.summary.scalar('monitors/mask', tf.reduce_mean(pseudo_mask))
loss_xeu = tf.reduce_mean(loss_xeu * pseudo_mask)
tf.summary.scalar('losses/xeu', loss_xeu)
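        # Example of the confidence mask: with confidence=0.95, a weakly
        # augmented sample whose softmax peaks at 0.97 contributes its strong-
        # augmentation cross-entropy, while one peaking at 0.80 is masked out.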
# L2 regularization
loss_wd = sum(tf.nn.l2_loss(v) for v in utils.model_vars('classify') if 'kernel' in v.name)
tf.summary.scalar('losses/wd', loss_wd)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
train_op = tf.train.MomentumOptimizer(lr, momentum, use_nesterov=use_nesterov).minimize(
loss_xe + wu * loss_xeu + wd * loss_wd, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return utils.EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.PAIR_DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = AB_FixMatch_Momentum(
os.path.join(FLAGS.train_dir, dataset.name, AB_FixMatch_Momentum.cta_name()),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
uratio=FLAGS.uratio,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat,
use_nesterov=FLAGS.nesterov,
momentum=FLAGS.momentum)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('confidence', 0.95, 'Confidence threshold.')
flags.DEFINE_float('wd', 0.0005, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Pseudo label loss weight.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('uratio', 7, 'Unlabeled batch size ratio.')
flags.DEFINE_boolean('nesterov', True, 'Use Nesterov in the optimizer or not')
flags.DEFINE_float('momentum', 0.9, 'Momentum of SGD optimizer')
FLAGS.set_default('augment', 'd.d.d')
FLAGS.set_default('dataset', 'cifar10.3@250-1')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.03)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
|
the-stack_0_19521
|
# -*- coding: utf-8 -*-
""" S3 Pivot Table Reports Method
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{Python 2.6}} <http://www.python.org>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Report",
"S3PivotTable",
"S3ReportRepresent",
)
import datetime
import json
import os
import re
import sys
from itertools import product
from gluon import current
from gluon.contenttype import contenttype
from gluon.html import BUTTON, DIV, FIELDSET, FORM, INPUT, LABEL, LEGEND, TAG, XML
from gluon.languages import regex_translate
from gluon.sqlhtml import OptionsWidget
from gluon.storage import Storage
from gluon.validators import IS_IN_SET, IS_EMPTY_OR
from s3compat import INTEGER_TYPES, basestring, xrange
from .s3query import FS
from .s3rest import S3Method
from .s3utils import s3_flatlist, s3_has_foreign_key, s3_str, S3MarkupStripper, s3_represent_value
from .s3xml import S3XMLFormat
from .s3validators import IS_NUMBER, JSONERRORS
# Compact JSON encoding
DEFAULT = lambda: None
SEPARATORS = (",", ":")
LAYER = re.compile(r"([a-zA-Z]+)\((.*)\)\Z")
FACT = re.compile(r"([a-zA-Z]+)\(([a-zA-Z0-9_.$:\,~]+)\),*(.*)\Z")
SELECTOR = re.compile(r"^[a-zA-Z0-9_.$:\~]+\Z")
# =============================================================================
class S3Report(S3Method):
""" RESTful method for pivot table reports """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Page-render entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
if r.http == "GET":
if r.representation == "geojson":
output = self.geojson(r, **attr)
else:
output = self.report(r, **attr)
elif r.http == "POST":
if r.representation == "json":
# NB can additionally check for ?explore=1 to
# distinguish from other POSTs (if necessary)
output = self.explore(r, **attr)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def report(self, r, **attr):
"""
Pivot table report page
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
output = {}
resource = self.resource
get_config = resource.get_config
show_filter_form = False
if r.representation in ("html", "iframe"):
filter_widgets = get_config("filter_widgets", None)
if filter_widgets and not self.hide_filter:
# Apply filter defaults (before rendering the data!)
from .s3filter import S3FilterForm
show_filter_form = True
S3FilterForm.apply_filter_defaults(r, resource)
widget_id = "pivottable"
# @todo: make configurable:
maxrows = 20
maxcols = 20
# Extract the relevant GET vars
report_vars = ("rows", "cols", "fact", "totals")
get_vars = {k: v for k, v in r.get_vars.items() if k in report_vars}
# Fall back to report options defaults
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
if not any (k in get_vars for k in ("rows", "cols", "fact")):
get_vars = defaults
get_vars["chart"] = r.get_vars.get("chart",
defaults.get("chart", None))
get_vars["table"] = r.get_vars.get("table",
defaults.get("table", None))
# Generate the pivot table
if get_vars:
rows = get_vars.get("rows", None)
if type(rows) is list:
rows = rows[-1]
cols = get_vars.get("cols", None)
if type(cols) is list:
cols = cols[-1]
layer = get_vars.get("fact", "id")
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
current.log.error(sys.exc_info()[1])
facts = None
if not facts or not any([rows, cols]):
pivottable = None
else:
prefix = resource.prefix_selector
get_vars["rows"] = prefix(rows) if rows else None
get_vars["cols"] = prefix(cols) if cols else None
get_vars["fact"] = ",".join("%s(%s)" % (fact.method, fact.selector) for fact in facts)
pivottable = S3PivotTable(resource, rows, cols, facts,
precision = report_options.get("precision"),
)
else:
pivottable = None
representation = r.representation
if representation in ("html", "iframe", "json"):
# Generate JSON-serializable dict
if pivottable is not None:
pivotdata = pivottable.json(maxrows=maxrows, maxcols=maxcols)
else:
pivotdata = None
if r.representation in ("html", "iframe"):
tablename = resource.tablename
# Filter widgets
if show_filter_form:
advanced = False
for widget in filter_widgets:
if not widget:
continue
if "hidden" in widget.opts and widget.opts.hidden:
advanced = resource.get_config("report_advanced", True)
break
filter_formstyle = get_config("filter_formstyle", None)
filter_form = S3FilterForm(filter_widgets,
formstyle = filter_formstyle,
advanced = advanced,
submit = False,
_class = "filter-form",
_id = "%s-filter-form" % widget_id,
)
fresource = current.s3db.resource(tablename)
alias = resource.alias if r.component else None
filter_widgets = filter_form.fields(fresource,
r.get_vars,
alias = alias,
)
else:
# Render as empty string to avoid the exception in the view
filter_widgets = None
# Generate the report form
ajax_vars = Storage(r.get_vars)
ajax_vars.update(get_vars)
filter_url = r.url(method = "",
representation = "",
vars = ajax_vars.fromkeys((k for k in ajax_vars
if k not in report_vars)))
ajaxurl = attr.get("ajaxurl", r.url(method = "report",
representation = "json",
vars = ajax_vars,
))
output = S3ReportForm(resource).html(pivotdata,
get_vars = get_vars,
filter_widgets = filter_widgets,
ajaxurl = ajaxurl,
filter_url = filter_url,
widget_id = widget_id,
)
output["title"] = self.crud_string(tablename, "title_report")
output["report_type"] = "pivottable"
# Detect and store theme-specific inner layout
self._view(r, "pivottable.html")
# View
current.response.view = self._view(r, "report.html")
elif r.representation == "json":
output = json.dumps(pivotdata, separators=SEPARATORS)
elif r.representation == "xls":
if pivottable:
# Report title
title = self.crud_string(r.tablename, "title_report")
if title is None:
title = current.T("Report")
# TODO: include current date?
filename = "%s_%s.xls" % (r.env.server_name,
s3_str(title).replace(" ", "_"),
)
disposition = "attachment; filename=\"%s\"" % filename
# Response headers
response = current.response
response.headers["Content-Type"] = contenttype(".xls")
response.headers["Content-disposition"] = disposition
# Convert pivot table to XLS
stream = pivottable.xls(title)
#stream.seek(0) # already done in encoder
output = stream.read()
else:
r.error(400, "No report parameters specified")
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def geojson(self, r, **attr):
"""
Render the pivot table data as a dict ready to be exported as
GeoJSON for display on a Map.
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
resource = self.resource
response = current.response
s3 = response.s3
# Set response headers
response.headers["Content-Type"] = s3.content_type.get("geojson",
"application/json")
if not resource.count():
# No Data
return json.dumps({})
# Extract the relevant GET vars
get_vars = r.get_vars
layer_id = r.get_vars.get("layer", None)
level = get_vars.get("level", "L0")
# Fall back to report options defaults
get_config = resource.get_config
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
# The rows dimension
context = get_config("context")
if context and "location" in context:
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() as a guide if-desired
rows = "(location)$%s" % level
else:
# Fallback to location_id
rows = "location_id$%s" % level
# Fallback we can add if-required
#rows = "site_id$location_id$%s" % level
# Filter out null values
resource.add_filter(FS(rows) != None)
# Set XSLT stylesheet
stylesheet = os.path.join(r.folder, r.XSLT_PATH, "geojson", "export.xsl")
# Do we have any data at this level of aggregation?
fallback_to_points = True # @ToDo: deployment_setting?
output = None
if fallback_to_points:
if resource.count() == 0:
# Show Points
resource.clear_query()
# Apply URL filters (especially BBOX)
resource.build_query(filter=s3.filter, vars=get_vars)
# Extract the Location Data
xmlformat = S3XMLFormat(stylesheet)
include, exclude = xmlformat.get_fields(resource.tablename)
resource.load(fields=include,
skip=exclude,
start=0,
limit=None,
orderby=None,
virtual=False,
cacheable=True)
gis = current.gis
attr_fields = []
style = gis.get_style(layer_id=layer_id,
aggregate=False)
popup_format = style.popup_format
if popup_format:
if "T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
style.popup_format = popup_format
# Extract the attr_fields
parts = popup_format.split("{")
# Skip the first part
parts = parts[1:]
for part in parts:
attribute = part.split("}")[0]
attr_fields.append(attribute)
attr_fields = ",".join(attr_fields)
location_data = gis.get_location_data(resource,
attr_fields=attr_fields)
# Export as GeoJSON
current.xml.show_ids = True
output = resource.export_xml(fields = include,
mcomponents = None,
references = [],
stylesheet = stylesheet,
as_json = True,
location_data = location_data,
map_data = {"style": style},
)
# Transformation error?
if not output:
r.error(400, "XSLT Transformation Error: %s " % current.xml.error)
else:
while resource.count() == 0:
# Try a lower level of aggregation
level = int(level[1:])
if level == 0:
# Nothing we can display
return json.dumps({})
resource.clear_query()
# Apply URL filters (especially BBOX)
resource.build_query(filter=s3.filter, vars=get_vars)
level = "L%s" % (level - 1)
if context and "location" in context:
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() as a guide if-desired
rows = "(location)$%s" % level
else:
# Fallback to location_id
rows = "location_id$%s" % level
# Fallback we can add if-required
#rows = "site_id$location_id$%s" % level
resource.add_filter(FS(rows) != None)
if not output:
# Build the Pivot Table
cols = None
layer = get_vars.get("fact", defaults.get("fact", "count(id)"))
facts = S3PivotTableFact.parse(layer)[:1]
pivottable = S3PivotTable(resource, rows, cols, facts,
precision = report_options.get("precision"),
)
# Extract the Location Data
#attr_fields = []
style = current.gis.get_style(layer_id=layer_id,
aggregate=True)
popup_format = style.popup_format
if popup_format:
if"T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
style.popup_format = popup_format
# Extract the attr_fields
# No need as defaulted inside S3PivotTable.geojson()
#parts = popup_format.split("{")
## Skip the first part
#parts = parts[1:]
#for part in parts:
# attribute = part.split("}")[0]
# attr_fields.append(attribute)
#attr_fields = ",".join(attr_fields)
ids, location_data = pivottable.geojson(fact=facts[0], level=level)
# Export as GeoJSON
current.xml.show_ids = True
gresource = current.s3db.resource("gis_location", id=ids)
output = gresource.export_xml(fields = [],
mcomponents = None,
references = [],
stylesheet = stylesheet,
as_json = True,
location_data = location_data,
# Tell the client that we are
# displaying aggregated data and
# the level it is aggregated at
map_data = {"level": int(level[1:]),
"style": style,
},
)
# Transformation error?
if not output:
r.error(400, "XSLT Transformation Error: %s " % current.xml.error)
return output
# -------------------------------------------------------------------------
def widget(self, r, method=None, widget_id=None, visible=True, **attr):
"""
Pivot table report widget
@param r: the S3Request
@param method: the widget method
@param widget_id: the widget ID
@param visible: whether the widget is initially visible
@param attr: controller attributes
"""
output = {}
resource = self.resource
get_config = resource.get_config
# @todo: make configurable:
maxrows = 20
maxcols = 20
# Extract the relevant GET vars
report_vars = ("rows", "cols", "fact", "totals")
get_vars = {k: v for k, v in r.get_vars.items() if k in report_vars}
# Fall back to report options defaults
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
if not any (k in get_vars for k in ("rows", "cols", "fact")):
get_vars = defaults
get_vars["chart"] = r.get_vars.get("chart",
defaults.get("chart", None))
get_vars["table"] = r.get_vars.get("table",
defaults.get("table", None))
# Generate the pivot table
if get_vars:
rows = get_vars.get("rows", None)
cols = get_vars.get("cols", None)
layer = get_vars.get("fact", "id")
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
current.log.error(sys.exc_info()[1])
facts = None
if not facts or not any([rows, cols]):
pivottable = None
else:
prefix = resource.prefix_selector
get_vars["rows"] = prefix(rows) if rows else None
get_vars["cols"] = prefix(cols) if cols else None
get_vars["fact"] = ",".join("%s(%s)" % (fact.method, fact.selector) for fact in facts)
if visible:
pivottable = S3PivotTable(resource, rows, cols, facts,
precision = report_options.get("precision"),
)
else:
pivottable = None
else:
pivottable = None
# Render as JSON-serializable dict
if pivottable is not None:
pivotdata = pivottable.json(maxrows=maxrows, maxcols=maxcols)
else:
pivotdata = None
if r.representation in ("html", "iframe"):
# Generate the report form
ajax_vars = Storage(r.get_vars)
ajax_vars.update(get_vars)
filter_form = attr.get("filter_form", None)
filter_tab = attr.get("filter_tab", None)
filter_url = r.url(method="",
representation="",
vars=ajax_vars.fromkeys((k for k in ajax_vars
if k not in report_vars)),
)
ajaxurl = attr.get("ajaxurl", r.url(method="report",
representation="json",
vars=ajax_vars))
output = S3ReportForm(resource).html(pivotdata,
get_vars = get_vars,
filter_widgets = None,
ajaxurl = ajaxurl,
filter_url = filter_url,
filter_form = filter_form,
filter_tab = filter_tab,
widget_id = widget_id)
# Detect and store theme-specific inner layout
view = self._view(r, "pivottable.html")
# Render inner layout (outer page layout is set by S3Summary)
output["title"] = None
output = XML(current.response.render(view, output))
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def explore(self, r, **attr):
"""
Ajax-lookup of representations for items contributing to the
aggregate value in a pivot table cell (cell explore)
- called with a body JSON containing the record IDs to represent,
and the URL params for the pivot table (rows, cols, fact)
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
# Read+parse body JSON
s = r.body
s.seek(0)
try:
record_ids = json.load(s)
except JSONERRORS:
record_ids = None
# Must be a list of record IDs
if not isinstance(record_ids, list):
r.error(404, current.ERROR.BAD_RECORD)
# Create filtered resource
resource = current.s3db.resource(self.tablename, id=record_ids)
prefix = resource.prefix_selector
pkey = prefix(resource._id.name)
pkey_colname = str(resource._id)
# Parse the facts
get_vars = r.get_vars
facts = S3PivotTableFact.parse(get_vars.get("fact"))
selectors = set() # all fact selectors other than "id"
ofacts = [] # all facts other than "count(id)"
for fact in facts:
selector = prefix(fact.selector)
is_pkey = selector == pkey
if not is_pkey:
selectors.add(selector)
if not is_pkey or fact.method != "count":
ofacts.append(fact)
# Extract the data
if len(selectors):
selectors.add(pkey)
records = resource.select(selectors,
raw_data = True,
represent = True,
limit = None,
).rows
else:
# All we need is the record IDs, so skip the select and
# construct some pseudo-rows
records = []
for record_id in record_ids:
record = Storage({pkey_colname: record_id})
record._row = record
records.append(record)
# Get the record representation method and initialize it with the
# report context (rows, cols, facts)
represent = resource.get_config("report_represent", S3ReportRepresent)
if represent:
rows = get_vars.get("rows")
cols = get_vars.get("cols")
represent = represent(resource, rows=rows, cols=cols, facts=facts)
# Determine what the items list should contain
rfields = {} # resolved fact selectors
key = None
aggregate = True
if len(ofacts) == 1:
fact = ofacts[0]
if fact.method == "count":
# When counting foreign keys in the master record, then
# show a list of all unique values of that foreign key
# rather than the number of unique values per master
# record (as that would always be 1)
selector = prefix(fact.selector)
rfield = resource.resolve_selector(selector)
field = rfield.field
if field and s3_has_foreign_key(field):
multiple = True
if rfield.tname == resource.tablename or \
selector[:2] == "~." and "." not in selector[2:]:
multiple = False
else:
# Get the component prefix
alias = selector.split("$", 1)[0].split(".", 1)[0]
component = resource.components.get(alias)
if component:
multiple = component.multiple
if not multiple:
represent = None
key = rfield.colname
aggregate = False
rfields[selector] = rfield
# Get the record representations
records_repr = represent(record_ids) if represent else None
# Build the output items (as dict, will be alpha-sorted on client-side)
output = {}
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
for record in records:
raw = record._row
record_id = raw[pkey_colname]
values = []
for fact in ofacts:
# Resolve the selector
selector = prefix(fact.selector)
rfield = rfields.get(selector)
if not rfield:
rfield = rfields[selector] = resource.resolve_selector(selector)
# Get the value, sub-aggregate
if aggregate:
value = raw[rfield.colname]
if type(value) is list:
value = fact.compute(value)
else:
value = fact.compute([value])
if fact.method != "count":
field = rfield.field
if field and field.represent:
value = field.represent(value)
else:
value = record[rfield.colname]
# Extend values list
if len(values):
values.extend([" / ", value])
else:
values.append(value)
repr_items = [TAG[""](values)] if values else []
# Add the record representation
if records_repr is not None:
repr_items.insert(0, records_repr.get(record_id, UNKNOWN_OPT))
if len(repr_items) == 2:
repr_items.insert(1, ": ")
# Build output item
# - using TAG not str.join() to allow representations to contain
# XML helpers like A, SPAN or DIV
repr_str = s3_str(TAG[""](repr_items).xml())
if key:
# Include raw field value for client-side de-duplication
output[record_id] = [repr_str, s3_str(raw[key])]
else:
output[record_id] = repr_str
current.response.headers["Content-Type"] = "application/json"
return json.dumps(output, separators=SEPARATORS)
# -------------------------------------------------------------------------
@staticmethod
def inject_d3():
"""
Re-usable helper function to inject D3/NVD3 scripts
into the current page
"""
appname = current.request.application
s3 = current.response.s3
scripts_append = s3.scripts.append
if s3.debug:
if s3.cdn:
scripts_append("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.17/d3.js")
# We use a patched v1.8.5 currently, so can't use the CDN version
#scripts_append("https://cdnjs.cloudflare.com/ajax/libs/nvd3/1.8.5/nv.d3.js")
else:
scripts_append("/%s/static/scripts/d3/d3.js" % appname)
scripts_append("/%s/static/scripts/d3/nv.d3.js" % appname)
else:
if s3.cdn:
scripts_append("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.17/d3.min.js")
# We use a patched v1.8.5 currently, so can't use the CDN version
#scripts_append("https://cdnjs.cloudflare.com/ajax/libs/nvd3/1.8.5/nv.d3.min.js")
else:
scripts_append("/%s/static/scripts/d3/d3.min.js" % appname)
scripts_append("/%s/static/scripts/d3/nv.d3.min.js" % appname)
# =============================================================================
class S3ReportForm(object):
""" Helper class to render a report form """
def __init__(self, resource):
self.resource = resource
self.show_totals = True
# -------------------------------------------------------------------------
def html(self,
pivotdata,
filter_widgets=None,
get_vars=None,
ajaxurl=None,
filter_url=None,
filter_form=None,
filter_tab=None,
widget_id=None):
"""
Render the form for the report
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element base ID for the widgets
"""
T = current.T
appname = current.request.application
# Report options
report_options = self.report_options(get_vars = get_vars,
widget_id = widget_id,
)
# Pivot data
hidden = {"pivotdata": json.dumps(pivotdata, separators=SEPARATORS)}
empty = T("No report specified.")
hide = T("Hide Table")
show = T("Show Table")
throbber = "/%s/static/img/indicator.gif" % appname
# Filter options
if filter_widgets is not None:
filter_options = self._fieldset(T("Filter Options"),
filter_widgets,
_id="%s-filters" % widget_id,
_class="filter-form")
else:
filter_options = ""
# Report form submit element
resource = self.resource
submit = resource.get_config("report_submit", True)
if submit:
_class = "pt-submit"
if submit is True:
label = T("Update Report")
elif isinstance(submit, (list, tuple)):
label = submit[0]
_class = "%s %s" % (submit[1], _class)
else:
label = submit
submit = TAG[""](
INPUT(_type="button",
_value=label,
_class=_class))
else:
submit = ""
# Form
form = FORM(filter_options,
report_options,
submit,
hidden = hidden,
_class = "pt-form",
_id = "%s-pt-form" % widget_id,
)
# View variables
output = {"form": form,
"throbber": throbber,
"hide": hide,
"show": show,
"empty": empty,
"widget_id": widget_id,
}
# Script options
settings = current.deployment_settings
opts = {
#"renderFilter": True,
#"collapseFilter": False,
#"renderOptions": True,
"collapseOptions": settings.get_ui_hide_report_options(),
"renderTable": True,
"collapseTable": False,
"showTotals": self.show_totals,
"ajaxURL": ajaxurl,
"renderChart": True,
"collapseChart": True,
"defaultChart": None,
"exploreChart": True,
"filterURL": filter_url,
"filterTab": filter_tab,
"filterForm": filter_form,
"autoSubmit": settings.get_ui_report_auto_submit(),
"timeout": settings.get_ui_report_timeout(),
"thousandSeparator": settings.get_L10n_thousands_separator(),
"thousandGrouping": settings.get_L10n_thousands_grouping(),
"textAll": str(T("All")),
"textRecords": str(T("Records")),
}
        chart_opt = get_vars.get("chart")
if chart_opt is not None:
if str(chart_opt).lower() in ("0", "off", "false"):
opts["renderChart"] = False
elif ":" in chart_opt:
opts["collapseChart"] = False
ctype, caxis = chart_opt.split(":", 1)
opts["defaultChart"] = {"type": ctype, "axis": caxis}
        table_opt = get_vars.get("table")
if table_opt is not None:
table_opt = str(table_opt).lower()
if table_opt in ("0", "off", "false"):
opts["renderTable"] = False
elif table_opt == "collapse":
opts["collapseTable"] = True
# Scripts
S3Report.inject_d3()
s3 = current.response.s3
scripts = s3.scripts
if s3.debug:
script = "/%s/static/scripts/S3/s3.ui.pivottable.js" % appname
if script not in scripts:
scripts.append(script)
else:
script = "/%s/static/scripts/S3/s3.ui.pivottable.min.js" % appname
if script not in scripts:
scripts.append(script)
# Instantiate widget
script = '''$('#%(widget_id)s').pivottable(%(opts)s)''' % \
{"widget_id": widget_id,
"opts": json.dumps(opts,
separators=SEPARATORS,
),
}
s3.jquery_ready.append(script)
return output
# -------------------------------------------------------------------------
def report_options(self, get_vars=None, widget_id="pivottable"):
"""
Render the widgets for the report options form
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element base ID for the widgets
"""
T = current.T
SHOW_TOTALS = T("Show totals")
REPORT = T("Report of")
ROWS = T("Grouped by")
COLS = T("and")
resource = self.resource
get_config = resource.get_config
options = get_config("report_options")
# Specific formstyle?
settings = current.deployment_settings
formstyle = settings.get_ui_report_formstyle()
# Fall back to inline-variant of current formstyle
if formstyle is None:
formstyle = settings.get_ui_inline_formstyle()
# Helper for labels
label = lambda s, **attr: LABEL("%s:" % s, **attr)
formfields = []
# Layer selector
layer_id = "%s-fact" % widget_id
layer_widget = self.layer_options(options=options,
get_vars=get_vars,
widget_id=layer_id)
formfields.append((layer_id + "-row",
label(REPORT, _for=layer_id),
layer_widget,
"",
))
# Rows/Columns selectors
axis_options = self.axis_options
rows_id = "%s-rows" % widget_id
cols_id = "%s-cols" % widget_id
rows_options = axis_options("rows",
options=options,
get_vars=get_vars,
widget_id=rows_id)
cols_options = axis_options("cols",
options=options,
get_vars=get_vars,
widget_id=cols_id)
axis_widget = DIV(rows_options,
label(COLS, _for=cols_id),
cols_options,
_class="pt-axis-options",
)
formfields.append(("%s-axis-row" % widget_id,
label(ROWS, _for=rows_id),
axis_widget,
"",
))
# Show Totals switch
show_totals = True
if get_vars and "totals" in get_vars and \
str(get_vars["totals"]).lower() in ("0", "false", "off"):
show_totals = False
self.show_totals = show_totals
show_totals_id = "%s-totals" % widget_id
totals_widget = INPUT(_type="checkbox",
_id=show_totals_id,
_name="totals",
_class="pt-totals",
value=show_totals
)
formfields.append(("%s-show-totals-row" % widget_id,
label(SHOW_TOTALS, _for=show_totals_id),
totals_widget,
"",
))
try:
widgets = formstyle(FIELDSET(), formfields)
except:
# Old style (should be avoided)
widgets = TAG[""]([formstyle(*formfield) for formfield in formfields])
# Render fieldset
fieldset = self._fieldset(T("Report Options"),
widgets,
_id="%s-options" % widget_id,
_class="report-options")
return fieldset
# -------------------------------------------------------------------------
def axis_options(self, axis,
options=None,
get_vars=None,
widget_id=None):
"""
Construct an OptionsWidget for rows or cols axis
@param axis: "rows" or "cols"
@param options: the report options
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element ID for the widget
"""
resource = self.resource
prefix = resource.prefix_selector
# Get all selectors
if options and axis in options:
fields = options[axis]
else:
fields = resource.get_config("list_fields")
if not fields:
fields = [f.name for f in resource.readable_fields()]
# Resolve the selectors
pkey = str(resource._id)
resolve_selector = resource.resolve_selector
rfields = []
append = rfields.append
for f in fields:
if not f:
continue
elif isinstance(f, (tuple, list)):
label, selector = f[:2]
else:
label, selector = None, f
rfield = resolve_selector(selector)
if rfield.colname == pkey:
continue
if label:
rfield.label = label
append(rfield)
# Get current value
if get_vars and axis in get_vars:
value = get_vars[axis]
else:
value = ""
if value:
value = prefix(value)
# Dummy field
opts = [(prefix(rfield.selector), rfield.label) for rfield in rfields]
dummy_field = Storage(name=axis, requires=IS_IN_SET(opts))
# Construct widget
return OptionsWidget.widget(dummy_field,
value,
_id=widget_id,
_name=axis,
_class="pt-%s" % axis)
# -------------------------------------------------------------------------
def layer_options(self,
options=None,
get_vars=None,
widget_id=None):
"""
Construct an OptionsWidget for the fact layer
@param options: the report options
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element ID for the widget
"""
resource = self.resource
all_methods = S3PivotTableFact.METHODS
# Get all layers
layers = None
methods = None
if options:
if "methods" in options:
methods = options["methods"]
if "fact" in options:
layers = options["fact"]
if not layers:
layers = resource.get_config("list_fields")
if not layers:
layers = [f.name for f in resource.readable_fields()]
if not methods:
methods = all_methods
# Resolve layer options
T = current.T
RECORDS = T("Records")
mname = S3PivotTableFact._get_method_label
def layer_label(rfield, method):
""" Helper to construct a layer label """
mlabel = mname(method)
flabel = rfield.label if rfield.label != "Id" else RECORDS
# @ToDo: Exclude this string from admin/translate exports
return T("%s (%s)") % (flabel, mlabel)
prefix = resource.prefix_selector
layer_opts = []
for option in layers:
if not option:
continue
elif isinstance(option, tuple):
title, layer = option
else:
title, layer = None, option
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
continue
if len(facts) > 1:
# Multi-fact layer
labels = []
expressions = []
for fact in facts:
if not title:
rfield = resource.resolve_selector(fact.selector)
labels.append(fact.get_label(rfield, layers))
expressions.append("%s(%s)" % (fact.method, fact.selector))
if not title:
title = " / ".join(labels)
layer_opts.append((",".join(expressions), title))
continue
else:
fact = facts[0]
label = fact.label or title
if fact.default_method:
s, m = fact.selector, None
else:
s, m = fact.selector, fact.method
# Resolve the selector
selector = prefix(s)
rfield = resource.resolve_selector(selector)
if not rfield.field and not rfield.virtual:
continue
if m is None and label:
rfield.label = label
if m is None:
# Only field given -> auto-detect aggregation methods
is_amount = None
ftype = rfield.ftype
if ftype == "integer":
is_amount = True
requires = rfield.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
for r in requires:
if isinstance(r, IS_IN_SET) or \
isinstance(r, IS_EMPTY_OR) and \
isinstance(r.other, IS_IN_SET):
is_amount = False
elif ftype == "double":
is_amount = True
elif ftype[:9] == "reference" or \
ftype[:5] == "list:" or \
ftype in ("id", "string", "text"):
is_amount = False
if ftype in ("datetime", "date", "time"):
mopts = ["min", "max", "list"]
elif is_amount is None:
mopts = ["sum", "min", "max", "avg", "count", "list"]
elif is_amount:
mopts = ["sum", "min", "max", "avg"]
else:
mopts = ["count", "list"]
for method in mopts:
if method in methods:
label = layer_label(rfield, method)
layer_opts.append(("%s(%s)" % (method, selector), label))
else:
# Explicit method specified
if label is None:
label = layer_label(rfield, m)
layer_opts.append(("%s(%s)" % (m, selector), label))
# Get current value
if get_vars and "fact" in get_vars:
layer = get_vars["fact"]
else:
layer = ""
if layer:
match = LAYER.match(layer)
if match is None:
layer = ""
else:
selector, method = match.group(2), match.group(1)
selector = prefix(selector)
layer = "%s(%s)" % (method, selector)
if len(layer_opts) == 1:
# Field is read-only if there is only 1 option
default = layer_opts[0]
widget = TAG[""](default[1],
INPUT(_type="hidden",
_id=widget_id,
_name=widget_id,
_value=default[0],
_class="pt-fact-single-option"))
else:
# Render Selector
dummy_field = Storage(name="fact",
requires=IS_IN_SET(layer_opts))
widget = OptionsWidget.widget(dummy_field,
layer,
_id=widget_id,
_name="fact",
_class="pt-fact")
return widget
# -------------------------------------------------------------------------
@staticmethod
def _fieldset(title, widgets, **attr):
"""
Helper method to wrap widgets in a FIELDSET container with
show/hide option
@param title: the title for the field set
@param widgets: the widgets
@param attr: HTML attributes for the field set
"""
T = current.T
SHOW = T("Show")
HIDE = T("Hide")
return FIELDSET(LEGEND(title,
BUTTON(SHOW,
_type="button",
_class="toggle-text",
),
BUTTON(HIDE,
_type="button",
_class="toggle-text",
)
),
widgets,
**attr)
# =============================================================================
class S3ReportRepresent(object):
"""
Method to represent the contributing records in a pivot table
cell (cell explore)
The cell-explore record list will typically look like:
- <record representation>: <fact value(s)>
- ...
This method controls the first part of each entry.
For customization, configure for the table as:
report_represent = <subclass>
"""
def __init__(self, resource, rows=None, cols=None, facts=None):
"""
Constructor, initializes the method with the report context
to allow it to adapt the representation (e.g. it may often
be desirable to not repeat the report axes in the record list)
@param resource: the resource of the report
@param rows: the rows-selector (can be None)
@param cols: the columns-selector (can be None)
@param facts: the list of S3PivotTableFacts showing in
the pivot table
"""
self.resource = resource
self.rows = rows
self.cols = cols
self.facts = facts
# -------------------------------------------------------------------------
def __call__(self, record_ids):
"""
Represent record IDs, can be overloaded in subclasses
@param record_ids: list of record IDs
@returns: a JSON-serializable dict {recordID: representation},
or None to suppress recordID representation in the
cell explorer
            NB the default behavior is not sensitive to the report axes
"""
# Take a list of record ids
resource = self.resource
table = resource.table
represent = self.repr_method()
if represent:
if hasattr(represent, "bulk"):
# Bulk-represent the record IDs
output = represent.bulk(record_ids)
else:
# Represent the record IDs one by one
output = {record_id: represent(record_id)
for record_id in record_ids}
elif "name" in table.fields:
# Extract the names and return dict {id: name}
query = table._id.belongs(record_ids)
rows = current.db(query).select(table._id, table.name)
output = {}
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
for row in rows:
name = row.name
if not name:
name = UNKNOWN_OPT
                output[row[table._id]] = s3_str(name)
else:
# No reasonable default
# Render as record IDs (just as useful as nothing):
#output = {record_id: s3_str(record_id) for record_id in record_ids}
            # Return None to reduce the list to the fact values
# NB if fact is ID, this will suppress the record list
# altogether and show the number of records instead
output = None
return output
# -------------------------------------------------------------------------
def repr_method(self):
"""
Return a representation method for the id-field of
self.resource, can be overloaded in subclasses (simpler
than implementing __call__ if producing a representation
method is sufficient)
            @returns: a representation method (preferably an S3Represent)
"""
s3db = current.s3db
resource = self.resource
pkey = resource._id
represent = pkey.represent
if not represent:
# Standard representation methods can be listed here
# (if they don't normally depend on the report context)
if resource.tablename == "pr_person":
represent = s3db.pr_PersonRepresent()
return represent
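    # Hedged subclass sketch (table/field names and the S3Represent usage are
    # assumptions, not taken from this module); a model would register it per
    # table via: s3db.configure(tablename, report_represent = MyRepresent)
    #
    # class ActivityReportRepresent(S3ReportRepresent):
    #     def repr_method(self):
    #         # Represent contributing records by their "name" field
    #         return S3Represent(lookup = self.resource.tablename,
    #                            fields = ["name"],
    #                            )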
# =============================================================================
class S3PivotTableFact(object):
""" Class representing a fact layer """
#: Supported aggregation methods
METHODS = {"list": "List",
"count": "Count",
"min": "Minimum",
"max": "Maximum",
"sum": "Total",
"avg": "Average",
#"std": "Standard Deviation"
}
def __init__(self, method, selector, label=None, default_method=True):
"""
Constructor
@param method: the aggregation method
@param selector: the field selector
@param label: the fact label
@param default_method: using default method (used by parser)
"""
if method is None:
method = "count"
default_method = True
if method not in self.METHODS:
raise SyntaxError("Unsupported aggregation function: %s" % method)
self.method = method
self.selector = selector
self._layer = None
self.label = label
self.resource = None
self.rfield = None
self.column = selector
self.default_method = default_method
# -------------------------------------------------------------------------
@property
def layer(self):
"""
@todo: docstring
"""
layer = self._layer
if not layer:
layer = self._layer = (self.selector, self.method)
return layer
# -------------------------------------------------------------------------
def compute(self, values, method=DEFAULT, totals=False, precision=None):
"""
Aggregate a list of values.
@param values: iterable of values
@param method: the aggregation method
@param totals: this call is computing row/column/grand totals
@param precision: limit the precision of the computation to this
number of decimals (@todo: consider a default of 6)
"""
if values is None:
return None
if method is DEFAULT:
method = self.method
if totals and method == "list":
method = "count"
if method is None or method == "list":
return values if values else None
if method == "count":
# Count all non-null values
return len([v for v in values if v is not None])
else:
# Numeric values required - some virtual fields
# return '-' for None, so must type-check here:
values = [v for v in values if isinstance(v, INTEGER_TYPES + (float,))]
if method == "min":
try:
result = min(values)
except (TypeError, ValueError):
return None
elif method == "max":
try:
result = max(values)
except (TypeError, ValueError):
return None
elif method == "sum":
try:
result = sum(values)
except (TypeError, ValueError):
return None
elif method == "avg":
try:
number = len(values)
if number:
result = sum(values) / float(number)
else:
return 0.0
except (TypeError, ValueError):
return None
#elif method == "std":
#import numpy
#if not values:
#return 0.0
#try:
#result = numpy.std(values)
#except (TypeError, ValueError):
#return None
if type(result) is float and precision is not None:
return round(result, precision)
else:
return result
return None
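    # Hedged usage sketch for compute() (values are made up for illustration):
    #   fact = S3PivotTableFact("avg", "amount")
    #   fact.compute([1, 2, None, 3])               # -> 2.0 (None is ignored)
    #   fact.compute([1, 2, 3], method="sum")       # -> 6
    #   fact.compute([1.234, 2.345], precision=2)   # -> 1.79 (rounded average)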
# -------------------------------------------------------------------------
def aggregate_totals(self, totals):
"""
Aggregate totals for this fact (hyper-aggregation)
@param totals: iterable of totals
"""
if self.method in ("list", "count"):
total = self.compute(totals, method="sum")
else:
total = self.compute(totals)
return total
# -------------------------------------------------------------------------
@classmethod
def parse(cls, fact):
"""
Parse fact expression
@param fact: the fact expression
"""
if isinstance(fact, tuple):
label, fact = fact
else:
label = None
if isinstance(fact, list):
facts = []
for f in fact:
facts.extend(cls.parse(f))
if not facts:
raise SyntaxError("Invalid fact expression: %s" % fact)
return facts
# Parse the fact
other = None
default_method = False
if not fact:
method, parameters = "count", "id"
else:
match = FACT.match(fact)
if match:
method, parameters, other = match.groups()
if other:
other = cls.parse((label, other) if label else other)
elif SELECTOR.match(fact):
method, parameters, other = "count", fact, None
default_method = True
else:
raise SyntaxError("Invalid fact expression: %s" % fact)
# Validate method
if method not in cls.METHODS:
raise SyntaxError("Unsupported aggregation method: %s" % method)
# Extract parameters
parameters = parameters.split(",")
selector = parameters[0]
facts = [cls(method,
selector,
label=label,
default_method=default_method,
),
]
if other:
facts.extend(other)
return facts
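    # Hedged examples for parse() (selector names are assumptions, and the
    # FACT/SELECTOR regexes are assumed to accept the usual method(selector)
    # syntax):
    #   S3PivotTableFact.parse("count(person_id)")
    #       -> [count of person_id]
    #   S3PivotTableFact.parse(("Budget Total", "sum(budget)"))
    #       -> [sum of budget, labelled "Budget Total"]
    #   S3PivotTableFact.parse("beneficiaries")   # bare selector
    #       -> [count of beneficiaries, default_method=True]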
# -------------------------------------------------------------------------
@classmethod
def _get_method_label(cls, code):
"""
Get a label for a method
@param code: the method code
@return: the label (lazyT), or None for unsupported methods
"""
methods = cls.METHODS
if code is None:
code = "list"
if code in methods:
return current.T(methods[code])
else:
return None
# -------------------------------------------------------------------------
@staticmethod
def _get_field_label(rfield, fact_options=None):
"""
Get the label for a field
@param rfield: the S3ResourceField
@param fact_options: the corresponding subset of the report
options ("fact", "rows" or "cols")
"""
label = None
if not rfield:
return ""
resource = rfield.resource
fields = list(fact_options) if fact_options else []
list_fields = resource.get_config("list_fields")
if list_fields:
fields.extend(list_fields)
prefix = resource.prefix_selector
# Search through the field labels in report options
selector = prefix(rfield.selector)
for f in fields:
if type(f) is tuple and \
isinstance(f[1], basestring) and \
prefix(f[1]) == selector:
label = f[0]
break
if not label and rfield:
if rfield.ftype == "id":
label = current.T("Records")
else:
label = rfield.label
return label if label else ""
# -------------------------------------------------------------------------
def get_label(self, rfield, fact_options=None):
"""
Get a label for this fact
@param rfield: the S3ResourceField
@param fact_options: the "fact" list of the report options
"""
label = self.label
if label:
# Already set
return label
if fact_options:
# Lookup the label from the fact options
prefix = rfield.resource.prefix_selector
for fact_option in fact_options:
facts = self.parse(fact_option)
for fact in facts:
if fact.method == self.method and \
prefix(fact.selector) == prefix(self.selector):
label = fact.label
break
if label:
break
if not label:
# Construct a label from the field label and the method name
field_label = self._get_field_label(rfield, fact_options)
method_label = self._get_method_label(self.method)
label = "%s (%s)" % (field_label, method_label)
self.label = label
return label
# =============================================================================
class S3PivotTable(object):
""" Class representing a pivot table of a resource """
def __init__(self, resource, rows, cols, facts, strict=True, precision=None):
"""
Constructor - extracts all unique records, generates a
pivot table from them with the given dimensions and
computes the aggregated values for each cell.
@param resource: the S3Resource
@param rows: field selector for the rows dimension
@param cols: field selector for the columns dimension
@param facts: list of S3PivotTableFacts to compute
@param strict: filter out dimension values which don't match
the resource filter
@param precision: maximum precision of aggregate computations,
a dict {selector:number_of_decimals}
"""
# Initialize ----------------------------------------------------------
#
if not rows and not cols:
raise SyntaxError("No rows or columns specified for pivot table")
self.resource = resource
self.lfields = None
self.dfields = None
self.rfields = None
self.rows = rows
self.cols = cols
self.facts = facts
self.precision = precision if isinstance(precision, dict) else {}
# API variables -------------------------------------------------------
#
self.records = None
""" All records in the pivot table as a Storage like:
{
<record_id>: <Row>
}
"""
self.empty = False
""" Empty-flag (True if no records could be found) """
self.numrows = None
""" The number of rows in the pivot table """
self.numcols = None
""" The number of columns in the pivot table """
self.cell = None
""" Array of pivot table cells in [rows[columns]]-order, each
cell is a Storage like:
{
records: <list_of_record_ids>,
(<fact>, <method>): <aggregated_value>, ...per layer
}
"""
self.row = None
""" List of row headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.col = None
""" List of column headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.totals = Storage()
""" The grand total values for each layer, as a Storage like:
{
(<fact>, <method): <total value>, ...per layer
}
"""
self.values = {}
# Get the fields ------------------------------------------------------
#
tablename = resource.tablename
# The "report_fields" table setting defines which additional
# fields shall be included in the report base layer. This is
# useful to provide easy access to the record data behind a
# pivot table cell.
fields = current.s3db.get_config(tablename, "report_fields", [])
self._get_fields(fields=fields)
rows = self.rows
cols = self.cols
# Exclude records with empty axis values ------------------------------
#
exclude_empty = current.s3db.get_config(tablename, "report_exclude_empty")
if exclude_empty is True:
# Exclude empty axis values for all fields
query = (FS(rows) != None) & (FS(cols) != None)
resource.add_filter(query)
elif type(exclude_empty) is tuple:
# Exclude empty axis values for some fields
for axis in (cols, rows):
if axis in exclude_empty:
resource.add_filter(FS(axis) != None)
# Retrieve the records ------------------------------------------------
#
data = resource.select(list(self.rfields.keys()), limit=None)
drows = data["rows"]
if drows:
key = str(resource.table._id)
records = Storage([(i[key], i) for i in drows])
# Generate the data frame -----------------------------------------
#
gfields = self.gfields
pkey_colname = gfields[self.pkey]
rows_colname = gfields[rows]
cols_colname = gfields[cols]
if strict:
rfields = self.rfields
axes = (rfield
for rfield in (rfields[rows], rfields[cols])
if rfield != None)
axisfilter = resource.axisfilter(axes)
else:
axisfilter = None
dataframe = []
extend = dataframe.extend
expand = self._expand
for _id in records:
row = records[_id]
item = {key: _id}
if rows_colname:
item[rows_colname] = row[rows_colname]
if cols_colname:
item[cols_colname] = row[cols_colname]
extend(expand(item, axisfilter=axisfilter))
self.records = records
# Group the records -----------------------------------------------
#
matrix, rnames, cnames = self._pivot(dataframe,
pkey_colname,
rows_colname,
cols_colname)
# Initialize columns and rows -------------------------------------
#
if cols:
self.col = [Storage({"value": v}) for v in cnames]
self.numcols = len(self.col)
else:
self.col = [Storage({"value": None})]
self.numcols = 1
if rows:
self.row = [Storage({"value": v}) for v in rnames]
self.numrows = len(self.row)
else:
self.row = [Storage({"value": None})]
self.numrows = 1
# Add the layers --------------------------------------------------
#
add_layer = self._add_layer
for fact in self.facts:
add_layer(matrix, fact)
else:
# No items to report on -------------------------------------------
#
self.empty = True
# -------------------------------------------------------------------------
# API methods
# -------------------------------------------------------------------------
def __len__(self):
""" Total number of records in the report """
items = self.records
if items is None:
return 0
else:
return len(self.records)
# -------------------------------------------------------------------------
def geojson(self,
fact=None,
level="L0"):
"""
Render the pivot table data as a dict ready to be exported as
GeoJSON for display on a Map.
Called by S3Report.geojson()
            @param fact: the S3PivotTableFact to aggregate (defaults to the
                         first fact), e.g. layer ("id", "count")
                         - we only support methods "count" & "sum"
                         - @ToDo: Support density: 'per sqkm' and 'per population'
            @param level: the aggregation level (defaults to Country)
"""
if fact is None:
fact = self.facts[0]
layer = fact.layer
# The rows dimension
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() if-desired
context = self.resource.get_config("context")
if context and "location" in context:
rows_dim = "(location)$%s" % level
else:
# Fallback to location_id
rows_dim = "location_id$%s" % level
# Fallback we can add if-required
#rows_dim = "site_id$location_id$%s" % level
# The data
attributes = {}
geojsons = {}
if self.empty:
location_ids = []
else:
numeric = lambda x: isinstance(x, INTEGER_TYPES + (float,))
row_repr = s3_str
ids = {}
irows = self.row
rows = []
# Group and sort the rows
is_numeric = None
for i in xrange(self.numrows):
irow = irows[i]
total = irow[layer]
if is_numeric is None:
is_numeric = numeric(total)
if not is_numeric:
total = len(irow.records)
header = Storage(value = irow.value,
text = irow.text if "text" in irow
else row_repr(irow.value))
rows.append((i, total, header))
self._sortdim(rows, self.rfields[rows_dim])
# Aggregate the grouped values
db = current.db
gtable = current.s3db.gis_location
query = (gtable.level == level) & (gtable.deleted == False)
for _, rtotal, rtitle in rows:
rval = rtitle.value
if rval:
# @ToDo: Handle duplicate names ;)
if rval in ids:
_id = ids[rval]
else:
q = query & (gtable.name == rval)
row = db(q).select(gtable.id,
gtable.parent,
limitby=(0, 1)
).first()
try:
_id = row.id
except AttributeError:
continue
# Cache
ids[rval] = _id
attribute = dict(name=s3_str(rval),
value=rtotal)
attributes[_id] = attribute
location_ids = [ids[r] for r in ids]
query = (gtable.id.belongs(location_ids))
geojsons = current.gis.get_locations(gtable,
query,
join=False,
geojson=True)
# Prepare for export via xml.gis_encode() and geojson/export.xsl
location_data = {}
geojsons = dict(gis_location = geojsons)
location_data["geojsons"] = geojsons
attributes = dict(gis_location = attributes)
location_data["attributes"] = attributes
return location_ids, location_data
# -------------------------------------------------------------------------
def json(self, maxrows=None, maxcols=None):
"""
Render the pivot table data as JSON-serializable dict
            @param maxrows: maximum number of rows (None for all)
            @param maxcols: maximum number of columns (None for all)

            Output structure:
            {
                labels: {layer:, rows:, cols:, total:, ...},
                facts: [[selector, method, label], ...],
                cells: [rows[cols]],
                rows: [[index, is_other, totals, value, label], ...],
                cols: [[index, is_other, totals, value, label], ...],
                total: <grand total>,
                nodata: <message if no data>,
                filter: [rows selector, cols selector]
            }
"""
rfields = self.rfields
resource = self.resource
T = current.T
OTHER = "__other__"
rows_dim = self.rows
cols_dim = self.cols
# The output data
orows = []
rappend = orows.append
ocols = []
cappend = ocols.append
ocells = []
lookups = {}
facts = self.facts
if not self.empty:
# Representation methods for row and column keys
row_repr = self._represent_method(rows_dim)
col_repr = self._represent_method(cols_dim)
# Label for the "Others" row/columns
others = s3_str(T("Others"))
# Get the layers (fact.selector, fact.method),
# => used as keys to access the pivot data
layers = [fact.layer for fact in facts]
least = facts[0].method == "min"
# Group and sort the rows (grouping = determine "others")
irows = self.row
rows = []
rtail = (None, None)
for i in xrange(self.numrows):
irow = irows[i]
totals = [irow[layer] for layer in layers]
sort_total = totals[0]
header = {"value": irow.value,
"text": irow.text if "text" in irow
else row_repr(irow.value),
}
rows.append((i, sort_total, totals, header))
if maxrows is not None:
rtail = self._tail(rows, maxrows, least=least, facts=facts)
self._sortdim(rows, rfields[rows_dim])
if rtail[1] is not None:
values = [irows[i]["value"] for i in rtail[0]]
rows.append((OTHER,
rtail[1],
rtail[2],
{"value": values, "text":others},
))
# Group and sort the cols (grouping = determine "others")
icols = self.col
cols = []
ctail = (None, None)
for i in xrange(self.numcols):
icol = icols[i]
totals = [icol[layer] for layer in layers]
sort_total = totals[0]
header = {"value": icol.value,
"text": icol.text if "text" in icol
else col_repr(icol.value),
}
cols.append((i, sort_total, totals, header))
if maxcols is not None:
ctail = self._tail(cols, maxcols, least=least, facts=facts)
self._sortdim(cols, rfields[cols_dim])
if ctail[1] is not None:
values = [icols[i]["value"] for i in ctail[0]]
cols.append((OTHER,
ctail[1],
ctail[2],
{"value": values, "text": others},
))
rothers = rtail[0] or set()
cothers = ctail[0] or set()
# Group and sort the cells accordingly
# @todo: break up into subfunctions
icell = self.cell
cells = {}
for i in xrange(self.numrows):
irow = icell[i]
ridx = (i, OTHER) if rothers and i in rothers else (i,)
for j in xrange(self.numcols):
cell = irow[j]
cidx = (j, OTHER) if cothers and j in cothers else (j,)
cell_records = cell["records"]
for layer_index, layer in enumerate(layers):
# Get cell items for the layer
# => items can be a single numeric value, or a list
items = cell[layer]
# Get cell value for the layer
if isinstance(items, list):
value = len(items)
else:
value = items
for ri in ridx:
if ri not in cells:
orow = cells[ri] = {}
else:
orow = cells[ri]
for ci in cidx:
if ci not in orow:
# Create a new output cell
ocell = orow[ci] = {"values": [],
"items": [],
"records": [],
}
else:
ocell = orow[ci]
if layer_index == 0:
# Extend the list of records
ocell["records"].extend(cell_records)
value_array = ocell["values"]
items_array = ocell["items"]
if len(value_array) <= layer_index:
value_array.append(value)
items_array.append(items)
else:
ovalue = value_array[layer_index]
oitems = items_array[layer_index]
if isinstance(ovalue, list):
ovalue.append(value)
oitems.append(items)
else:
value_array[layer_index] = [ovalue, value]
items_array[layer_index] = [oitems, items]
# Get field representation methods
represents = self._represents(layers)
# Aggregate the grouped values
add_columns = True # do this only once
for rindex, rtotal, rtotals, rtitle in rows:
orow = []
# Row value for filter construction
rval = rtitle["value"]
if rindex == OTHER and isinstance(rval, list):
rval = ",".join(s3_str(v) for v in rval)
elif rval is not None:
rval = s3_str(rval)
# The output row summary
rappend((rindex,
rindex in rothers,
rtotals,
rval,
rtitle["text"],
))
for cindex, ctotal, ctotals, ctitle in cols:
# Get the corresponding cell
cell = cells[rindex][cindex]
value_array = cell["values"]
items_array = cell["items"]
# Initialize the output cell
ocell = {"i": [], "v": []}
okeys = None
for layer_index, fact in enumerate(facts):
selector, method = fact.layer
# The value(s) to render in this cell
items = items_array[layer_index]
# The cell total for this layer (for charts)
value = value_array[layer_index]
if type(value) is list:
# "Others" cell with multiple totals
value = fact.aggregate_totals(value)
ocell["v"].append(value)
rfield = self.rfields[selector]
if method == "list":
# Build a look-up table with field value representations
if selector not in lookups:
lookup = lookups[selector] = {}
else:
lookup = lookups[selector]
represent = represents[selector]
keys = []
for record_id in cell["records"]:
record = self.records[record_id]
try:
fvalue = record[rfield.colname]
except AttributeError:
continue
if fvalue is None:
continue
if type(fvalue) is not list:
fvalue = [fvalue]
for v in fvalue:
if v is None:
continue
if v not in keys:
keys.append(v)
if v not in lookup:
lookup[v] = represent(v)
# Sort the keys by their representations
keys.sort(key=lambda i: lookup[i])
items = [lookup[key] for key in keys if key in lookup]
elif method in ("sum", "count") and okeys is None:
# Include only cell records in okeys which actually
# contribute to the aggregate
okeys = []
for record_id in cell["records"]:
record = self.records[record_id]
try:
fvalue = record[rfield.colname]
except AttributeError:
continue
if method == "sum" and \
isinstance(fvalue, INTEGER_TYPES + (float,)) and fvalue:
okeys.append(record_id)
elif method == "count" and \
fvalue is not None:
okeys.append(record_id)
else:
# Include all cell records in okeys
okeys = cell["records"]
ocell["i"].append(items)
if okeys:
ocell["k"] = okeys
orow.append(ocell)
if add_columns:
# Column value for filter construction
cval = ctitle["value"]
if cindex == OTHER and isinstance(cval, list):
cval = ",".join(s3_str(v) for v in cval)
elif cval is not None:
cval = s3_str(cval)
# The output column summary
cappend((cindex,
cindex in cothers,
ctotals,
cval,
ctitle["text"],
))
add_columns = False
ocells.append(orow)
# Lookup labels
report_options = resource.get_config("report_options", {})
if report_options:
fact_options = report_options.get("fact")
else:
fact_options = ()
# @todo: lookup report title before constructing from fact labels
fact_data = []
fact_labels = []
for fact in facts:
rfield = rfields[fact.selector]
fact_label = str(fact.get_label(rfield, fact_options))
fact_data.append((fact.selector, fact.method, fact_label))
fact_labels.append(fact_label)
get_label = S3PivotTableFact._get_field_label
if rows_dim:
rows_label = str(get_label(rfields[rows_dim], report_options.get("rows")))
else:
rows_label = ""
if cols_dim:
cols_label = str(get_label(rfields[cols_dim], report_options.get("cols")))
else:
cols_label = ""
labels = {"total": str(T("Total")),
"none": str(current.messages["NONE"]),
"per": str(T("per")),
"breakdown": str(T("Breakdown")),
# @todo: use report title:
"layer": " / ".join(fact_labels),
"rows": rows_label,
"cols": cols_label,
}
# Compile the output dict
output = {"rows": orows,
"cols": ocols,
"facts": fact_data,
"cells": ocells,
"total": self._totals(self.totals, [fact]),
"nodata": None if not self.empty else str(T("No data available")),
"labels": labels,
}
# Add axis selectors for filter-URL construction
prefix = resource.prefix_selector
output["filter"] = (prefix(rows_dim) if rows_dim else None,
prefix(cols_dim) if cols_dim else None,
)
return output
# -------------------------------------------------------------------------
def xls(self, title):
"""
Convert this pivot table into an XLS file
@param title: the title of the report
@returns: the XLS file as stream
"""
from .s3codec import S3Codec
exporter = S3Codec.get_codec("xls")
return exporter.encode_pt(self, title)
# -------------------------------------------------------------------------
def _represents(self, layers):
"""
Get the representation functions per fact field
@param layers: the list of layers, tuples (selector, method)
"""
rfields = self.rfields
represents = {}
values = self.values
for selector, method in layers:
if selector in represents:
continue
# Get the field
rfield = rfields[selector]
f = rfield.field
# Utilize bulk-representation for field values
if method == "list" and \
f is not None and hasattr(f.represent, "bulk"):
all_values = values[(selector, method)]
if all_values:
f.represent.bulk(list(s3_flatlist(all_values)))
# Get the representation method
has_fk = f is not None and s3_has_foreign_key(f)
if has_fk:
represent = lambda v, f=f: s3_str(f.represent(v))
else:
m = self._represent_method(selector)
represent = lambda v, m=m: s3_str(m(v))
represents[selector] = represent
return represents
# -------------------------------------------------------------------------
@staticmethod
def _sortdim(items, rfield, index=3):
"""
Sort a dimension (sorts items in-place)
@param items: the items as list of tuples
(index, sort-total, totals, header)
@param rfield: the dimension (S3ResourceField)
@param index: alternative index of the value/text dict
within each item
"""
if not rfield:
return
ftype = rfield.ftype
sortby = "value"
if ftype in ("integer", "string"):
# Sort option keys by their representation
requires = rfield.requires
if requires:
if isinstance(requires, (tuple, list)):
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if isinstance(requires, IS_IN_SET):
sortby = "text"
elif ftype[:9] == "reference" or ftype[:8] == "list:ref":
# Sort foreign keys by their representation
sortby = "text"
# Replacements for None when sorting
minnum = -float('inf')
minval = {"integer": minnum,
"float": minnum,
"string": "",
"date": datetime.date.min,
"datetime": datetime.datetime.min,
"boolean": 1,
"id": minnum,
}
# Sorting key function
def key(item):
value = item[index][sortby]
if value is None:
return "" if sortby == "text" else minval.get(ftype)
elif ftype == "boolean":
return -int(value)
else:
return value
items.sort(key=key)
# -------------------------------------------------------------------------
@classmethod
def _tail(cls, items, length=10, least=False, facts=None):
"""
Find the top/least <length> items (by total)
@param items: the items as list of tuples
(index, sort-total, totals, header)
@param length: the maximum number of items
@param least: find least rather than top
@param facts: the facts to aggregate the tail totals
"""
try:
if len(items) > length:
l = list(items)
                l.sort(key=lambda item: item[1], reverse=True)
if least:
l.reverse()
keys = [item[0] for item in l[length-1:]]
totals = []
for i, fact in enumerate(facts):
subtotals = [item[2][i] for item in l[length-1:]]
totals.append(fact.aggregate_totals(subtotals))
return (keys, totals[0], totals)
except (TypeError, ValueError):
pass
return (None, None)
# -------------------------------------------------------------------------
@staticmethod
def _totals(values, facts, append=None):
"""
Get the totals of a row/column/report
@param values: the values dictionary
@param facts: the facts
@param append: callback to collect the totals for JSON data
(currently only collects the first layer)
"""
totals = []
number_represent = IS_NUMBER.represent
for fact in facts:
value = values[fact.layer]
#if fact.method == "list":
#value = value and len(value) or 0
if not len(totals) and append is not None:
append(value)
totals.append(s3_str(number_represent(value)))
totals = " / ".join(totals)
return totals
# -------------------------------------------------------------------------
# Internal methods
# -------------------------------------------------------------------------
@staticmethod
def _pivot(items, pkey_colname, rows_colname, cols_colname):
"""
2-dimensional pivoting of a list of unique items
@param items: list of unique items as dicts
@param pkey_colname: column name of the primary key
@param rows_colname: column name of the row dimension
@param cols_colname: column name of the column dimension
@return: tuple of (cell matrix, row headers, column headers),
where cell matrix is a 2-dimensional array [rows[columns]]
and row headers and column headers each are lists (in the
same order as the cell matrix)
"""
rvalues = Storage()
cvalues = Storage()
cells = Storage()
# All unique rows values
rindex = 0
cindex = 0
for item in items:
rvalue = item[rows_colname] if rows_colname else None
cvalue = item[cols_colname] if cols_colname else None
if rvalue not in rvalues:
r = rvalues[rvalue] = rindex
rindex += 1
else:
r = rvalues[rvalue]
if cvalue not in cvalues:
c = cvalues[cvalue] = cindex
cindex += 1
else:
c = cvalues[cvalue]
if (r, c) not in cells:
cells[(r, c)] = [item[pkey_colname]]
else:
cells[(r, c)].append(item[pkey_colname])
matrix = []
for r in xrange(len(rvalues)):
row = []
for c in xrange(len(cvalues)):
row.append(cells[(r, c)])
matrix.append(row)
rnames = [None] * len(rvalues)
for k, v in rvalues.items():
rnames[v] = k
cnames = [None] * len(cvalues)
for k, v in cvalues.items():
cnames[v] = k
return matrix, rnames, cnames
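    # Hedged example for _pivot() (column names are made up); cells for unseen
    # row/column combinations come out as None because `cells` is a Storage:
    #   items = [{"id": 1, "org": "A", "type": "x"},
    #            {"id": 2, "org": "A", "type": "y"},
    #            {"id": 3, "org": "B", "type": "x"}]
    #   S3PivotTable._pivot(items, "id", "org", "type")
    #   -> ([[[1], [2]], [[3], None]], ["A", "B"], ["x", "y"])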
# -------------------------------------------------------------------------
def _add_layer(self, matrix, fact):
"""
Compute an aggregation layer, updates:
- self.cell: the aggregated values per cell
- self.row: the totals per row
- self.col: the totals per column
- self.totals: the overall totals per layer
            @param matrix: the cell matrix
            @param fact: the S3PivotTableFact (provides the selector and the
                         aggregation method)
"""
rows = self.row
cols = self.col
records = self.records
extract = self._extract
resource = self.resource
RECORDS = "records"
VALUES = "values"
table = resource.table
pkey = table._id.name
layer = fact.layer
precision = self.precision.get(fact.selector)
numcols = len(self.col)
numrows = len(self.row)
# Initialize cells
if self.cell is None:
self.cell = [[Storage()
for i in xrange(numcols)]
for j in xrange(numrows)]
cells = self.cell
all_values = []
for r in xrange(numrows):
# Initialize row header
row = rows[r]
row[RECORDS] = []
row[VALUES] = []
row_records = row[RECORDS]
row_values = row[VALUES]
for c in xrange(numcols):
# Initialize column header
col = cols[c]
if RECORDS not in col:
col[RECORDS] = []
col_records = col[RECORDS]
if VALUES not in col:
col[VALUES] = []
col_values = col[VALUES]
# Get the records
cell = cells[r][c]
if RECORDS in cell and cell[RECORDS] is not None:
ids = cell[RECORDS]
else:
data = matrix[r][c]
if data:
remove = data.remove
while None in data:
remove(None)
ids = data
else:
ids = []
cell[RECORDS] = ids
row_records.extend(ids)
col_records.extend(ids)
# Get the values
if fact.selector is None:
fact.selector = pkey
values = ids
row_values = row_records
                    col_values = col_records
all_values = list(records.keys())
else:
values = []
append = values.append
for i in ids:
value = extract(records[i], fact.selector)
if value is None:
continue
append(value)
values = list(s3_flatlist(values))
if fact.method in ("list", "count"):
values = list(set(values))
row_values.extend(values)
col_values.extend(values)
all_values.extend(values)
# Aggregate values
value = fact.compute(values, precision=precision)
cell[layer] = value
# Compute row total
row[layer] = fact.compute(row_values,
totals = True,
precision = precision,
)
del row[VALUES]
# Compute column total
for c in xrange(numcols):
col = cols[c]
col[layer] = fact.compute(col[VALUES],
totals = True,
precision = precision,
)
del col[VALUES]
# Compute overall total
self.totals[layer] = fact.compute(all_values,
totals = True,
precision = precision,
)
self.values[layer] = all_values
# -------------------------------------------------------------------------
def _get_fields(self, fields=None):
"""
Determine the fields needed to generate the report
@param fields: fields to include in the report (all fields)
"""
resource = self.resource
table = resource.table
# Lambda to prefix all field selectors
alias = resource.alias
def prefix(s):
if isinstance(s, (tuple, list)):
return prefix(s[-1])
if "." not in s.split("$", 1)[0]:
return "%s.%s" % (alias, s)
elif s[:2] == "~.":
return "%s.%s" % (alias, s[2:])
else:
return s
self.pkey = pkey = prefix(table._id.name)
self.rows = rows = prefix(self.rows) if self.rows else None
self.cols = cols = prefix(self.cols) if self.cols else None
if not fields:
fields = ()
# dfields (data-fields): fields to generate the layers
dfields = [prefix(s) for s in fields]
if rows and rows not in dfields:
dfields.append(rows)
if cols and cols not in dfields:
dfields.append(cols)
if pkey not in dfields:
dfields.append(pkey)
# Normalize fact selectors
for fact in self.facts:
fact.selector = selector = prefix(fact.selector)
if selector not in dfields:
dfields.append(selector)
self.dfields = dfields
# Normalize precision selectors
precision = {}
for selector, decimals in self.precision.items():
precision[prefix(selector)] = decimals
self.precision = precision
# rfields (resource-fields): dfields resolved into a ResourceFields map
rfields = resource.resolve_selectors(dfields)[0]
rfields = Storage([(f.selector.replace("~", alias), f) for f in rfields])
self.rfields = rfields
# gfields (grouping-fields): fields to group the records by
self.gfields = {pkey: rfields[pkey].colname,
rows: rfields[rows].colname
if rows and rows in rfields else None,
cols: rfields[cols].colname
if cols and cols in rfields else None,
}
# -------------------------------------------------------------------------
def _represent_method(self, field):
"""
Get the representation method for a field in the report
@param field: the field selector
"""
rfields = self.rfields
default = lambda value: None
if field and field in rfields:
rfield = rfields[field]
if rfield.field:
def repr_method(value):
return s3_represent_value(rfield.field,
value,
strip_markup = True,
)
elif rfield.virtual:
# If rfield defines a represent, use it
represent = rfield.represent
if not represent:
represent = s3_str
# Wrap with markup stripper
stripper = S3MarkupStripper()
def repr_method(val):
if val is None:
return "-"
text = represent(val)
if "<" in text:
stripper.feed(text)
return stripper.stripped()
else:
return text
else:
repr_method = default
else:
repr_method = default
return repr_method
# -------------------------------------------------------------------------
def _extract(self, row, field):
"""
Extract a field value from a DAL row
@param row: the row
@param field: the fieldname (list_fields syntax)
"""
rfields = self.rfields
if field not in rfields:
raise KeyError("Invalid field name: %s" % field)
rfield = rfields[field]
try:
return rfield.extract(row)
except AttributeError:
return None
# -------------------------------------------------------------------------
def _expand(self, row, axisfilter=None):
"""
Expand a data frame row into a list of rows for list:type values
            @param row: the data frame row
            @param axisfilter: dict of permitted field values per column name
"""
pairs = []
append = pairs.append
for colname in self.gfields.values():
if not colname:
continue
value = row[colname]
if type(value) is list:
if not value:
value = [None]
if axisfilter and colname in axisfilter:
p = [(colname, v) for v in value
if v in axisfilter[colname]]
if not p:
raise RuntimeError("record does not match query")
else:
append(p)
else:
append([(colname, v) for v in value])
else:
append([(colname, value)])
result = [dict(i) for i in product(*pairs)]
return result
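    # Hedged example for _expand() (column names are made up): a list:type
    # axis value produces one pseudo-row per list element:
    #   row = {"org_organisation.id": 4,
    #          "org_organisation.sector": ["Health", "WASH"]}
    #   -> [{"org_organisation.id": 4, "org_organisation.sector": "Health"},
    #       {"org_organisation.id": 4, "org_organisation.sector": "WASH"}]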
# END =========================================================================
|
the-stack_0_19522
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='R2FM4T2mYA6XNuu0lTs8IJ4Ctiqc9i9A8KcSBh2t1kP7nAOmNE6OeLSxnFrC73MC')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
|
the-stack_0_19524
|
'''
Evaluate script before submit
'''
import os
import zipfile
import numpy as np
import pandas as pd
from PIL import Image
import argparse
from skimage.measure import compare_ssim
from tqdm import tqdm
import MCS2018
from torchvision import transforms
#import MCS2018_CPU as MCS2018 if you are using only CPU black-box model
import shutil
SSIM_THR = 0.95
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
parser = argparse.ArgumentParser(description='pre-submit evaluate')
parser.add_argument('--original_root',
type=str,
help='original data root path',
default='../data/imgs/')
parser.add_argument('--attack_root',
required=True,
type=str,
help='changed data root path')
parser.add_argument('--submit_list',
type=str,
help='path of datalist',
default='../data/submit_list.csv')
parser.add_argument('--target_dscr',
required=True,
type=str,
help="target descriptors path (.npy),"\
" will be created if file doesn't exist")
parser.add_argument('--submit_name',
required=True,
type=str)
parser.add_argument('--gpu_id',
type=int,
help='GPU ID for black box, default = -1 (CPU)',
default=-1)
parser.add_argument('--pairs_list',
type=str,
help='attack pairs list',
default='../data/pairs_list.csv')
args = parser.parse_args()
def save_submit_file(img_list, descriptor_path):
submit_directory = './submits'
if not os.path.isdir(submit_directory):
os.makedirs(submit_directory)
submit_file = os.path.join(submit_directory, args.submit_name + '.zip')
with zipfile.ZipFile(submit_file,'w') as myzip:
for img_name in tqdm(img_list.path.values,
desc='archive images'):
img_path = os.path.join(args.attack_root, img_name)
myzip.write(img_path, arcname=img_name)
myzip.write(descriptor_path, arcname='descriptors.npy')
def euclid_dist(x,y, axis=1):
return np.sqrt(((x - y) ** 2).sum(axis=axis))
def main(args):
# loading black-box model
net = MCS2018.Predictor(args.gpu_id)
img_list = pd.read_csv(args.submit_list)
descriptors = np.ones((len(img_list), 512))
cropping = transforms.Compose([transforms.CenterCrop(224),
transforms.Scale(112)])
cropped_img_preprocessing = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)
])
# SSIM checking
error=False
for idx, img_name in tqdm(enumerate(img_list.path.values),
total=len(img_list.path.values),
desc='SSIM'):
img = Image.open(os.path.join(args.attack_root, img_name))
org_img = Image.open(os.path.join(args.original_root,
img_name.replace('.png', '.jpg')))
org_img = cropping(org_img)
ssim = compare_ssim(np.array(img), np.array(org_img), multichannel=True)
if idx%100==0:
print(ssim)
if ssim < SSIM_THR:
error=True
os.remove(os.path.join(args.attack_root, img_name))
continue
# assert ssim >= SSIM_THR, '{0}\n ssim < {1}'.format(img_name,SSIM_THR)
# Creating batch with one element
batch_from_one_img = np.array(cropped_img_preprocessing(img).unsqueeze(0),
dtype=np.float32)
# Getting batch result from black-box
res = net.submit(batch_from_one_img).squeeze()
descriptors[idx] = res
assert error==False
# Saving descriptors for submit
descriptor_path = os.path.join(args.attack_root, 'descriptors.npy')
np.save(descriptor_path, descriptors)
# axis 0 - number of imgs for each class
# axis 1 - number of classes
# axis 2 - descriptor size
descriptors = descriptors.reshape((5,1000,512), order='F')
if not os.path.isfile(args.target_dscr):
pairs_list = pd.read_csv(args.pairs_list)
preprocessing = transforms.Compose([
transforms.CenterCrop(224),
transforms.Scale(112),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD),
])
val_img_list = []
for target_imgs in pairs_list.target_imgs.values:
for img_name in target_imgs.split('|'):
img_name = os.path.join(args.original_root, img_name)
val_img_list.append(img_name)
target_descriptors = np.ones((5000, 512), dtype=np.float32)
for idx, img_name in tqdm(enumerate(val_img_list),
total=len(val_img_list),
desc='get descriptors'):
img = Image.open(img_name)
img_arr = preprocessing(img).unsqueeze(0).numpy()
res = net.submit(img_arr).squeeze()
target_descriptors[idx] = res
target_descriptors = target_descriptors.reshape((5,1000,512),
order='F')
np.save(args.target_dscr, target_descriptors)
#target descriptors shape: (5,1000,512)
target_descriptors = np.load(args.target_dscr)
# axis 0 - img number for source class
# axis 1 - img number for target class
# axis 2 - number of classes
dist_all = np.zeros((5,5,1000))
for idx, dscr_row in enumerate(descriptors):
for jdx, target_dscr_row in enumerate(target_descriptors):
assert(dscr_row.shape == target_dscr_row.shape)
dist = euclid_dist(target_dscr_row, dscr_row)
dist_all[idx][jdx] = dist
score_value = dist_all.mean(axis=1).mean(axis=0).mean()
print ('Validation score: {0:.3f}'.format(score_value))
    # Make sure the output directory exists before writing the archive
    submit_directory = './submit'
    if not os.path.isdir(submit_directory):
        os.makedirs(submit_directory)
    submit_file = os.path.join(submit_directory, args.submit_name + '.zip')
with zipfile.ZipFile(submit_file, 'w') as myzip:
for img_name in tqdm(img_list.path.values,
desc='archive images'):
img_path = os.path.join(args.attack_root, img_name)
myzip.write(img_path, arcname=img_name)
myzip.write(descriptor_path, arcname='descriptors.npy')
if __name__ == '__main__':
main(args)
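# Usage note (editor addition; file and script names below are examples): the attacked
# images are expected to be 112x112 PNG crops named after the originals, matching the
# CenterCrop(224) + Scale(112) preprocessing applied to the source .jpg files, e.g.
#   python evaluate.py --attack_root ./attacked_imgs/ --target_dscr ./target_descriptors.npy \
#       --submit_name baseline --gpu_id 0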
|
the-stack_0_19525
|
# Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
import torch
import torch.nn as nn
__all__ = ['profile']
def count_convNd(m, _, y):
cin = m.in_channels
kernel_ops = m.weight.size()[2] * m.weight.size()[3]
ops_per_element = kernel_ops
output_elements = y.nelement()
# cout x oW x oH
total_ops = cin * output_elements * ops_per_element // m.groups
m.total_ops = torch.zeros(1).fill_(total_ops)
def count_linear(m, _, __):
total_ops = m.in_features * m.out_features
m.total_ops = torch.zeros(1).fill_(total_ops)
register_hooks = {
nn.Conv1d: count_convNd,
nn.Conv2d: count_convNd,
nn.Conv3d: count_convNd,
######################################
nn.Linear: count_linear,
######################################
nn.Dropout: None,
nn.Dropout2d: None,
nn.Dropout3d: None,
nn.BatchNorm2d: None,
}
def profile(model, input_size, custom_ops=None):
handler_collection = []
custom_ops = {} if custom_ops is None else custom_ops
def add_hooks(m_):
if len(list(m_.children())) > 0:
return
m_.register_buffer('total_ops', torch.zeros(1))
m_.register_buffer('total_params', torch.zeros(1))
for p in m_.parameters():
m_.total_params += torch.zeros(1).fill_(p.numel())
m_type = type(m_)
fn = None
if m_type in custom_ops:
fn = custom_ops[m_type]
elif m_type in register_hooks:
fn = register_hooks[m_type]
if fn is not None:
_handler = m_.register_forward_hook(fn)
handler_collection.append(_handler)
original_device = model.parameters().__next__().device
training = model.training
model.eval()
model.apply(add_hooks)
#input_size[0] *= 2
#print(input_size)
#x = [torch.zeros(input_s).unsqueeze(0).to(original_device) for input_s in input_size]
x = torch.zeros([1, 6, 576, 960]).to(original_device)
with torch.no_grad():
model(x)
total_ops = 0
total_params = 0
for m in model.modules():
if len(list(m.children())) > 0: # skip for non-leaf module
continue
total_ops += m.total_ops
total_params += m.total_params
total_ops = total_ops.item()
total_params = total_params.item()
model.train(training).to(original_device)
for handler in handler_collection:
handler.remove()
return total_ops, total_params
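# Illustrative usage sketch (editor addition, not part of the original repo). Note that
# this version of profile() ignores `input_size` and always feeds a 1x6x576x960 tensor,
# so the toy model's first convolution must accept 6 input channels.
if __name__ == '__main__':
    toy = nn.Sequential(
        nn.Conv2d(6, 16, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 8, kernel_size=3, padding=1),
    )
    total_ops, total_params = profile(toy, input_size=(6, 576, 960))
    print('ops: %.1fM, params: %.1fK' % (total_ops / 1e6, total_params / 1e3))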
|
the-stack_0_19526
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Matthew Stone <[email protected]>
# Distributed under terms of the MIT license.
"""
Classification of reciprocal translocations.
"""
def classify_insertion(plus, minus, mh_buffer=50):
plus_A = plus.pos
minus_A = minus.pos
plus_B = plus.stop
minus_B = minus.stop
# Buffer comparisons
def _greater(p1, p2):
return p1 > p2 - mh_buffer
if _greater(minus_A, plus_A) and _greater(minus_B, plus_B):
return 'INS_B2A'
elif _greater(plus_A, minus_A) and _greater(plus_B, minus_B):
return 'INS_A2B'
else:
return 'INS_UNCLASSIFIED'
def classify_simple_translocation(plus, minus, mh_buffer=10):
"""
Resolve a pair of interchromosomal breakends.
Parameters
----------
FF : pysam.VariantRecord
FF inversion breakpoint.
RR : pysam.VariantRecord
RR inversion breakpoint.
cnvs : list of pysam.VariantRecord
List of overlapping CNVs.
Returns
-------
svtype : str
Complex SV class.
"""
# plus refers to breakend whose strand begins with '+'
if plus.chrom != minus.chrom or plus.info['CHR2'] != minus.info['CHR2']:
return 'TLOC_MISMATCH_CHROM'
# Reference chromosomes are labeled A and B
# Breakpoints/Derivative chromosomes are labeled plus and minus, based on
# ref chromosome A's strandedness on each breakpoint
# plus_A = the breakend of ref chrom A on derivative chrom where A is
# forward-stranded
# get positions
plus_A = plus.pos
minus_A = minus.pos
plus_B = plus.stop
minus_B = minus.stop
plus_strands = plus.info['STRANDS']
# Buffer comparisons
def _greater(p1, p2):
return p1 > p2 - mh_buffer
# Check for PE evidence
def _hasPE(recA, recB):
if 'EVIDENCE' in recA.info.keys() \
and 'EVIDENCE' in recB.info.keys():
if 'PE' in recA.info['EVIDENCE'] \
and 'PE' in recB.info['EVIDENCE']:
return True
else:
return False
else:
return False
if plus_strands == '+-':
if _greater(minus_A, plus_A) and _greater(plus_B, minus_B):
if _hasPE(plus, minus):
return 'CTX_PP/QQ'
else:
return 'CTX_UNR'
if _greater(minus_A, plus_A) and _greater(minus_B, plus_B):
return 'CTX_INS_B2A'
if _greater(plus_A, minus_A) and _greater(plus_B, minus_B):
return 'CTX_INS_A2B'
else:
if _greater(minus_A, plus_A) and _greater(minus_B, plus_B):
if _hasPE(plus, minus):
return 'CTX_PQ/QP'
else:
return 'CTX_UNR'
if _greater(minus_A, plus_A) and _greater(plus_B, minus_B):
return 'CTX_INV_INS_B2A'
if _greater(plus_A, minus_A) and _greater(minus_B, plus_B):
return 'CTX_INV_INS_A2B'
return 'CTX_UNR'
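# Illustrative sketch (editor addition, not from the original pipeline): the classifiers
# only read .pos and .stop (plus .chrom/.info for translocations), so a minimal stand-in
# object is enough to exercise classify_insertion() outside of pysam.
if __name__ == '__main__':
    from collections import namedtuple
    Breakend = namedtuple('Breakend', ['pos', 'stop'])
    plus_bnd = Breakend(pos=1000, stop=5000)
    minus_bnd = Breakend(pos=1200, stop=5100)
    print(classify_insertion(plus_bnd, minus_bnd))  # prints 'INS_B2A'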
|
the-stack_0_19529
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 11:25:33 2019
@author: ntr002
"""
import WaPOR
from datetime import datetime
import requests
import os
from WaPOR import GIS_functions as gis
def main(Dir, data='AETI',Startdate='2009-01-01', Enddate='2018-12-31',
latlim=[-40.05, 40.05], lonlim=[-30.5, 65.05],level=1,
version = 2, Waitbar = 1,cached_catalog=True):
"""
    This function downloads yearly WaPOR data for the requested component (e.g. AETI, LCC)
    Keyword arguments:
    Dir -- 'C:/file/to/path/'
    data -- WaPOR data component code, e.g. 'AETI' (default) or 'LCC'
    Startdate -- 'yyyy-mm-dd'
    Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax] (values must be between -40.05 and 40.05)
    lonlim -- [xmin, xmax] (values must be between -30.05 and 65.05)
    level -- WaPOR level (1, 2 or 3)
    version -- WaPOR version (default 2)
    Waitbar -- 1 (default) to print a progress bar in the console, 0 to disable it
    cached_catalog -- True Use a cached catalog. False Load a new catalog from the database
"""
print(f'\nDownload WaPOR Level {level} yearly {data} data for the period {Startdate} till {Enddate}')
# Download data
WaPOR.API.version=version
bbox=[lonlim[0],latlim[0],lonlim[1],latlim[1]]
catalog=WaPOR.API.getCatalog(cached=cached_catalog)
if level==1:
cube_code=f"L1_{data}_A"
elif level==2:
cube_code=f'L2_{data}_A'
elif level==3:
print('Level 3 data only available in some areas with specific data cube code below: ')
for i,row in catalog.iterrows():
if (f'L3' in row['code'])&(f'{data}' in row['code'])&(row['code'][-1]=='A'):
print('%s: %s'%(row['caption'],row['code']))
cube_code=input('Insert Level 3 cube code for the selected area: ')
else:
print('Invalid Level')
try:
cube_info=WaPOR.API.getCubeInfo(cube_code)
multiplier=cube_info['measure']['multiplier']
except:
print('ERROR: Cannot get cube info. Check if WaPOR version has cube %s'%(cube_code))
return None
time_range='{0},{1}'.format(Startdate,Enddate)
try:
df_avail=WaPOR.API.getAvailData(cube_code,time_range=time_range)
except:
print('ERROR: cannot get list of available data')
return None
if Waitbar == 1:
import WaPOR.WaitbarConsole as WaitbarConsole
total_amount = len(df_avail)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
Dir=os.path.join(Dir,'WAPOR.v%s_yearly_%s' %(version,cube_code))
if not os.path.exists(Dir):
os.makedirs(Dir)
for index,row in df_avail.iterrows():
download_url=WaPOR.API.getCropRasterURL(bbox,cube_code,
row['time_code'],
row['raster_id'],
WaPOR.API.Token,
print_job=False)
filename='{0}.tif'.format(row['raster_id'])
outfilename=os.path.join(Dir,filename)
download_file=os.path.join(Dir,'raw_{0}.tif'.format(row['raster_id']))
#Download raster file
resp=requests.get(download_url)
open(download_file,'wb').write(resp.content)
driver, NDV, xsize, ysize, GeoT, Projection= gis.GetGeoInfo(download_file)
Array = gis.OpenAsArray(download_file,nan_values=True)
CorrectedArray=Array*multiplier
gis.CreateGeoTiff(outfilename,CorrectedArray,
driver, NDV, xsize, ysize, GeoT, Projection)
os.remove(download_file)
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount,
prefix = 'Progress:',
suffix = 'Complete',
length = 50)
return Dir
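# Example invocation (editor addition; illustrative only -- it needs a configured WaPOR
# API token and network access, so it is left commented out):
# main(r'C:/WaPOR_data/', data='AETI', Startdate='2015-01-01', Enddate='2015-12-31',
#      latlim=[7.0, 13.0], lonlim=[37.0, 44.0], level=2)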
|
the-stack_0_19532
|
import csv
from urllib import request
'''
We use request to download a file from a URL.
'''
def read(url):
    with request.urlopen(url) as entrada:
        '''
        Request the CSV file.
        '''
        print('Downloading CSV file ...')
        # Read the response body and decode it from latin1
        dados = entrada.read().decode('latin1')
        print('Download complete!')
        for cidade in csv.reader(dados.splitlines()):
            print(f'{cidade[8]} : {cidade[3]}')
    # The "with" block closes the response automatically on exit
    print('File closed successfully')
if __name__ == "__main__":
read(r'http://files.cod3r.com.br/curso-python/desafio-ibge.csv')
|
the-stack_0_19536
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test Loan - setcollateraltoken."""
from test_framework.test_framework import DefiTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import calendar
import time
class LoanSetCollateralTokenTest (DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [
['-txnotokens=0', '-amkheight=50', '-bayfrontheight=50', '-eunosheight=50', '-fortcanningheight=50', '-fortcanninghillheight=50', '-fortcanningcrunchheight=150', '-txindex=1']]
def run_test(self):
assert_equal(len(self.nodes[0].listtokens()), 1) # only one token == DFI
print("Generating initial chain...")
self.nodes[0].generate(101)
symbolDFI = "DFI"
symbolBTC = "BTC"
symbolGOOGL = "GOOGL"
self.nodes[0].createtoken({
"symbol": symbolBTC,
"name": symbolBTC,
"isDAT": True,
"collateralAddress": self.nodes[0].get_genesis_keys().ownerAuthAddress
})
self.nodes[0].generate(1)
self.nodes[0].createtoken({
"symbol": symbolGOOGL,
"name": symbolGOOGL,
"isDAT": True,
"collateralAddress": self.nodes[0].get_genesis_keys().ownerAuthAddress
})
self.nodes[0].generate(1)
idDFI = list(self.nodes[0].gettoken(symbolDFI).keys())[0]
idBTC = list(self.nodes[0].gettoken(symbolBTC).keys())[0]
idGOOGL = list(self.nodes[0].gettoken(symbolGOOGL).keys())[0]
try:
self.nodes[0].setcollateraltoken({
'token': "DOGE",
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Token DOGE does not exist" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Price feed DFI/USD does not belong to any oracle" in errorString)
oracle_address1 = self.nodes[0].getnewaddress("", "legacy")
price_feeds1 = [
{"currency": "USD", "token": symbolDFI},
{"currency": "USD", "token": symbolBTC},
{"currency": "USD", "token": symbolGOOGL},
]
oracle_id1 = self.nodes[0].appointoracle(oracle_address1, price_feeds1, 10)
self.nodes[0].generate(1)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("no live oracles for specified request" in errorString)
oracle1_prices = [
{"currency": "USD", "tokenAmount": f'1@{symbolDFI}'},
{"currency": "USD", "tokenAmount": f'1@{symbolBTC}'},
{"currency": "USD", "tokenAmount": f'1@{symbolGOOGL}'},
]
timestamp = calendar.timegm(time.gmtime())
self.nodes[0].setoracledata(oracle_id1, timestamp, oracle1_prices)
self.nodes[0].generate(1)
assert_raises_rpc_error(-32600, "setCollateralToken factor must be lower or equal than 1", self.nodes[0].setcollateraltoken, {
'token': idDFI,
'factor': 2,
'fixedIntervalPriceId': "DFI/USD"})
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': -1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Amount out of range" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "Blabla"})
except JSONRPCException as e:
errorString = e.error['message']
assert("price feed not in valid format - token/currency" in errorString)
collTokenTx1 = self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 0.5,
'fixedIntervalPriceId': "DFI/USD"})
collTokenTx3 = self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD",
'activateAfterBlock': 135})
self.nodes[0].generate(1)
dfi_activation_height = self.nodes[0].getblockcount()
collTokens = self.nodes[0].listcollateraltokens()
assert_equal(len(collTokens), 2)
collToken1 = [token for token in collTokens if token["tokenId"] == collTokenTx1][0]
assert_equal(collToken1["token"], symbolDFI)
assert_equal(collToken1["factor"], Decimal('0.5'))
assert_equal(collToken1["fixedIntervalPriceId"], "DFI/USD")
collTokenTx2 = self.nodes[0].setcollateraltoken({
'token': idBTC,
'factor': 0.9,
'fixedIntervalPriceId': "BTC/USD"})
self.nodes[0].generate(1)
btc_activation_height = self.nodes[0].getblockcount()
collTokens = self.nodes[0].listcollateraltokens()
assert_equal(len(collTokens), 3)
collToken2 = [token for token in collTokens if token["tokenId"] == collTokenTx2][0]
assert_equal(collToken2["token"], symbolBTC)
assert_equal(collToken2["factor"], Decimal('0.9'))
assert_equal(collToken2["fixedIntervalPriceId"], "BTC/USD")
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens()
assert_equal(len(collTokens), 3)
collToken3 = [token for token in collTokens if token["tokenId"] == collTokenTx3][0]
assert_equal(collToken3["token"], symbolDFI)
assert_equal(collToken3["factor"], Decimal('1'))
collTokens = self.nodes[0].getcollateraltoken(idDFI)
assert_equal(collTokens["token"], symbolDFI)
assert_equal(collTokens["factor"], Decimal('0.5'))
assert_equal(collTokens["activateAfterBlock"], dfi_activation_height)
collTokens = self.nodes[0].getcollateraltoken(idBTC)
assert_equal(collTokens["token"], symbolBTC)
assert_equal(collTokens["factor"], Decimal('0.9'))
assert_equal(collTokens["activateAfterBlock"], btc_activation_height)
self.nodes[0].generate(30)
collTokens = self.nodes[0].getcollateraltoken(idDFI)
assert_equal(collTokens["token"], symbolDFI)
assert_equal(collTokens["factor"], Decimal('1'))
assert_equal(collTokens["activateAfterBlock"], 135)
collTokens = self.nodes[0].getcollateraltoken(idBTC)
assert_equal(collTokens["token"], symbolBTC)
assert_equal(collTokens["factor"], Decimal('0.9'))
assert_equal(collTokens["activateAfterBlock"], btc_activation_height)
self.nodes[0].setcollateraltoken({
'token': idBTC,
'factor': 0,
'fixedIntervalPriceId': "BTC/USD"})
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens()
assert_equal(len(collTokens), 4)
# Move to fork height
self.nodes[0].generate(150 - self.nodes[0].getblockcount())
# Check errors on FCC
assert_raises_rpc_error(-32600, "setCollateralToken factor must be lower or equal than 1.00000000", self.nodes[0].setcollateraltoken, {
'token': idDFI,
'factor': 1.01,
'fixedIntervalPriceId': "DFI/USD"})
self.nodes[0].generate(1)
# Check errors
assert_raises_rpc_error(-32600, "Percentage exceeds 100%", self.nodes[0].setcollateraltoken, {
'token': idDFI,
'factor': 1.01,
'fixedIntervalPriceId': "DFI/USD"})
# Create collateral token
self.nodes[0].setcollateraltoken({
'token': idGOOGL,
'factor': 0.12345678,
'fixedIntervalPriceId': "GOOGL/USD"})
self.nodes[0].generate(1)
        # Check attributes
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result[f'v0/token/{idGOOGL}/loan_collateral_enabled'], 'true')
assert_equal(result[f'v0/token/{idGOOGL}/loan_collateral_factor'], '0.12345678')
assert_equal(result[f'v0/token/{idGOOGL}/fixed_interval_price_id'], 'GOOGL/USD')
# Get token creation TX
token = self.nodes[0].gettoken(idGOOGL)[idGOOGL]
# Check entry in list collateral tokens
result = self.nodes[0].listcollateraltokens()[2]
assert_equal(result['token'], 'GOOGL')
assert_equal(result['tokenId'], token['creationTx'])
assert_equal(result['factor'], Decimal('0.12345678'))
assert_equal(result['fixedIntervalPriceId'], 'GOOGL/USD')
if __name__ == '__main__':
LoanSetCollateralTokenTest().main()
|
the-stack_0_19537
|
#!/usr/bin/env python3
import re
import rospy
from std_msgs.msg import String
from lg_common import AdhocBrowserPool
from lg_msg_defs.msg import AdhocBrowsers
from lg_common import AdhocBrowserDirectorBridge
from lg_common.helpers import make_soft_relaunch_callback, handle_initial_state
from lg_common.helpers import run_with_influx_exception_handler
from interactivespaces_msgs.msg import GenericMessage
from lg_msg_defs.msg import Ready
NODE_NAME = 'lg_adhoc_browser'
def main():
rospy.init_node(NODE_NAME, anonymous=True)
extensions_root = rospy.get_param('~extensions_root', '/opt/endpoint/chrome/extensions/')
viewport_name = rospy.get_param('~viewport', None)
rosbridge_port = rospy.get_param('~rosbridge_port', 9090)
    rosbridge_host = rospy.get_param('~rosbridge_host', 'localhost')
depend_on_rosbridge = rospy.get_param('~depend_on_rosbridge', True)
global_dependency_timeout = rospy.get_param('/global_dependency_timeout', 15)
hide_delay = rospy.get_param('~hide_delay', 0.5)
destroy_delay = rospy.get_param('~destroy_delay', 2)
if not viewport_name:
rospy.logerr("Viewport is not set in the roslaunch file. Exiting.")
exit(1)
"""
Initialize adhoc browser pool
"""
topic_name = 'browser_service/{}'.format(viewport_name)
common_topic_name = 'browser_service/browsers'
adhocbrowser_pool = AdhocBrowserPool(viewport_name=viewport_name,
extensions_root=extensions_root,
hide_delay=hide_delay,
destroy_delay=destroy_delay)
make_soft_relaunch_callback(adhocbrowser_pool.handle_soft_relaunch,
groups=["media"])
rospy.Subscriber(
topic_name,
AdhocBrowsers,
adhocbrowser_pool.handle_ros_message
)
"""
Initialize director => browser pool bridge that translates director GenericMessage to AdhocBrowsers.msg
"""
adhocbrowser_viewport_publisher = rospy.Publisher(
topic_name, AdhocBrowsers, queue_size=3)
adhocbrowser_aggregate_topic_publisher = rospy.Publisher(common_topic_name,
AdhocBrowsers,
queue_size=3)
adhocbrowser_director_bridge = AdhocBrowserDirectorBridge(
adhocbrowser_aggregate_topic_publisher,
adhocbrowser_viewport_publisher,
viewport_name)
rospy.Subscriber('director/scene', GenericMessage, adhocbrowser_director_bridge.translate_director)
rospy.Subscriber('director/ready', Ready, adhocbrowser_pool.unhide_browsers)
handle_initial_state(adhocbrowser_director_bridge.translate_director)
"""
Initialize overlay hiding listener
"""
    def getBrowserIds(msg):
        s = msg.data
        if '[' in s and ']' in s:
            # Split a list-like payload such as "[id1, id2]" on brackets, commas and spaces
            ids = [sp for sp in re.split(r'[\[\], ]', s) if len(sp) > 0]
            adhocbrowser_pool.minimize_browsers(ids)
        else:
            adhocbrowser_pool.minimize_browsers([s])
rospy.Subscriber('director/minimize', String, getBrowserIds)
"""
Spin FTW
"""
rospy.spin()
if __name__ == "__main__":
run_with_influx_exception_handler(main, NODE_NAME)
|
the-stack_0_19538
|
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='len',
parent_name='scattercarpet.marker.colorbar',
**kwargs
):
super(LenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
the-stack_0_19541
|
"""Test Mikrotik setup process."""
from asynctest import CoroutineMock, Mock, patch
from homeassistant.components import mikrotik
from homeassistant.setup import async_setup_component
from . import MOCK_DATA
from tests.common import MockConfigEntry
async def test_setup_with_no_config(hass):
"""Test that we do not discover anything or try to set up a hub."""
assert await async_setup_component(hass, mikrotik.DOMAIN, {}) is True
assert mikrotik.DOMAIN not in hass.data
async def test_successful_config_entry(hass):
"""Test config entry successful setup."""
entry = MockConfigEntry(domain=mikrotik.DOMAIN, data=MOCK_DATA,)
entry.add_to_hass(hass)
mock_registry = Mock()
with patch.object(mikrotik, "MikrotikHub") as mock_hub, patch(
"homeassistant.helpers.device_registry.async_get_registry",
return_value=mock_registry,
):
mock_hub.return_value.async_setup = CoroutineMock(return_value=True)
mock_hub.return_value.serial_num = "12345678"
mock_hub.return_value.model = "RB750"
mock_hub.return_value.hostname = "mikrotik"
mock_hub.return_value.firmware = "3.65"
assert await mikrotik.async_setup_entry(hass, entry) is True
assert len(mock_hub.mock_calls) == 2
p_hass, p_entry = mock_hub.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert len(mock_registry.mock_calls) == 1
assert mock_registry.mock_calls[0][2] == {
"config_entry_id": entry.entry_id,
"connections": {("mikrotik", "12345678")},
"manufacturer": mikrotik.ATTR_MANUFACTURER,
"model": "RB750",
"name": "mikrotik",
"sw_version": "3.65",
}
async def test_hub_fail_setup(hass):
"""Test that a failed setup will not store the hub."""
entry = MockConfigEntry(domain=mikrotik.DOMAIN, data=MOCK_DATA,)
entry.add_to_hass(hass)
with patch.object(mikrotik, "MikrotikHub") as mock_hub:
mock_hub.return_value.async_setup = CoroutineMock(return_value=False)
assert await mikrotik.async_setup_entry(hass, entry) is False
assert mikrotik.DOMAIN not in hass.data
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
entry = MockConfigEntry(domain=mikrotik.DOMAIN, data=MOCK_DATA,)
entry.add_to_hass(hass)
with patch.object(mikrotik, "MikrotikHub") as mock_hub, patch(
"homeassistant.helpers.device_registry.async_get_registry", return_value=Mock(),
):
mock_hub.return_value.async_setup = CoroutineMock(return_value=True)
mock_hub.return_value.serial_num = "12345678"
mock_hub.return_value.model = "RB750"
mock_hub.return_value.hostname = "mikrotik"
mock_hub.return_value.firmware = "3.65"
assert await mikrotik.async_setup_entry(hass, entry) is True
assert len(mock_hub.return_value.mock_calls) == 1
assert await mikrotik.async_unload_entry(hass, entry)
assert entry.entry_id not in hass.data[mikrotik.DOMAIN]
|
the-stack_0_19542
|
import discord
import src.modules.toxicity_helper as toxicity_helper
from src.modules.catfact_helper import get_catfact
from src.modules.repeat_helper import message_author, is_repeat, cycle, flush, message_author_debug
from src.tools.botfunction import BotFunction
from src.tools.message_return import message_data
from src.modules.discord_helper import generate_embed
BAN_EMOJI_ID = 338384063691751424
def super_toxic_heuristic(scores):
return False
class auto_on_message(BotFunction):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def action(self, message, *args, **kwargs):
raise NotImplementedError
class unsubscribe(auto_on_message):
"""
Extension of $catfact
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def action(self, message, *args, **kwargs):
if message.content.lower().strip() == "unsubscribe":
return message_data(message.channel, get_catfact())
# print(help(unsubscribe))
class private_message(auto_on_message):
"""
Yang will respond to private messages with a notice to not message him privately
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def action(self, message, *args, **kwargs):
if isinstance(message.channel, (discord.DMChannel, discord.GroupChannel)):
return message_data(message.channel,
"I do not reply to private messages. If you have any questions, please message one of the mods.")
return None
class check_toxicity(auto_on_message):
"""
Notifies admins if a message is toxic (>.83) and removes it if super toxic (>.91)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def remove_toxicity(self, message, scores, toxic_message):
if message is None:
return
if super_toxic_heuristic(scores):
await toxic_message.delete()
await toxic_message.channel.send("We didn't accept you into this school to be toxic.")
else:
ban_emoji = await message.guild.fetch_emoji(BAN_EMOJI_ID)
await message.add_reaction(ban_emoji)
def check(reaction, user):
return reaction.message.id == message.id and not user.bot and (reaction.emoji == ban_emoji)
reaction, user = await self.bot.client.wait_for("reaction_add", check=check)
try:
await toxic_message.delete()
except:
await message.channel.send("Message unable to be deleted")
async def action(self, message, *args, **kwargs):
send_message, scores = toxicity_helper.get_toxicity(message)
m = None if send_message is None else ""
toxic_notif_channel = self.bot.client.get_channel(self.bot.config["toxic_notif_channel"])
if m is not None:
toxic_notif_message = await toxic_notif_channel.send(embed=generate_embed(send_message))
await self.remove_toxicity(toxic_notif_message, scores, message)
class mission_complete(auto_on_message):
"""
Repeats a message if it has been repeated bot.repeat_n times in a row in a channel
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def debug_reset(self):
self.repeated_messages_dict = {(channel.id):[] for channel in self.bot.channels}
async def action(self, message, *args, **kwargs):
m_a = message_author(message.content, message.author, self.bot.debug)
cycle(self.bot.repeated_messages_dict[message.channel.id], m_a, self.bot.repeat_n)
if is_repeat(self.bot.repeated_messages_dict[message.channel.id], self.bot.repeat_n):
send = self.bot.repeated_messages_dict[message.channel.id][-1].message
flush(self.bot.repeated_messages_dict[message.channel.id])
return message_data(message.channel, send)
return None
async def debug_action(self, message, *args, **kwargs):
m_a = message_author(message.content, message.author, self.bot.debug)
cycle(self.bot.repeated_messages_dict[message.channel.id], m_a, self.bot.repeat_n)
if is_repeat(self.bot.repeated_messages_dict[message.channel.id], self.bot.repeat_n):
send = self.bot.repeated_messages_dict[message.channel.id][-1].message
flush(self.bot.repeated_messages_dict[message.channel.id])
return message_data(message.channel, send)
# @bot.auto_on_message(timedelta(minutes=1),None,True)
# def fire(message):
# """
# fire
# """
# if "fire" in message.content.lower().split() and "update" in message.content.lower().split():
# return message_data(message.channel,"There is no threat to the campus")
# return None
# @bot.auto_on_message(None,None,True)
# def test(message):
# print(message.author.nick)
# return message_data(message.channel,message.author.nick)
|
the-stack_0_19544
|
import unittest
from caffe2.python import convnet_benchmarks as cb
from caffe2.python import test_util, workspace
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestConvnetBenchmarks(test_util.TestCase):
def testConvnetBenchmarks(self):
all_args = [
'--batch_size 16 --order NCHW --iterations 1 '
'--warmup_iterations 1',
'--batch_size 16 --order NCHW --iterations 1 '
'--warmup_iterations 1 --forward_only',
]
for model in [cb.AlexNet, cb.OverFeat, cb.VGGA, cb.Inception]:
for arg_str in all_args:
args = cb.GetArgumentParser().parse_args(arg_str.split(' '))
cb.Benchmark(model, args)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_19545
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from setuptools import setup, find_packages
VERSION = open('VERSION').read().lstrip('version: ').rstrip('\n')
setup(
name='django-labjs',
packages=find_packages(),
version=VERSION,
description='Django labjs templatetags.',
long_description=open('README.rst').read(),
author='Ashley Camba Garrido',
author_email='[email protected]',
maintainer='Luke Pomfrey',
maintainer_email='[email protected]',
url='https://github.com/lpomfrey/django-labjs',
setup_requires=['setuptools_git >= 0.3'],
install_requires=[
'django-appconf>=0.4',
'django-compressor>=0.9.2',
],
test_suite='runtests.runtests',
include_package_data=True,
zip_safe=False, # because we're including media that Django needs
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
the-stack_0_19548
|
import logging
from base64 import b64decode
from dateutil import parser
from requests import Session
from xlsxwriter.utility import xl_col_to_name
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
from redash.query_runner.i18n_dataSource import zh
logger = logging.getLogger(__name__)
try:
import gspread
from gspread.httpsession import HTTPSession
from oauth2client.service_account import ServiceAccountCredentials
enabled = True
except ImportError:
enabled = False
def _load_key(filename):
with open(filename, "rb") as f:
return json_loads(f.read())
def _get_columns_and_column_names(row):
column_names = []
columns = []
duplicate_counter = 1
for i, column_name in enumerate(row):
if not column_name:
column_name = 'column_{}'.format(xl_col_to_name(i))
if column_name in column_names:
column_name = u"{}{}".format(column_name, duplicate_counter)
duplicate_counter += 1
column_names.append(column_name)
columns.append({
'name': column_name,
'friendly_name': column_name,
'type': TYPE_STRING
})
return columns, column_names
def _value_eval_list(row_values, col_types):
value_list = []
raw_values = zip(col_types, row_values)
for typ, rval in raw_values:
try:
if rval is None or rval == '':
val = None
elif typ == TYPE_BOOLEAN:
val = True if unicode(rval).lower() == 'true' else False
elif typ == TYPE_DATETIME:
val = parser.parse(rval)
elif typ == TYPE_FLOAT:
val = float(rval)
elif typ == TYPE_INTEGER:
val = int(rval)
else:
# for TYPE_STRING and default
val = unicode(rval)
value_list.append(val)
except (ValueError, OverflowError):
value_list.append(rval)
return value_list
HEADER_INDEX = 0
class WorksheetNotFoundError(Exception):
def __init__(self, worksheet_num, worksheet_count):
message = "Worksheet number {} not found. Spreadsheet has {} worksheets. Note that the worksheet count is zero based.".format(worksheet_num, worksheet_count)
super(WorksheetNotFoundError, self).__init__(message)
def parse_query(query):
values = query.split("|")
key = values[0] # key of the spreadsheet
worksheet_num = 0 if len(values) != 2 else int(values[1]) # if spreadsheet contains more than one worksheet - this is the number of it
return key, worksheet_num
def parse_worksheet(worksheet):
if not worksheet:
return {'columns': [], 'rows': []}
columns, column_names = _get_columns_and_column_names(worksheet[HEADER_INDEX])
if len(worksheet) > 1:
for j, value in enumerate(worksheet[HEADER_INDEX + 1]):
columns[j]['type'] = guess_type(value)
column_types = [c['type'] for c in columns]
rows = [dict(zip(column_names, _value_eval_list(row, column_types))) for row in worksheet[HEADER_INDEX + 1:]]
data = {'columns': columns, 'rows': rows}
return data
def parse_spreadsheet(spreadsheet, worksheet_num):
worksheets = spreadsheet.worksheets()
worksheet_count = len(worksheets)
if worksheet_num >= worksheet_count:
raise WorksheetNotFoundError(worksheet_num, worksheet_count)
worksheet = worksheets[worksheet_num].get_all_values()
return parse_worksheet(worksheet)
class TimeoutSession(Session):
def request(self, *args, **kwargs):
kwargs.setdefault('timeout', 300)
return super(TimeoutSession, self).request(*args, **kwargs)
class GoogleSpreadsheet(BaseQueryRunner):
def __init__(self, configuration):
super(GoogleSpreadsheet, self).__init__(configuration)
self.syntax = 'custom'
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "google_spreadsheets"
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'jsonKeyFile': {
"type": "string",
'title': zh.get('JSON Key File', 'JSON Key File')
}
},
'required': ['jsonKeyFile'],
'secret': ['jsonKeyFile']
}
def _get_spreadsheet_service(self):
scope = [
'https://spreadsheets.google.com/feeds',
]
key = json_loads(b64decode(self.configuration['jsonKeyFile']))
creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
timeout_session = HTTPSession()
timeout_session.requests_session = TimeoutSession()
spreadsheetservice = gspread.Client(auth=creds, http_session=timeout_session)
spreadsheetservice.login()
return spreadsheetservice
def test_connection(self):
self._get_spreadsheet_service()
def is_url_key(self, key):
if key.startswith('https://'):
return True
return False
def run_query(self, query, user):
logger.debug("Spreadsheet is about to execute query: %s", query)
key, worksheet_num = parse_query(query)
try:
spreadsheet_service = self._get_spreadsheet_service()
if self.is_url_key(key):
spreadsheet = spreadsheet_service.open_by_url(key)
else:
spreadsheet = spreadsheet_service.open_by_key(key)
data = parse_spreadsheet(spreadsheet, worksheet_num)
return json_dumps(data), None
except gspread.SpreadsheetNotFound:
return None, "Spreadsheet ({}) not found. Make sure you used correct id.".format(key)
register(GoogleSpreadsheet)
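# Query format note (editor addition): a query is "<spreadsheet key or URL>|<worksheet index>",
# e.g. "1A2b3C...|1" reads the second worksheet; omitting "|n" defaults to worksheet 0.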
|
the-stack_0_19549
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave
from utils.iotools import mkdir_if_missing, write_json, read_json
from .base import BaseImgDataset
class iLIDS(BaseImgDataset):
"""
iLIDS (for single shot setting)
Reference:
Wang et al. Person Re-Identification by Video Ranking. ECCV 2014.
URL: http://www.eecs.qmul.ac.uk/~xiatian/downloads_qmul_iLIDS-VID_ReID_dataset.html
Dataset statistics:
# identities: 300
# images: 600
# cameras: 2
"""
dataset_dir = 'ilids-vid'
def __init__(self, root='data', split_id=0, verbose=True, use_lmdb=False, **kwargs):
super(iLIDS, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.dataset_url = 'http://www.eecs.qmul.ac.uk/~xiatian/iLIDS-VID/iLIDS-VID.tar'
self.data_dir = osp.join(self.dataset_dir, 'i-LIDS-VID')
self.split_dir = osp.join(self.dataset_dir, 'train-test people splits')
self.split_mat_path = osp.join(self.split_dir, 'train_test_splits_ilidsvid.mat')
self.split_path = osp.join(self.dataset_dir, 'splits.json')
self.cam_1_path = osp.join(self.dataset_dir, 'i-LIDS-VID/images/cam1') # differ from video
self.cam_2_path = osp.join(self.dataset_dir, 'i-LIDS-VID/images/cam2')
self._download_data()
self._check_before_run()
self._prepare_split()
splits = read_json(self.split_path)
if split_id >= len(splits):
raise ValueError("split_id exceeds range, received {}, but expected between 0 and {}".format(split_id, len(splits)-1))
split = splits[split_id]
train_dirs, test_dirs = split['train'], split['test']
print("# train identites: {}, # test identites {}".format(len(train_dirs), len(test_dirs)))
train, num_train_imgs, num_train_pids = self._process_data(train_dirs, cam1=True, cam2=True)
query, num_query_imgs, num_query_pids = self._process_data(test_dirs, cam1=True, cam2=False)
gallery, num_gallery_imgs, num_gallery_pids = self._process_data(test_dirs, cam1=False, cam2=True)
num_total_pids = num_train_pids + num_query_pids
num_total_imgs = num_train_imgs + num_query_imgs
if verbose:
print("=> iLIDS (single-shot) loaded")
print("Dataset statistics:")
print(" ------------------------------")
print(" subset | # ids | # images")
print(" ------------------------------")
print(" train | {:5d} | {:8d}".format(num_train_pids, num_train_imgs))
print(" query | {:5d} | {:8d}".format(num_query_pids, num_query_imgs))
print(" gallery | {:5d} | {:8d}".format(num_gallery_pids, num_gallery_imgs))
print(" ------------------------------")
print(" total | {:5d} | {:8d}".format(num_total_pids, num_total_imgs))
print(" ------------------------------")
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids = num_train_pids
self.num_query_pids = num_query_pids
self.num_gallery_pids = num_gallery_pids
if use_lmdb:
self.generate_lmdb()
def _download_data(self):
if osp.exists(self.dataset_dir):
print("This dataset has been downloaded.")
return
mkdir_if_missing(self.dataset_dir)
fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
print("Downloading iLIDS-VID dataset")
urllib.urlretrieve(self.dataset_url, fpath)
print("Extracting files")
tar = tarfile.open(fpath)
tar.extractall(path=self.dataset_dir)
tar.close()
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.data_dir):
raise RuntimeError("'{}' is not available".format(self.data_dir))
if not osp.exists(self.split_dir):
raise RuntimeError("'{}' is not available".format(self.split_dir))
def _prepare_split(self):
if not osp.exists(self.split_path):
print("Creating splits ...")
mat_split_data = loadmat(self.split_mat_path)['ls_set']
num_splits = mat_split_data.shape[0]
num_total_ids = mat_split_data.shape[1]
assert num_splits == 10
assert num_total_ids == 300
num_ids_each = num_total_ids // 2
# pids in mat_split_data are indices, so we need to transform them
# to real pids
person_cam1_dirs = sorted(glob.glob(osp.join(self.cam_1_path, '*')))
person_cam2_dirs = sorted(glob.glob(osp.join(self.cam_2_path, '*')))
person_cam1_dirs = [osp.basename(item) for item in person_cam1_dirs]
person_cam2_dirs = [osp.basename(item) for item in person_cam2_dirs]
# make sure persons in one camera view can be found in the other camera view
assert set(person_cam1_dirs) == set(person_cam2_dirs)
splits = []
for i_split in range(num_splits):
# first 50% for testing and the remaining for training, following Wang et al. ECCV'14.
train_idxs = sorted(list(mat_split_data[i_split,num_ids_each:]))
test_idxs = sorted(list(mat_split_data[i_split,:num_ids_each]))
train_idxs = [int(i)-1 for i in train_idxs]
test_idxs = [int(i)-1 for i in test_idxs]
# transform pids to person dir names
train_dirs = [person_cam1_dirs[i] for i in train_idxs]
test_dirs = [person_cam1_dirs[i] for i in test_idxs]
split = {'train': train_dirs, 'test': test_dirs}
splits.append(split)
print("Totally {} splits are created, following Wang et al. ECCV'14".format(len(splits)))
print("Split file is saved to {}".format(self.split_path))
write_json(splits, self.split_path)
def _process_data(self, dirnames, cam1=True, cam2=True):
dirname2pid = {dirname:i for i, dirname in enumerate(dirnames)}
dataset = []
for i, dirname in enumerate(dirnames):
if cam1:
pdir = osp.join(self.cam_1_path, dirname)
img_path = glob.glob(osp.join(pdir, '*.png'))
# only one image is available in one folder
assert len(img_path) == 1
img_path = img_path[0]
pid = dirname2pid[dirname]
dataset.append((img_path, pid, 0))
if cam2:
pdir = osp.join(self.cam_2_path, dirname)
img_path = glob.glob(osp.join(pdir, '*.png'))
# only one image is available in one folder
assert len(img_path) == 1
img_path = img_path[0]
pid = dirname2pid[dirname]
dataset.append((img_path, pid, 1))
num_imgs = len(dataset)
num_pids = len(dirnames)
return dataset, num_imgs, num_pids
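# Usage sketch (editor addition; assumes the iLIDS-VID tarball has been downloaded and
# extracted under `root` as handled above):
#   dataset = iLIDS(root='data', split_id=0)
#   train, query, gallery = dataset.train, dataset.query, dataset.gallery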
|
the-stack_0_19553
|
from rltorch.network import BaseNetwork, create_dqn_base,\
create_linear_network
class DiscreteConvQNetwork(BaseNetwork):
def __init__(self, num_channels, output_dim, initializer='xavier'):
super(DiscreteConvQNetwork, self).__init__()
self.base = create_dqn_base(num_channels, initializer=initializer)
self.V_stream = create_linear_network(
7*7*64, 1, hidden_units=[512], initializer=initializer)
self.A_stream = create_linear_network(
7*7*64, output_dim, hidden_units=[512], initializer=initializer)
def forward(self, states):
h = self.base(states)
V = self.V_stream(h)
A = self.A_stream(h)
Q = V + A - A.mean(1, keepdim=True)
return Q
class TwinedDiscreteConvQNetwork(BaseNetwork):
def __init__(self, num_channels, output_dim, initializer='xavier'):
super(TwinedDiscreteConvQNetwork, self).__init__()
self.Q1 = DiscreteConvQNetwork(
num_channels, output_dim, initializer)
self.Q2 = DiscreteConvQNetwork(
num_channels, output_dim, initializer)
def forward(self, states):
Q1 = self.Q1(states)
Q2 = self.Q2(states)
return Q1, Q2
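# Minimal shape check (editor addition). This assumes the standard 84x84 Atari-style input
# that the hard-coded 7*7*64 flatten size implies, and that rltorch's create_dqn_base ends
# with a flatten so `h` can feed the linear streams directly.
if __name__ == '__main__':
    import torch
    net = DiscreteConvQNetwork(num_channels=4, output_dim=6)
    q = net(torch.zeros(1, 4, 84, 84))
    print(q.shape)  # expected: torch.Size([1, 6])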
|
the-stack_0_19555
|
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import socket
import requests
import urllib3
import jsonpickle
from typing import List
NETWORK_TYPE = "carrier_wifi_network"
admin_cert = (
"/var/opt/magma/certs/rest_admin.crt",
"/var/opt/magma/certs/rest_admin.key",
)
# Disable warnings about SSL verification since its a local VM
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class AllowedGREPeers:
def __init__(self, ip: str, key: int):
self.ip = ip
self.key = key
class CarrierWiFiConfig:
def __init__(self, grePeers: List[AllowedGREPeers]):
self.allowed_gre_peers = grePeers
class NetworkDNSConfig:
def __init__(self, enable_caching: bool, local_ttl: int):
self.enable_caching = enable_caching
self.local_ttl = local_ttl
class XwFMNetwork:
def __init__(self, id: str, name: str, description: str):
self.id = id
self.name = name
self.description = description
self.type = NETWORK_TYPE
self.dns = NetworkDNSConfig(enable_caching=False, local_ttl=60)
class TierImage:
def __init__(self, name: str, order: int):
self.name = name
self.order = order
class Tier:
def __init__(
self,
id: str,
name: str,
version: str,
images: List[TierImage],
gateways: List[str],
):
self.id = id
self.name = name
self.images = images
self.version = version
self.gateways = gateways
class MagmadGatewayConfigs:
def __init__(
self,
autoupgrade_enabled: bool,
autoupgrade_poll_interval: int,
checkin_interval: int,
checkin_timeout: int,
):
self.autoupgrade_enabled = autoupgrade_enabled
self.autoupgrade_poll_interval = autoupgrade_poll_interval
self.checkin_interval = checkin_interval
self.checkin_timeout = checkin_timeout
class ChallengeKey:
def __init__(self, key_type: str):
self.key_type = key_type
class GatewayDevice:
def __init__(self, hardware_id: str, key: ChallengeKey):
self.hardware_id = hardware_id
self.key = key
class Gateway:
def __init__(
self,
id: str,
name: str,
description: str,
magmad: MagmadGatewayConfigs,
device: GatewayDevice,
tier: str,
carrier_wifi: CarrierWiFiConfig,
):
self.id = id
self.name, self.description = name, description
self.magmad = magmad
self.device = device
self.tier = tier
self.carrier_wifi = carrier_wifi
def cloud_get(url: str):
resp = requests.get(url, verify=False, cert=admin_cert)
if resp.status_code != 200:
raise Exception("Received a %d response: %s" % (resp.status_code, resp.text))
return
return resp.json()
def cloud_post(url: str, data: str):
resp = requests.post(
url,
data=data,
headers={"content-type": "application/json"},
verify=False,
cert=admin_cert,
)
if resp.status_code not in [200, 201, 204]:
raise Exception("Received a %d response: %s" % (resp.status_code, resp.text))
def create_network_if_not_exists(url: str, network_id: str):
values = cloud_get(url + "/networks")
if network_id in values:
print(f"NMS XWF-M Network exists already - {network_id}")
else:
data = XwFMNetwork(
id=network_id, name="XWFM Network", description="XWFM Network"
)
cloud_post(url + "/networks", jsonpickle.pickler.encode(data))
# create tier
tier_payload = Tier(
id="default", name="default", version="0.0.0-0", images=[], gateways=[]
)
cloud_post(
url + f"/networks/{network_id}/tiers",
jsonpickle.pickler.encode(tier_payload),
)
print(f"{network_id} NMS XWF-M Network created successfully")
def get_next_gateway_id(url: str, network_id: str, hw_id: str) -> (bool, str):
gateways = cloud_get(url + f"/cwf/{network_id}/gateways")
for gw in gateways.values():
if gw['device']['hardware_id'] == hw_id:
return True, gw['id']
nbr = len(gateways) + 1
return False, str(nbr)
def register_gateway(url: str, network_id: str, hardware_id: str, tier_id: str):
"""
Register XwF-M Gateway in the requested network.
"""
found, gid = get_next_gateway_id(url, network_id, hardware_id)
if found:
print(f"XWF-M Gateway exists already - {hardware_id}")
else:
grePeer = AllowedGREPeers(ip="192.168.128.2", key=100)
data = Gateway(
name=socket.gethostname().strip(),
description=f"XWFM Gateway {gid}",
tier="default",
id=f"fbc_gw_{gid}",
device=GatewayDevice(
hardware_id=hardware_id, key=ChallengeKey(key_type="ECHO")
),
magmad=MagmadGatewayConfigs(
autoupgrade_enabled=True,
autoupgrade_poll_interval=60,
checkin_interval=60,
checkin_timeout=30,
),
carrier_wifi=CarrierWiFiConfig(grePeers=[grePeer]),
)
cloud_post(url + f"/cwf/{network_id}/gateways", jsonpickle.pickler.encode(data))
def create_parser():
"""
Creates the argparse parser with all the arguments.
"""
parser = argparse.ArgumentParser(
description="Provision XwF-M Gateway",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--partner", dest="partner", action="store", help="Partner Short Name"
)
parser.add_argument(
"--hwid", dest="hwid", action="store", help="Gateway Hardware ID"
)
parser.add_argument(
"--url", dest="url", action="store", help="Orchestrator URL Address"
)
return parser
def main():
parser = create_parser()
args = parser.parse_args()
if not (args.hwid and args.url and args.partner):
parser.print_usage()
exit(1)
# Create XwF-M Network
partner = args.partner.strip()
create_network_if_not_exists(args.url, partner)
register_gateway(args.url, partner, args.hwid, "default")
if __name__ == "__main__":
main()
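# Example invocation (editor addition; the script name, URL, partner name and hardware ID
# below are placeholders):
#   python3 provision_gateway.py --partner acme \
#       --hwid 1a2b3c4d-aaaa-bbbb-cccc-1234567890ab \
#       --url https://orc8r.example.com:9443/magma/v1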
|
the-stack_0_19557
|
# -*- coding: utf-8 -*-
# Copyright 2004-2020 Davide Alberani <[email protected]>
# 2008-2018 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the classes (and the instances) that are used to parse
the IMDb pages on the www.imdb.com server about a movie.
For example, for Brian De Palma's "The Untouchables", the referred pages
would be:
combined details
http://www.imdb.com/title/tt0094226/reference
plot summary
http://www.imdb.com/title/tt0094226/plotsummary
...and so on.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import re
from imdb import PY2
from imdb import imdbURL_base
from imdb.Company import Company
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.utils import _Container, KIND_MAP
from .piculet import Path, Rule, Rules, preprocessors, transformers
from .utils import DOMParserBase, analyze_imdbid, build_person, build_movie
if PY2:
from urllib import unquote
else:
from urllib.parse import unquote
# Dictionary used to convert some section's names.
_SECT_CONV = {
'directed': 'director',
'directed by': 'director',
'directors': 'director',
'editors': 'editor',
'writing credits': 'writer',
'writers': 'writer',
'produced': 'producer',
'cinematography': 'cinematographer',
'film editing': 'editor',
'casting': 'casting director',
'costume design': 'costume designer',
'makeup department': 'make up',
'production management': 'production manager',
'second unit director or assistant director': 'assistant director',
'costume and wardrobe department': 'costume department',
'costume departmen': 'costume department',
'sound department': 'sound crew',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'also known as': 'akas',
'country': 'countries',
'runtime': 'runtimes',
'language': 'languages',
'certification': 'certificates',
'genre': 'genres',
'created': 'creator',
'creators': 'creator',
'color': 'color info',
'plot': 'plot outline',
'art directors': 'art direction',
'assistant directors': 'assistant director',
'set decorators': 'set decoration',
'visual effects department': 'visual effects',
'miscellaneous': 'miscellaneous crew',
'make up department': 'make up',
'plot summary': 'plot outline',
'cinematographers': 'cinematographer',
'camera department': 'camera and electrical department',
'costume designers': 'costume designer',
'production designers': 'production design',
'production managers': 'production manager',
'music original': 'original music',
'casting directors': 'casting director',
'other companies': 'miscellaneous companies',
'producers': 'producer',
'special effects by': 'special effects department',
'special effects': 'special effects companies'
}
re_space = re.compile(r'\s+')
def _manageRoles(mo):
"""Perform some transformation on the html, so that roleIDs can
be easily retrieved."""
firstHalf = mo.group(1)
secondHalf = mo.group(2)
newRoles = []
roles = secondHalf.split(' / ')
for role in roles:
role = role.strip()
if not role:
continue
roleID = analyze_imdbid(role)
if roleID is None:
roleID = '/'
else:
roleID += '/'
newRoles.append('<div class="_imdbpyrole" roleid="%s">%s</div>' % (
roleID, role.strip()
))
return firstHalf + ' / '.join(newRoles) + mo.group(3)
_reRolesMovie = re.compile(r'(<td class="character">)(.*?)(</td>)', re.I | re.M | re.S)
def makeSplitter(lstrip=None, sep='|', comments=True,
origNotesSep=' (', newNotesSep='::(', strip=None):
"""Return a splitter function suitable for a given set of data."""
def splitter(x):
if not x:
return x
x = x.strip()
if not x:
return x
if lstrip is not None:
x = x.lstrip(lstrip).lstrip()
lx = x.split(sep)
lx[:] = [_f for _f in [j.strip() for j in lx] if _f]
if comments:
lx[:] = [j.replace(origNotesSep, newNotesSep, 1) for j in lx]
if strip:
lx[:] = [j.strip(strip) for j in lx]
return lx
return splitter
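# Example (editor addition): makeSplitter(sep='|')('USA | Italy (as Italia)') returns
# ['USA', 'Italy::(as Italia)'] -- notes after ' (' are joined with the '::' separator.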
def _toInt(val, replace=()):
"""Return the value, converted to integer, or None; if present, 'replace'
must be a list of tuples of values to replace."""
for before, after in replace:
val = val.replace(before, after)
try:
return int(val)
except (TypeError, ValueError):
return None
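# Example (editor addition): _toInt('1,234', replace=[(',', '')]) returns 1234, while a
# non-numeric value returns None instead of raising.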
_re_og_title = re.compile(
r'(.*) \((?:(?:(.+)(?= ))? ?(\d{4})(?:(–)(\d{4}| ))?|(.+))\)',
re.UNICODE
)
def analyze_og_title(og_title):
data = {}
match = _re_og_title.match(og_title)
if og_title and not match:
# assume it's a title in production, missing release date information
return {'title': og_title}
data['title'] = match.group(1)
if match.group(3):
data['year'] = int(match.group(3))
kind = match.group(2) or match.group(6)
if kind is None:
kind = 'movie'
else:
kind = kind.lower()
kind = KIND_MAP.get(kind, kind)
data['kind'] = kind
year_separator = match.group(4)
# There is a year separator so assume an ongoing or ended series
if year_separator is not None:
end_year = match.group(5)
if end_year is not None:
data['series years'] = '%(year)d-%(end_year)s' % {
'year': data['year'],
'end_year': end_year.strip(),
}
elif kind.endswith('series'):
data['series years'] = '%(year)d-' % {'year': data['year']}
# No year separator and series, so assume that it ended the same year
elif kind.endswith('series') and 'year' in data:
data['series years'] = '%(year)d-%(year)d' % {'year': data['year']}
if data['kind'] == 'episode' and data['title'][0] == '"':
quote_end = data['title'].find('"', 1)
data['tv series title'] = data['title'][1:quote_end]
data['title'] = data['title'][quote_end + 1:].strip()
return data
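# Example (editor addition): analyze_og_title('The Untouchables (1987)') returns
# {'title': 'The Untouchables', 'year': 1987, 'kind': 'movie'}.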
def analyze_certificates(certificates):
def reducer(acc, el):
cert_re = re.compile(r'^(.+):(.+)$', re.UNICODE)
if cert_re.match(el):
acc.append(el)
elif acc:
acc[-1] = u'{}::{}'.format(
acc[-1],
el,
)
return acc
certificates = [el.strip() for el in certificates.split('\n') if el.strip()]
return functools.reduce(reducer, certificates, [])
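# Example (editor addition): analyze_certificates('USA:R\n(some note)\nItaly:VM14')
# returns ['USA:R::(some note)', 'Italy:VM14'] -- lines without a "country:rating"
# shape are folded into the preceding certificate as a note.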
def clean_akas(aka):
aka = re_space.sub(' ', aka).strip()
if aka.lower().startswith('see more'):
aka = ''
return aka
class DOMHTMLMovieParser(DOMParserBase):
"""Parser for the "reference" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
mparser = DOMHTMLMovieParser()
result = mparser.parse(reference_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='title',
extractor=Path('//meta[@property="og:title"]/@content',
transform=analyze_og_title)
),
Rule(
key='localized title',
extractor=Path('//div[@class="titlereference-header"]//span[@class="titlereference-title-year"]/preceding-sibling::text()',
transform=lambda x: re_space.sub(' ', x).strip())
),
Rule(
key='original title',
extractor=Path('//div[@class="titlereference-header"]//span[@class="titlereference-original-title-label"]/preceding-sibling::text()',
transform=lambda x: re_space.sub(' ', x).strip())
),
# parser for misc sections like 'casting department', 'stunts', ...
Rule(
key='misc sections',
extractor=Rules(
foreach='//h4[contains(@class, "ipl-header__content")]',
rules=[
Rule(
key=Path('./@name', transform=lambda x: x.replace('_', ' ').strip()),
extractor=Rules(
foreach='../../following-sibling::table[1]//tr',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[1]/a[@href]/@href')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link'))
)
)
)
]
)
),
Rule(
key='cast',
extractor=Rules(
foreach='//table[@class="cast_list"]//tr',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[2]/a/@href')
),
Rule(
key='roleID',
extractor=Path('./td[4]//div[@class="_imdbpyrole"]/@roleid')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or '').split('/')
)
)
),
Rule(
key='recommendations',
extractor=Rules(
foreach='//div[@class="rec_overview"]',
rules=[
Rule(
key='movieID',
extractor=Path(
'./@data-tconst',
transform=lambda x: (x or '').replace('tt', '')
)
),
Rule(
key='title',
extractor=Path(
'.//div[@class="rec-title"]//text()',
transform=lambda x: re_space.sub(' ', x or '').strip()
)
),
],
transform=lambda x: build_movie(x.get('title', ''), movieID=x.get('movieID'))
)
),
Rule(
key='myrating',
extractor=Path('//span[@id="voteuser"]//text()')
),
Rule(
key='plot summary',
extractor=Path('//td[starts-with(text(), "Plot")]/..//p/text()',
transform=lambda x: x.strip().rstrip('|').rstrip())
),
Rule(
key='genres',
extractor=Path(
foreach='//td[starts-with(text(), "Genre")]/..//li/a',
path='./text()'
)
),
Rule(
key='runtimes',
extractor=Path(
foreach='//td[starts-with(text(), "Runtime")]/..//li',
path='./text()',
transform=lambda x: x.strip().replace(' min', '')
)
),
Rule(
key='countries',
extractor=Path(
foreach='//td[starts-with(text(), "Countr")]/..//li/a',
path='./text()'
)
),
Rule(
key='country codes',
extractor=Path(
foreach='//td[starts-with(text(), "Countr")]/..//li/a',
path='./@href',
transform=lambda x: x.split('/')[2].strip().lower()
)
),
Rule(
key='language',
extractor=Path(
foreach='//td[starts-with(text(), "Language")]/..//li/a',
path='./text()'
)
),
Rule(
key='language codes',
extractor=Path(
foreach='//td[starts-with(text(), "Language")]/..//li/a',
path='./@href',
transform=lambda x: x.split('/')[2].strip()
)
),
Rule(
key='color info',
extractor=Path(
foreach='//td[starts-with(text(), "Color")]/..//li/a',
path='./text()',
transform=lambda x: x.replace(' (', '::(')
)
),
Rule(
key='aspect ratio',
extractor=Path(
'//td[starts-with(text(), "Aspect")]/..//li/text()',
transform=transformers.strip
)
),
Rule(
key='sound mix',
extractor=Path(
foreach='//td[starts-with(text(), "Sound Mix")]/..//li/a',
path='./text()',
transform=lambda x: x.replace(' (', '::(')
)
),
Rule(
key='box office',
extractor=Rules(
foreach='//section[contains(@class, "titlereference-section-box-office")]'
'//table[contains(@class, "titlereference-list")]//tr',
rules=[
Rule(
key='box_office_title',
extractor=Path('./td[1]/text()')
),
Rule(
key='box_office_detail',
extractor=Path('./td[2]/text()')
)
],
transform=lambda x: (x['box_office_title'].strip(),
x['box_office_detail'].strip())
),
),
Rule(
key='certificates',
extractor=Path(
'//td[starts-with(text(), "Certificat")]/..//text()',
transform=analyze_certificates
)
),
        # Collects akas not enclosed in <i> tags.
Rule(
key='other akas',
extractor=Path(
foreach='//section[contains(@class, "listo")]//td[starts-with(text(), "Also Known As")]/..//ul/li',
path='.//text()',
transform=clean_akas
)
),
Rule(
key='creator',
extractor=Rules(
foreach='//div[starts-with(normalize-space(text()), "Creator")]/ul/li[1]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='thin writer',
extractor=Rules(
foreach='//div[starts-with(normalize-space(text()), "Writer")]/ul/li[1]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='thin director',
extractor=Rules(
foreach='//div[starts-with(normalize-space(text()), "Director")]/ul/li[1]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='top/bottom rank',
extractor=Path(
'//li[@class="ipl-inline-list__item"]//a[starts-with(@href, "/chart/")]/text()'
)
),
Rule(
key='original air date',
extractor=Path('//span[@imdbpy="airdate"]/text()')
),
Rule(
key='series years',
extractor=Path(
'//div[@id="tn15title"]//span[starts-with(text(), "TV series")]/text()',
transform=lambda x: x.replace('TV series', '').strip()
)
),
Rule(
key='season/episode',
extractor=Path(
'//div[@class="titlereference-overview-season-episode-section"]/ul//text()',
transform=transformers.strip
)
),
Rule(
key='number of episodes',
extractor=Path(
'//a[starts-with(text(), "All Episodes")]/text()',
transform=lambda x: int(x.replace('All Episodes', '').strip()[1:-1])
)
),
Rule(
key='episode number',
extractor=Path(
'//div[@id="tn15epnav"]/text()',
transform=lambda x: int(re.sub(r'[^a-z0-9 ]', '',
x.lower()).strip().split()[0]))
),
Rule(
key='previous episode',
extractor=Path(
'//span[@class="titlereference-overview-episodes-links"]'
'//a[contains(text(), "Previous")]/@href',
transform=analyze_imdbid
)
),
Rule(
key='next episode',
extractor=Path(
'//span[@class="titlereference-overview-episodes-links"]'
'//a[contains(text(), "Next")]/@href',
transform=analyze_imdbid
)
),
Rule(
key='number of seasons',
extractor=Path(
'//span[@class="titlereference-overview-years-links"]/../a[1]/text()',
transform=int
)
),
Rule(
key='tv series link',
extractor=Path('//a[starts-with(text(), "All Episodes")]/@href')
),
Rule(
key='akas',
extractor=Path(
foreach='//i[@class="transl"]',
path='./text()',
transform=lambda x: x
.replace(' ', ' ')
.rstrip('-')
.replace('" - ', '"::', 1)
.strip('"')
.replace(' ', ' ')
)
),
Rule(
key='production status',
extractor=Path(
'//td[starts-with(text(), "Status:")]/..//div[@class="info-content"]//text()',
transform=lambda x: x.strip().split('|')[0].strip().lower()
)
),
Rule(
key='production status updated',
extractor=Path(
'//td[starts-with(text(), "Status Updated:")]/'
'..//div[@class="info-content"]//text()',
transform=transformers.strip
)
),
Rule(
key='production comments',
extractor=Path(
'//td[starts-with(text(), "Comments:")]/'
'..//div[@class="info-content"]//text()',
transform=transformers.strip
)
),
Rule(
key='production note',
extractor=Path(
'//td[starts-with(text(), "Note:")]/'
'..//div[@class="info-content"]//text()',
transform=transformers.strip
)
),
Rule(
key='companies',
extractor=Rules(
foreach="//ul[@class='simpleList']",
rules=[
Rule(
key=Path('preceding-sibling::header[1]/div/h4/text()', transform=transformers.lower),
extractor=Rules(
foreach='./li',
rules=[
Rule(
key='name',
extractor=Path('./a//text()')
),
Rule(
key='comp-link',
extractor=Path('./a/@href')
),
Rule(
key='notes',
extractor=Path('./text()')
)
],
transform=lambda x: Company(
name=x.get('name') or '',
accessSystem='http',
companyID=analyze_imdbid(x.get('comp-link')),
notes=(x.get('notes') or '').strip()
)
)
)
]
)
),
Rule(
key='rating',
extractor=Path('(//span[@class="ipl-rating-star__rating"])[1]/text()')
),
Rule(
key='votes',
extractor=Path('//span[@class="ipl-rating-star__total-votes"][1]/text()')
),
Rule(
key='cover url',
extractor=Path('//img[@alt="Poster"]/@src')
),
Rule(
key='imdbID',
extractor=Path('//meta[@property="pageId"]/@content',
transform=lambda x: (x or '').replace('tt', ''))
)
]
preprocessors = [
('/releaseinfo">', '"><span imdbpy="airdate">'),
(re.compile(r'(<b class="blackcatheader">.+?</b>)', re.I), r'</div><div>\1'),
('<small>Full cast and crew for<br>', ''),
('<td> </td>', '<td>...</td>'),
(re.compile(r'<span class="tv-extra">TV mini-series(\s+.*?)</span>', re.I),
r'<span class="tv-extra">TV series\1</span> (mini)'),
(_reRolesMovie, _manageRoles)
]
def preprocess_dom(self, dom):
# Handle series information.
xpath = self.xpath(dom, "//b[text()='Series Crew']")
if xpath:
b = xpath[-1] # In doubt, take the last one.
for a in self.xpath(b, "./following::h5/a[@class='glossary']"):
name = a.get('name')
if name:
a.set('name', 'series %s' % name)
# Remove links to IMDbPro.
preprocessors.remove(dom, '//span[@class="pro-link"]')
# Remove some 'more' links (keep others, like the one around
# the number of votes).
preprocessors.remove(dom, '//a[@class="tn15more"][starts-with(@href, "/title/")]')
# Remove the "rest of list" in cast.
preprocessors.remove(dom, '//td[@colspan="4"]/..')
return dom
re_space = re.compile(r'\s+')
re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
def postprocess_data(self, data):
# Convert section names.
for sect in list(data.keys()):
if sect in _SECT_CONV:
data[_SECT_CONV[sect]] = data[sect]
del data[sect]
sect = _SECT_CONV[sect]
# Filter out fake values.
for key in data:
value = data[key]
if isinstance(value, list) and value:
if isinstance(value[0], Person):
data[key] = [x for x in value if x.personID is not None]
if isinstance(value[0], _Container):
for obj in data[key]:
obj.accessSystem = self._as
obj.modFunct = self._modFunct
for key in ['title']:
if (key in data) and isinstance(data[key], dict):
subdata = data[key]
del data[key]
data.update(subdata)
misc_sections = data.get('misc sections')
if misc_sections is not None:
for section in misc_sections:
# skip sections with their own parsers
if 'cast' in section.keys():
continue
data.update(section)
del data['misc sections']
if 'akas' in data or 'other akas' in data:
akas = data.get('akas') or []
other_akas = data.get('other akas') or []
akas += other_akas
nakas = []
for aka in akas:
aka = aka.strip()
if not aka:
continue
if aka.endswith('" -'):
aka = aka[:-3].rstrip()
nakas.append(aka)
if 'akas' in data:
del data['akas']
if 'other akas' in data:
del data['other akas']
if nakas:
data['akas'] = nakas
if 'runtimes' in data:
data['runtimes'] = [x.replace(' min', '')
for x in data['runtimes']]
if 'number of seasons' in data:
data['seasons'] = [str(i) for i in range(1, data['number of seasons'] + 1)]
if 'season/episode' in data:
tokens = data['season/episode'].split('Episode')
            try:
                data['season'] = int(tokens[0].split('Season')[1])
            except (IndexError, ValueError):
                data['season'] = 'unknown'
            try:
                data['episode'] = int(tokens[1])
            except (IndexError, ValueError):
                data['episode'] = 'unknown'
del data['season/episode']
for k in ('writer', 'director'):
t_k = 'thin %s' % k
if t_k not in data:
continue
if k not in data:
data[k] = data[t_k]
del data[t_k]
if 'top/bottom rank' in data:
tbVal = data['top/bottom rank'].lower()
if tbVal.startswith('top'):
tbKey = 'top 250 rank'
tbVal = _toInt(tbVal, [('top rated movies: #', '')])
else:
tbKey = 'bottom 100 rank'
tbVal = _toInt(tbVal, [('bottom rated movies: #', '')])
if tbVal:
data[tbKey] = tbVal
del data['top/bottom rank']
if 'year' in data and data['year'] == '????':
del data['year']
if 'tv series link' in data:
if 'tv series title' in data:
data['episode of'] = Movie(title=data['tv series title'],
movieID=analyze_imdbid(data['tv series link']),
accessSystem=self._as,
modFunct=self._modFunct)
data['episode of']['kind'] = 'tv series'
del data['tv series title']
del data['tv series link']
if 'rating' in data:
try:
data['rating'] = float(data['rating'].replace('/10', ''))
except (TypeError, ValueError):
pass
if data['rating'] == 0:
del data['rating']
if 'votes' in data:
try:
votes = data['votes'].replace('(', '').replace(')', '').replace(',', '').replace('votes', '')
data['votes'] = int(votes)
except (TypeError, ValueError):
pass
companies = data.get('companies')
if companies:
for section in companies:
for key, value in section.items():
if key in data:
key = '%s companies' % key
data.update({key: value})
del data['companies']
if 'box office' in data:
data['box office'] = dict(data['box office'])
return data
def _process_plotsummary(x):
"""Process a plot (contributed by Rdian06)."""
xauthor = x.get('author')
xplot = x.get('plot', '').strip()
if xauthor:
xplot += '::%s' % xauthor
return xplot
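# Illustrative behaviour (made-up values):
#   _process_plotsummary({'plot': 'A short summary.', 'author': 'someuser'})
#   -> 'A short summary.::someuser'
#   _process_plotsummary({'plot': 'Anonymous summary.'})
#   -> 'Anonymous summary.'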
class DOMHTMLPlotParser(DOMParserBase):
"""Parser for the "plot summary" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a 'plot' key, containing a list
of string with the structure: 'summary::summary_author <author@email>'.
Example::
        pparser = DOMHTMLPlotParser()
result = pparser.parse(plot_summary_html_string)
"""
_defGetRefs = True
# Notice that recently IMDb started to put the email of the
# author only in the link, that we're not collecting, here.
rules = [
Rule(
key='plot',
extractor=Rules(
foreach='//ul[@id="plot-summaries-content"]/li',
rules=[
Rule(
key='plot',
extractor=Path('./p//text()')
),
Rule(
key='author',
extractor=Path('.//div[@class="author-container"]//a/text()')
)
],
transform=_process_plotsummary
)
),
Rule(
key='synopsis',
extractor=Path(
foreach='//ul[@id="plot-synopsis-content"]',
path='.//li//text()'
)
)
]
def preprocess_dom(self, dom):
preprocessors.remove(dom, '//li[@id="no-summary-content"]')
return dom
def postprocess_data(self, data):
if 'synopsis' in data and data['synopsis'][0] and 'a Synopsis for this title' in data['synopsis'][0]:
del data['synopsis']
return data
def _process_award(x):
award = {}
_award = x.get('award')
if _award is not None:
_award = _award.strip()
award['award'] = _award
if not award['award']:
return {}
award['year'] = x.get('year').strip()
if award['year'] and award['year'].isdigit():
award['year'] = int(award['year'])
award['result'] = x.get('result').strip()
category = x.get('category').strip()
if category:
award['category'] = category
received_with = x.get('with')
if received_with is not None:
award['with'] = received_with.strip()
notes = x.get('notes')
if notes is not None:
notes = notes.strip().split('\n', 2)[0]
notes = re_space.sub(' ', notes)
if notes:
award['notes'] = notes
award['anchor'] = x.get('anchor')
return award
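# Illustrative behaviour (made-up values; 'year', 'result' and 'category' are
# expected to be present in the extracted dictionary):
#   _process_award({'award': 'Oscar', 'year': '1995', 'result': 'Won',
#                   'category': 'Best Picture', 'anchor': '#ev0000003'})
#   -> {'award': 'Oscar', 'year': 1995, 'result': 'Won',
#       'category': 'Best Picture', 'anchor': '#ev0000003'}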
class DOMHTMLAwardsParser(DOMParserBase):
"""Parser for the "awards" page of a given person or movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
        awparser = DOMHTMLAwardsParser()
result = awparser.parse(awards_html_string)
"""
subject = 'title'
_containsObjects = True
rules = [
Rule(
key='awards',
extractor=Rules(
foreach='//*[@id="main"]/div[1]/div/table//tr',
rules=[
Rule(
key='year',
extractor=Path('normalize-space(./ancestor::table/preceding-sibling::*[1]/a/text())')
),
Rule(
key='result',
extractor=Path('./td[1]/b/text()')
),
Rule(
key='award',
extractor=Path('./td[1]/span/text()')
),
Rule(
key='category',
extractor=Path('normalize-space(./ancestor::table/preceding-sibling::*[1]/text())')
),
Rule(
key='notes',
extractor=Path('./td[2]/text()')
),
Rule(
key='anchor',
extractor=Path('.//text()')
)
],
transform=_process_award
)
),
Rule(
key='recipients',
extractor=Rules(
foreach='//*[@id="main"]/div[1]/div/table//tr/td[2]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
),
Rule(
key='anchor',
extractor=Path('./ancestor::tr//text()')
)
]
)
)
]
preprocessors = [
(re.compile('(<tr><td[^>]*>.*?</td></tr>\n\n</table>)', re.I),
r'\1</table>'),
(re.compile('(<tr><td[^>]*>\n\n<big>.*?</big></td></tr>)', re.I),
r'</table><table class="_imdbpy">\1'),
(re.compile('(<table[^>]*>\n\n)</table>(<table)', re.I), r'\1\2'),
(re.compile('(<small>.*?)<br>(.*?</small)', re.I), r'\1 \2'),
(re.compile('(</tr>\n\n)(<td)', re.I), r'\1<tr>\2')
]
def preprocess_dom(self, dom):
"""Repeat td elements according to their rowspan attributes
in subsequent tr elements.
"""
cols = self.xpath(dom, "//td[@rowspan]")
for col in cols:
span = int(col.get('rowspan'))
del col.attrib['rowspan']
position = len(self.xpath(col, "./preceding-sibling::td"))
row = col.getparent()
for tr in self.xpath(row, "./following-sibling::tr")[:span - 1]:
# if not cloned, child will be moved to new parent
clone = self.clone(col)
tr.insert(position, clone)
return dom
def postprocess_data(self, data):
if len(data) == 0:
return {}
nd = []
for award in data['awards']:
matches = [p for p in data.get('recipients', [])
if 'nm' in p.get('link') and award.get('anchor') == p.get('anchor')]
if self.subject == 'title':
recipients = [
Person(name=recipient['name'],
personID=analyze_imdbid(recipient['link']))
for recipient in matches
]
award['to'] = recipients
elif self.subject == 'name':
recipients = [
Movie(title=recipient['name'],
movieID=analyze_imdbid(recipient['link']))
for recipient in matches
]
award['for'] = recipients
nd.append(award)
if 'anchor' in award:
del award['anchor']
return {'awards': nd}
class DOMHTMLTaglinesParser(DOMParserBase):
"""Parser for the "taglines" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
tparser = DOMHTMLTaglinesParser()
result = tparser.parse(taglines_html_string)
"""
rules = [
Rule(
key='taglines',
extractor=Path(
foreach='//div[@id="taglines_content"]/div',
path='.//text()'
)
)
]
def preprocess_dom(self, dom):
preprocessors.remove(dom, '//div[@id="taglines_content"]/div[@class="header"]')
preprocessors.remove(dom, '//div[@id="taglines_content"]/div[@id="no_content"]')
return dom
def postprocess_data(self, data):
if 'taglines' in data:
data['taglines'] = [tagline.strip() for tagline in data['taglines']]
return data
class DOMHTMLKeywordsParser(DOMParserBase):
"""Parser for the "keywords" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
kwparser = DOMHTMLKeywordsParser()
result = kwparser.parse(keywords_html_string)
"""
rules = [
Rule(
key='keywords',
extractor=Path(
foreach='//td[@data-item-keyword]',
path='./@data-item-keyword',
transform=lambda x: x.lower().replace(' ', '-')
)
)
]
class DOMHTMLAlternateVersionsParser(DOMParserBase):
"""Parser for the "alternate versions" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
avparser = DOMHTMLAlternateVersionsParser()
result = avparser.parse(alternateversions_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='alternate versions',
extractor=Path(
foreach='//ul[@class="trivia"]/li',
path='.//text()',
transform=transformers.strip
)
)
]
class DOMHTMLTriviaParser(DOMParserBase):
"""Parser for the "trivia" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
tparser = DOMHTMLTriviaParser()
result = tparser.parse(trivia_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='trivia',
extractor=Path(
foreach='//div[@class="sodatext"]',
path='.//text()',
transform=transformers.strip
)
)
]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
preprocessors.remove(dom, '//span[@class="linksoda"]')
return dom
class DOMHTMLSoundtrackParser(DOMParserBase):
"""Parser for the "soundtrack" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
stparser = DOMHTMLSoundtrackParser()
result = stparser.parse(soundtrack_html_string)
"""
_defGetRefs = True
preprocessors = [('<br />', '\n'), ('<br>', '\n')]
rules = [
Rule(
key='soundtrack',
extractor=Path(
foreach='//div[@class="list"]//div',
path='.//text()',
transform=transformers.strip
)
)
]
def postprocess_data(self, data):
if 'soundtrack' in data:
nd = []
for x in data['soundtrack']:
ds = x.split('\n')
title = ds[0]
if title[0] == '"' and title[-1] == '"':
title = title[1:-1]
nds = []
newData = {}
for l in ds[1:]:
if ' with ' in l or ' by ' in l or ' from ' in l \
or ' of ' in l or l.startswith('From '):
nds.append(l)
else:
if nds:
nds[-1] += l
else:
nds.append(l)
newData[title] = {}
for l in nds:
skip = False
for sep in ('From ',):
if l.startswith(sep):
fdix = len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
skip = True
if not skip:
for sep in ' with ', ' by ', ' from ', ' of ':
fdix = l.find(sep)
if fdix != -1:
fdix = fdix + len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
break
nd.append(newData)
data['soundtrack'] = nd
return data
class DOMHTMLCrazyCreditsParser(DOMParserBase):
"""Parser for the "crazy credits" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
ccparser = DOMHTMLCrazyCreditsParser()
result = ccparser.parse(crazycredits_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='crazy credits',
extractor=Path(
foreach='//ul/li/tt',
path='.//text()',
transform=lambda x: x.replace('\n', ' ').replace(' ', ' ')
)
)
]
def _process_goof(x):
text = (x.get('text') or '').strip()
category = (x.get('category') or 'Goof').strip()
return {"category": category, "text": text}
class DOMHTMLGoofsParser(DOMParserBase):
"""Parser for the "goofs" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
gparser = DOMHTMLGoofsParser()
result = gparser.parse(goofs_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='goofs',
extractor=Rules(
foreach='//div[contains(@class, "soda sodavote")]',
rules=[
Rule(
key='text',
extractor=Path('./div[@class="sodatext"]/text()')
),
Rule(
key='category',
extractor=Path('./preceding-sibling::h4[1]/text()')
)
],
transform=_process_goof
)
)
]
class DOMHTMLQuotesParser(DOMParserBase):
"""Parser for the "memorable quotes" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
qparser = DOMHTMLQuotesParser()
result = qparser.parse(quotes_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='quotes',
extractor=Path(
foreach='//div[@class="sodatext"]',
path='.//text()',
transform=lambda x: x
.strip()
.replace(' \n', '::')
.replace('::\n', '::')
.replace('\n', ' ')
)
)
]
def preprocess_dom(self, dom):
preprocessors.remove(dom, '//div[@class="did-you-know-actions"]')
return dom
def postprocess_data(self, data):
quotes = data.get('quotes', [])
if not quotes:
return {}
quotes = [q.split('::') for q in quotes]
return {'quotes': quotes}
class DOMHTMLReleaseinfoParser(DOMParserBase):
"""Parser for the "release dates" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
rdparser = DOMHTMLReleaseinfoParser()
result = rdparser.parse(releaseinfo_html_string)
"""
rules = [
Rule(
key='release dates',
extractor=Rules(
foreach='//table[contains(@class, "release-dates-table-test-only")]//tr',
rules=[
Rule(
key='country',
extractor=Path('.//td[1]//text()')
),
Rule(
key='date',
extractor=Path('.//td[2]//text()')
),
Rule(
key='notes',
extractor=Path('.//td[3]//text()')
)
]
)
),
Rule(
key='akas',
extractor=Rules(
foreach='//table[contains(@class, "akas-table-test-only")]//tr',
rules=[
Rule(
key='countries',
extractor=Path('./td[1]/text()')
),
Rule(
key='title',
extractor=Path('./td[2]/text()')
)
]
)
)
]
preprocessors = [
(re.compile('(<h5><a name="?akas"?.*</table>)', re.I | re.M | re.S),
r'<div class="_imdbpy_akas">\1</div>')
]
def postprocess_data(self, data):
if not ('release dates' in data or 'akas' in data):
return data
releases = data.get('release dates') or []
rl = []
for i in releases:
country = i.get('country')
date = i.get('date')
if not (country and date):
continue
country = country.strip()
date = date.strip()
if not (country and date):
continue
notes = i.get('notes')
info = '%s::%s' % (country, date)
if notes:
notes = notes.replace('\n', '')
i['notes'] = notes
info += notes
rl.append(info)
if releases:
data['raw release dates'] = data['release dates']
del data['release dates']
if rl:
data['release dates'] = rl
akas = data.get('akas') or []
nakas = []
for aka in akas:
title = (aka.get('title') or '').strip()
if not title:
continue
countries = (aka.get('countries') or '').split(',')
if not countries:
nakas.append(title)
else:
for country in countries:
nakas.append('%s %s' % (title, country.strip()))
if akas:
data['raw akas'] = data['akas']
del data['akas']
if nakas:
data['akas'] = data['akas from release info'] = nakas
return data
class DOMHTMLRatingsParser(DOMParserBase):
"""Parser for the "user ratings" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
rparser = DOMHTMLRatingsParser()
result = rparser.parse(userratings_html_string)
"""
re_means = re.compile(r'mean\s*=\s*([0-9]\.[0-9])\s*median\s*=\s*([0-9])', re.I)
rules = [
Rule(
key='votes',
extractor=Rules(
foreach='//th[@class="firstTableCoulmn"]/../../tr',
rules=[
Rule(
key='ordinal',
extractor=Path('./td[1]/div//text()')
),
Rule(
key='votes',
extractor=Path('./td[3]/div/div//text()')
)
]
)
),
Rule(
key='mean and median',
extractor=Path(
'//div[starts-with(normalize-space(text()), "Arithmetic mean")]/text()'
)
),
Rule(
key='demographics',
extractor=Rules(
foreach='//div[@class="smallcell"]',
rules=[
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='rating',
extractor=Path('..//div[@class="bigcell"]//text()')
),
Rule(
key='votes',
extractor=Path('./a/text()')
)
]
)
)
]
def postprocess_data(self, data):
nd = {}
demographics = data.get('demographics')
if demographics:
dem = {}
for dem_data in demographics:
link = (dem_data.get('link') or '').strip()
votes = (dem_data.get('votes') or '').strip()
rating = (dem_data.get('rating') or '').strip()
if not (link and votes and rating):
continue
eq_idx = link.rfind('=')
if eq_idx == -1:
continue
info = link[eq_idx + 1:].replace('_', ' ')
try:
votes = int(votes.replace(',', ''))
except Exception:
continue
try:
rating = float(rating)
except Exception:
continue
dem[info] = {'votes': votes, 'rating': rating}
nd['demographics'] = dem
votes = data.get('votes', [])
if votes:
nd['number of votes'] = {}
for v_info in votes:
ordinal = v_info.get('ordinal')
nr_votes = v_info.get('votes')
if not (ordinal and nr_votes):
continue
try:
ordinal = int(ordinal)
except Exception:
continue
try:
nr_votes = int(nr_votes.replace(',', ''))
except Exception:
continue
nd['number of votes'][ordinal] = nr_votes
mean = data.get('mean and median', '')
if mean:
means = self.re_means.findall(mean)
if means and len(means[0]) == 2:
am, med = means[0]
try:
am = float(am)
except (ValueError, OverflowError):
pass
if isinstance(am, float):
nd['arithmetic mean'] = am
try:
med = int(med)
except (ValueError, OverflowError):
pass
if isinstance(med, int):
nd['median'] = med
return nd
def _normalize_href(href):
if (href is not None) and (not href.lower().startswith('http://')):
if href.startswith('/'):
href = href[1:]
# TODO: imdbURL_base may be set by the user!
href = '%s%s' % (imdbURL_base, href)
return href
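# Illustrative behaviour: a site-relative link is prefixed with imdbURL_base,
# which is defined elsewhere in the package (e.g. 'https://www.imdb.com/'):
#   _normalize_href('/title/tt0133093/')  -> imdbURL_base + 'title/tt0133093/'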
class DOMHTMLCriticReviewsParser(DOMParserBase):
"""Parser for the "critic reviews" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
crparser = DOMHTMLCriticReviewsParser()
result = crparser.parse(criticreviews_html_string)
"""
kind = 'critic reviews'
rules = [
Rule(
key='metascore',
extractor=Path('//div[@class="metascore_wrap"]/div/span//text()')
),
Rule(
key='metacritic url',
extractor=Path('//div[@class="article"]/div[@class="see-more"]/a/@href')
)
]
class DOMHTMLReviewsParser(DOMParserBase):
"""Parser for the "reviews" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
rparser = DOMHTMLReviewsParser()
result = rparser.parse(reviews_html_string)
"""
rules = [
Rule(
key='reviews',
extractor=Rules(
foreach='//div[@class="review-container"]',
rules=[
Rule(
key='text',
extractor=Path('.//div[@class="text show-more__control"]//text()')
),
Rule(
key='helpful',
extractor=Path('.//div[@class="text-muted"]/text()[1]')
),
Rule(
key='title',
extractor=Path('.//div[@class="title"]//text()')
),
Rule(
key='author',
extractor=Path('.//span[@class="display-name-link"]/a/@href')
),
Rule(
key='date',
extractor=Path('.//span[@class="review-date"]//text()')
),
Rule(
key='rating',
extractor=Path('.//span[@class="point-scale"]/preceding-sibling::span[1]/text()')
)
],
transform=lambda x: ({
'content': x.get('text', '').replace('\n', ' ').replace(' ', ' ').strip(),
'helpful': [int(s) for s in x.get('helpful', '').split() if s.isdigit()],
'title': x.get('title', '').strip(),
'author': analyze_imdbid(x.get('author')),
'date': x.get('date', '').strip(),
'rating': x.get('rating', '').strip()
})
)
)
]
preprocessors = [('<br>', '<br>\n')]
def postprocess_data(self, data):
for review in data.get('reviews', []):
if review.get('rating') and len(review['rating']) == 2:
review['rating'] = int(review['rating'][0])
else:
review['rating'] = None
if review.get('helpful') and len(review['helpful']) == 2:
review['not_helpful'] = review['helpful'][1] - review['helpful'][0]
review['helpful'] = review['helpful'][0]
else:
review['helpful'] = 0
review['not_helpful'] = 0
review['author'] = "ur%s" % review['author']
return data
class DOMHTMLFullCreditsParser(DOMParserBase):
"""Parser for the "full credits" (series cast section) page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
fcparser = DOMHTMLFullCreditsParser()
result = fcparser.parse(fullcredits_html_string)
"""
kind = 'full credits'
rules = [
Rule(
key='cast',
extractor=Rules(
foreach='//table[@class="cast_list"]//tr[@class="odd" or @class="even"]',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[2]/a/@href')
),
Rule(
key='roleID',
extractor=Path('./td[4]//div[@class="_imdbpyrole"]/@roleid')
),
Rule(
key='headshot',
extractor=Path('./td[@class="primary_photo"]/a/img/@loadlate')
)
],
transform=lambda x: build_person(
x.get('person', ''),
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID', '')).split('/'),
headshot=(x.get('headshot', ''))
)
)
)
]
preprocessors = [
(_reRolesMovie, _manageRoles)
]
def postprocess_data(self, data):
clean_cast = []
for person in data.get('cast', []):
if person.personID and person.get('name'):
clean_cast.append(person)
if clean_cast:
data['cast'] = clean_cast
return data
class DOMHTMLOfficialsitesParser(DOMParserBase):
"""Parser for the "official sites", "external reviews"
"miscellaneous links", "sound clips", "video clips" and
"photographs" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
osparser = DOMHTMLOfficialsitesParser()
result = osparser.parse(officialsites_html_string)
"""
rules = [
Rule(
foreach='//h4[@class="li_group"]',
key=Path(
'./text()',
transform=lambda x: x.strip().lower()
),
extractor=Rules(
foreach='./following::ul[1]/li/a',
rules=[
Rule(
key='link',
extractor=Path('./@href')
),
Rule(
key='info',
extractor=Path('./text()')
)
],
transform=lambda x: (
x.get('info').strip(),
unquote(_normalize_href(x.get('link')))
)
)
)
]
class DOMHTMLConnectionParser(DOMParserBase):
"""Parser for the "connections" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
connparser = DOMHTMLConnectionParser()
result = connparser.parse(connections_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='connection',
extractor=Rules(
foreach='//div[@class="_imdbpy"]',
rules=[
Rule(
key=Path('./h5/text()', transform=transformers.lower),
extractor=Rules(
foreach='./a',
rules=[
Rule(
key='title',
extractor=Path('./text()')
),
Rule(
key='movieID',
extractor=Path('./@href')
)
]
)
)
]
)
)
]
preprocessors = [
('<h5>', '</div><div class="_imdbpy"><h5>'),
# To get the movie's year.
('</a> (', ' ('),
('\n<br/>', '</a>'),
('<br/> - ', '::')
]
def postprocess_data(self, data):
for key in list(data.keys()):
nl = []
for v in data[key]:
title = v['title']
ts = title.split('::', 1)
title = ts[0].strip()
notes = ''
if len(ts) == 2:
notes = ts[1].strip()
m = Movie(title=title, movieID=analyze_imdbid(v['movieID']),
accessSystem=self._as, notes=notes, modFunct=self._modFunct)
nl.append(m)
data[key] = nl
if not data:
return {}
return {'connections': data}
class DOMHTMLLocationsParser(DOMParserBase):
"""Parser for the "locations" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
lparser = DOMHTMLLocationsParser()
result = lparser.parse(locations_html_string)
"""
rules = [
Rule(
key='locations',
extractor=Rules(
foreach='//dt',
rules=[
Rule(
key='place',
extractor=Path('.//text()')
),
Rule(
key='note',
extractor=Path('./following-sibling::dd[1]//text()')
)
],
transform=lambda x: ('%s::%s' % (x['place'].strip(),
(x['note'] or '').strip())).strip(':')
)
)
]
class DOMHTMLTechParser(DOMParserBase):
"""Parser for the "technical", "publicity" (for people) and "contacts" (for people)
pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
tparser = DOMHTMLTechParser()
result = tparser.parse(technical_html_string)
"""
kind = 'tech'
re_space = re.compile(r'\s+')
rules = [
Rule(
key='tech',
extractor=Rules(
foreach='//table//tr/td[@class="label"]',
rules=[
Rule(
key=Path(
'./text()',
transform=lambda x: x.lower().strip()),
extractor=Path(
'..//td[2]//text()',
transform=lambda x: [t.strip()
for t in x.split(':::') if t.strip()]
)
)
]
)
)
]
preprocessors = [
(re.compile('(<h5>.*?</h5>)', re.I), r'</div>\1<div class="_imdbpy">'),
(re.compile('((<br/>|</p>|</table>))\n?<br/>(?!<a)', re.I), r'\1</div>'),
# the ones below are for the publicity parser
(re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
(re.compile('(</td><td valign="top">)', re.I), r'\1::'),
(re.compile('(</tr><tr>)', re.I), r'\n\1'),
(re.compile(r'<span class="ghost">\|</span>', re.I), r':::'),
(re.compile('<br/?>', re.I), r':::')
# this is for splitting individual entries
]
def postprocess_data(self, data):
info = {}
for section in data.get('tech', []):
info.update(section)
for key, value in info.items():
if isinstance(value, list):
info[key] = [self.re_space.sub(' ', x).strip() for x in value]
else:
info[key] = self.re_space.sub(' ', value).strip()
return {self.kind: info}
class DOMHTMLNewsParser(DOMParserBase):
"""Parser for the "news" page of a given movie or person.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
nwparser = DOMHTMLNewsParser()
result = nwparser.parse(news_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='news',
extractor=Rules(
foreach='//h2',
rules=[
Rule(
key='title',
extractor=Path('./text()')
),
Rule(
key='fromdate',
extractor=Path('./following-sibling::p[1]/small//text()')
),
Rule(
key='body',
extractor=Path('../following-sibling::p[2]//text()')
),
Rule(
key='link',
extractor=Path('../..//a[text()="Permalink"]/@href')
),
Rule(
key='fulllink',
extractor=Path('../..//a[starts-with(text(), "See full article at")]/@href')
)
],
transform=lambda x: {
'title': x.get('title').strip(),
'date': x.get('fromdate').split('|')[0].strip(),
'from': x.get('fromdate').split('|')[1].replace('From ', '').strip(),
'body': (x.get('body') or '').strip(),
'link': _normalize_href(x.get('link')),
'full article link': _normalize_href(x.get('fulllink'))
}
)
)
]
preprocessors = [
(re.compile('(<a name=[^>]+><h2>)', re.I), r'<div class="_imdbpy">\1'),
(re.compile('(<hr/>)', re.I), r'</div>\1'),
(re.compile('<p></p>', re.I), r'')
]
def postprocess_data(self, data):
if 'news' not in data:
return {}
for news in data['news']:
if 'full article link' in news:
if news['full article link'] is None:
del news['full article link']
return data
def _parse_review(x):
result = {}
title = x.get('title').strip()
if title[-1] == ':':
title = title[:-1]
result['title'] = title
result['link'] = _normalize_href(x.get('link'))
kind = x.get('kind').strip()
if kind[-1] == ':':
kind = kind[:-1]
result['review kind'] = kind
text = x.get('review').replace('\n\n', '||').replace('\n', ' ').split('||')
review = '\n'.join(text)
if x.get('author') is not None:
author = x.get('author').strip()
review = review.split(author)[0].strip()
result['review author'] = author[2:]
if x.get('item') is not None:
item = x.get('item').strip()
review = review[len(item):].strip()
review = "%s: %s" % (item, review)
result['review'] = review
return result
class DOMHTMLSeasonEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
sparser = DOMHTMLSeasonEpisodesParser()
result = sparser.parse(episodes_html_string)
"""
rules = [
Rule(
key='series link',
extractor=Path('//div[@class="parent"]//a/@href')
),
Rule(
key='series title',
extractor=Path('//head/meta[@property="og:title"]/@content')
),
Rule(
key='_seasons',
extractor=Path(
foreach='//select[@id="bySeason"]//option',
path='./@value'
)
),
Rule(
key='_current_season',
extractor=Path('//select[@id="bySeason"]//option[@selected]/@value')
),
Rule(
key='episodes',
extractor=Rules(
foreach='//div[@class="info"]',
rules=[
Rule(
key=Path('.//meta/@content',
transform=lambda x: 'episode %s' % x),
extractor=Rules(
rules=[
Rule(
key='link',
extractor=Path('.//strong//a[@href][1]/@href')
),
Rule(
key='original air date',
extractor=Path('.//div[@class="airdate"]/text()')
),
Rule(
key='title',
extractor=Path('.//strong//text()')
),
Rule(
key='rating',
extractor=Path(
'.//div[contains(@class, "ipl-rating-star")][1]'
'/span[@class="ipl-rating-star__rating"][1]/text()'
)
),
Rule(
key='votes',
extractor=Path(
'.//div[contains(@class, "ipl-rating-star")][1]'
'/span[@class="ipl-rating-star__total-votes"][1]/text()'
)
),
Rule(
key='plot',
extractor=Path('.//div[@class="item_description"]//text()')
)
]
)
)
]
)
)
]
def postprocess_data(self, data):
series_id = analyze_imdbid(data.get('series link'))
series_title = data.get('series title', '').strip()
selected_season = data.get('_current_season', 'unknown season').strip()
if not (series_id and series_title):
return {}
series = Movie(title=series_title, movieID=str(series_id),
accessSystem=self._as, modFunct=self._modFunct)
if series.get('kind') == 'movie':
series['kind'] = 'tv series'
try:
selected_season = int(selected_season)
except ValueError:
pass
nd = {selected_season: {}}
if 'episode -1' in data:
counter = 1
for episode in data['episode -1']:
while 'episode %d' % counter in data:
counter += 1
k = 'episode %d' % counter
data[k] = [episode]
del data['episode -1']
episodes = data.get('episodes', [])
for ep in episodes:
if not ep:
continue
episode_nr, episode = list(ep.items())[0]
if not episode_nr.startswith('episode '):
continue
episode_nr = episode_nr[8:].rstrip()
try:
episode_nr = int(episode_nr)
except ValueError:
pass
            episode_id = analyze_imdbid(episode.get('link', ''))
episode_air_date = episode.get('original air date', '').strip()
episode_title = episode.get('title', '').strip()
episode_plot = episode.get('plot', '')
episode_rating = episode.get('rating', '')
episode_votes = episode.get('votes', '')
if not (episode_nr is not None and episode_id and episode_title):
continue
ep_obj = Movie(movieID=episode_id, title=episode_title,
accessSystem=self._as, modFunct=self._modFunct)
ep_obj['kind'] = 'episode'
ep_obj['episode of'] = series
ep_obj['season'] = selected_season
ep_obj['episode'] = episode_nr
            if episode_rating:
                try:
                    ep_obj['rating'] = float(episode_rating)
                except ValueError:
                    pass
            if episode_votes:
                try:
                    ep_obj['votes'] = int(episode_votes.replace(',', '')
                                          .replace('.', '').replace('(', '').replace(')', ''))
                except ValueError:
                    pass
if episode_air_date:
ep_obj['original air date'] = episode_air_date
if episode_air_date[-4:].isdigit():
ep_obj['year'] = episode_air_date[-4:]
if episode_plot:
ep_obj['plot'] = episode_plot
nd[selected_season][episode_nr] = ep_obj
_seasons = data.get('_seasons') or []
for idx, season in enumerate(_seasons):
try:
_seasons[idx] = int(season)
except ValueError:
pass
return {'episodes': nd, '_seasons': _seasons, '_current_season': selected_season}
def _build_episode(x):
"""Create a Movie object for a given series' episode."""
episode_id = analyze_imdbid(x.get('link'))
episode_title = x.get('title')
e = Movie(movieID=episode_id, title=episode_title)
e['kind'] = 'episode'
oad = x.get('oad')
if oad:
e['original air date'] = oad.strip()
year = x.get('year')
if year is not None:
year = year[5:]
if year == 'unknown':
year = '????'
if year and year.isdigit():
year = int(year)
e['year'] = year
else:
if oad and oad[-4:].isdigit():
e['year'] = int(oad[-4:])
epinfo = x.get('episode')
if epinfo is not None:
season, episode = epinfo.split(':')[0].split(',')
e['season'] = int(season[7:])
e['episode'] = int(episode[8:])
else:
e['season'] = 'unknown'
e['episode'] = 'unknown'
plot = x.get('plot')
if plot:
e['plot'] = plot.strip()
return e
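# Illustrative behaviour with made-up extracted values (analyze_imdbid and Movie
# are defined elsewhere in the package):
#   _build_episode({'link': '/title/tt0606111/', 'title': 'Pilot',
#                   'year': 'year-2004', 'oad': '22 September 2004',
#                   'episode': 'Season 1, Episode 1:'})
# returns a Movie with kind 'episode', 'original air date' '22 September 2004',
# year 2004, season 1 and episode 1.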
class DOMHTMLEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
eparser = DOMHTMLEpisodesParser()
result = eparser.parse(episodes_html_string)
"""
kind = 'episodes list'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::span/strong[1]/text()"
def _init(self):
self.rules = [
Rule(
key='series title',
extractor=Path('//title/text()')
),
Rule(
key='series movieID',
extractor=Path(
'.//h1/a[@class="main"]/@href',
transform=analyze_imdbid
)
),
Rule(
key='episodes',
extractor=Rules(
foreach='//div[@class="_imdbpy"]/h3',
rules=[
Rule(
key='./a/@name',
extractor=Rules(
foreach=self._episodes_path,
rules=[
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='title',
extractor=Path('./a/text()')
),
Rule(
key='year',
extractor=Path('./preceding-sibling::a[1]/@name')
),
Rule(
key='episode',
extractor=Path('./text()[1]')
),
Rule(
key='oad',
extractor=Path(self._oad_path)
),
Rule(
key='plot',
extractor=Path('./following-sibling::text()[1]')
)
],
transform=_build_episode
)
)
]
)
)
]
preprocessors = [
(re.compile('(<hr/>\n)(<h3>)', re.I), r'</div>\1<div class="_imdbpy">\2'),
(re.compile('(</p>\n\n)</div>', re.I), r'\1'),
(re.compile('<h3>(.*?)</h3>', re.I), r'<h4>\1</h4>'),
(_reRolesMovie, _manageRoles),
(re.compile('(<br/> <br/>\n)(<hr/>)', re.I), r'\1</div>\2')
]
def postprocess_data(self, data):
# A bit extreme?
if 'series title' not in data:
return {}
if 'series movieID' not in data:
return {}
stitle = data['series title'].replace('- Episode list', '')
stitle = stitle.replace('- Episodes list', '')
stitle = stitle.replace('- Episode cast', '')
stitle = stitle.replace('- Episodes cast', '')
stitle = stitle.strip()
if not stitle:
return {}
seriesID = data['series movieID']
if seriesID is None:
return {}
series = Movie(title=stitle, movieID=str(seriesID),
accessSystem=self._as, modFunct=self._modFunct)
nd = {}
for key in list(data.keys()):
if key.startswith('filter-season-') or key.startswith('season-'):
season_key = key.replace('filter-season-', '').replace('season-', '')
try:
season_key = int(season_key)
except ValueError:
pass
nd[season_key] = {}
ep_counter = 1
for episode in data[key]:
if not episode:
continue
episode_key = episode.get('episode')
if episode_key is None:
continue
if not isinstance(episode_key, int):
episode_key = ep_counter
ep_counter += 1
cast_key = 'Season %s, Episode %s:' % (season_key, episode_key)
if cast_key in data:
cast = data[cast_key]
for i in range(len(cast)):
cast[i].billingPos = i + 1
episode['cast'] = cast
episode['episode of'] = series
nd[season_key][episode_key] = episode
if len(nd) == 0:
return {}
return {'episodes': nd}
class DOMHTMLFaqsParser(DOMParserBase):
"""Parser for the "FAQ" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
fparser = DOMHTMLFaqsParser()
result = fparser.parse(faqs_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='faqs',
extractor=Rules(
foreach='//div[@class="section"]',
rules=[
Rule(
key='question',
extractor=Path('./h3/a/span/text()')
),
Rule(
key='answer',
extractor=Path('../following-sibling::div[1]//text()')
)
],
transform=lambda x: '%s::%s' % (
x.get('question').strip(),
'\n\n'.join(x.get('answer').replace('\n\n', '\n').strip().split('||'))
)
)
)
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||'),
(re.compile('<h4>(.*?)</h4>\n', re.I), r'||\1--'),
(re.compile('<span class="spoiler"><span>(.*?)</span></span>', re.I),
r'[spoiler]\1[/spoiler]')
]
class DOMHTMLAiringParser(DOMParserBase):
"""Parser for the "airing" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
aparser = DOMHTMLAiringParser()
result = aparser.parse(airing_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='series title',
extractor=Path(
'//title/text()',
transform=lambda x: x.replace(' - TV schedule', '')
)
),
Rule(
key='series id',
extractor=Path('//h1/a[@href]/@href')
),
Rule(
            key='airing',
extractor=Rules(
foreach='//tr[@class]',
rules=[
Rule(
key='date',
extractor=Path('./td[1]//text()')
),
Rule(
key='time',
extractor=Path('./td[2]//text()')
),
Rule(
key='channel',
extractor=Path('./td[3]//text()')
),
Rule(
key='link',
extractor=Path('./td[4]/a[1]/@href')
),
Rule(
key='title',
extractor=Path('./td[4]//text()')
),
Rule(
key='season',
extractor=Path('./td[5]//text()')
)
],
transform=lambda x: {
'date': x.get('date'),
'time': x.get('time'),
'channel': x.get('channel').strip(),
'link': x.get('link'),
'title': x.get('title'),
'season': (x.get('season') or '').strip()
}
)
)
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
seriesTitle = data.get('series title') or ''
seriesID = analyze_imdbid(data.get('series id'))
if seriesID and 'airing' in data:
for airing in data['airing']:
title = airing.get('title', '').strip()
if not title:
epsTitle = seriesTitle
if seriesID is None:
continue
epsID = seriesID
else:
epsTitle = '%s {%s}' % (data['series title'],
airing['title'])
epsID = analyze_imdbid(airing['link'])
e = Movie(title=epsTitle, movieID=epsID)
airing['episode'] = e
del airing['link']
del airing['title']
if not airing['season']:
del airing['season']
if 'series title' in data:
del data['series title']
if 'series id' in data:
del data['series id']
if 'airing' in data:
data['airing'] = [_f for _f in data['airing'] if _f]
if 'airing' not in data or not data['airing']:
return {}
return data
class DOMHTMLParentsGuideParser(DOMParserBase):
"""Parser for the "parents guide" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
        pgparser = DOMHTMLParentsGuideParser()
result = pgparser.parse(parentsguide_html_string)
"""
rules = [
Rule(
key='parents guide',
extractor=Rules(
foreach='//tr[@class="ipl-zebra-list__item"]',
rules=[
Rule(
key=Path(
'./td[1]/text()',
transform=transformers.lower
),
extractor=Path(
path='./td[2]//text()',
transform=lambda x: [
re_space.sub(' ', t)
for t in x.split('\n') if t.strip()
]
)
)
]
)
)
]
def postprocess_data(self, data):
ret = {}
for sect in data.get('parents guide', []):
for key, value in sect.items():
ret[key] = value
if isinstance(ret.get('mpaa'), list):
ret['mpaa'] = ret['mpaa'][0]
return ret
_OBJECTS = {
'movie_parser': ((DOMHTMLMovieParser,), None),
'full_credits_parser': ((DOMHTMLFullCreditsParser,), None),
'plot_parser': ((DOMHTMLPlotParser,), None),
'movie_awards_parser': ((DOMHTMLAwardsParser,), None),
'taglines_parser': ((DOMHTMLTaglinesParser,), None),
'keywords_parser': ((DOMHTMLKeywordsParser,), None),
'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None),
'goofs_parser': ((DOMHTMLGoofsParser,), None),
'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None),
'trivia_parser': ((DOMHTMLTriviaParser,), None),
'soundtrack_parser': ((DOMHTMLSoundtrackParser,), None),
'quotes_parser': ((DOMHTMLQuotesParser,), None),
'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None),
'ratings_parser': ((DOMHTMLRatingsParser,), None),
'criticrev_parser': ((DOMHTMLCriticReviewsParser,), {'kind': 'critic reviews'}),
'reviews_parser': ((DOMHTMLReviewsParser,), {'kind': 'reviews'}),
'externalsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'externalrev_parser': ((DOMHTMLOfficialsitesParser,), None),
'misclinks_parser': ((DOMHTMLOfficialsitesParser,), None),
'soundclips_parser': ((DOMHTMLOfficialsitesParser,), None),
'videoclips_parser': ((DOMHTMLOfficialsitesParser,), None),
'photosites_parser': ((DOMHTMLOfficialsitesParser,), None),
'connections_parser': ((DOMHTMLConnectionParser,), None),
'tech_parser': ((DOMHTMLTechParser,), None),
'locations_parser': ((DOMHTMLLocationsParser,), None),
'news_parser': ((DOMHTMLNewsParser,), None),
'episodes_parser': ((DOMHTMLEpisodesParser,), None),
'season_episodes_parser': ((DOMHTMLSeasonEpisodesParser,), None),
'movie_faqs_parser': ((DOMHTMLFaqsParser,), None),
'airing_parser': ((DOMHTMLAiringParser,), None),
'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None)
}
|
the-stack_0_19559
|
# 1. data
dataset_type = "Thumos14Dataset"
data_root = "data/thumos14/"
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
num_frames = 480
chunk_size = 32
img_shape = (112, 112)
overlap_ratio = 0.25
keep_ratio = 0.4
feat_downsample = 2
expid = "8.b.i"
data = dict(
samples_per_gpu=4,
workers_per_gpu=6,
train=dict(
typename=dataset_type,
ann_file=data_root + "annotations/val.json",
video_prefix=data_root + "frames_15fps/val",
pipeline=[
dict(typename="LoadMetaInfo"),
dict(typename="LoadAnnotations"),
dict(typename="Time2Frame"),
dict(typename="TemporalRandomCrop", num_frames=num_frames, iof_th=0.75),
dict(typename="LoadFrames", to_float32=True),
dict(typename="SpatialRandomCrop", crop_size=img_shape),
dict(
typename="PhotoMetricDistortion",
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
p=0.5,
),
dict(typename="Rotate", limit=(-45, 45), border_mode="reflect101", p=0.5),
dict(typename="SpatialRandomFlip", flip_ratio=0.5),
dict(typename="Normalize", **img_norm_cfg),
dict(typename="Pad", size=(num_frames, *img_shape)),
dict(typename="DefaultFormatBundle"),
dict(
typename="Collect",
keys=["imgs", "gt_segments", "gt_labels", "gt_segments_ignore"],
),
],
),
val=dict(
typename=dataset_type,
ann_file=data_root + "annotations/test.json",
video_prefix=data_root + "frames_15fps/test",
pipeline=[
dict(typename="LoadMetaInfo"),
dict(typename="Time2Frame"),
dict(
typename="OverlapCropAug",
num_frames=num_frames,
overlap_ratio=overlap_ratio,
transforms=[
dict(typename="TemporalCrop"),
dict(typename="LoadFrames", to_float32=True),
dict(typename="SpatialCenterCrop", crop_size=img_shape),
dict(typename="Normalize", **img_norm_cfg),
dict(typename="Pad", size=(num_frames, *img_shape)),
dict(typename="DefaultFormatBundle"),
dict(typename="Collect", keys=["imgs"]),
],
),
],
),
)
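# Note on the data pipeline above: the THUMOS14 "val" split (annotations/val.json)
# is used for training and the "test" split for evaluation, which is the common
# protocol for temporal action detection on this benchmark; training samples are
# random 480-frame crops, while test videos are covered by OverlapCropAug with
# 25%-overlapping windows of the same length.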
# 2. model
num_classes = 20
strides = [8, 16, 32, 64, 128]
use_sigmoid = True
scales_per_octave = 5
octave_base_scale = 2
num_anchors = scales_per_octave
model = dict(
typename="MemSingleStageDetector",
chunk_size=chunk_size,
backbone=dict(
typename="ChunkVideoSwin",
chunk_size=chunk_size,
do_pooling=True,
patch_size=(2, 4, 4),
in_chans=3,
embed_dim=96,
drop_path_rate=0.1,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=(8, 7, 7),
patch_norm=True,
frozen_stages=2,
use_checkpoint=False,
),
neck=[
dict(
typename="SRMSwin",
srm_cfg=dict(
in_channels=768,
out_channels=512,
with_transformer=False,
),
),
dict(
typename="Transformer1DRelPos",
encoder_layer_cfg=dict(
dim=512,
num_heads=16,
max_seq_len=num_frames // strides[0],
drop_path=0.1,
),
num_layers=3,
),
dict(
typename="SelfAttnTDM",
in_channels=512,
out_channels=512,
strides=2,
num_heads=8,
            kernel_sizes=None,
stage_layers=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3, 4),
out_order="bct",
),
dict(
typename="FPN",
in_channels=[512, 512, 512, 512, 512],
out_channels=256,
num_outs=5,
start_level=0,
conv_cfg=dict(typename="Conv1d"),
norm_cfg=dict(typename="SyncBN"),
),
],
head=dict(
typename="RetinaHead",
num_classes=num_classes,
num_anchors=num_anchors,
in_channels=256,
stacked_convs=4,
feat_channels=256,
use_sigmoid=use_sigmoid,
conv_cfg=dict(typename="Conv1d"),
norm_cfg=dict(typename="SyncBN"),
),
)
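# Architecture sketch implied by the config above: a chunk-wise Video Swin-T
# backbone -> SRMSwin channel reduction (768 -> 512) -> 3-layer 1D transformer with
# relative positions -> SelfAttnTDM temporal pyramid (5 levels) -> 1D FPN (256
# channels) -> RetinaNet-style 1D head with num_anchors = scales_per_octave = 5
# anchors per temporal location on each of the strides defined above.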
# 3. engines
meshgrid = dict(
typename="SegmentAnchorMeshGrid",
strides=strides,
base_anchor=dict(
typename="SegmentBaseAnchor",
base_sizes=strides,
octave_base_scale=octave_base_scale,
scales_per_octave=scales_per_octave,
),
)
segment_coder = dict(
typename="DeltaSegmentCoder", target_means=[0.0, 0.0], target_stds=[1.0, 1.0]
)
train_engine = dict(
typename="MemBankTrainEngine",
membank=dict(
chunk_size=chunk_size,
keep_ratio=keep_ratio,
feat_downsample=feat_downsample,
mode="random",
mem_bank_meta_file=f"data/tmp/thumos14/memory_mechanism/{expid}/feat_swint_15fps_128x128_crop112x112/meta_val.json",
mem_bank_dir=f"data/tmp/thumos14/memory_mechanism/{expid}/feat_swint_15fps_128x128_crop112x112/val",
),
model=model,
criterion=dict(
typename="SegmentAnchorCriterion",
num_classes=num_classes,
meshgrid=meshgrid,
segment_coder=segment_coder,
reg_decoded_segment=True,
loss_cls=dict(
typename="FocalLoss",
use_sigmoid=use_sigmoid,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
),
loss_segment=dict(typename="DIoULoss", loss_weight=1.0),
train_cfg=dict(
assigner=dict(
typename="MaxIoUAssigner",
pos_iou_thr=0.6,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1,
ignore_wrt_candidates=True,
iou_calculator=dict(typename="SegmentOverlaps"),
),
allowed_border=-1,
pos_weight=-1,
debug=False,
),
),
optimizer=dict(
typename="SGD",
lr=0.01,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(custom_keys=dict(backbone={"lr_mult": 0.4})),
),
)
# 3.2 val engine
val_engine = dict(
typename="ValEngine",
model=model,
meshgrid=meshgrid,
converter=dict(
typename="SegmentAnchorConverter",
num_classes=num_classes,
segment_coder=segment_coder,
nms_pre=1000,
use_sigmoid=use_sigmoid,
),
num_classes=num_classes,
test_cfg=dict(
score_thr=0.005, nms=dict(typename="nmw", iou_thr=0.5), max_per_video=1200
),
use_sigmoid=use_sigmoid,
)
# 4. hooks
hooks = [
dict(typename="OptimizerHook"),
dict(
typename="CosineRestartLrSchedulerHook",
periods=[100] * 12,
restart_weights=[1] * 12,
warmup="linear",
warmup_iters=500,
warmup_ratio=1e-1,
min_lr_ratio=1e-2,
),
dict(typename="EvalHook", eval_cfg=dict(mode="anet")),
dict(typename="SnapshotHook", interval=100),
dict(typename="LoggerHook", interval=10),
]
# 5. work modes
modes = ["train"]
max_epochs = 1200
# 6. checkpoint
# weights = dict(filepath='open-mmlab://i3d_r50_256p_32x2x1_100e_kinetics400_rgb')
weights = dict(
filepath="data/pretrained_models/vswin/swin_tiny_patch244_window877_kinetics400_1k_keysfrom_backbone.pth"
)
# optimizer = dict(filepath='epoch_900_optim.pth')
# meta = dict(filepath='epoch_900_meta.pth')
# 7. misc
seed = 10
dist_params = dict(backend="nccl")
log_level = "INFO"
find_unused_parameters = False
|
the-stack_0_19560
|
#!/usr/bin/env python
from tokens import EOF, Token  # EOF is used by TokenStream below
from errors import ParseError
class TokenStream:
def __init__(self, tokens):
self.tokens = tuple(tokens)
self.at = 0
def current(self):
if self.at >= len(self.tokens):
return EOF('')
raise ParseError('ran out of tokens')
return self.tokens[self.at]
    def next(self):
        self.at += 1
        if self.at > len(self.tokens):
            return EOF('')
advance = next
def hasNext(self):
return self.at < len(self.tokens) - 1
class AstNode(object):
pass
class ParseTree(object):
__slots__ = ('rule', 'name', 'children')
def __init__(self, rule, name):
self.rule = rule
self.name = name
self.children = []
def add(self, child):
self.children.append(child)
def __repr__(self):
text = '<%s>\n ' % self.name
for child in self.children:
if isinstance(child, ParseTree):
text += repr(child).replace('\n', '\n ')
else:
text += repr(child) + '\n '
text = text.rstrip() + '\n' + '</%s>' % self.name
return text
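# A hedged usage sketch for TokenStream and ParseTree. `tokenize` and the Token
# class are assumptions here (they would live in the project's `tokens` module,
# which is not shown), so this is illustrative only:
#
#   stream = TokenStream(tokenize("1 + 2"))
#   tree = ParseTree(rule=None, name='expr')
#   tree.add(stream.current())   # peek at the current token without consuming it
#   stream.advance()             # move past it (alias of next())
#   print(tree)                  # renders the tree as <expr> ... </expr>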
# vim: et sw=4 sts=4
|
the-stack_0_19561
|
import torch
import torch.utils.data
import pandas as pd
import os
import numpy as np
from numpy import genfromtxt
import tqdm
BANDS = ['B1', 'B10', 'B11', 'B12', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8',
'B8A', 'B9']
NORMALIZING_FACTOR = 1e-4
PADDING_VALUE = -1
class BavarianCropsDataset(torch.utils.data.Dataset):
def __init__(self, root, partition, classmapping, mode=None, scheme="random", region=None, samplet=70, cache=True, seed=0, validfraction=0.1):
assert (mode in ["trainvalid", "traintest"] and scheme=="random") or (mode is None and scheme=="blocks") # <- if scheme random mode is required, else None
assert scheme in ["random","blocks"]
assert partition in ["train","test","trainvalid","valid"]
self.seed = seed
self.validfraction = validfraction
self.scheme = scheme
# ensure that different seeds are set per partition
seed += sum([ord(ch) for ch in partition])
np.random.seed(seed)
torch.random.manual_seed(seed)
self.mode = mode
self.root = root
if scheme=="random":
if mode == "traintest":
self.trainids = os.path.join(self.root, "ids", "random", region+"_train.txt")
self.testids = os.path.join(self.root, "ids", "random", region+"_test.txt")
elif mode == "trainvalid":
self.trainids = os.path.join(self.root, "ids", "random", region+"_train.txt")
self.testids = None
self.read_ids = self.read_ids_random
elif scheme=="blocks":
self.trainids = os.path.join(self.root, "ids", "blocks", region+"_train.txt")
self.testids = os.path.join(self.root, "ids", "blocks", region+"_test.txt")
self.validids = os.path.join(self.root, "ids", "blocks", region + "_valid.txt")
self.read_ids = self.read_ids_blocks
self.mapping = pd.read_csv(classmapping, index_col=0).sort_values(by="id")
self.mapping = self.mapping.set_index("nutzcode")
self.classes = self.mapping["id"].unique()
self.classname = self.mapping.groupby("id").first().classname.values
self.klassenname = self.mapping.groupby("id").first().klassenname.values
self.nclasses = len(self.classes)
self.region = region
self.partition = partition
self.data_folder = "{root}/csv/{region}".format(root=self.root, region=self.region)
self.samplet = samplet
#all_csv_files
#self.csvfiles = [ for f in os.listdir(root)]
print("Initializing BavarianCropsDataset {} partition in {}".format(self.partition, self.region))
self.cache = os.path.join(self.root,"npy",os.path.basename(classmapping), scheme,region, partition)
print("read {} classes".format(self.nclasses))
if cache and self.cache_exists() and not self.mapping_consistent_with_cache():
self.clean_cache()
if cache and self.cache_exists() and self.mapping_consistent_with_cache():
print("precached dataset files found at " + self.cache)
self.load_cached_dataset()
else:
print("no cached dataset found. iterating through csv folders in " + str(self.data_folder))
self.cache_dataset()
self.hist, _ = np.histogram(self.y, bins=self.nclasses)
print("loaded {} samples".format(len(self.ids)))
#print("class frequencies " + ", ".join(["{c}:{h}".format(h=h, c=c) for h, c in zip(self.hist, self.classes)]))
print(self)
def __str__(self):
return "Dataset {}. region {}. partition {}. X:{}, y:{} with {} classes".format(self.root, self.region, self.partition,str(len(self.X)) +"x"+ str(self.X[0].shape), self.y.shape, self.nclasses)
def read_ids_random(self):
assert isinstance(self.seed, int)
assert isinstance(self.validfraction, float)
assert self.partition in ["train", "valid", "test"]
assert self.trainids is not None
assert os.path.exists(self.trainids)
np.random.seed(self.seed)
"""if trainids file provided and no testids file <- sample holdback set from trainids"""
if self.testids is None:
assert self.partition in ["train", "valid"]
print("partition {} and no test ids file provided. Splitting trainids file in train and valid partitions".format(self.partition))
with open(self.trainids,"r") as f:
ids = [int(id) for id in f.readlines()]
print("Found {} ids in {}".format(len(ids), self.trainids))
np.random.shuffle(ids)
validsize = int(len(ids) * self.validfraction)
validids = ids[:validsize]
trainids = ids[validsize:]
print("splitting {} ids in {} for training and {} for validation".format(len(ids), len(trainids), len(validids)))
assert len(validids) + len(trainids) == len(ids)
if self.partition == "train":
return trainids
if self.partition == "valid":
return validids
elif self.testids is not None:
assert self.partition in ["train", "test"]
if self.partition=="test":
with open(self.testids,"r") as f:
test_ids = [int(id) for id in f.readlines()]
print("Found {} ids in {}".format(len(test_ids), self.testids))
return test_ids
if self.partition == "train":
with open(self.trainids, "r") as f:
train_ids = [int(id) for id in f.readlines()]
return train_ids
def read_ids_blocks(self):
assert self.partition in ["train", "valid", "test", "trainvalid"]
assert os.path.exists(self.validids)
assert os.path.exists(self.testids)
assert os.path.exists(self.trainids)
assert self.scheme == "blocks"
assert self.mode is None
def read(filename):
with open(filename, "r") as f:
ids = [int(id) for id in f.readlines()]
return ids
if self.partition == "train":
ids = read(self.trainids)
elif self.partition == "valid":
ids = read(self.validids)
elif self.partition == "test":
ids = read(self.testids)
elif self.partition == "trainvalid":
ids = read(self.trainids) + read(self.validids)
return ids
def cache_dataset(self):
"""
        Iterates through the data folders and stores y, ids, classweights, and sequencelengths.
        X is loaded lazily in __getitem__.
"""
#ids = self.split(self.partition)
ids = self.read_ids()
assert len(ids) > 0
self.X = list()
self.nutzcodes = list()
self.stats = dict(
not_found=list()
)
self.ids = list()
self.samples = list()
#i = 0
for id in tqdm.tqdm(ids):
id_file = self.data_folder+"/{id}.csv".format(id=id)
if os.path.exists(id_file):
self.samples.append(id_file)
X,nutzcode = self.load(id_file)
if len(nutzcode) > 0:
nutzcode = nutzcode[0]
if nutzcode in self.mapping.index:
self.X.append(X)
self.nutzcodes.append(nutzcode)
self.ids.append(id)
else:
self.stats["not_found"].append(id_file)
self.y = self.applyclassmapping(self.nutzcodes)
self.sequencelengths = np.array([np.array(X).shape[0] for X in self.X])
assert len(self.sequencelengths) > 0
self.sequencelength = self.sequencelengths.max()
self.ndims = np.array(X).shape[1]
self.hist,_ = np.histogram(self.y, bins=self.nclasses)
self.classweights = 1 / self.hist
#if 0 in self.hist:
# classid_ = np.argmin(self.hist)
# nutzid_ = self.mapping.iloc[classid_].name
# raise ValueError("Class {id} (nutzcode {nutzcode}) has 0 occurences in the dataset! "
# "Check dataset or mapping table".format(id=classid_, nutzcode=nutzid_))
#self.dataweights = np.array([self.classweights[y] for y in self.y])
self.cache_variables(self.y, self.sequencelengths, self.ids, self.ndims, self.X, self.classweights)
def mapping_consistent_with_cache(self):
# cached y must have the same number of classes than the mapping
return True
#return len(np.unique(np.load(os.path.join(self.cache, "y.npy")))) == self.nclasses
def cache_variables(self, y, sequencelengths, ids, ndims, X, classweights):
os.makedirs(self.cache, exist_ok=True)
# cache
np.save(os.path.join(self.cache, "classweights.npy"), classweights)
np.save(os.path.join(self.cache, "y.npy"), y)
np.save(os.path.join(self.cache, "ndims.npy"), ndims)
np.save(os.path.join(self.cache, "sequencelengths.npy"), sequencelengths)
np.save(os.path.join(self.cache, "ids.npy"), ids)
#np.save(os.path.join(self.cache, "dataweights.npy"), dataweights)
np.save(os.path.join(self.cache, "X.npy"), X)
def load_cached_dataset(self):
# load
self.classweights = np.load(os.path.join(self.cache, "classweights.npy"))
self.y = np.load(os.path.join(self.cache, "y.npy"))
self.ndims = int(np.load(os.path.join(self.cache, "ndims.npy")))
self.sequencelengths = np.load(os.path.join(self.cache, "sequencelengths.npy"))
self.sequencelength = self.sequencelengths.max()
self.ids = np.load(os.path.join(self.cache, "ids.npy"))
self.X = np.load(os.path.join(self.cache, "X.npy"), allow_pickle=True)
def cache_exists(self):
weightsexist = os.path.exists(os.path.join(self.cache, "classweights.npy"))
yexist = os.path.exists(os.path.join(self.cache, "y.npy"))
ndimsexist = os.path.exists(os.path.join(self.cache, "ndims.npy"))
sequencelengthsexist = os.path.exists(os.path.join(self.cache, "sequencelengths.npy"))
idsexist = os.path.exists(os.path.join(self.cache, "ids.npy"))
Xexists = os.path.exists(os.path.join(self.cache, "X.npy"))
return yexist and sequencelengthsexist and idsexist and ndimsexist and Xexists and weightsexist
def clean_cache(self):
os.remove(os.path.join(self.cache, "classweights.npy"))
os.remove(os.path.join(self.cache, "y.npy"))
os.remove(os.path.join(self.cache, "ndims.npy"))
os.remove(os.path.join(self.cache, "sequencelengths.npy"))
os.remove(os.path.join(self.cache, "ids.npy"))
#os.remove(os.path.join(self.cache, "dataweights.npy"))
os.remove(os.path.join(self.cache, "X.npy"))
os.removedirs(self.cache)
def load(self, csv_file, load_pandas = False):
"""['B1', 'B10', 'B11', 'B12', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8',
'B8A', 'B9', 'QA10', 'QA20', 'QA60', 'doa', 'label', 'id']"""
if load_pandas:
sample = pd.read_csv(csv_file, index_col=0)
X = np.array((sample[BANDS] * NORMALIZING_FACTOR).values)
nutzcodes = sample["label"].values
# nutzcode to classids (451,411) -> (0,1)
else: # load with numpy
data = genfromtxt(csv_file, delimiter=',', skip_header=1)
X = data[:, 1:14] * NORMALIZING_FACTOR
nutzcodes = data[:, 18]
# drop times that contain nans
        if np.isnan(X).any():
            t_with_nans = np.isnan(X).sum(1) > 0
            X = X[~t_with_nans]
            nutzcodes = nutzcodes[~t_with_nans]
return X, nutzcodes
def applyclassmapping(self, nutzcodes):
"""uses a mapping table to replace nutzcodes (e.g. 451, 411) with class ids"""
return np.array([self.mapping.loc[nutzcode]["id"] for nutzcode in nutzcodes])
def __len__(self):
return len(self.ids)
def __getitem__(self, idx):
load_file = False
if load_file:
id = self.ids[idx]
csvfile = os.path.join(self.data_folder, "{}.csv".format(id))
X,nutzcodes = self.load(csvfile)
y = self.applyclassmapping(nutzcodes=nutzcodes)
else:
X = self.X[idx]
y = np.array([self.y[idx]] * X.shape[0]) # repeat y for each entry in x
# pad up to maximum sequence length
t = X.shape[0]
if self.samplet is None:
npad = self.sequencelengths.max() - t
X = np.pad(X,[(0,npad), (0,0)],'constant', constant_values=PADDING_VALUE)
y = np.pad(y, (0, npad), 'constant', constant_values=PADDING_VALUE)
else:
idxs = np.random.choice(t, self.samplet, replace=False)
idxs.sort()
X = X[idxs]
y = y[idxs]
X = torch.from_numpy(X).type(torch.FloatTensor)
y = torch.from_numpy(y).type(torch.LongTensor)
return X, y, self.ids[idx]
if __name__=="__main__":
root = "/data/BavarianCrops"
classmapping = "/data/BavarianCrops/classmapping.isprs.csv"
train = BavarianCropsDataset(root="/data/BavarianCrops",
region="holl",
partition="train",
scheme="blocks",
classmapping = classmapping,
samplet=50)
test = BavarianCropsDataset(root="/data/BavarianCrops",
region="holl",
partition="test",
scheme="blocks",
classmapping = classmapping,
samplet=50)
train = BavarianCropsDataset(root="/data/BavarianCrops",
region="holl",
partition="valid",
scheme="blocks",
classmapping = classmapping,
samplet=50)
trainvalid = BavarianCropsDataset(root="/data/BavarianCrops",
region="holl",
partition="trainvalid",
scheme="blocks",
classmapping = classmapping,
samplet=50,)
|
the-stack_0_19562
|
import requests
from bs4 import BeautifulSoup
baseQuery = 'https://patents.google.com/xhr/result'
params = {'id': 'patent/US10343279B2/en',
'qs': 'q=Deep+learning+neural+interfaces&oq=Deep+learning+neural+interfaces'}
response = requests.get(baseQuery, params=params)
soup = BeautifulSoup(response.text, 'lxml')
"""
<span itemprop="title">Navigational control of robotic systems and other computer-implemented processes using developmental network with turing machine learning</span>
<dd itemprop="publicationNumber">US10343279B2</dd>
<dd itemprop="countryCode">US</dd>
<dd itemprop="countryName">United States</dd>
<dd itemprop="priorArtKeywords" repeat>area</dd>
<dd itemprop="priorArtKeywords" repeat>developmental</dd>
<dd itemprop="priorArtKeywords" repeat>turing machine</dd>
<dd itemprop="priorArtKeywords" repeat>weights</dd>
<dd itemprop="priorArtKeywords" repeat>emergent</dd>
<dd><time itemprop="priorArtDate" datetime="2015-07-10">2015-07-10</time></dd>
<dd itemprop="inventor" repeat>Juyang Weng</dd>
<dd itemprop="inventor" repeat>Zejia Zheng</dd>
<dd itemprop="inventor" repeat>Xie He</dd>
<dt>Current Assignee (The listed assignees may be inaccurate. Google has not performed a legal analysis and makes no representation or warranty as to the accuracy of the list.)</dt>
<dd itemprop="assigneeCurrent" repeat>
Michigan State University MSU
</dd>
<dd itemprop="assigneeOriginal" repeat>Michigan State University MSU</dd>
<dd><time itemprop="priorityDate" datetime="2015-07-10">2015-07-10</time></dd>
<dt>Filing date</dt>
<dd><time itemprop="filingDate" datetime="2016-07-08">2016-07-08</time></dd>
<dt>Publication date</dt>
<dd><time itemprop="publicationDate" datetime="2019-07-09">2019-07-09</time></dd>
<div class="abstract">
"""
title = soup.find('span', {'itemprop': "title"})
print(title.text.strip())
publicationNumber = soup.find("dd", {"itemprop": "publicationNumber"})
print(publicationNumber.text)
countryCode = soup.find("dd", {"itemprop": "countryCode"})
print(countryCode.text)
countryName = soup.find("dd", {"itemprop": "countryName"})
print(countryName.text)
priorArtKeywords = soup.find_all("dd", {"itemprop": "priorArtKeywords"})
print(priorArtKeywords)
priorArtDate = soup.find("time", {"itemprop": "priorArtDate"})
print(priorArtDate.text)
inventors = soup.find_all("dd", {"itemprop": "inventor"})
print(inventors)
assigneeCurrent = soup.find("dd", {"itemprop": "assigneeCurrent"})
print(assigneeCurrent.text.strip() if assigneeCurrent is not None else None)
assigneeOriginal = soup.find("dd", {"itemprop": "assigneeOriginal"})
print(assigneeOriginal.text)
priorityDate = soup.find("time", {"itemprop": "priorityDate"})
print(priorityDate.text)
filingDate = soup.find("time", {"itemprop": "filingDate"})
print(filingDate.text)
publicationDate = soup.find("time", {"itemprop": "publicationDate"})
print("publicationDate", publicationDate.text)
abstract = soup.find("div", {"class": "abstract"})
print("abstract", abstract.text)
|
the-stack_0_19563
|
import pandas as pd
import rdt
from copulas.univariate import BetaUnivariate
from sdv.demo import load_demo
from sdv.relational import HMA1
from sdv.tabular import GaussianCopula
def test_sdv_model_kwargs():
metadata, tables = load_demo(metadata=True)
tables = {'users': tables['users']}
metadata = metadata.to_dict()
del metadata['tables']['sessions']
del metadata['tables']['transactions']
hma = HMA1(metadata, model=GaussianCopula, model_kwargs={
'default_distribution': 'beta',
'categorical_transformer': 'label_encoding',
})
hma.fit(tables)
model = hma._models['users']
assert model._default_distribution == BetaUnivariate
assert model._DTYPE_TRANSFORMERS['O'] == 'label_encoding'
assert isinstance(
model._metadata._hyper_transformer._transformers['gender'],
rdt.transformers.categorical.LabelEncodingTransformer
)
def test_ids_only_child():
"""Ensure tables with nothing else than ids can be modeled and sampled."""
parent = pd.DataFrame({
'parent_id': range(10),
})
pk_child = pd.DataFrame({
'child_id': range(10),
'parent_id': range(10),
})
no_pk_child = pd.DataFrame({
'parent_id': range(10),
})
metadata = {
'tables': {
'parent': {
'fields': {
'parent_id': {
'type': 'id',
},
},
'primary_key': 'parent_id',
},
'pk_child': {
'fields': {
'child_id': {
'type': 'id',
},
'parent_id': {
'type': 'id',
'ref': {
'table': 'parent',
'field': 'field_id'
}
},
},
'primary_key': 'child_id',
},
'no_pk_child': {
'fields': {
'parent_id': {
'type': 'id',
'ref': {
'table': 'parent',
'field': 'field_id'
}
},
},
},
}
}
tables = {
'parent': parent,
'pk_child': pk_child,
'no_pk_child': no_pk_child,
}
hma1 = HMA1(metadata=metadata)
hma1.fit(tables)
sampled = hma1.sample()
assert set(sampled.keys()) == {'parent', 'pk_child', 'no_pk_child'}
for name, table in tables.items():
assert table.shape == sampled[name].shape
assert table.columns.tolist() == sampled[name].columns.tolist()
|
the-stack_0_19564
|
#!/usr/bin/env python3
# Dependencies from the Python 3 standard library:
import os
import subprocess as sp
from shutil import copyfile
# Dependencies from the Scipy stack https://www.scipy.org/stackspec.html :
import numpy as np
import matplotlib.pyplot as plt
# Dependencies from https://github.com/AndrewGYork/remote_refocus/figure_generation :
import np_tif
## Set/create directories
input_directory = os.path.abspath(os.path.join(
os.getcwd(), os.pardir, os.pardir, 'big_data_input', 'uFchip'))
temp_directory = os.path.abspath(os.path.join(
os.getcwd(), os.pardir, os.pardir, 'temp'))
if not os.path.isdir(temp_directory): os.mkdir(temp_directory)
temp_directory = os.path.join(temp_directory, 'uFchip')
if not os.path.isdir(temp_directory): os.mkdir(temp_directory)
output_directory = os.path.abspath(os.path.join(
os.getcwd(), os.pardir, os.pardir, 'big_data_output'))
if not os.path.isdir(output_directory): os.mkdir(output_directory)
output_directory = os.path.join(output_directory, 'uFchip')
if not os.path.isdir(output_directory): os.mkdir(output_directory)
## Set input file name and acquisition parameters for processing
input_filename = ('transmitted_z_step_10_uF2.tif')
cropped_filename = os.path.splitext(input_filename)[0] + '_cropped.tif'
input_filename = os.path.join(input_directory, input_filename)
cropped_filename = os.path.join(temp_directory, cropped_filename)
num_tps = 1000 # Number of time points in series
left_crop = 400
right_crop = 750
top_crop = 0
bottom_crop = 0
## If cropped file exists then load
if os.path.exists(cropped_filename):
print('Found cropped tif, loading...', end='', sep='')
data = np_tif.tif_to_array(cropped_filename)
print('done')
print('tif shape (t, y, x) =', data.shape)
## If no file found then load original and crop
else:
print('Loading original file...', end='', sep='')
data = np_tif.tif_to_array(input_filename)
print('done')
data = data.reshape((num_tps,) + data.shape[-2:])
print('tif shape (t, y, x) =', data.shape)
print('Cropping...', end='', sep='')
    if left_crop > 0:
        data = data[:, :, left_crop:]
    if right_crop > 0:
        data = data[:, :, :-right_crop]
    if top_crop > 0:
        data = data[:, top_crop:, :]
    if bottom_crop > 0:
        data = data[:, :-bottom_crop, :]
print('done')
print("Saving result...", end='', sep='')
np_tif.array_to_tif(
data.reshape(num_tps, data.shape[-2], data.shape[-1]),
cropped_filename, slices=1, channels=1, frames=num_tps)
print('done')
print('tif shape (t, y, x) =', data.shape)
## Choose parameters for video
current_frame = -1
xmargin = 0.01
ymargin = 0.025
space = 0.175
img_size = 0.5
max_intensity = 12500
wlw = 2 # white line width half amplitude
start_tp = 0 # remove uneventful beginning
stop_tp = 100 # remove uneventful end
xslice = 86 # choose slice along x for display
## Set output folder for images to make video
output_filename = os.path.join(temp_directory, 'img%06i.png')
## Make images for video
fig = plt.figure()
for t in range(wlw + start_tp, num_tps - wlw - stop_tp):
plt.clf()
current_frame += 1
time = 0.001*t - 0.002 - 0.001*start_tp
print('time = ', time)
ax1 = plt.axes([0, 0.575, img_size, img_size])
ax1_data = data[t, :, :]
ax1.imshow(ax1_data, cmap='gray', vmin=0, vmax=max_intensity)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax1.get_yticklabels(), visible=False)
plt.setp(ax1.get_xticklines(), visible=False)
plt.setp(ax1.get_yticklines(), visible=False)
ax2 = plt.axes([0, 0, img_size, 1.5*img_size])
ax2_data = np.zeros((data.shape[0], data.shape[2], 3)) # RGB
make_me_rgb = data[:, xslice, :].reshape(data.shape[0], data.shape[2], 1)
make_me_rgb = np.clip(make_me_rgb/max_intensity, 0, 1) # Normalize to 0, 1
ax2_data[:, :, :] = make_me_rgb # Broadcast across all three color channels
white_line = ax2_data[t-wlw:t+wlw, :, :]
yellow = np.array([1, 1, 0])
white_line[0, :, :] = 0.5*yellow # Woo, broadcasting.
white_line[1, :, :] = 1.0*yellow
white_line[2, :, :] = 1.0*yellow
white_line[3, :, :] = 0.5*yellow
ax2.imshow(ax2_data)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax2.get_xticklines(), visible=False)
plt.setp(ax2.get_yticklines(), visible=False)
if 50 < t < 275:
plt.figtext(xmargin, ymargin + 3.9*space, 'Microscope',
color='yellow', family='monospace')
else:
plt.figtext(xmargin, ymargin + 3.9*space, 'Microscope',
color='white', family='monospace')
if 475 < t < 625:
plt.figtext(xmargin, ymargin + 2*space, 'Stage',
color='yellow', family='monospace')
else:
plt.figtext(xmargin, ymargin + 2*space, 'Stage',
color='white', family='monospace')
if 725 < t < 800:
plt.figtext(xmargin, ymargin + space, 'Remote',
color='yellow', family='monospace')
else:
plt.figtext(xmargin, ymargin + space, 'Remote',
color='white', family='monospace')
plt.figtext(xmargin, ymargin, 't=%6ss'%('%0.3f'%time),
color='yellow', family='monospace')
plt.savefig(output_filename%current_frame, bbox_inches='tight')
plt.close(fig)
## Choose 'poster' image and copy to video location
copyfile(os.path.join(temp_directory, 'img000250.png'),
os.path.join(output_directory, 'poster.png'))
## Make video from images
print("Converting images to mp4...", end='')
convert_command = [
'ffmpeg', '-y', # auto overwrite files
'-r', '50', # frame rate
'-f', 'image2', # format is image sequence
'-i', os.path.join(temp_directory,
'img%06d.png'), # image sequence name
'-movflags', 'faststart', # internet optimisation...(?)
'-pix_fmt', 'yuv420p', # cross browser compatibility
'-vcodec', 'libx264', # codec choice
'-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2', # even pixel number (important)
'-preset', 'veryslow', # take time and compress to max
'-crf', '25', # image quality vs file size
os.path.join(output_directory, 'figure.mp4')] # output file name
try:
with open('conversion_messages.txt', 'wt') as f:
        f.write("So far, everything's fine...\n")
f.flush()
sp.check_call(convert_command, stderr=f, stdout=f)
f.flush()
os.remove('conversion_messages.txt')
except: # This is unlikely to be platform independent :D
print("MP4 conversion failed. Is ffmpeg installed?")
raise
print('"figure.mp4" created')
print('done.')
## This is a default figure so copy to 'images' in 'master' directory
image_directory = os.path.abspath(os.path.join(
os.getcwd(), os.pardir, os.pardir, 'master', 'images', 'uFchip'))
if not os.path.isdir(image_directory): os.mkdir(image_directory)
copyfile(os.path.join(output_directory, 'figure.mp4'),
os.path.join(image_directory, 'figure.mp4'))
copyfile(os.path.join(output_directory, 'poster.png'),
os.path.join(image_directory, 'poster.png'))
|
the-stack_0_19565
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tripleo_common.actions import package_update
from tripleo_common import constants
from tripleo_common.tests import base
class ClearBreakpointsActionTest(base.TestCase):
def setUp(self,):
super(ClearBreakpointsActionTest, self).setUp()
self.stack_id = 'stack_id'
self.refs = 'refs'
@mock.patch('tripleo_common.actions.package_update.PackageUpdateManager')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_compute_client')
def test_run(self, mock_compute_client,
mock_orchestration_client,
mock_update_manager):
mock_ctx = mock.MagicMock()
action = package_update.ClearBreakpointsAction(self.stack_id,
self.refs)
result = action.run(mock_ctx)
self.assertEqual(None, result)
mock_compute_client.assert_called_once()
mock_orchestration_client.assert_called_once()
mock_update_manager.assert_called_once_with(
mock_orchestration_client(),
mock_compute_client(),
self.stack_id,
stack_fields={})
mock_update_manager().clear_breakpoints.assert_called_once_with(
self.refs)
class UpdateStackActionTest(base.TestCase):
def setUp(self,):
super(UpdateStackActionTest, self).setUp()
self.timeout = 1
self.container = 'container'
@mock.patch('tripleo_common.actions.templates.ProcessTemplatesAction.run')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_compute_client')
@mock.patch('tripleo_common.actions.package_update.time')
@mock.patch('heatclient.common.template_utils.get_template_contents')
def test_run(self, mock_template_contents,
mock_time,
mock_compute_client,
mock_orchestration_client,
mock_object_client,
mock_templates_run):
mock_ctx = mock.MagicMock()
heat = mock.MagicMock()
heat.stacks.get.return_value = mock.MagicMock(
stack_name='stack', id='stack_id')
mock_orchestration_client.return_value = heat
mock_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_swift = mock.MagicMock()
mock_env = """environments:
- path: environments/test.yaml
name: container
parameter_defaults:
random_data: a_value
temp_environment: temp_environment
template: template
"""
mock_swift.get_object.return_value = ({}, mock_env)
mock_object_client.return_value = mock_swift
# freeze time at datetime.datetime(2016, 9, 8, 16, 24, 24)
mock_time.time.return_value = 1473366264
mock_templates_run.return_value = {
'StackAction': 'UPDATE',
'DeployIdentifier': 1473366264,
'UpdateIdentifier': 1473366264
}
action = package_update.UpdateStackAction(self.timeout,
container=self.container)
action.run(mock_ctx)
# verify parameters are as expected
updated_mock_env = """environments:
- path: environments/test.yaml
name: container
parameter_defaults:
DeployIdentifier: 1473366264
StackAction: UPDATE
UpdateIdentifier: 1473366264
random_data: a_value
temp_environment: temp_environment
template: template
"""
mock_swift.put_object.assert_called_once_with(
self.container, constants.PLAN_ENVIRONMENT, updated_mock_env
)
heat.stacks.update.assert_called_once_with(
'stack_id',
StackAction='UPDATE',
DeployIdentifier=1473366264,
UpdateIdentifier=1473366264,
existing='true',
timeout_mins=1,
environment={
'resource_registry': {
'resources': {
'*': {
'*': {'UpdateDeployment': {'hooks': 'pre-update'}}
}
}
}
})
|
the-stack_0_19566
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ********************************************************
# Author : huangming
# Email : [email protected]
# Last modified : 2018-05-08 08:52
# Filename : log.py
# Description : py3
# ********************************************************
'''Implements a simple log library.
This module is a simple encapsulation of the logging module that provides a more
convenient interface for writing logs. Log messages are both printed to stdout and
written to a log file. It offers a flexible yet simple way to configure the log
behaviour. See the examples below:
Example 1: Use default settings
import log
log = log.Log(cmdlevel='info')
log.debug('hello, world')
log.info('hello, world')
log.error('hello, world')
log.critical('hello, world')
Result:
Print all log messages to file, and only print log whose level is greater
than ERROR to stdout. The log file is located in 'xxx.log' if the module
name is xxx.py. The default log file handler is size-rotated: once the log
file grows beyond the configured limit, it is rolled over.
Example 2: Use set_logger to change settings
# Change limit size in bytes of default rotating action
    log.set_logger(limit = 10240) # 10 KB (the limit is in bytes)
# Use time-rotated file handler, each day has a different log file, see
# logging.handlers.TimedRotatingFileHandler for more help about 'when'
log.set_logger(when = 'D', limit = 1)
# Use normal file handler (not rotated)
log.set_logger(backup_count = 0)
# File log level set to INFO, and stdout log level set to DEBUG
log.set_logger(cmdlevel = 'DEBUG', filelevel = 'INFO')
# Change default log file name and log mode
log.set_logger(filename = 'yyy.log', mode = 'w')
# Change default log formatter
log.set_logger(cmdfmt = '[%(levelname)s] %(message)s')
'''
__author__ = "Mingo <[email protected]>"
__status__ = "Development"
__all__ = ['set_logger', 'debug', 'info', 'warning', 'error',
'critical', 'exception']
import os
import sys
import traceback
import logging
import logging.handlers
class ColoredFormatter(logging.Formatter):
'''A colorful formatter.'''
def __init__(self, fmt = None, datefmt = None):
logging.Formatter.__init__(self, fmt, datefmt)
def format(self, record):
# Color escape string
COLOR_RED='\033[1;31m'
COLOR_GREEN='\033[1;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_BLUE='\033[1;34m'
COLOR_PURPLE='\033[1;35m'
COLOR_CYAN='\033[1;36m'
COLOR_GRAY='\033[1;37m'
COLOR_WHITE='\033[1;38m'
COLOR_RESET='\033[1;0m'
# Define log color
LOG_COLORS = {
'DEBUG': '%s',
'INFO': COLOR_GREEN + '%s' + COLOR_RESET,
'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET,
'ERROR': COLOR_RED + '%s' + COLOR_RESET,
'CRITICAL': COLOR_RED + '%s' + COLOR_RESET,
'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET,
}
level_name = record.levelname
msg = logging.Formatter.format(self, record)
return LOG_COLORS.get(level_name, '%s') % msg
class Log():
def __init__(self, loggername='', filename = None, mode = 'a',
cmdlevel='DEBUG',
filelevel='INFO',
cmdfmt = '[%(asctime)s] %(filename)s line:%(lineno)d %(levelname)-8s%(message)s',
filefmt = '[%(asctime)s] %(levelname)-8s%(message)s',
cmddatefmt = '%H:%M:%S',
filedatefmt = '%Y-%m-%d %H:%M:%S',
backup_count = 0, limit = 20480, when = None, colorful = False):
self.filename = filename
self.loggername = loggername
if self.filename is None:
self.filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
self.filename = os.path.basename(self.filename.replace('.py', '.log'))
#self.filename = os.path.join('/tmp', self.filename)
if not os.path.exists(os.path.abspath(os.path.dirname(self.filename))):
os.makedirs(os.path.abspath(os.path.dirname(self.filename)))
self.mode = mode
self.cmdlevel = cmdlevel
self.filelevel = filelevel
if isinstance(self.cmdlevel, str):
self.cmdlevel = getattr(logging, self.cmdlevel.upper(), logging.DEBUG)
if isinstance(self.filelevel, str):
self.filelevel = getattr(logging, self.filelevel.upper(), logging.DEBUG)
self.filefmt = filefmt
self.cmdfmt = cmdfmt
self.filedatefmt = filedatefmt
self.cmddatefmt = cmddatefmt
self.backup_count = backup_count
self.limit = limit
self.when = when
self.colorful = colorful
self.logger = None
self.streamhandler = None
self.filehandler = None
if self.cmdlevel > 10:
self.filefmt = '[%(asctime)s] %(levelname)-8s%(message)s'
self.cmdfmt = '[%(asctime)s] %(levelname)-8s%(message)s'
self.cmddatefmt = '%Y-%m-%d %H:%M:%S'
self.set_logger(cmdlevel = self.cmdlevel)
def init_logger(self):
'''Reload the logger.'''
if self.logger is None:
self.logger = logging.getLogger(self.loggername)
else:
logging.shutdown()
self.logger.handlers = []
self.streamhandler = None
self.filehandler = None
self.logger.setLevel(logging.DEBUG)
def add_streamhandler(self):
'''Add a stream handler to the logger.'''
self.streamhandler = logging.StreamHandler()
self.streamhandler.setLevel(self.cmdlevel)
if self.colorful:
formatter = ColoredFormatter(self.cmdfmt, self.cmddatefmt)
else:
formatter = logging.Formatter(self.cmdfmt, self.cmddatefmt,)
self.streamhandler.setFormatter(formatter)
self.logger.addHandler(self.streamhandler)
def add_filehandler(self):
'''Add a file handler to the logger.'''
# Choose the filehandler based on the passed arguments
if self.backup_count == 0: # Use FileHandler
self.filehandler = logging.FileHandler(self.filename, self.mode)
elif self.when is None: # Use RotatingFileHandler
self.filehandler = logging.handlers.RotatingFileHandler(self.filename,
self.mode, self.limit, self.backup_count)
else: # Use TimedRotatingFileHandler
self.filehandler = logging.handlers.TimedRotatingFileHandler(self.filename,
self.when, 1, self.backup_count)
self.filehandler.setLevel(self.filelevel)
formatter = logging.Formatter(self.filefmt, self.filedatefmt,)
self.filehandler.setFormatter(formatter)
self.logger.addHandler(self.filehandler)
def set_logger(self, **kwargs):
'''Configure the logger.'''
keys = ['mode','cmdlevel','filelevel','filefmt','cmdfmt',\
'filedatefmt','cmddatefmt','backup_count','limit',\
'when','colorful']
for (key, value) in kwargs.items():
if not (key in keys):
return False
setattr(self, key, value)
if isinstance(self.cmdlevel, str):
self.cmdlevel = getattr(logging, self.cmdlevel.upper(), logging.DEBUG)
if isinstance(self.filelevel, str):
self.filelevel = getattr(logging, self.filelevel.upper(), logging.DEBUG)
        if "cmdfmt" not in kwargs:
self.filefmt='[%(asctime)s] %(filename)s line:%(lineno)d %(levelname)-8s%(message)s'
self.filedatefmt = '%Y-%m-%d %H:%M:%S'
self.cmdfmt='[%(asctime)s] %(filename)s line:%(lineno)d %(levelname)-8s%(message)s'
self.cmddatefmt = '%H:%M:%S'
if self.cmdlevel > 10:
self.filefmt = '[%(asctime)s] %(levelname)-8s%(message)s'
self.cmdfmt = '[%(asctime)s] %(levelname)-8s%(message)s'
self.cmddatefmt = '%Y-%m-%d %H:%M:%S'
self.init_logger()
if self.cmdlevel>0:
self.add_streamhandler()
if self.filelevel>0:
self.add_filehandler()
# Import the common log functions for convenient
self.import_log_funcs()
return True
def addFileLog(self,log):
self.logger.addHandler(log.filehandler)
return self
def import_log_funcs(self):
'''Import the common log functions from the logger to the class'''
log_funcs = ['debug', 'info', 'warning', 'error', 'critical',
'exception']
for func_name in log_funcs:
func = getattr(self.logger, func_name)
setattr(self, func_name, func)
def trace(self):
info = sys.exc_info()
for file, lineno, function, text in traceback.extract_tb(info[2]):
self.error('%s line:%s in %s:%s' % (file, lineno, function, text))
self.error('%s: %s' % info[:2])
if __name__ == '__main__':
log = Log(cmdlevel='info',colorful = True)
err_log = Log('haha',cmdlevel='info',filename = 'log/tmp.log',backup_count = 1,when = 'D')
# log = Log(cmdlevel='debug')
log.set_logger(cmdlevel='debug')
# log = log.addFileLog(err_log)
for i in range(100000):
log.debug('debug')
err_log.debug('debug')
log.info('debug%s' % 'haha')
err_log.info('info%s' % 'haha')
log.error((1,2))
log.error('debug')
log.info({'a':1,'b':2})
os.system("pause")
class A():
def __init__(self, log):
self.log = log
def a(self,a):
self.log.info(a)
class B():
def __init__(self, log):
self.log = log
def b(self,a):
self.log.info(a)
a = A(log)
a.a("test a")
b = B(log)
b.b(5)
def fun(a):
return 10/a
try:
a = fun(0)
except:
log.trace()
|
the-stack_0_19567
|
# util/langhelpers.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
from functools import update_wrapper
import hashlib
import inspect
import itertools
import operator
import re
import sys
import textwrap
import types
import warnings
from . import _collections
from . import compat
from .. import exc
def md5_hex(x):
if compat.py3k:
x = x.encode("utf-8")
m = hashlib.md5()
m.update(x)
return m.hexdigest()
class safe_reraise(object):
"""Reraise an exception after invoking some
handler code.
Stores the existing exception info before
invoking so that it is maintained across a potential
coroutine context switch.
e.g.::
try:
sess.commit()
except:
with safe_reraise():
sess.rollback()
"""
__slots__ = ("warn_only", "_exc_info")
def __init__(self, warn_only=False):
self.warn_only = warn_only
def __enter__(self):
self._exc_info = sys.exc_info()
def __exit__(self, type_, value, traceback):
# see #2703 for notes
if type_ is None:
exc_type, exc_value, exc_tb = self._exc_info
self._exc_info = None # remove potential circular references
if not self.warn_only:
compat.raise_(
exc_value,
with_traceback=exc_tb,
)
else:
if not compat.py3k and self._exc_info and self._exc_info[1]:
# emulate Py3K's behavior of telling us when an exception
# occurs in an exception handler.
warn(
"An exception has occurred during handling of a "
"previous exception. The previous exception "
"is:\n %s %s\n" % (self._exc_info[0], self._exc_info[1])
)
self._exc_info = None # remove potential circular references
compat.raise_(value, with_traceback=traceback)
def clsname_as_plain_name(cls):
return " ".join(
n.lower() for n in re.findall(r"([A-Z][a-z]+)", cls.__name__)
)
def decode_slice(slc):
"""decode a slice object as sent to __getitem__.
takes into account the 2.5 __index__() method, basically.
"""
ret = []
for x in slc.start, slc.stop, slc.step:
if hasattr(x, "__index__"):
x = x.__index__()
ret.append(x)
return tuple(ret)
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain(
(base,),
compat.itertools_imap(lambda i: base + str(i), range(1000)),
)
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def map_bits(fn, n):
"""Call the given function given each nonzero bit from n."""
while n:
b = n & (~n + 1)
yield fn(b)
n ^= b
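# Minimal illustration of map_bits (values are illustrative): on each iteration
# the lowest remaining set bit of n is isolated via n & (~n + 1) and passed to fn.
#
#   list(map_bits(lambda bit: bit, 10))   # 10 == 0b1010 -> [2, 8]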
def decorator(target):
"""A signature-matching decorator factory."""
def decorate(fn):
if not inspect.isfunction(fn) and not inspect.ismethod(fn):
raise Exception("not a decoratable function")
spec = compat.inspect_getfullargspec(fn)
names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
targ_name, fn_name = _unique_symbols(names, "target", "fn")
metadata = dict(target=targ_name, fn=fn_name)
metadata.update(format_argspec_plus(spec, grouped=False))
metadata["name"] = fn.__name__
code = (
"""\
def %(name)s(%(args)s):
return %(target)s(%(fn)s, %(apply_kw)s)
"""
% metadata
)
decorated = _exec_code_in_env(
code, {targ_name: target, fn_name: fn}, fn.__name__
)
decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
return update_wrapper(decorate, target)
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def public_factory(target, location, class_location=None):
"""Produce a wrapping function for the given cls or classmethod.
Rationale here is so that the __init__ method of the
class can serve as documentation for the function.
"""
if isinstance(target, type):
fn = target.__init__
callable_ = target
doc = (
"Construct a new :class:`.%s` object. \n\n"
"This constructor is mirrored as a public API function; "
"see :func:`sqlalchemy%s` "
"for a full usage and argument description."
% (target.__name__, location)
)
else:
fn = callable_ = target
doc = (
"This function is mirrored; see :func:`sqlalchemy%s` "
"for a description of arguments." % location
)
location_name = location.split(".")[-1]
spec = compat.inspect_getfullargspec(fn)
del spec[0][0]
metadata = format_argspec_plus(spec, grouped=False)
metadata["name"] = location_name
code = (
"""\
def %(name)s(%(args)s):
return cls(%(apply_kw)s)
"""
% metadata
)
env = {"cls": callable_, "symbol": symbol}
exec(code, env)
decorated = env[location_name]
if hasattr(fn, "_linked_to"):
linked_to, linked_to_location = fn._linked_to
linked_to_doc = linked_to.__doc__
if class_location is None:
class_location = "%s.%s" % (target.__module__, target.__name__)
linked_to_doc = inject_docstring_text(
linked_to_doc,
".. container:: inherited_member\n\n "
"Inherited from :func:`sqlalchemy%s`; this constructor "
"creates a :class:`%s` object"
% (linked_to_location, class_location),
1,
)
decorated.__doc__ = linked_to_doc
else:
decorated.__doc__ = fn.__doc__
decorated.__module__ = "sqlalchemy_1_3" + location.rsplit(".", 1)[0]
if decorated.__module__ not in sys.modules:
raise ImportError(
"public_factory location %s is not in sys.modules"
% (decorated.__module__,)
)
if compat.py2k or hasattr(fn, "__func__"):
fn.__func__.__doc__ = doc
if not hasattr(fn.__func__, "_linked_to"):
fn.__func__._linked_to = (decorated, location)
else:
fn.__doc__ = doc
if not hasattr(fn, "_linked_to"):
fn._linked_to = (decorated, location)
return decorated
class PluginLoader(object):
def __init__(self, group, auto_fn=None):
self.group = group
self.impls = {}
self.auto_fn = auto_fn
def clear(self):
self.impls.clear()
def load(self, name):
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
try:
import pkg_resources
except ImportError:
pass
else:
for impl in pkg_resources.iter_entry_points(self.group, name):
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" % (self.group, name)
)
def register(self, name, modulepath, objname):
def load():
mod = compat.import_(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
def _inspect_func_args(fn):
try:
co_varkeywords = inspect.CO_VARKEYWORDS
except AttributeError:
# https://docs.python.org/3/library/inspect.html
# The flags are specific to CPython, and may not be defined in other
# Python implementations. Furthermore, the flags are an implementation
# detail, and can be removed or deprecated in future Python releases.
spec = compat.inspect_getfullargspec(fn)
return spec[0], bool(spec[2])
else:
# use fn.__code__ plus flags to reduce method call overhead
co = fn.__code__
nargs = co.co_argcount
return (
list(co.co_varnames[:nargs]),
bool(co.co_flags & co_varkeywords),
)
def get_cls_kwargs(cls, _set=None):
r"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed
to pass along unrecognized keywords to its base classes, and the
collection process is repeated recursively on each of the bases.
Uses a subset of inspect.getfullargspec() to cut down on method overhead,
as this is used within the Core typing system to create copies of type
objects which is a performance-sensitive operation.
No anonymous tuple arguments please !
"""
toplevel = _set is None
if toplevel:
_set = set()
ctr = cls.__dict__.get("__init__", False)
has_init = (
ctr
and isinstance(ctr, types.FunctionType)
and isinstance(ctr.__code__, types.CodeType)
)
if has_init:
names, has_kw = _inspect_func_args(ctr)
_set.update(names)
if not has_kw and not toplevel:
return None
if not has_init or has_kw:
for c in cls.__bases__:
if get_cls_kwargs(c, _set) is None:
break
_set.discard("self")
return _set
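# A hedged usage sketch for get_cls_kwargs; Base/Sub are illustrative classes,
# not part of this module. Because Sub.__init__ declares **kw, the bases are
# probed recursively and their named arguments are collected too:
#
#   class Base(object):
#       def __init__(self, a, b=1, **kw):
#           pass
#   class Sub(Base):
#       def __init__(self, c, **kw):
#           super(Sub, self).__init__(**kw)
#
#   get_cls_kwargs(Sub)   # -> {'a', 'b', 'c'}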
def get_func_kwargs(func):
"""Return the set of legal kwargs for the given `func`.
Uses getargspec so is safe to call for methods, functions,
etc.
"""
return compat.inspect_getfullargspec(func)[0]
def get_callable_argspec(fn, no_self=False, _is_init=False):
"""Return the argument signature for any callable.
All pure-Python callables are accepted, including
functions, methods, classes, objects with __call__;
builtins and other edge cases like functools.partial() objects
raise a TypeError.
"""
if inspect.isbuiltin(fn):
raise TypeError("Can't inspect builtin: %s" % fn)
elif inspect.isfunction(fn):
if _is_init and no_self:
spec = compat.inspect_getfullargspec(fn)
return compat.FullArgSpec(
spec.args[1:],
spec.varargs,
spec.varkw,
spec.defaults,
spec.kwonlyargs,
spec.kwonlydefaults,
spec.annotations,
)
else:
return compat.inspect_getfullargspec(fn)
elif inspect.ismethod(fn):
if no_self and (_is_init or fn.__self__):
spec = compat.inspect_getfullargspec(fn.__func__)
return compat.FullArgSpec(
spec.args[1:],
spec.varargs,
spec.varkw,
spec.defaults,
spec.kwonlyargs,
spec.kwonlydefaults,
spec.annotations,
)
else:
return compat.inspect_getfullargspec(fn.__func__)
elif inspect.isclass(fn):
return get_callable_argspec(
fn.__init__, no_self=no_self, _is_init=True
)
elif hasattr(fn, "__func__"):
return compat.inspect_getfullargspec(fn.__func__)
elif hasattr(fn, "__call__"):
if inspect.ismethod(fn.__call__):
return get_callable_argspec(fn.__call__, no_self=no_self)
else:
raise TypeError("Can't inspect callable: %s" % fn)
else:
raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
"""Returns a dictionary of formatted, introspected function arguments.
    An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
if compat.callable(fn):
spec = compat.inspect_getfullargspec(fn)
else:
spec = fn
args = compat.inspect_formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = "%s[0]" % spec[1]
else:
self_arg = None
apply_pos = compat.inspect_formatargspec(
spec[0], spec[1], spec[2], None, spec[4]
)
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
if spec[4]:
num_defaults += len(spec[4])
name_args = spec[0] + spec[4]
if num_defaults:
defaulted_vals = name_args[0 - num_defaults :]
else:
defaulted_vals = ()
apply_kw = compat.inspect_formatargspec(
name_args,
spec[1],
spec[2],
defaulted_vals,
formatvalue=lambda x: "=" + x,
)
if grouped:
return dict(
args=args,
self_arg=self_arg,
apply_pos=apply_pos,
apply_kw=apply_kw,
)
else:
return dict(
args=args[1:-1],
self_arg=self_arg,
apply_pos=apply_pos[1:-1],
apply_kw=apply_kw[1:-1],
)
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
if method is object.__init__:
args = grouped and "(self)" or "self"
else:
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
args = (
grouped
and "(self, *args, **kwargs)"
or "self, *args, **kwargs"
)
return dict(self_arg="self", args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return compat.inspect_getfullargspec(method)
except TypeError:
if method is object.__init__:
return (["self"], None, None, None)
else:
return (["self"], "args", "kwargs", None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not
required.
"""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
return func_or_cls.__func__
else:
return func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
spec = compat.inspect_getfullargspec(insp.__init__)
except TypeError:
continue
else:
default_len = spec.defaults and len(spec.defaults) or 0
if i == 0:
if spec.varargs:
vargs = spec.varargs
if default_len:
pos_args.extend(spec.args[1:-default_len])
else:
pos_args.extend(spec.args[1:])
else:
kw_args.update(
[(arg, missing) for arg in spec.args[1:-default_len]]
)
if default_len:
kw_args.update(
[
(arg, default)
for arg, default in zip(
spec.args[-default_len:], spec.defaults
)
]
)
output = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
if arg in omit_kwarg:
continue
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append("%s=%r" % (arg, val))
except Exception:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append("%s=%r" % (arg, val))
except Exception:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
"""Turn an instancemethod into a (parent, name) pair
to produce a serializable callable.
"""
__slots__ = "target", "name", "kwargs", "__weakref__"
def __getstate__(self):
return {
"target": self.target,
"name": self.name,
"kwargs": self.kwargs,
}
def __setstate__(self, state):
self.target = state["target"]
self.name = state["name"]
self.kwargs = state.get("kwargs", ())
def __init__(self, meth, kwargs=()):
self.target = meth.__self__
self.name = meth.__name__
self.kwargs = kwargs
def __call__(self, *arg, **kw):
kw.update(self.kwargs)
return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
Old-style classes are discarded and hierarchies rooted on them
will not be descended.
"""
if compat.py2k:
if isinstance(cls, types.ClassType):
return list()
hier = {cls}
process = list(cls.__mro__)
while process:
c = process.pop()
if compat.py2k:
if isinstance(c, types.ClassType):
continue
bases = (
_
for _ in c.__bases__
if _ not in hier and not isinstance(_, types.ClassType)
)
else:
bases = (_ for _ in c.__bases__ if _ not in hier)
for b in bases:
process.append(b)
hier.add(b)
if compat.py3k:
if c.__module__ == "builtins" or not hasattr(c, "__subclasses__"):
continue
else:
if c.__module__ == "__builtin__" or not hasattr(
c, "__subclasses__"
):
continue
for s in [_ for _ in c.__subclasses__() if _ not in hier]:
process.append(s)
hier.add(s)
return list(hier)
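# A hedged sketch of class_hierarchy on a small diamond; the classes are
# illustrative. Both bases (via __mro__/__bases__) and subclasses (via
# __subclasses__) are gathered, in no particular order:
#
#   class Base(object): pass
#   class Left(Base): pass
#   class Right(Base): pass
#   class Leaf(Left, Right): pass
#
#   class_hierarchy(Left)   # -> contains Base, Left, Right, Leaf and object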
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
def monkeypatch_proxied_specials(
into_cls,
from_cls,
skip=None,
only=None,
name="self.proxy",
from_instance=None,
):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = (
"__slots__",
"__del__",
"__getattribute__",
"__metaclass__",
"__getstate__",
"__setstate__",
)
dunders = [
m
for m in dir(from_cls)
if (
m.startswith("__")
and m.endswith("__")
and not hasattr(into_cls, m)
and m not in skip
)
]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, "__call__"):
continue
fn = getattr(fn, "im_func", fn)
except AttributeError:
continue
try:
spec = compat.inspect_getfullargspec(fn)
fn_args = compat.inspect_formatargspec(spec[0])
d_args = compat.inspect_formatargspec(spec[0][1:])
except TypeError:
fn_args = "(self, *args, **kw)"
d_args = "(*args, **kw)"
py = (
"def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals()
)
env = from_instance is not None and {name: from_instance} or {}
compat.exec_(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
return getattr(meth1, "__func__", meth1) is getattr(
meth2, "__func__", meth2
)
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
If ``obj`` is a dict and ``dict`` does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
      ``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError("a class or collection of method names are required")
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith("_")])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not isinstance(obj, dict):
qualifier = complies is operator.gt and "any of" or "all of"
raise TypeError(
"%r does not implement %s: %s"
% (obj, qualifier, ", ".join(interface))
)
class AnonymousInterface(object):
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = "Anonymous" + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not compat.callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError(
"dictionary does not contain required keys %s"
% ", ".join(required - found)
)
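# A hedged usage sketch for as_interface; Connector is illustrative. A dict of
# callables is adapted onto an anonymous class whose members satisfy the
# interface defined by cls:
#
#   class Connector(object):
#       def connect(self): pass
#       def close(self): pass
#
#   impl = as_interface({"connect": lambda: None}, cls=Connector)
#   impl.connect()   # AnonymousConnector, with connect() as a staticmethod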
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
def _reset(self, obj):
memoized_property.reset(obj, self.__name__)
@classmethod
def reset(cls, obj, name):
obj.__dict__.pop(name, None)
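# A hedged usage sketch for memoized_property; Widget and load_config() are
# illustrative names, not part of this module:
#
#   class Widget(object):
#       @memoized_property
#       def config(self):
#           return load_config()   # evaluated once per instance
#
#   w = Widget()
#   w.config                              # computed, then stored in w.__dict__
#   w.config                              # served straight from w.__dict__
#   memoized_property.reset(w, "config")  # next access recomputes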
def memoized_instancemethod(fn):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def oneshot(self, *args, **kw):
result = fn(self, *args, **kw)
def memo(*a, **kw):
return result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
self.__dict__[fn.__name__] = memo
return result
return update_wrapper(oneshot, fn)
class group_expirable_memoized_property(object):
"""A family of @memoized_properties that can be expired in tandem."""
def __init__(self, attributes=()):
self.attributes = []
if attributes:
self.attributes.extend(attributes)
def expire_instance(self, instance):
"""Expire all memoized properties for *instance*."""
stash = instance.__dict__
for attribute in self.attributes:
stash.pop(attribute, None)
def __call__(self, fn):
self.attributes.append(fn.__name__)
return memoized_property(fn)
def method(self, fn):
self.attributes.append(fn.__name__)
return memoized_instancemethod(fn)
class MemoizedSlots(object):
"""Apply memoized items to an object using a __getattr__ scheme.
This allows the functionality of memoized_property and
memoized_instancemethod to be available to a class using __slots__.
"""
__slots__ = ()
def _fallback_getattr(self, key):
raise AttributeError(key)
def __getattr__(self, key):
if key.startswith("_memoized"):
raise AttributeError(key)
elif hasattr(self, "_memoized_attr_%s" % key):
value = getattr(self, "_memoized_attr_%s" % key)()
setattr(self, key, value)
return value
elif hasattr(self, "_memoized_method_%s" % key):
fn = getattr(self, "_memoized_method_%s" % key)
def oneshot(*args, **kw):
result = fn(*args, **kw)
def memo(*a, **kw):
return result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
setattr(self, key, memo)
return result
oneshot.__doc__ = fn.__doc__
return oneshot
else:
return self._fallback_getattr(key)
def dependency_for(modulename, add_to_all=False):
def decorate(obj):
tokens = modulename.split(".")
mod = compat.import_(
".".join(tokens[0:-1]), globals(), locals(), [tokens[-1]]
)
mod = getattr(mod, tokens[-1])
setattr(mod, obj.__name__, obj)
if add_to_all and hasattr(mod, "__all__"):
mod.__all__.append(obj.__name__)
return obj
return decorate
def asbool(obj):
if isinstance(obj, compat.string_types):
obj = obj.strip().lower()
if obj in ["true", "yes", "on", "y", "t", "1"]:
return True
elif obj in ["false", "no", "off", "n", "f", "0"]:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
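# Illustrative sketch (not part of the original module): asbool() accepts
# truthy/falsey strings in any case as well as ordinary Python objects.
def _demo_asbool():
    assert asbool(" Yes ") is True
    assert asbool("off") is False
    assert asbool(0) is False
    return asbool("1")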
def bool_or_str(*text):
"""Return a callable that will evaluate a string as
boolean, or one of a set of "alternate" string values.
"""
def bool_or_value(obj):
if obj in text:
return obj
else:
return asbool(obj)
return bool_or_value
def asint(value):
"""Coerce to integer."""
if value is None:
return value
return int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True, dest=None):
r"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if dest is None:
dest = kw
if (
key in kw
and (not isinstance(type_, type) or not isinstance(kw[key], type_))
and kw[key] is not None
):
if type_ is bool and flexi_bool:
dest[key] = asbool(kw[key])
else:
dest[key] = type_(kw[key])
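# Illustrative sketch (not part of the original module): coerce_kw_type()
# converts a value in place (or into ``dest``) only when the key is present
# and the value is not already of the requested type.
def _demo_coerce_kw_type():
    kw = {"port": "5432", "echo": "0"}
    coerce_kw_type(kw, "port", int)
    coerce_kw_type(kw, "echo", bool)  # flexi_bool treats "0" as False
    assert kw == {"port": 5432, "echo": False}
    return kw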
def constructor_copy(obj, cls, *args, **kw):
"""Instantiate cls using the __dict__ of obj as constructor arguments.
Uses inspect to match the named arguments of ``cls``.
"""
names = get_cls_kwargs(cls)
kw.update(
(k, obj.__dict__[k]) for k in names.difference(kw) if k in obj.__dict__
)
return cls(*args, **kw)
def counter():
"""Return a threadsafe counter function."""
lock = compat.threading.Lock()
counter = itertools.count(1)
# avoid the 2to3 "next" transformation...
def _next():
lock.acquire()
try:
return next(counter)
finally:
lock.release()
return _next
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, "__emulates__"):
# canonicalize set vs sets.Set to a standard: the builtin set
if specimen.__emulates__ is not None and issubclass(
specimen.__emulates__, set
):
return set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list):
return list
elif isa(specimen, set):
return set
elif isa(specimen, dict):
return dict
if hasattr(specimen, "append"):
return list
elif hasattr(specimen, "add"):
return set
elif hasattr(specimen, "set"):
return dict
else:
return default
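# Illustrative sketch (not part of the original module): the collection type is
# guessed from a class, an instance, or duck-typed methods.
def _demo_duck_type_collection():
    class AddOnly(object):
        def add(self, item):
            pass
    assert duck_type_collection([]) is list
    assert duck_type_collection(dict) is dict
    assert duck_type_collection(AddOnly()) is set
    return duck_type_collection(object(), default=None)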
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exc.ArgumentError(
"Argument '%s' is expected to be one of type %s, got '%s'"
% (name, " or ".join("'%s'" % a for a in argtype), type(arg))
)
else:
raise exc.ArgumentError(
"Argument '%s' is expected to be of type '%s', got '%s'"
% (name, argtype, type(arg))
)
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if compat.py3k:
if hasattr(dictlike, "items"):
return list(dictlike.items())
else:
if hasattr(dictlike, "iteritems"):
return dictlike.iteritems()
elif hasattr(dictlike, "items"):
return iter(dictlike.items())
getter = getattr(dictlike, "__getitem__", getattr(dictlike, "get", None))
if getter is None:
raise TypeError("Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, "iterkeys"):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, "keys"):
return iter((key, getter(key)) for key in dictlike.keys())
else:
raise TypeError("Object '%r' is not dict-like" % dictlike)
class classproperty(property):
"""A decorator that behaves like @property except that operates
on classes rather than instances.
The decorator is currently special when using the declarative
module, but note that the
:class:`~.sqlalchemy.ext.declarative.declared_attr`
decorator should be used for this purpose with declarative.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
class hybridproperty(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
clsval = self.func(owner)
clsval.__doc__ = self.func.__doc__
return clsval
else:
return self.func(instance)
class hybridmethod(object):
"""Decorate a function as cls- or instance- level."""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self.func.__get__(owner, owner.__class__)
else:
return self.func.__get__(instance, owner)
class _symbol(int):
def __new__(self, name, doc=None, canonical=None):
"""Construct a new named symbol."""
assert isinstance(name, compat.string_types)
if canonical is None:
canonical = hash(name)
v = int.__new__(_symbol, canonical)
v.name = name
if doc:
v.__doc__ = doc
return v
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return "symbol(%r)" % self.name
_symbol.__name__ = "symbol"
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
    symbol('foo')
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = compat.threading.Lock()
def __new__(cls, name, doc=None, canonical=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc, canonical)
return sym
finally:
symbol._lock.release()
@classmethod
def parse_user_argument(
cls, arg, choices, name, resolve_symbol_names=False
):
"""Given a user parameter, parse the parameter into a chosen symbol.
The user argument can be a string name that matches the name of a
symbol, or the symbol object itself, or any number of alternate choices
such as True/False/ None etc.
:param arg: the user argument.
:param choices: dictionary of symbol object to list of possible
entries.
:param name: name of the argument. Used in an :class:`.ArgumentError`
that is raised if the parameter doesn't match any available argument.
:param resolve_symbol_names: include the name of each symbol as a valid
entry.
"""
# note using hash lookup is tricky here because symbol's `__hash__`
# is its int value which we don't want included in the lookup
# explicitly, so we iterate and compare each.
for sym, choice in choices.items():
if arg is sym:
return sym
elif resolve_symbol_names and arg == sym.name:
return sym
elif arg in choice:
return sym
if arg is None:
return None
raise exc.ArgumentError("Invalid value for '%s': %r" % (name, arg))
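# Illustrative sketch (not part of the original module): symbol() interns named
# constants, so repeated calls with the same name return the identical object.
def _demo_symbol():
    cascade = symbol("CASCADE", doc="illustrative docstring")
    assert symbol("CASCADE") is cascade
    assert repr(cascade) == "symbol('CASCADE')"
    return cascade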
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to
a warning.
"""
try:
return func(*args, **kwargs)
except Exception:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def ellipses_string(value, len_=25):
try:
if len(value) > len_:
return "%s..." % value[0:len_]
else:
return value
except TypeError:
return value
class _hash_limit_string(compat.text_type):
"""A string subclass that can only be hashed on a maximum amount
of unique values.
This is used for warnings so that we can send out parameterized warnings
without the __warningregistry__ of the module, or the non-overridable
"once" registry within warnings.py, overloading memory,
"""
def __new__(cls, value, num, args):
interpolated = (value % args) + (
" (this warning may be suppressed after %d occurrences)" % num
)
self = super(_hash_limit_string, cls).__new__(cls, interpolated)
self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
return self
def __hash__(self):
return self._hash
def __eq__(self, other):
return hash(self) == hash(other)
def warn(msg):
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
"""
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def warn_limited(msg, args):
"""Issue a warning with a parameterized string, limiting the number
of registrations.
"""
if args:
msg = _hash_limit_string(msg, 10, args)
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def only_once(fn, retry_on_exception):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
# strong reference fn so that it isn't garbage collected,
# which interferes with the event system's expectations
strong_fn = fn # noqa
if once:
once_fn = once.pop()
try:
return once_fn(*arg, **kw)
except:
if retry_on_exception:
once.insert(0, once_fn)
raise
return go
_SQLA_RE = re.compile(r"sqlalchemy_1_3/([a-z_]+/){0,2}[a-z_]+\.py")
_UNITTEST_RE = re.compile(r"unit(?:2|test2?/)")
def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of
``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start : end + 1]
NoneType = type(None)
def attrsetter(attrname):
code = "def set(obj, value):" " obj.%s = value" % attrname
env = locals().copy()
exec(code, env)
return env["set"]
class EnsureKWArgType(type):
r"""Apply translation of functions to accept \**kw arguments if they
don't already.
"""
def __init__(cls, clsname, bases, clsdict):
fn_reg = cls.ensure_kwarg
if fn_reg:
for key in clsdict:
m = re.match(fn_reg, key)
if m:
fn = clsdict[key]
spec = compat.inspect_getfullargspec(fn)
if not spec.varkw:
clsdict[key] = wrapped = cls._wrap_w_kw(fn)
setattr(cls, key, wrapped)
super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict)
def _wrap_w_kw(self, fn):
def wrap(*arg, **kw):
return fn(*arg)
return update_wrapper(wrap, fn)
def wrap_callable(wrapper, fn):
"""Augment functools.update_wrapper() to work with objects with
a ``__call__()`` method.
:param fn:
object with __call__ method
"""
if hasattr(fn, "__name__"):
return update_wrapper(wrapper, fn)
else:
_f = wrapper
_f.__name__ = fn.__class__.__name__
if hasattr(fn, "__module__"):
_f.__module__ = fn.__module__
if hasattr(fn.__call__, "__doc__") and fn.__call__.__doc__:
_f.__doc__ = fn.__call__.__doc__
elif fn.__doc__:
_f.__doc__ = fn.__doc__
return _f
def quoted_token_parser(value):
"""Parse a dotted identifier with accommodation for quoted names.
Includes support for SQL-style double quotes as a literal character.
E.g.::
>>> quoted_token_parser("name")
["name"]
>>> quoted_token_parser("schema.name")
["schema", "name"]
>>> quoted_token_parser('"Schema"."Name"')
['Schema', 'Name']
>>> quoted_token_parser('"Schema"."Name""Foo"')
['Schema', 'Name""Foo']
"""
if '"' not in value:
return value.split(".")
# 0 = outside of quotes
# 1 = inside of quotes
state = 0
result = [[]]
idx = 0
lv = len(value)
while idx < lv:
char = value[idx]
if char == '"':
if state == 1 and idx < lv - 1 and value[idx + 1] == '"':
result[-1].append('"')
idx += 1
else:
state ^= 1
elif char == "." and state == 0:
result.append([])
else:
result[-1].append(char)
idx += 1
return ["".join(token) for token in result]
def add_parameter_text(params, text):
params = _collections.to_list(params)
def decorate(fn):
doc = fn.__doc__ is not None and fn.__doc__ or ""
if doc:
doc = inject_param_text(doc, {param: text for param in params})
fn.__doc__ = doc
return fn
return decorate
def _dedent_docstring(text):
split_text = text.split("\n", 1)
if len(split_text) == 1:
return text
else:
firstline, remaining = split_text
if not firstline.startswith(" "):
return firstline + "\n" + textwrap.dedent(remaining)
else:
return textwrap.dedent(text)
def inject_docstring_text(doctext, injecttext, pos):
doctext = _dedent_docstring(doctext or "")
lines = doctext.split("\n")
if len(lines) == 1:
lines.append("")
injectlines = textwrap.dedent(injecttext).split("\n")
if injectlines[0]:
injectlines.insert(0, "")
blanks = [num for num, line in enumerate(lines) if not line.strip()]
blanks.insert(0, 0)
inject_pos = blanks[min(pos, len(blanks) - 1)]
lines = lines[0:inject_pos] + injectlines + lines[inject_pos:]
return "\n".join(lines)
def inject_param_text(doctext, inject_params):
doclines = doctext.splitlines()
lines = []
to_inject = None
while doclines:
line = doclines.pop(0)
if to_inject is None:
m = re.match(r"(\s+):param (?:\\\*\*?)?(.+?):", line)
if m:
param = m.group(2)
if param in inject_params:
# default indent to that of :param: plus one
indent = " " * len(m.group(1)) + " "
# but if the next line has text, use that line's
                    # indentation
if doclines:
m2 = re.match(r"(\s+)\S", doclines[0])
if m2:
indent = " " * len(m2.group(1))
to_inject = indent + inject_params[param]
elif line.lstrip().startswith(":param "):
lines.append("\n")
lines.append(to_inject)
lines.append("\n")
to_inject = None
elif not line.rstrip():
lines.append(line)
lines.append(to_inject)
lines.append("\n")
to_inject = None
elif line.endswith("::"):
            # TODO: this still won't cover if the code example itself has blank
# lines in it, need to detect those via indentation.
lines.append(line)
lines.append(
doclines.pop(0)
) # the blank line following a code example
continue
lines.append(line)
return "\n".join(lines)
def repr_tuple_names(names):
"""Trims a list of strings from the middle and return a string of up to
four elements. Strings greater than 11 characters will be truncated"""
if len(names) == 0:
return None
flag = len(names) <= 4
names = names[0:4] if flag else names[0:3] + names[-1:]
res = ["%s.." % name[:11] if len(name) > 11 else name for name in names]
if flag:
return ", ".join(res)
else:
return "%s, ..., %s" % (", ".join(res[0:3]), res[-1])
|
the-stack_0_19572
|
import sys
import random
import copy
import keras
import numpy as np
class Puzzle:
goal24 = list(range(25))
goal15 = list(range(16))
goal8 = list(range(9))
model = keras.models.load_model(
'../neuronal-network/saved-neuronal-networks/15puzzle_solver_model_v20.h5')
MaxPDB = 5
pdb = []
pdb_pattern = []
for i in range(MaxPDB):
pdb.append({})
pdb_pattern.append(None)
def __init__(self, board=None, blank=-1):
if not board:
self.x = 3
self.size = 9
self.board = [i for i in range(0, self.size)]
self.blank = 0
else:
self.board = board
if len(self.board) == 9:
self.x = 3
self.size = 9
elif len(self.board) == 16:
self.x = 4
self.size = 16
elif len(self.board) == 25:
self.x = 5
self.size = 25
else:
print('puzzle size not supported')
sys.exit(1)
if blank == -1:
self.blank = board.index(0)
self.preferred = None
def initialize_pdb(id):
f = open("pdb"+str(id)+".txt", 'r')
print('Reading PDB '+str(id))
line = f.readline(100)
line = line.rstrip()
numbers = line.split(' ')
        # the tile values included in the pattern
Puzzle.pdb_pattern[id] = [int(x) for x in numbers]
while f:
line = f.readline(100)
line = line.rstrip()
numbers = line.split(' ')
if len(numbers) < 9:
break
tup = tuple([int(x) for x in numbers[:-1]])
value = int(numbers[-1])
Puzzle.pdb[id][tup] = value
def pdb_heuristic(self, id):
def abstract(x):
if x in Puzzle.pdb_pattern[id] or x == 0:
return x
else:
return -1
        # build a tuple where every tile that is not in the pattern is replaced by -1
        tup = tuple([abstract(x) for x in self.board])
return max(Puzzle.pdb[id][tup], self.manhattan())
def pdb_1(self):
return self.pdb_heuristic(1)
def pdb_2(self):
return self.pdb_heuristic(2)
def pdb_3(self):
return self.pdb_heuristic(3)
def pdb_max12(self):
return max(self.pdb_heuristic(1), self.pdb_heuristic(2))
def pdb_max23(self):
return max(self.pdb_heuristic(2), self.pdb_heuristic(3))
def pdb_max13(self):
return max(self.pdb_heuristic(1), self.pdb_heuristic(3))
def pdb_max123(self):
return max(self.pdb_heuristic(1), self.pdb_heuristic(2), self.pdb_heuristic(3))
def __hash__(self):
return hash(tuple(self.board))
def __eq__(self, other):
return self.board == other.board
def __repr__(self):
def tostr(d):
if d > 0:
return "%2d" % (d)
else:
return " "
s = '\n'
for i in range(0, self.x):
s += "|"
s += "|".join([tostr(d)
for d in self.board[i*self.x:i*self.x+self.x]])
s += "|\n"
return s
def zero_heuristic(self):
return 0
def incorrect_tiles(self):
'''
        returns the number of tiles that are not in their correct position
'''
num = 0
for i in range(0, self.size):
if self.board[i] == 0:
continue
else:
if self.board[i] != i:
num += 1
return num
def manhattan(self):
'''
        returns the sum of the Manhattan distances from each tile to its
        goal position
'''
num = 0
for i in range(0, self.size):
if self.board[i] == 0:
continue
else:
num += abs(i % self.x - self.board[i] % self.x)
num += abs(i // self.x - self.board[i] // self.x)
return num
def nn_repr(self):
        s = ''
        for n in self.board:
            s += '0' * n + '1' + '0' * (15 - n)
        return np.array([list(s)]).astype('f')
def successors(self):
'''
        Creates a list of tuples of the form (state, action, cost),
        where state is the successor state of self generated by executing
        action (a string) and cost (a real number) is the cost of that action
'''
def create_child(newblank):
child = copy.deepcopy(self)
child.blank = newblank
child.board[child.blank] = 0
child.board[self.blank] = self.board[newblank]
child.preferred = None
return child
prediction = Puzzle.model.predict(self.nn_repr())[0]
# best = np.argmax(prediction)
# print('myself:',self)
# print('my preferred child:')
succ = []
if self.blank > self.x - 1:
c = create_child(self.blank-self.x)
c.preferred = prediction[3]
succ.append((c, 'up', 1))
# if best == 3:
# c.preferred = True
# print(c)
if self.blank % self.x > 0:
c = create_child(self.blank-1)
c.preferred = prediction[0]
succ.append((c, 'left', 1))
# if best == 0:
# c.preferred = True
# print(c)
if self.blank % self.x < self.x - 1:
c = create_child(self.blank+1)
c.preferred = prediction[2]
succ.append((c, 'right', 1))
# if best == 2:
# c.preferred = True
# print(c)
if self.blank < self.size - self.x:
c = create_child(self.blank+self.x)
c.preferred = prediction[1]
succ.append((c, 'down', 1))
# if best == 1:
# c.preferred = True
# print(c)
return succ
def is_goal(self):
return self.size == 16 and Puzzle.goal15 == self.board or self.size == 9 and Puzzle.goal8 == self.board
def random_walk(self, steps):
state = self
seen = [self]
for i in range(0, steps):
state = random.choice(state.successors())[0]
while state in seen:
state = random.choice(state.successors())[0]
seen.append(state)
return state
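# Illustrative sketch (added for clarity; not part of the original script and
# never called from it): build a slightly scrambled 8-puzzle and compare the
# two basic heuristics. Note that importing this module already loads the
# keras model referenced in the Puzzle class body.
def _demo_heuristics():
    # 0 denotes the blank; tile i belongs at index i.
    p = Puzzle([1, 0, 2, 3, 4, 5, 6, 7, 8])
    return p.incorrect_tiles(), p.manhattan()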
|
the-stack_0_19573
|
from PyAl.core import *
from bs4 import BeautifulSoup
import requests
import requests_cache
class Request:
def __init__(self, url, payload=None, post=False):
bundleID = bundle()
cacheName = volatile(bundleID + "_requests_cache")
requests_cache.configure(cacheName)
if payload:
self.request = requests.get(url, params=payload) if not post else requests.post(url, data=payload)
else:
self.request = requests.get(url)
def souper(self):
if self.request.status_code == requests.codes.ok:
return BeautifulSoup(self.request.text)
else:
self.request.raise_for_status()
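# Illustrative usage sketch (added for this edit; not executed anywhere and the
# URL and query are hypothetical): fetch a page through the cached session and
# parse it with BeautifulSoup.
def _demo_request():
    req = Request("https://example.com", payload={"q": "alfred"})
    return req.souper().title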
|
the-stack_0_19574
|
import copy
import os
import pathlib
from urllib.parse import urljoin
from .plugin import CredentialPlugin, CertFiles, raise_for_status
import requests
from django.utils.translation import gettext_lazy as _
base_inputs = {
'fields': [
{
'id': 'url',
'label': _('Server URL'),
'type': 'string',
'format': 'url',
'help_text': _('The URL to the HashiCorp Vault'),
},
{
'id': 'token',
'label': _('Token'),
'type': 'string',
'secret': True,
'help_text': _('The access token used to authenticate to the Vault server'),
},
{
'id': 'cacert',
'label': _('CA Certificate'),
'type': 'string',
'multiline': True,
'help_text': _('The CA certificate used to verify the SSL certificate of the Vault server'),
},
{'id': 'role_id', 'label': _('AppRole role_id'), 'type': 'string', 'multiline': False, 'help_text': _('The Role ID for AppRole Authentication')},
{
'id': 'secret_id',
'label': _('AppRole secret_id'),
'type': 'string',
'multiline': False,
'secret': True,
'help_text': _('The Secret ID for AppRole Authentication'),
},
{
'id': 'namespace',
'label': _('Namespace name (Vault Enterprise only)'),
'type': 'string',
'multiline': False,
'help_text': _('Name of the namespace to use when authenticate and retrieve secrets'),
},
{
'id': 'kubernetes_role',
'label': _('Kubernetes role'),
'type': 'string',
'multiline': False,
'help_text': _(
'The Role for Kubernetes Authentication.'
' This is the named role, configured in Vault server, for AWX pod auth policies.'
' see https://www.vaultproject.io/docs/auth/kubernetes#configuration'
),
},
{
'id': 'default_auth_path',
'label': _('Path to Auth'),
'type': 'string',
'multiline': False,
'default': 'approle',
'help_text': _('The Authentication path to use if one isn\'t provided in the metadata when linking to an input field. Defaults to \'approle\''),
},
],
'metadata': [
{
'id': 'secret_path',
'label': _('Path to Secret'),
'type': 'string',
'help_text': _(
(
'The path to the secret stored in the secret backend e.g, /some/secret/. It is recommended'
' that you use the secret backend field to identify the storage backend and to use this field'
' for locating a specific secret within that store. However, if you prefer to fully identify'
' both the secret backend and one of its secrets using only this field, join their locations'
' into a single path without any additional separators, e.g, /location/of/backend/some/secret.'
)
),
},
{
'id': 'auth_path',
'label': _('Path to Auth'),
'type': 'string',
'multiline': False,
'help_text': _('The path where the Authentication method is mounted e.g, approle'),
},
],
'required': ['url', 'secret_path'],
}
hashi_kv_inputs = copy.deepcopy(base_inputs)
hashi_kv_inputs['fields'].append(
{
'id': 'api_version',
'label': _('API Version'),
'choices': ['v1', 'v2'],
'help_text': _('API v1 is for static key/value lookups. API v2 is for versioned key/value lookups.'),
'default': 'v1',
}
)
hashi_kv_inputs['metadata'] = (
[
{
'id': 'secret_backend',
'label': _('Name of Secret Backend'),
'type': 'string',
'help_text': _('The name of the kv secret backend (if left empty, the first segment of the secret path will be used).'),
}
]
+ hashi_kv_inputs['metadata']
+ [
{
'id': 'secret_key',
'label': _('Key Name'),
'type': 'string',
'help_text': _('The name of the key to look up in the secret.'),
},
{
'id': 'secret_version',
'label': _('Secret Version (v2 only)'),
'type': 'string',
'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
},
]
)
hashi_kv_inputs['required'].extend(['api_version', 'secret_key'])
hashi_ssh_inputs = copy.deepcopy(base_inputs)
hashi_ssh_inputs['metadata'] = (
[
{
'id': 'public_key',
'label': _('Unsigned Public Key'),
'type': 'string',
'multiline': True,
}
]
+ hashi_ssh_inputs['metadata']
+ [
{'id': 'role', 'label': _('Role Name'), 'type': 'string', 'help_text': _('The name of the role used to sign.')},
{
'id': 'valid_principals',
'label': _('Valid Principals'),
'type': 'string',
'help_text': _('Valid principals (either usernames or hostnames) that the certificate should be signed for.'),
},
]
)
hashi_ssh_inputs['required'].extend(['public_key', 'role'])
def handle_auth(**kwargs):
token = None
if kwargs.get('token'):
token = kwargs['token']
elif kwargs.get('role_id') and kwargs.get('secret_id'):
token = method_auth(**kwargs, auth_param=approle_auth(**kwargs))
elif kwargs.get('kubernetes_role'):
token = method_auth(**kwargs, auth_param=kubernetes_auth(**kwargs))
else:
raise Exception('Either token or AppRole/Kubernetes authentication parameters must be set')
return token
def approle_auth(**kwargs):
return {'role_id': kwargs['role_id'], 'secret_id': kwargs['secret_id']}
def kubernetes_auth(**kwargs):
jwt_file = pathlib.Path('/var/run/secrets/kubernetes.io/serviceaccount/token')
with jwt_file.open('r') as jwt_fo:
jwt = jwt_fo.read().rstrip()
return {'role': kwargs['kubernetes_role'], 'jwt': jwt}
def method_auth(**kwargs):
# get auth method specific params
request_kwargs = {'json': kwargs['auth_param'], 'timeout': 30}
# we first try to use the 'auth_path' from the metadata
# if not found we try to fetch the 'default_auth_path' from inputs
auth_path = kwargs.get('auth_path') or kwargs['default_auth_path']
url = urljoin(kwargs['url'], 'v1')
cacert = kwargs.get('cacert', None)
sess = requests.Session()
# Namespace support
if kwargs.get('namespace'):
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
with CertFiles(cacert) as cert:
request_kwargs['verify'] = cert
resp = sess.post(request_url, **request_kwargs)
resp.raise_for_status()
token = resp.json()['auth']['client_token']
return token
def kv_backend(**kwargs):
token = handle_auth(**kwargs)
url = kwargs['url']
secret_path = kwargs['secret_path']
secret_backend = kwargs.get('secret_backend', None)
secret_key = kwargs.get('secret_key', None)
cacert = kwargs.get('cacert', None)
api_version = kwargs['api_version']
request_kwargs = {
'timeout': 30,
'allow_redirects': False,
}
sess = requests.Session()
sess.headers['Authorization'] = 'Bearer {}'.format(token)
# Compatibility header for older installs of Hashicorp Vault
sess.headers['X-Vault-Token'] = token
if kwargs.get('namespace'):
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
if api_version == 'v2':
if kwargs.get('secret_version'):
request_kwargs['params'] = {'version': kwargs['secret_version']}
if secret_backend:
path_segments = [secret_backend, 'data', secret_path]
else:
try:
mount_point, *path = pathlib.Path(secret_path.lstrip(os.sep)).parts
'/'.join(path)
except Exception:
mount_point, path = secret_path, []
# https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
path_segments = [mount_point, 'data'] + path
else:
if secret_backend:
path_segments = [secret_backend, secret_path]
else:
path_segments = [secret_path]
request_url = urljoin(url, '/'.join(['v1'] + path_segments)).rstrip('/')
with CertFiles(cacert) as cert:
request_kwargs['verify'] = cert
response = sess.get(request_url, **request_kwargs)
raise_for_status(response)
json = response.json()
if api_version == 'v2':
json = json['data']
if secret_key:
try:
return json['data'][secret_key]
except KeyError:
raise RuntimeError('{} is not present at {}'.format(secret_key, secret_path))
return json['data']
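# Illustrative sketch (added for this edit; all values are hypothetical and this
# helper is never called by the plugin): the keyword arguments that kv_backend()
# receives are the merged credential inputs and metadata defined above, e.g.
# kv_backend(**_demo_kv_backend_kwargs()) would return the value stored under
# 'password'.
def _demo_kv_backend_kwargs():
    return {
        'url': 'https://vault.example.com',
        'token': 's.xxxxxxxx',  # or role_id/secret_id for AppRole auth
        'api_version': 'v2',
        'secret_backend': 'kv',
        'secret_path': '/app/config',
        'secret_key': 'password',
    }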
def ssh_backend(**kwargs):
token = handle_auth(**kwargs)
url = urljoin(kwargs['url'], 'v1')
secret_path = kwargs['secret_path']
role = kwargs['role']
cacert = kwargs.get('cacert', None)
request_kwargs = {
'timeout': 30,
'allow_redirects': False,
}
request_kwargs['json'] = {'public_key': kwargs['public_key']}
if kwargs.get('valid_principals'):
request_kwargs['json']['valid_principals'] = kwargs['valid_principals']
sess = requests.Session()
sess.headers['Authorization'] = 'Bearer {}'.format(token)
if kwargs.get('namespace'):
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
    # Compatibility header for older installs of Hashicorp Vault
sess.headers['X-Vault-Token'] = token
# https://www.vaultproject.io/api/secret/ssh/index.html#sign-ssh-key
request_url = '/'.join([url, secret_path, 'sign', role]).rstrip('/')
with CertFiles(cacert) as cert:
request_kwargs['verify'] = cert
resp = sess.post(request_url, **request_kwargs)
raise_for_status(resp)
return resp.json()['data']['signed_key']
hashivault_kv_plugin = CredentialPlugin('HashiCorp Vault Secret Lookup', inputs=hashi_kv_inputs, backend=kv_backend)
hashivault_ssh_plugin = CredentialPlugin('HashiCorp Vault Signed SSH', inputs=hashi_ssh_inputs, backend=ssh_backend)
|
the-stack_0_19576
|
# -*- coding: utf-8 -*-
from chatterbot import ChatBot
# Uncomment the following lines to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)
# Create a new instance of a ChatBot
bot = ChatBot(
"SQLMemoryTerminal",
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
"chatterbot.logic.MathematicalEvaluation",
"chatterbot.logic.TimeLogicAdapter",
"chatterbot.logic.BestMatch"
],
input_adapter="chatterbot.input.TerminalAdapter",
output_adapter="chatterbot.output.TerminalAdapter",
)
print("Type something to begin...")
# The following loop will execute each time the user enters input
while True:
try:
# We pass None to this method because the parameter
# is not used by the TerminalAdapter
bot_input = bot.get_response(None)
# Press ctrl-c or ctrl-d on the keyboard to exit
except (KeyboardInterrupt, EOFError, SystemExit):
break
|
the-stack_0_19579
|
from typing import List, Tuple, Union
import unrealsdk
from .. import bl2tools
def set_materials(iobject: unrealsdk.UObject, materials: List[unrealsdk.UObject]) -> None:
    # As of now, the MapEditor doesn't handle Materials for IO 100% correctly and sometimes exports wrong MICs.
# So for now we simply ignore any materials, else the game might crash.
return
if materials is None or iobject.ObjectMesh is None:
return
iobject.ObjectMesh.Materials = materials
def set_scale(iobject: unrealsdk.UObject, scale: float) -> None:
iobject.DrawScale = scale
def set_scale3d(iobject: unrealsdk.UObject, scale3d: List[float]) -> None:
iobject.DrawScale3D = tuple(scale3d)
def set_rotation(iobject: unrealsdk.UObject, rotator: Union[List[int], Tuple[int, int, int]]) -> None:
iobject.Rotation = tuple(rotator)
def set_location(iobject: unrealsdk.UObject, position: Union[List[float], Tuple[float, float, float]]) -> None:
iobject.Location = tuple(position)
def instantiate(io_definition: unrealsdk.UObject) -> unrealsdk.UObject:
if not io_definition:
unrealsdk.Log("Nope io")
return False
pc = bl2tools.get_player_controller()
_loc = (pc.Location.X, pc.Location.Y, pc.Location.Z)
pop_master = unrealsdk.FindAll("WillowPopulationMaster")[-1]
is_bal_def = bl2tools.obj_is_in_class(io_definition, "InteractiveObjectBalanceDefinition")
if is_bal_def:
iobject = pop_master.SpawnPopulationControlledActor(
io_definition.DefaultInteractiveObject.InteractiveObjectClass, None, "", _loc, (0, 0, 0)
)
else:
iobject = pop_master.SpawnPopulationControlledActor(
io_definition.InteractiveObjectClass, None, "", _loc, (0, 0, 0)
)
if pc.GetCurrentPlaythrough() != 2:
will_pop = unrealsdk.FindAll("WillowPopulationOpportunityPoint")[1:]
pop = unrealsdk.FindAll("PopulationOpportunityPoint")[1:]
regions = pop if len(pop) > len(will_pop) else will_pop
region_game_stage = max(pc.GetGameStageFromRegion(x.GameStageRegion)
for x in regions if x.GameStageRegion)
else:
region_game_stage = max(x.GetGameStage() for x in unrealsdk.FindAll("WillowPlayerPawn") if x.Arms)
iobject.SetGameStage(region_game_stage)
iobject.SetExpLevel(region_game_stage)
if is_bal_def:
x = io_definition.SelectGradeIndex(region_game_stage, 0)
iobject.InitializeBalanceDefinitionState(io_definition, x)
io_definition.SetupInteractiveObjectLoot(iobject, x)
iobject.InitializeFromDefinition(io_definition.DefaultInteractiveObject, False)
if bl2tools.obj_is_in_class(iobject, "WillowVendingMachine"):
vending_name = bl2tools.get_obj_path_name(iobject.InteractiveObjectDefinition).lower()
markup = unrealsdk.FindObject("AttributeInitializationDefinition",
"GD_Economy.VendingMachine.Init_MarkupCalc_P1")
iobject.CommerceMarkup.InitializationDefinition = markup
iobject.FeaturedItemCommerceMarkup.InitializationDefinition = markup
iobject.InventoryConfigurationName = "Inventory"
iobject.FeaturedItemConfigurationName = "FeaturedItem"
item_stage = unrealsdk.FindObject("AttributeInitializationDefinition",
"GD_Population_Shopping.Balance.Init_FeaturedItem_GameStage")
item_awesome = unrealsdk.FindObject("AttributeInitializationDefinition",
"GD_Population_Shopping.Balance.Init_FeaturedItem_AwesomeLevel")
iobject.FeaturedItemGameStage.InitializationDefinition = item_stage
iobject.FeaturedItemAwesomeLevel.InitializationDefinition = item_awesome
if "health" in vending_name:
iobject.ShopType = 2
elif "ammo" in vending_name:
iobject.ShopType = 1
elif "weapon" in vending_name:
iobject.ShopType = 0
iobject.ResetInventory()
else:
iobject.InitializeFromDefinition(io_definition, False)
return iobject
# noinspection PyBroadException
def destroy(iobject: unrealsdk.UObject) -> None:
try: # faster than if statement for this case. Exception shouldn't happen most of the time.
set_location(iobject, (-9999999, -9999999, -9999999))
set_scale(iobject, 0)
iobject.Destroyed()
except Exception: # We really don't care if our object does not exist or is already deleted.
pass
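# Illustrative usage sketch (added for this edit; never called from this module
# and only meaningful while the game is running): spawn an InteractiveObject
# definition near the player, adjust it, then remove it again.
def _demo_spawn_and_destroy(io_definition: unrealsdk.UObject) -> None:
    iobject = instantiate(io_definition)
    if iobject:
        set_scale(iobject, 1.5)
        set_rotation(iobject, (0, 16384, 0))  # a quarter turn in Unreal rotation units
        destroy(iobject)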
|
the-stack_0_19580
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Python provides the base64 module as a core module but this is mostly
limited to encoding and decoding base64 and its variants. It is often
useful to be able to perform other operations on base64 text. This
module is meant to be used in conjunction with the core base64 module.
Standardized base64 is defined in
RFC-4648 "The Base16, Base32, and Base64 Data Encodings".
This module provides the following base64 utility functionality:
* tests if text is valid base64
* filter formatting from base64
* convert base64 between different alphabets
* Handle padding issues
- test if base64 is padded
- removes padding
- restores padding
* wraps base64 text into formatted blocks
- via iterator
- return formatted string
"""
import re
import string
import six
from six.moves import urllib
from keystone.i18n import _
class InvalidBase64Error(ValueError):
pass
base64_alphabet_re = re.compile(r'^[^A-Za-z0-9+/=]+$')
base64url_alphabet_re = re.compile(r'^[^A-Za-z0-9---_=]+$')
base64_non_alphabet_re = re.compile(r'[^A-Za-z0-9+/=]+')
base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')
_strip_formatting_re = re.compile(r'\s+')
_base64_to_base64url_trans = string.maketrans('+/', '-_')
_base64url_to_base64_trans = string.maketrans('-_', '+/')
def _check_padding_length(pad):
if len(pad) != 1:
raise ValueError(_('pad must be single character'))
def is_valid_base64(text):
"""Test if input text can be base64 decoded.
:param text: input base64 text
:type text: string
:returns: bool -- True if text can be decoded as base64, False otherwise
"""
text = filter_formatting(text)
if base64_non_alphabet_re.search(text):
return False
try:
return base64_is_padded(text)
except InvalidBase64Error:
return False
def is_valid_base64url(text):
"""Test if input text can be base64url decoded.
:param text: input base64 text
:type text: string
:returns: bool -- True if text can be decoded as base64url,
False otherwise
"""
text = filter_formatting(text)
if base64url_non_alphabet_re.search(text):
return False
try:
return base64_is_padded(text)
except InvalidBase64Error:
return False
def filter_formatting(text):
"""Return base64 text without any formatting, just the base64.
Base64 text is often formatted with whitespace, line endings,
etc. This function strips out any formatting, the result will
contain only base64 characters.
Note, this function does not filter out all non-base64 alphabet
characters, it only removes characters used for formatting.
:param text: input text to filter
:type text: string
:returns: string -- filtered text without formatting
"""
return _strip_formatting_re.sub('', text)
def base64_to_base64url(text):
"""Convert base64 text to base64url text.
base64url text is designed to be safe for use in file names and
URL's. It is defined in RFC-4648 Section 5.
base64url differs from base64 in the last two alphabet characters
    at index 62 and 63; these are sometimes referred to as the
altchars. The '+' character at index 62 is replaced by '-'
(hyphen) and the '/' character at index 63 is replaced by '_'
(underscore).
This function only translates the altchars, non-alphabet
characters are not filtered out.
WARNING::
base64url continues to use the '=' pad character which is NOT URL
safe. RFC-4648 suggests two alternate methods to deal with this:
percent-encode
percent-encode the pad character (e.g. '=' becomes
'%3D'). This makes the base64url text fully safe. But
percent-encoding has the downside of requiring
percent-decoding prior to feeding the base64url text into a
base64url decoder since most base64url decoders do not
recognize %3D as a pad character and most decoders require
correct padding.
no-padding
padding is not strictly necessary to decode base64 or
base64url text, the pad can be computed from the input text
length. However many decoders demand padding and will consider
non-padded text to be malformed. If one wants to omit the
trailing pad character(s) for use in URL's it can be added back
using the base64_assure_padding() function.
This function makes no decisions about which padding methodology to
use. One can either call base64_strip_padding() to remove any pad
characters (restoring later with base64_assure_padding()) or call
base64url_percent_encode() to percent-encode the pad characters.
:param text: input base64 text
:type text: string
:returns: string -- base64url text
"""
return text.translate(_base64_to_base64url_trans)
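# Illustrative sketch (added for this edit; not part of the original module):
# the two padding strategies described above, using helpers defined later in
# this module. Like the rest of the module, it assumes a Python 2 / six
# environment.
def _demo_base64url_padding_strategies():
    b64url = base64_to_base64url('bW9ueQ==')
    encoded = base64url_percent_encode(b64url)   # '=' becomes '%3D'
    stripped = base64_strip_padding(b64url)      # 'bW9ueQ'
    restored = base64_assure_padding(stripped)   # 'bW9ueQ=='
    return encoded, restored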
def base64url_to_base64(text):
"""Convert base64url text to base64 text.
See base64_to_base64url() for a description of base64url text and
    its issues.
    This function does NOT handle percent-encoded pad characters; they
    will be left intact. If the input base64url text is
    percent-encoded you should call base64url_percent_decode() first.
:param text: text in base64url alphabet
:type text: string
:returns: string -- text in base64 alphabet
"""
return text.translate(_base64url_to_base64_trans)
def base64_is_padded(text, pad='='):
"""Test if the text is base64 padded.
The input text must be in a base64 alphabet. The pad must be a
single character. If the text has been percent-encoded (e.g. pad
is the string '%3D') you must convert the text back to a base64
alphabet (e.g. if percent-encoded use the function
base64url_percent_decode()).
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param pad: pad character (must be single character) (default: '=')
:type pad: string
:returns: bool -- True if padded, False otherwise
:raises: ValueError, InvalidBase64Error
"""
_check_padding_length(pad)
text_len = len(text)
if text_len > 0 and text_len % 4 == 0:
pad_index = text.find(pad)
if pad_index >= 0 and pad_index < text_len - 2:
raise InvalidBase64Error(_('text is multiple of 4, '
'but pad "%s" occurs before '
'2nd to last char') % pad)
if pad_index == text_len - 2 and text[-1] != pad:
raise InvalidBase64Error(_('text is multiple of 4, '
'but pad "%s" occurs before '
'non-pad last char') % pad)
return True
if text.find(pad) >= 0:
raise InvalidBase64Error(_('text is not a multiple of 4, '
'but contains pad "%s"') % pad)
return False
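# Illustrative sketch (added for this edit; not part of the original module):
# padded versus unpadded detection on well-formed base64.
def _demo_base64_is_padded():
    assert base64_is_padded('ZGF0YQ==') is True
    assert base64_is_padded('ZGF0YQ') is False
    return base64_is_padded('ZGF0YQ==')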
def base64url_percent_encode(text):
"""Percent-encode base64url padding.
The input text should only contain base64url alphabet
characters. Any non-base64url alphabet characters will also be
subject to percent-encoding.
:param text: text containing ONLY characters in the base64url alphabet
:type text: string
:returns: string -- percent-encoded base64url text
:raises: InvalidBase64Error
"""
if len(text) % 4 != 0:
raise InvalidBase64Error(_('padded base64url text must be '
'multiple of 4 characters'))
return urllib.parse.quote(text)
def base64url_percent_decode(text):
"""Percent-decode base64url padding.
The input text should only contain base64url alphabet
characters and the percent-encoded pad character. Any other
percent-encoded characters will be subject to percent-decoding.
:param text: base64url alphabet text
:type text: string
:returns: string -- percent-decoded base64url text
"""
decoded_text = urllib.parse.unquote(text)
if len(decoded_text) % 4 != 0:
raise InvalidBase64Error(_('padded base64url text must be '
'multiple of 4 characters'))
return decoded_text
def base64_strip_padding(text, pad='='):
"""Remove padding from input base64 text.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param pad: pad character (must be single character) (default: '=')
:type pad: string
:returns: string -- base64 text without padding
:raises: ValueError
"""
_check_padding_length(pad)
# Can't be padded if text is less than 4 characters.
if len(text) < 4:
return text
if text[-1] == pad:
if text[-2] == pad:
return text[0:-2]
else:
return text[0:-1]
else:
return text
def base64_assure_padding(text, pad='='):
"""Assure the input text ends with padding.
Base64 text is normally expected to be a multiple of 4
characters. Each 4 character base64 sequence produces 3 octets of
binary data. If the binary data is not a multiple of 3 the base64
text is padded at the end with a pad character such that it is
always a multiple of 4. Padding is ignored and does not alter the
    binary data nor its length.
In some circumstances it is desirable to omit the padding
character due to transport encoding conflicts. Base64 text can
still be correctly decoded if the length of the base64 text
(consisting only of characters in the desired base64 alphabet) is
known, padding is not absolutely necessary.
Some base64 decoders demand correct padding or one may wish to
format RFC compliant base64, this function performs this action.
    Input is assumed to consist only of members of a base64
    alphabet (i.e. no whitespace).
Use the filter_formatting() function to assure the input text
contains only the members of the alphabet.
If the text ends with the pad it is assumed to already be
padded. Otherwise the binary length is computed from the input
text length and correct number of pad characters are appended.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param pad: pad character (must be single character) (default: '=')
:type pad: string
:returns: string -- input base64 text with padding
:raises: ValueError
"""
_check_padding_length(pad)
if text.endswith(pad):
return text
n = len(text) % 4
if n == 0:
return text
n = 4 - n
padding = pad * n
return text + padding
def base64_wrap_iter(text, width=64):
"""Fold text into lines of text with max line length.
Input is assumed to consist only of members of a base64
alphabet (i.e no whitespace). Iteration yields a sequence of lines.
The line does NOT terminate with a line ending.
Use the filter_formatting() function to assure the input text
contains only the members of the alphabet.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param width: number of characters in each wrapped line (default: 64)
:type width: int
:returns: generator -- sequence of lines of base64 text.
"""
text = six.text_type(text)
for x in six.moves.range(0, len(text), width):
yield text[x:x + width]
def base64_wrap(text, width=64):
"""Fold text into lines of text with max line length.
Input is assumed to consist only of members of a base64
alphabet (i.e no whitespace). Fold the text into lines whose
line length is width chars long, terminate each line with line
ending (default is '\\n'). Return the wrapped text as a single
string.
Use the filter_formatting() function to assure the input text
contains only the members of the alphabet.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param width: number of characters in each wrapped line (default: 64)
:type width: int
:returns: string -- wrapped text.
"""
buf = six.StringIO()
for line in base64_wrap_iter(text, width):
buf.write(line)
buf.write(u'\n')
text = buf.getvalue()
buf.close()
return text
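# Illustrative sketch (added for this edit; not part of the original module):
# fold a long base64 string into fixed-width lines.
def _demo_base64_wrap():
    text = 'QUJD' * 40  # 160 characters of base64
    wrapped = base64_wrap(text, width=64)
    assert all(len(line) <= 64 for line in wrapped.splitlines())
    return wrapped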
|
the-stack_0_19581
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource2 as resource
class HealthMonitor(resource.Resource):
resource_key = 'healthmonitor'
resources_key = 'healthmonitors'
base_path = '/lbaas/healthmonitors'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'delay', 'expected_codes', 'http_method', 'max_retries',
'timeout', 'type', 'url_path',
is_admin_state_up='adminstate_up',
project_id='tenant_id',
)
# Properties
#: The time, in seconds, between sending probes to members.
delay = resource.Body('delay')
#: Expected HTTP codes for a passing HTTP(S) monitor.
expected_codes = resource.Body('expected_codes')
#: The HTTP method that the monitor uses for requests.
http_method = resource.Body('http_method')
#: The administrative state of the health monitor, which is up
#: ``True`` or down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: Maximum consecutive health probe tries.
max_retries = resource.Body('max_retries')
#: Name of the health monitor.
name = resource.Body('name')
#: List of pools associated with this health monitor
#: *Type: list of dicts which contain the pool IDs*
pool_ids = resource.Body('pools', type=list)
#: The ID of the project this health monitor is associated with.
project_id = resource.Body('tenant_id')
#: The maximum number of seconds for a monitor to wait for a
#: connection to be established before it times out. This value must
#: be less than the delay value.
timeout = resource.Body('timeout')
#: The type of probe sent by the load balancer to verify the member
#: state, which is PING, TCP, HTTP, or HTTPS.
type = resource.Body('type')
#: Path portion of URI that will be probed if type is HTTP(S).
url_path = resource.Body('url_path')
|
the-stack_0_19585
|
# coding=utf-8
# flake8: noqa E302
"""
Cmd2 testing for argument parsing
"""
import argparse
from typing import (
Optional,
)
import pytest
import cmd2
from .conftest import (
run_cmd,
)
# Prefer statically linked gnureadline if available (for macOS compatibility due to issues with libedit)
try:
import gnureadline as readline
except ImportError:
# Try to import readline, but allow failure for convenience in Windows unit testing
# Note: If this actually fails, you should install readline on Linux or Mac or pyreadline on Windows
try:
# noinspection PyUnresolvedReferences
import readline
except ImportError:
pass
class ArgparseApp(cmd2.Cmd):
def __init__(self):
self.maxrepeats = 3
cmd2.Cmd.__init__(self)
def namespace_provider(self) -> argparse.Namespace:
ns = argparse.Namespace()
ns.custom_stuff = "custom"
return ns
say_parser = cmd2.Cmd2ArgumentParser()
say_parser.add_argument('-p', '--piglatin', action='store_true', help='atinLay')
say_parser.add_argument('-s', '--shout', action='store_true', help='N00B EMULATION MODE')
say_parser.add_argument('-r', '--repeat', type=int, help='output [n] times')
say_parser.add_argument('words', nargs='+', help='words to say')
@cmd2.with_argparser(say_parser)
def do_say(self, args, *, keyword_arg: Optional[str] = None):
"""Repeat what you tell me to."""
words = []
for word in args.words:
if word is None:
word = ''
if args.piglatin:
word = '%s%say' % (word[1:], word[0])
if args.shout:
word = word.upper()
words.append(word)
repetitions = args.repeat or 1
for i in range(min(repetitions, self.maxrepeats)):
self.stdout.write(' '.join(words))
self.stdout.write('\n')
if keyword_arg is not None:
print(keyword_arg)
tag_parser = cmd2.Cmd2ArgumentParser(description='create a html tag')
tag_parser.add_argument('tag', help='tag')
tag_parser.add_argument('content', nargs='+', help='content to surround with tag')
@cmd2.with_argparser(tag_parser, preserve_quotes=True)
def do_tag(self, args):
self.stdout.write('<{0}>{1}</{0}>'.format(args.tag, ' '.join(args.content)))
self.stdout.write('\n')
@cmd2.with_argparser(cmd2.Cmd2ArgumentParser(), ns_provider=namespace_provider)
def do_test_argparse_ns(self, args):
self.stdout.write('{}'.format(args.custom_stuff))
@cmd2.with_argument_list
def do_arglist(self, arglist, *, keyword_arg: Optional[str] = None):
if isinstance(arglist, list):
self.stdout.write('True')
else:
self.stdout.write('False')
if keyword_arg is not None:
print(keyword_arg)
@cmd2.with_argument_list(preserve_quotes=True)
def do_preservelist(self, arglist):
self.stdout.write('{}'.format(arglist))
known_parser = cmd2.Cmd2ArgumentParser()
known_parser.add_argument('-p', '--piglatin', action='store_true', help='atinLay')
known_parser.add_argument('-s', '--shout', action='store_true', help='N00B EMULATION MODE')
known_parser.add_argument('-r', '--repeat', type=int, help='output [n] times')
@cmd2.with_argparser(known_parser, with_unknown_args=True)
def do_speak(self, args, extra, *, keyword_arg: Optional[str] = None):
"""Repeat what you tell me to."""
words = []
for word in extra:
if word is None:
word = ''
if args.piglatin:
word = '%s%say' % (word[1:], word[0])
if args.shout:
word = word.upper()
words.append(word)
repetitions = args.repeat or 1
for i in range(min(repetitions, self.maxrepeats)):
self.stdout.write(' '.join(words))
self.stdout.write('\n')
if keyword_arg is not None:
print(keyword_arg)
@cmd2.with_argparser(cmd2.Cmd2ArgumentParser(), preserve_quotes=True, with_unknown_args=True)
def do_test_argparse_with_list_quotes(self, args, extra):
self.stdout.write('{}'.format(' '.join(extra)))
@cmd2.with_argparser(cmd2.Cmd2ArgumentParser(), ns_provider=namespace_provider, with_unknown_args=True)
def do_test_argparse_with_list_ns(self, args, extra):
self.stdout.write('{}'.format(args.custom_stuff))
@pytest.fixture
def argparse_app():
app = ArgparseApp()
return app
def test_invalid_syntax(argparse_app):
out, err = run_cmd(argparse_app, 'speak "')
assert err[0] == "Invalid syntax: No closing quotation"
def test_argparse_basic_command(argparse_app):
out, err = run_cmd(argparse_app, 'say hello')
assert out == ['hello']
def test_argparse_remove_quotes(argparse_app):
out, err = run_cmd(argparse_app, 'say "hello there"')
assert out == ['hello there']
def test_argparser_kwargs(argparse_app, capsys):
"""Test with_argparser wrapper passes through kwargs to command function"""
argparse_app.do_say('word', keyword_arg="foo")
out, err = capsys.readouterr()
assert out == "foo\n"
def test_argparse_preserve_quotes(argparse_app):
out, err = run_cmd(argparse_app, 'tag mytag "hello"')
assert out[0] == '<mytag>"hello"</mytag>'
def test_argparse_custom_namespace(argparse_app):
out, err = run_cmd(argparse_app, 'test_argparse_ns')
assert out[0] == 'custom'
def test_argparse_with_list(argparse_app):
out, err = run_cmd(argparse_app, 'speak -s hello world!')
assert out == ['HELLO WORLD!']
def test_argparse_with_list_remove_quotes(argparse_app):
out, err = run_cmd(argparse_app, 'speak -s hello "world!"')
assert out == ['HELLO WORLD!']
def test_argparse_with_list_preserve_quotes(argparse_app):
out, err = run_cmd(argparse_app, 'test_argparse_with_list_quotes "hello" person')
assert out[0] == '"hello" person'
def test_argparse_with_list_custom_namespace(argparse_app):
out, err = run_cmd(argparse_app, 'test_argparse_with_list_ns')
assert out[0] == 'custom'
def test_argparse_with_list_and_empty_doc(argparse_app):
out, err = run_cmd(argparse_app, 'speak -s hello world!')
assert out == ['HELLO WORLD!']
def test_argparser_correct_args_with_quotes_and_midline_options(argparse_app):
out, err = run_cmd(argparse_app, "speak 'This is a' -s test of the emergency broadcast system!")
assert out == ['THIS IS A TEST OF THE EMERGENCY BROADCAST SYSTEM!']
def test_argparser_and_unknown_args_kwargs(argparse_app, capsys):
"""Test with_argparser wrapper passing through kwargs to command function"""
argparse_app.do_speak('', keyword_arg="foo")
out, err = capsys.readouterr()
assert out == "foo\n"
def test_argparse_quoted_arguments_multiple(argparse_app):
out, err = run_cmd(argparse_app, 'say "hello there" "rick & morty"')
assert out == ['hello there rick & morty']
def test_argparse_help_docstring(argparse_app):
out, err = run_cmd(argparse_app, 'help say')
assert out[0].startswith('Usage: say')
assert out[1] == ''
assert out[2] == 'Repeat what you tell me to.'
def test_argparse_help_description(argparse_app):
out, err = run_cmd(argparse_app, 'help tag')
assert out[0].startswith('Usage: tag')
assert out[1] == ''
assert out[2] == 'create a html tag'
def test_argparse_prog(argparse_app):
out, err = run_cmd(argparse_app, 'help tag')
progname = out[0].split(' ')[1]
assert progname == 'tag'
def test_arglist(argparse_app):
out, err = run_cmd(argparse_app, 'arglist "we should" get these')
assert out[0] == 'True'
def test_arglist_kwargs(argparse_app, capsys):
"""Test with_argument_list wrapper passes through kwargs to command function"""
argparse_app.do_arglist('arg', keyword_arg="foo")
out, err = capsys.readouterr()
assert out == "foo\n"
def test_preservelist(argparse_app):
out, err = run_cmd(argparse_app, 'preservelist foo "bar baz"')
assert out[0] == "['foo', '\"bar baz\"']"
class SubcommandApp(cmd2.Cmd):
"""Example cmd2 application where we a base command which has a couple subcommands."""
def __init__(self):
cmd2.Cmd.__init__(self)
# subcommand functions for the base command
def base_foo(self, args):
"""foo subcommand of base command"""
self.poutput(args.x * args.y)
def base_bar(self, args):
"""bar subcommand of base command"""
self.poutput('((%s))' % args.z)
def base_helpless(self, args):
"""helpless subcommand of base command"""
self.poutput('((%s))' % args.z)
# create the top-level parser for the base command
base_parser = cmd2.Cmd2ArgumentParser()
base_subparsers = base_parser.add_subparsers(dest='subcommand', metavar='SUBCOMMAND')
base_subparsers.required = True
# create the parser for the "foo" subcommand
parser_foo = base_subparsers.add_parser('foo', help='foo help')
parser_foo.add_argument('-x', type=int, default=1, help='integer')
parser_foo.add_argument('y', type=float, help='float')
parser_foo.set_defaults(func=base_foo)
# create the parser for the "bar" subcommand
parser_bar = base_subparsers.add_parser('bar', help='bar help', aliases=['bar_1', 'bar_2'])
parser_bar.add_argument('z', help='string')
parser_bar.set_defaults(func=base_bar)
# create the parser for the "helpless" subcommand
    # This subcommand has aliases and no help text. It exists to guard against changes to _set_parser_prog()
    # that rely on the action._choices_actions list. See the comment in that function for more details.
    parser_helpless = base_subparsers.add_parser('helpless', aliases=['helpless_1', 'helpless_2'])
    parser_helpless.add_argument('z', help='string')
    parser_helpless.set_defaults(func=base_helpless)
@cmd2.with_argparser(base_parser)
def do_base(self, args):
"""Base command help"""
# Call whatever subcommand function was selected
func = getattr(args, 'func')
func(self, args)
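    # Note: do_base() relies on the classic argparse dispatch pattern -- each subparser
    # stores its handler via set_defaults(func=...) and the command body simply calls
    # args.func. The test_subcmd_decorator command below exercises cmd2's alternative,
    # where subcommands are registered with @cmd2.as_subcommand_to and dispatched via
    # args.cmd2_handler.get().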
# Add subcommands using as_subcommand_to decorator
has_subcmds_parser = cmd2.Cmd2ArgumentParser(description="Tests as_subcmd_to decorator")
has_subcmds_subparsers = has_subcmds_parser.add_subparsers(dest='subcommand', metavar='SUBCOMMAND')
has_subcmds_subparsers.required = True
@cmd2.with_argparser(has_subcmds_parser)
def do_test_subcmd_decorator(self, args: argparse.Namespace):
handler = args.cmd2_handler.get()
handler(args)
subcmd_parser = cmd2.Cmd2ArgumentParser(description="A subcommand")
@cmd2.as_subcommand_to('test_subcmd_decorator', 'subcmd', subcmd_parser, help=subcmd_parser.description.lower())
def subcmd_func(self, args: argparse.Namespace):
        # Make sure printing the Namespace works. The way we originally added cmd2_handler to it resulted in a RecursionError.
self.poutput(args)
helpless_subcmd_parser = cmd2.Cmd2ArgumentParser(add_help=False, description="A subcommand with no help")
@cmd2.as_subcommand_to(
'test_subcmd_decorator', 'helpless_subcmd', helpless_subcmd_parser, help=helpless_subcmd_parser.description.lower()
)
def helpless_subcmd_func(self, args: argparse.Namespace):
        # Make sure vars(Namespace) works. The way we originally added cmd2_handler to it resulted in a RecursionError.
self.poutput(vars(args))
@pytest.fixture
def subcommand_app():
app = SubcommandApp()
return app
def test_subcommand_foo(subcommand_app):
out, err = run_cmd(subcommand_app, 'base foo -x2 5.0')
assert out == ['10.0']
def test_subcommand_bar(subcommand_app):
out, err = run_cmd(subcommand_app, 'base bar baz')
assert out == ['((baz))']
def test_subcommand_invalid(subcommand_app):
out, err = run_cmd(subcommand_app, 'base baz')
assert err[0].startswith('Usage: base')
assert err[1].startswith("Error: argument SUBCOMMAND: invalid choice: 'baz'")
def test_subcommand_base_help(subcommand_app):
out, err = run_cmd(subcommand_app, 'help base')
assert out[0].startswith('Usage: base')
assert out[1] == ''
assert out[2] == 'Base command help'
def test_subcommand_help(subcommand_app):
# foo has no aliases
out, err = run_cmd(subcommand_app, 'help base foo')
assert out[0].startswith('Usage: base foo')
assert out[1] == ''
assert out[2] == 'positional arguments:'
# bar has aliases (usage should never show alias name)
out, err = run_cmd(subcommand_app, 'help base bar')
assert out[0].startswith('Usage: base bar')
assert out[1] == ''
assert out[2] == 'positional arguments:'
out, err = run_cmd(subcommand_app, 'help base bar_1')
assert out[0].startswith('Usage: base bar')
assert out[1] == ''
assert out[2] == 'positional arguments:'
out, err = run_cmd(subcommand_app, 'help base bar_2')
assert out[0].startswith('Usage: base bar')
assert out[1] == ''
assert out[2] == 'positional arguments:'
# helpless has aliases and no help text (usage should never show alias name)
out, err = run_cmd(subcommand_app, 'help base helpless')
assert out[0].startswith('Usage: base helpless')
assert out[1] == ''
assert out[2] == 'positional arguments:'
out, err = run_cmd(subcommand_app, 'help base helpless_1')
assert out[0].startswith('Usage: base helpless')
assert out[1] == ''
assert out[2] == 'positional arguments:'
out, err = run_cmd(subcommand_app, 'help base helpless_2')
assert out[0].startswith('Usage: base helpless')
assert out[1] == ''
assert out[2] == 'positional arguments:'
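# Illustrative sketch, not covered by the surrounding tests: run the helpless subcommand
# end-to-end (the tests above only inspect its help/usage output). It echoes its
# positional argument the same way the bar subcommand does.
def test_subcommand_helpless_execution(subcommand_app):
    out, err = run_cmd(subcommand_app, 'base helpless stuff')
    assert out == ['((stuff))']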
def test_subcommand_invalid_help(subcommand_app):
out, err = run_cmd(subcommand_app, 'help base baz')
assert out[0].startswith('Usage: base')
def test_add_another_subcommand(subcommand_app):
"""
    This test makes sure _set_parser_prog() sets _prog_prefix on every _SubParsersAction so that all future calls
to add_parser() write the correct prog value to the parser being added.
"""
new_parser = subcommand_app.base_subparsers.add_parser('new_sub', help="stuff")
assert new_parser.prog == "base new_sub"
def test_subcmd_decorator(subcommand_app):
# Test subcommand that has help option
out, err = run_cmd(subcommand_app, 'test_subcmd_decorator subcmd')
assert out[0].startswith('Namespace(')
out, err = run_cmd(subcommand_app, 'help test_subcmd_decorator subcmd')
assert out[0] == 'Usage: test_subcmd_decorator subcmd [-h]'
out, err = run_cmd(subcommand_app, 'test_subcmd_decorator subcmd -h')
assert out[0] == 'Usage: test_subcmd_decorator subcmd [-h]'
# Test subcommand that has no help option
out, err = run_cmd(subcommand_app, 'test_subcmd_decorator helpless_subcmd')
assert "'subcommand': 'helpless_subcmd'" in out[0]
out, err = run_cmd(subcommand_app, 'help test_subcmd_decorator helpless_subcmd')
assert out[0] == 'Usage: test_subcmd_decorator helpless_subcmd'
assert not err
out, err = run_cmd(subcommand_app, 'test_subcmd_decorator helpless_subcmd -h')
assert not out
assert err[0] == 'Usage: test_subcmd_decorator [-h] SUBCOMMAND ...'
assert err[1] == 'Error: unrecognized arguments: -h'
def test_unittest_mock():
from unittest import (
mock,
)
from cmd2 import (
CommandSetRegistrationError,
)
with mock.patch.object(ArgparseApp, 'namespace_provider'):
with pytest.raises(CommandSetRegistrationError):
app = ArgparseApp()
with mock.patch.object(ArgparseApp, 'namespace_provider', spec=True):
app = ArgparseApp()
with mock.patch.object(ArgparseApp, 'namespace_provider', spec_set=True):
app = ArgparseApp()
with mock.patch.object(ArgparseApp, 'namespace_provider', autospec=True):
app = ArgparseApp()
def test_pytest_mock_invalid(mocker):
from cmd2 import (
CommandSetRegistrationError,
)
mocker.patch.object(ArgparseApp, 'namespace_provider')
with pytest.raises(CommandSetRegistrationError):
app = ArgparseApp()
@pytest.mark.parametrize(
'spec_param',
[
{'spec': True},
{'spec_set': True},
{'autospec': True},
],
)
def test_pytest_mock_valid(mocker, spec_param):
mocker.patch.object(ArgparseApp, 'namespace_provider', **spec_param)
app = ArgparseApp()
|
the-stack_0_19586
|
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DeimOS Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deimosd with different proxy configuration.
Test plan:
- Start deimosd's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on deimosd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create deimosds that connect to them
- Manipulate the deimosds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import DeimOSTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(DeimOSTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: deimosd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: deimosd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("deimosostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"deimosostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
the-stack_0_19588
|
import sys
import typer
from typer.core import TyperCommand
from click.parser import OptionParser
from mltk import cli
class _VariableArgumentOptionParser(OptionParser):
def parse_args(self, args):
if len(args) >= 2:
self.ctx.meta['vargs'] = args[1:]
return super().parse_args(args[:1])
else:
return super().parse_args(args)
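# Note on the pass-through mechanics: only the first positional token (the model name)
# is handed to click/typer for parsing; everything after it is stashed untouched in
# ctx.meta['vargs'] and later replayed as sys.argv for the model's own CLI (see
# custom_model_command below). As a rough illustration, an invocation such as
#   mltk custom siamese_contrastive visualize --some-flag
# (--some-flag being a hypothetical option defined by that model) would leave
# ['visualize', '--some-flag'] in ctx.meta['vargs'].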
class _VariableArgumentParsingCommand(TyperCommand):
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = _VariableArgumentOptionParser(ctx)
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
@cli.root_cli.command("custom", cls=_VariableArgumentParsingCommand)
def custom_model_command(
ctx: typer.Context,
model: str = typer.Argument(...,
help='Name of MLTK model OR path to model specification python script',
metavar='<model>'
)
):
"""Custom Model Operations
This allows for running custom-defined model commands.
The model operations are defined in the model specification file.
----------
Examples
----------
\b
# Run the 'visualize' custom command provided by the
# siamese_contrastive example model
mltk custom siamese_contrastive visualize
"""
# Import all required packages here instead of at top
# to help improve the CLI's responsiveness
from mltk.core import load_mltk_model
# Find the MLTK Model file
try:
mltk_model = load_mltk_model(
model,
print_not_found_err=True
)
except Exception as e:
cli.handle_exception('Failed to load model', e)
if len(mltk_model.cli.registered_commands) == 0:
cli.abort(msg=f'Model {mltk_model.name} does not define any custom commands')
# This works around an issue with mltk_cli.py
# modules that only have one command
# It simply adds a hidden dummy command to the root CLI
@mltk_model.cli.command('_mltk_workaround', hidden=True)
def _mltk_workaround():
pass
try:
orig_argv = sys.argv
sys.argv = ['mltk'] + ctx.meta['vargs']
mltk_model.cli(prog_name=f'mltk custom {mltk_model.name}')
except Exception as e:
cli.handle_exception(f'Model {mltk_model.name} custom command failed', e)
finally:
sys.argv = orig_argv
|