filename | text |
---|---|
the-stack_106_14563
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2016, Linaro Limited
import struct
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=False, dest='inf',
default='../out/arm/core/tee.bin',
help='The input tee.bin')
return parser.parse_args()
def main():
args = get_args()
with open(args.inf, "rb") as f:
data = f.read(4)
magic = struct.unpack('<I', data)[0]
print("Magic: \t\t0x{:08x}".format(magic))
data = f.read(1)
version = struct.unpack('<B', data)[0]
print("Version: \t0x{:02x}".format(version))
data = f.read(1)
arch_id = struct.unpack('<B', data)[0]
print("ArchID: \t0x{:02x}".format(arch_id))
data = f.read(2)
flags = struct.unpack('<H', data)[0]
print("Arch Flags: \t0x{:04x}".format(arch_id))
data = f.read(4)
init_size = struct.unpack('<I', data)[0]
print("Init size: \t0x{:04x}".format(init_size))
data = f.read(4)
laddr_h = struct.unpack('<I', data)[0]
print("Load addr high:\t0x{:04x}".format(laddr_h))
data = f.read(4)
laddr_l = struct.unpack('<I', data)[0]
print("Load addr low: \t0x{:04x}".format(laddr_l))
data = f.read(4)
mem_usage = struct.unpack('<I', data)[0]
print("Mem usage: \t0x{:04x}".format(mem_usage))
data = f.read(4)
pgd_size = struct.unpack('<I', data)[0]
print("Pages size: \t0x{:04x}".format(pgd_size))
if __name__ == "__main__":
main()
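# Illustrative alternative (not part of the original script and never called):
# the same 28-byte header can be read with a single struct call. The format
# string below is an assumption that mirrors the field-by-field reads above.
def parse_header_at_once(path='../out/arm/core/tee.bin'):
    fmt = '<IBBHIIIII'
    names = ('magic', 'version', 'arch_id', 'flags', 'init_size',
             'laddr_h', 'laddr_l', 'mem_usage', 'pgd_size')
    with open(path, 'rb') as f:
        values = struct.unpack(fmt, f.read(struct.calcsize(fmt)))
    return dict(zip(names, values))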
|
the-stack_106_14564
|
"""Contains the drivers and interface code for pinball machines which use the Multimorphic R-ROC hardware controllers.
This code can be used with P-ROC driver boards, or with Stern SAM, Stern
Whitestar, Williams WPC, or Williams WPC95 driver boards.
Much of this code is from the P-ROC drivers section of the pyprocgame project,
written by Adam Preble and Gerry Stellenberg. It was originally released under
the MIT license and is released here under the MIT License.
More info on the P-ROC hardware platform: http://pinballcontrollers.com/
Original code source on which this module was based:
https://github.com/preble/pyprocgame
"""
import logging
import asyncio
from mpf.core.platform import DmdPlatform, DriverConfig, SwitchConfig, SegmentDisplayPlatform
from mpf.platforms.interfaces.dmd_platform import DmdPlatformInterface
from mpf.platforms.interfaces.segment_display_platform_interface import SegmentDisplayPlatformInterface
from mpf.platforms.p_roc_common import PDBConfig, PROCBasePlatform
from mpf.core.utility_functions import Util
from mpf.platforms.p_roc_devices import PROCDriver
class PRocHardwarePlatform(PROCBasePlatform, DmdPlatform, SegmentDisplayPlatform):
"""Platform class for the P-ROC hardware controller.
Args:
machine: The MachineController instance.
Attributes:
machine: The MachineController instance.
"""
__slots__ = ["config", "dmd", "alpha_display", "aux_port", "_use_extended_matrix",
"_use_first_eight_direct_inputs"]
def __init__(self, machine):
"""Initialise P-ROC."""
super().__init__(machine)
self.log = logging.getLogger('P-ROC')
self.debug_log("Configuring P-ROC hardware")
# validate config for p_roc
self.config = self.machine.config_validator.validate_config("p_roc", self.machine.config['p_roc'])
self.dmd = None
self.alpha_display = None
self.connect()
self.aux_port = AuxPort(self)
self.aux_port.reset()
self._use_extended_matrix = False
self._use_first_eight_direct_inputs = False
# Because PDBs can be configured in many different ways, we need to
# traverse the YAML settings to see how many PDBs are being used.
# Then we can configure the P-ROC appropriately to use those PDBs.
# Only then can we relate the YAML coil/light #'s to P-ROC numbers for
# the collections.
if self.machine_type == self.pinproc.MachineTypePDB:
self.debug_log("Configuring P-ROC for PDBs (P-ROC driver boards)")
self.pdbconfig = PDBConfig(self.proc, self.machine.config, self.pinproc.DriverCount)
else:
self.debug_log("Configuring P-ROC for OEM driver boards")
def _get_default_subtype(self):
"""Return default subtype for P-Roc."""
return "matrix"
def __repr__(self):
"""Return string representation."""
return '<Platform.P-ROC>'
def get_info_string(self):
"""Dump infos about boards."""
infos = "Firmware Version: {} Firmware Revision: {} Hardware Board ID: {}\n".format(
self.version, self.revision, self.hardware_version)
return infos
def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict):
"""Create a P-ROC driver.
Typically drivers are coils or flashers, but for the P-ROC this is
also used for matrix-based lights.
Args:
config: Dictionary of settings for the driver.
Returns:
A reference to the PROCDriver object which is the actual object you
can use to pulse(), patter(), enable(), etc.
"""
# todo need to add Aux Bus support
# todo need to add virtual driver support for driver counts > 256
# Find the P-ROC number for each driver. For P-ROC driver boards, the
# P-ROC number is specified via the Ax-By-C format. For OEM driver
# boards configured via driver numbers, libpinproc's decode() method
# can provide the number.
if self.machine_type == self.pinproc.MachineTypePDB:
proc_num = self.pdbconfig.get_proc_coil_number(str(number))
if proc_num == -1:
raise AssertionError("Driver {} cannot be controlled by the P-ROC. ".format(str(number)))
else:
proc_num = self.pinproc.decode(self.machine_type, str(number))
return PROCDriver(proc_num, config, self, number)
def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict):
"""Configure a P-ROC switch.
Args:
number: String number of the switch to configure.
config: SwitchConfig settings.
Returns: A configured switch object.
"""
del platform_config
try:
if number.startswith("SD") and 0 <= int(number[2:]) <= 7:
self._use_first_eight_direct_inputs = True
_, y = number.split('/', 2)
if int(y) > 7:
self._use_extended_matrix = True
except ValueError:
pass
if self._use_extended_matrix and self._use_first_eight_direct_inputs:
raise AssertionError(
"P-Roc vannot use extended matrix and the first eight direct inputs at the same "
"time. Either only use SD8 to SD31 or only use matrix X/Y with Y <= 7. Offending "
"switch: {}".format(number))
if self.machine_type == self.pinproc.MachineTypePDB:
proc_num = self.pdbconfig.get_proc_switch_number(str(number))
if proc_num == -1:
raise AssertionError("Switch {} cannot be controlled by the P-ROC. ".format(str(number)))
else:
proc_num = self.pinproc.decode(self.machine_type, str(number))
return self._configure_switch(config, proc_num)
@asyncio.coroutine
def get_hw_switch_states(self):
"""Read in and set the initial switch state.
The P-ROC uses the following values for hw switch states:
1 - closed (debounced)
2 - open (debounced)
3 - closed (not debounced)
4 - open (not debounced)
"""
states = self.proc.switch_get_states()
for switch, state in enumerate(states):
if state == 3 or state == 1:
states[switch] = 1
else:
states[switch] = 0
return states
def configure_dmd(self):
"""Configure a hardware DMD connected to a classic P-ROC."""
self.dmd = PROCDMD(self.pinproc, self.proc, self.machine)
return self.dmd
def configure_segment_display(self, number: str) -> "SegmentDisplayPlatformInterface":
"""Configure display."""
number_int = int(number)
if not 0 <= number_int <= 3:
raise AssertionError("Number must be between 0 and 3 for p_roc segment display.")
if not self.alpha_display:
self.alpha_display = AuxAlphanumericDisplay(self, self.aux_port)
return PRocAlphanumericDisplay(self.alpha_display, number_int)
def tick(self):
"""Check the P-ROC for any events (switch state changes or notification that a DMD frame was updated).
Also tickles the watchdog and flushes any queued commands to the P-ROC.
"""
# Get P-ROC events (switches & DMD frames displayed)
for event in self.proc.get_events():
event_type = event['type']
event_value = event['value']
if event_type == self.pinproc.EventTypeDMDFrameDisplayed:
pass
elif event_type == self.pinproc.EventTypeSwitchClosedDebounced:
self.machine.switch_controller.process_switch_by_num(
state=1, num=event_value, platform=self)
elif event_type == self.pinproc.EventTypeSwitchOpenDebounced:
self.machine.switch_controller.process_switch_by_num(
state=0, num=event_value, platform=self)
elif event_type == self.pinproc.EventTypeSwitchClosedNondebounced:
self.machine.switch_controller.process_switch_by_num(
state=1, num=event_value, platform=self)
elif event_type == self.pinproc.EventTypeSwitchOpenNondebounced:
self.machine.switch_controller.process_switch_by_num(
state=0, num=event_value, platform=self)
else:
self.log.warning("Received unrecognized event from the P-ROC. "
"Type: %s, Value: %s", event_type, event_value)
self.proc.watchdog_tickle()
self.proc.flush()
class PROCDMD(DmdPlatformInterface):
"""Parent class for a physical DMD attached to a P-ROC.
Args:
proc: Reference to the MachineController's proc attribute.
machine: Reference to the MachineController
Attributes:
dmd: Reference to the P-ROC's DMD buffer.
"""
__slots__ = ["proc", "machine", "dmd"]
def __init__(self, pinproc, proc, machine):
"""Set up DMD."""
self.proc = proc
self.machine = machine
# size is hardcoded here since 128x32 is all the P-ROC hw supports
self.dmd = pinproc.DMDBuffer(128, 32)
# dmd_timing defaults should be 250, 400, 180, 800
if self.machine.config['p_roc']['dmd_timing_cycles']:
dmd_timing = Util.string_to_list(
self.machine.config['p_roc']['dmd_timing_cycles'])
self.proc.dmd_update_config(high_cycles=dmd_timing)
def set_brightness(self, brightness: float):
"""Set brightness."""
# currently not supported. can be implemented using dmd_timing_cycles
assert brightness == 1.0
def update(self, data):
"""Update the DMD with a new frame.
Args:
data: A 4096-byte raw string.
"""
if len(data) == 4096:
self.dmd.set_data(data)
self.proc.dmd_draw(self.dmd)
else:
self.machine.log.warning("Received DMD frame of length %s instead"
"of 4096. Discarding...", len(data))
class AuxPort(object):
"""Aux port on the P-Roc."""
__slots__ = ["platform", "_commands"]
def __init__(self, platform):
"""Initialise aux port."""
self.platform = platform
self._commands = []
def reset(self):
"""Reset aux port."""
commands = [self.platform.pinproc.aux_command_disable()]
for _ in range(1, 255):
commands += [self.platform.pinproc.aux_command_jump(0)]
self.platform.proc.aux_send_commands(0, commands)
def reserve_index(self):
"""Return index of next free command slot and reserve it."""
self._commands += [[]]
return len(self._commands) - 1
def update(self, index, commands):
"""Update command slot with command."""
self._commands[index] = commands
self._write_commands()
def _write_commands(self):
"""Write commands to hardware."""
# disable program
commands = [self.platform.pinproc.aux_command_disable()]
# build command list
for command_set in self._commands:
commands += command_set
self.platform.proc.aux_send_commands(0, commands)
# jump from slot 0 to slot 1. overwrites the disable
self.platform.proc.aux_send_commands(0, [self.platform.pinproc.aux_command_jump(1)])
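# Hedged usage sketch (illustrative only, using names from this module): a
# display driver reserves one command slot up front and then refreshes it
# whenever its output changes; reset() wipes the whole aux program first.
#
#   aux = AuxPort(platform)
#   aux.reset()
#   slot = aux.reserve_index()
#   aux.update(slot, [platform.pinproc.aux_command_delay(100)])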
class PRocAlphanumericDisplay(SegmentDisplayPlatformInterface):
"""Since AuxAlphanumericDisplay updates all four displays wrap it and set the correct offset."""
__slots__ = ["display"]
def __init__(self, display, index):
"""Initialise alpha numeric display."""
super().__init__(index)
self.display = display
def set_text(self, text: str, flashing: bool):
"""Set digits to display."""
# TODO: handle flashing using delay
self.display.set_text(text, self.number)
class AuxAlphanumericDisplay(object):
"""An alpha numeric display connected to the aux port on the P-Roc."""
# Start at ASCII table offset 32: ' '
asciiSegments = [0x0000, # ' '
0x016a, # '!' Random Debris Character 1
0x3014, # '"' Random Debris Character 2
0x5d80, # '#' Random Debris Character 3
0x00a4, # '$' Random Debris Character 4
0x3270, # '%' Random Debris Character 5
0x4640, # '&' Random Debris Character 6
0x0200, # '''
0x1400, # '('
0x4100, # ')'
0x7f40, # '*'
0x2a40, # '+'
0x8080, # ','
0x0840, # '-'
0x8000, # '.'
0x4400, # '/'
0x003f, # '0'
0x0006, # '1'
0x085b, # '2'
0x084f, # '3'
0x0866, # '4'
0x086d, # '5'
0x087d, # '6'
0x0007, # '7'
0x087f, # '8'
0x086f, # '9'
0x0821, # ':' Random Debris Character 7
0x1004, # ';' Random Debris Character 8
0x1c00, # '<' Left Arrow
0x1386, # '=' Random Debris Character 9
0x4140, # '>' Right Arrow
0x0045, # '?' Random Debris Character 10
0x4820, # '@' Random Debris Character 11
0x0877, # 'A'
0x2a4f, # 'B'
0x0039, # 'C'
0x220f, # 'D'
0x0879, # 'E'
0x0871, # 'F'
0x083d, # 'G'
0x0876, # 'H'
0x2209, # 'I'
0x001e, # 'J'
0x1470, # 'K'
0x0038, # 'L'
0x0536, # 'M'
0x1136, # 'N'
0x003f, # 'O'
0x0873, # 'P'
0x103f, # 'Q'
0x1873, # 'R'
0x086d, # 'S'
0x2201, # 'T'
0x003e, # 'U'
0x4430, # 'V'
0x5036, # 'W'
0x5500, # 'X'
0x2500, # 'Y'
0x4409, # 'Z'
0x6004, # '[' Random Debris Character 12
0x6411, # '\' Random Debris Character 13
0x780a, # ']' Random Debris Character 14
0x093a, # '^' Random Debris Character 15
0x0008, # '_'
0x2220, # '`' Random Debris Character 16
0x0c56, # 'a' Broken Letter a
0x684e, # 'b' Broken Letter b
0x081c, # 'c' Broken Letter c
0x380e, # 'd' Broken Letter d
0x1178, # 'e' Broken Letter e
0x4831, # 'f' Broken Letter f
0x083d, # 'g' Broken Letter g NOT CREATED YET
0x0854, # 'h' Broken Letter h
0x2209, # 'i' Broken Letter i NOT CREATED YET
0x001e, # 'j' Broken Letter j NOT CREATED YET
0x1070, # 'k' Broken Letter k
0x0038, # 'l' Broken Letter l NOT CREATED YET
0x0536, # 'm' Broken Letter m NOT CREATED YET
0x1136, # 'n' Broken Letter n NOT CREATED YET
0x085c, # 'o' Broken Letter o
0x0873, # 'p' Broken Letter p NOT CREATED YET
0x103f, # 'q' Broken Letter q NOT CREATED YET
0x1c72, # 'r' Broken Letter r
0x116c, # 's' Broken Letter s
0x2120, # 't' Broken Letter t
0x003e, # 'u' Broken Letter u NOT CREATED YET
0x4430, # 'v' Broken Letter v NOT CREATED YET
0x5036, # 'w' Broken Letter w NOT CREATED YET
0x5500, # 'x' Broken Letter x NOT CREATED YET
0x2500, # 'y' Broken Letter y NOT CREATED YET
0x4409 # 'z' Broken Letter z NOT CREATED YET
]
strobes = [8, 9, 10, 11, 12]
full_intensity_delay = 350 # microseconds
inter_char_delay = 40 # microseconds
__slots__ = ["platform", "aux_controller", "aux_index", "texts"]
def __init__(self, platform, aux_controller):
"""Initialise the alphanumeric display."""
self.platform = platform
self.aux_controller = aux_controller
self.aux_index = aux_controller.reserve_index()
self.texts = [" "] * 4
def set_text(self, text, index):
"""Set text for display."""
if len(text) != 8:
text = text[0:8].rjust(8, ' ')
self.texts[index] = text
# build expected format
input_strings = [self.texts[0] + self.texts[1], self.texts[2] + self.texts[3]]
self.display(input_strings)
def display(self, input_strings, intensities=None):
"""Set display text."""
strings = []
if intensities is None:
intensities = [[1] * 16] * 2
# Make sure strings are at least 16 chars.
# Then convert each string to a list of chars.
for j in range(0, 2):
input_strings[j] = input_strings[j]
if len(input_strings[j]) < 16:
input_strings[j] += ' ' * (16 - len(input_strings[j]))
strings += [list(input_strings[j])]
# Make sure intensities are 1 or less
for i in range(0, 16):
for j in range(0, 2):
if intensities[j][i] > 1:
intensities[j][i] = 1
commands = []
char_on_time = []
char_off_time = []
# Initialize a 2x16 array for segments value
segs = [[0] * 16 for _ in range(2)]
# Loop through each character
for i in range(0, 16):
# Activate the character position (this goes to both displays)
commands += [self.platform.pinproc.aux_command_output_custom(i, 0, self.strobes[0], False, 0)]
for j in range(0, 2):
segs[j][i] = self.asciiSegments[ord(strings[j][i]) - 32]
# Check for commas or periods.
# If found, squeeze comma into previous character.
# No point checking the last character (plus, this avoids an
# indexing error by not checking i+1 on the 16th char).
if i < 15:
comma_dot = strings[j][i + 1]
if comma_dot == "," or comma_dot == ".":
segs[j][i] |= self.asciiSegments[ord(comma_dot) - 32]
strings[j].remove(comma_dot)
# Append a space to ensure there are enough chars.
strings[j].append(' ')
# character is 16 bits long, characters are loaded in 2 lots of 8 bits,
# for each display (4 enable lines total)
commands += [self.platform.pinproc.aux_command_output_custom(
segs[j][i] & 0xff, 0,
self.strobes[j * 2 + 1], False, 0)] # first 8 bits of character data
commands += [self.platform.pinproc.aux_command_output_custom(
(segs[j][i] >> 8) & 0xff, 0,
self.strobes[j * 2 + 2], False, 0)] # second 8 bits of character data
char_on_time += [intensities[j][i] * self.full_intensity_delay]
char_off_time += [self.inter_char_delay + (self.full_intensity_delay - char_on_time[j])]
if char_on_time[0] < char_on_time[1]:
first = 0
second = 1
else:
first = 1
second = 0
# Determine amount of time to leave the other char on after the
# first is off.
between_delay = char_on_time[second] - char_on_time[first]
# Not sure if the hardware will like a delay of 0
# Use 2 to be extra safe. 2 microseconds won't affect display.
if between_delay == 0:
between_delay = 2
# Delay until it's time to turn off the character with the lowest intensity
commands += [self.platform.pinproc.aux_command_delay(char_on_time[first])]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[first * 2 + 1], False, 0)]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[first * 2 + 2], False, 0)]
# Delay until it's time to turn off the other character.
commands += [self.platform.pinproc.aux_command_delay(between_delay)]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[second * 2 + 1], False, 0)]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[second * 2 + 2], False, 0)]
# Delay for the inter-digit delay.
commands += [self.platform.pinproc.aux_command_delay(char_off_time[second])]
# Send the new list of commands to the Aux port controller.
self.aux_controller.update(self.aux_index, commands)
|
the-stack_106_14565
|
import logging
from django.db.models import signals
import purpleserver.core.serializers as serializers
import purpleserver.manager.models as models
import purpleserver.events.tasks as tasks
from purpleserver.events.serializers import EventTypes
logger = logging.getLogger(__name__)
def register_signals():
signals.post_save.connect(shipment_updated, sender=models.Shipment)
signals.post_delete.connect(shipment_cancelled, sender=models.Shipment)
signals.post_save.connect(tracker_updated, sender=models.Tracking)
logger.info("webhooks signals registered...")
def shipment_updated(sender, instance, created, raw, using, update_fields, *args, **kwargs):
"""Shipment related events:
- shipment purchased (label purchased)
- shipment fulfilled (shipped)
"""
changes = update_fields or {}
if created or 'status' not in changes:
return
elif instance.status == serializers.ShipmentStatus.purchased.value:
event = EventTypes.shipment_purchased.value
elif instance.status == serializers.ShipmentStatus.transit.value:
event = EventTypes.shipment_fulfilled.value
elif instance.status == serializers.ShipmentStatus.cancelled.value:
event = EventTypes.shipment_cancelled.value
else:
return
data = serializers.Shipment(instance).data
event_at = instance.updated_at
test_mode = instance.test_mode
tasks.notify_webhooks(event, data, event_at, test_mode)
def shipment_cancelled(sender, instance, *args, **kwargs):
"""Shipment related events:
- shipment cancelled/deleted (label voided)
"""
event = EventTypes.shipment_cancelled.value
data = serializers.Shipment(instance).data
event_at = instance.updated_at
test_mode = instance.test_mode
tasks.notify_webhooks(event, data, event_at, test_mode)
def tracker_updated(sender, instance, created, raw, using, update_fields, *args, **kwargs):
"""Tracking related events:
- tracker created (pending)
- tracker status changed (in-transit, delivered or blocked)
"""
if created:
event = EventTypes.tracker_created.value
elif any(field in (update_fields or []) for field in ['status']):
event = EventTypes.tracker_updated.value
else:
return
data = serializers.TrackingStatus(instance).data
event_at = instance.updated_at
test_mode = instance.test_mode
tasks.notify_webhooks(event, data, event_at, test_mode)
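# Hedged illustration (not part of this module): register_signals() is
# typically invoked once at startup from the app's AppConfig.ready() hook,
# roughly as sketched below (the app label and module path are assumptions):
#
#   # purpleserver/events/apps.py
#   from django.apps import AppConfig
#
#   class EventsConfig(AppConfig):
#       name = "purpleserver.events"
#
#       def ready(self):
#           from purpleserver.events import signals
#           signals.register_signals()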
|
the-stack_106_14567
|
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestCosineEmbeddingCriterion(serial.SerializedTestCase):
@serial.given(N=st.integers(min_value=10, max_value=20),
seed=st.integers(min_value=0, max_value=65535),
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
def test_cosine_embedding_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
S = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
"CosineEmbeddingCriterion", ["S", "Y"], ["output"],
margin=margin)
def ref_cec(S, Y):
result = (1 - S) * (Y == 1) + np.maximum(S - margin, 0) * (Y == -1)
return (result, )
# This checks the op implementation against a reference function in
# python.
self.assertReferenceChecks(gc, op, [S, Y], ref_cec)
# This checks the op implementation over multiple device options (e.g.
# CPU and CUDA). [0] means that the 0-th output is checked.
self.assertDeviceChecks(dc, op, [S, Y], [0])
# Now, since this operator's output has a "kink" around the margin
# value, we move the S vector away from the margin a little bit. This
# is a standard trick to avoid gradient check to fail on subgradient
# points.
S[np.abs(S - margin) < 0.1] += 0.2
# This checks the operator's gradient. the first 0 means that we are
# checking the gradient of the first input (S), and the second [0] means
# that the gradient check should initiate from the 0-th output.
self.assertGradientChecks(gc, op, [S, Y], 0, [0])
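# Worked example of the reference formula above (illustrative numbers): with
# margin = 0.1, a similarity S = 0.8 paired with Y = +1 contributes
# 1 - 0.8 = 0.2 to the loss, while the same S paired with Y = -1 contributes
# max(0.8 - 0.1, 0) = 0.7.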
if __name__ == "__main__":
import unittest
unittest.main()
|
the-stack_106_14568
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import tsdb
from sentry.api.base import DocSection, EnvironmentMixin, StatsMixin
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import Environment, Project, Team
from sentry.utils.apidocs import attach_scenarios, scenario
@scenario('RetrieveEventCountsOrganization')
def retrieve_event_counts_organization(runner):
runner.request(method='GET', path='/organizations/%s/stats/' % runner.org.slug)
class OrganizationStatsEndpoint(OrganizationEndpoint, EnvironmentMixin, StatsMixin):
doc_section = DocSection.ORGANIZATIONS
@attach_scenarios([retrieve_event_counts_organization])
def get(self, request, organization):
"""
Retrieve Event Counts for an Organization
`````````````````````````````````````````
.. caution::
This endpoint may change in the future without notice.
Return a set of points representing a normalized timestamp and the
number of events seen in the period.
:pparam string organization_slug: the slug of the organization for
which the stats should be
retrieved.
:qparam string stat: the name of the stat to query (``"received"``,
``"rejected"``, ``"blacklisted"``)
:qparam timestamp since: a timestamp to set the start of the query
in seconds since UNIX epoch.
:qparam timestamp until: a timestamp to set the end of the query
in seconds since UNIX epoch.
:qparam string resolution: an explicit resolution to search
for (eg: ``10s``). This should not be
used unless you are familiar with Sentry's
internals as it's restricted to pre-defined
values.
:auth: required
"""
group = request.GET.get('group', 'organization')
if group == 'organization':
keys = [organization.id]
elif group == 'project':
team_list = Team.objects.get_for_user(
organization=organization,
user=request.user,
)
project_ids = request.GET.getlist('projectID')
if not project_ids:
project_list = []
for team in team_list:
project_list.extend(Project.objects.get_for_user(
team=team,
user=request.user,
))
else:
project_list = Project.objects.filter(
teams__in=team_list,
id__in=project_ids,
)
keys = list({p.id for p in project_list})
else:
raise ValueError('Invalid group: %s' % group)
if 'id' in request.GET:
id_filter_set = frozenset(map(int, request.GET.getlist('id')))
keys = [k for k in keys if k in id_filter_set]
if not keys:
return Response([])
stat_model = None
stat = request.GET.get('stat', 'received')
query_kwargs = {}
if stat == 'received':
if group == 'project':
stat_model = tsdb.models.project_total_received
else:
stat_model = tsdb.models.organization_total_received
elif stat == 'rejected':
if group == 'project':
stat_model = tsdb.models.project_total_rejected
else:
stat_model = tsdb.models.organization_total_rejected
elif stat == 'blacklisted':
if group == 'project':
stat_model = tsdb.models.project_total_blacklisted
else:
stat_model = tsdb.models.organization_total_blacklisted
elif stat == 'generated':
if group == 'project':
stat_model = tsdb.models.project
try:
query_kwargs['environment_id'] = self._get_environment_id_from_request(
request,
organization.id,
)
except Environment.DoesNotExist:
raise ResourceDoesNotExist
if stat_model is None:
raise ValueError('Invalid group: %s, stat: %s' % (group, stat))
data = tsdb.get_range(model=stat_model, keys=keys,
**self._parse_args(request, **query_kwargs))
if group == 'organization':
data = data[organization.id]
return Response(data)
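# Hedged usage note (illustrative only): following the apidocs scenario above,
# the documented query parameters would be supplied as a query string, e.g.
#
#   runner.request(
#       method='GET',
#       path='/organizations/%s/stats/?stat=received&resolution=10s'
#            % runner.org.slug,
#   )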
|
the-stack_106_14571
|
# Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import pytest
import numpy as np
from bingo.chromosomes.multiple_values import SinglePointCrossover, \
SinglePointMutation, MultipleValueChromosomeGenerator
from bingo.evolutionary_optimizers.fitness_predictor_island \
import FitnessPredictorIsland as FPI
from bingo.evolutionary_optimizers \
import fitness_predictor_island as fpi_module
from bingo.evolutionary_algorithms.mu_plus_lambda import MuPlusLambda
from bingo.selection.tournament import Tournament
from bingo.evaluation.evaluation import Evaluation
from bingo.evaluation.fitness_function import FitnessFunction
from bingo.stats.hall_of_fame import HallOfFame
MAIN_POPULATION_SIZE = 40
PREDICTOR_POPULATION_SIZE = 4
TRAINER_POPULATION_SIZE = 4
SUBSET_TRAINING_DATA_SIZE = 2
FULL_TRAINING_DATA_SIZE = 20
class DistanceToAverage(FitnessFunction):
def __call__(self, individual):
self.eval_count += 1
avg_data = np.mean(self.training_data)
return np.linalg.norm(individual.values - avg_data)
@pytest.fixture
def full_training_data():
return np.linspace(0.1, 1, FULL_TRAINING_DATA_SIZE)
@pytest.fixture
def ev_alg(full_training_data):
crossover = SinglePointCrossover()
mutation = SinglePointMutation(np.random.random)
selection = Tournament(2)
fitness = DistanceToAverage(full_training_data)
evaluator = Evaluation(fitness)
return MuPlusLambda(evaluator, selection, crossover, mutation,
0., 1.0, MAIN_POPULATION_SIZE)
@pytest.fixture
def generator():
return MultipleValueChromosomeGenerator(np.random.random, 10)
@pytest.fixture
def fitness_predictor_island(ev_alg, generator):
island = FPI(ev_alg, generator, MAIN_POPULATION_SIZE,
predictor_population_size=PREDICTOR_POPULATION_SIZE,
trainer_population_size=TRAINER_POPULATION_SIZE,
predictor_size_ratio=SUBSET_TRAINING_DATA_SIZE/FULL_TRAINING_DATA_SIZE,
predictor_computation_ratio=0.4,
trainer_update_frequency=4,
predictor_update_frequency=5)
island._predictor_island._ea.variation._mutation_probability = 1.0
return island
@pytest.fixture
def fp_island_and_hof(ev_alg, generator):
hof = HallOfFame(5)
fp_island = FPI(ev_alg, generator, MAIN_POPULATION_SIZE,
predictor_population_size=PREDICTOR_POPULATION_SIZE,
trainer_population_size=TRAINER_POPULATION_SIZE,
predictor_size_ratio=SUBSET_TRAINING_DATA_SIZE/FULL_TRAINING_DATA_SIZE,
predictor_computation_ratio=0.4,
trainer_update_frequency=4,
predictor_update_frequency=5,
hall_of_fame=hof)
fp_island._predictor_island._ea.variation._mutation_probability = 1.0
return fp_island, hof
def test_best_fitness_is_true_fitness(fitness_predictor_island,
full_training_data):
true_fitness_function = DistanceToAverage(full_training_data)
best_individual = fitness_predictor_island.get_best_individual()
best_fitness = fitness_predictor_island.get_best_fitness()
expected_best_fitness = true_fitness_function(best_individual)
assert best_fitness == expected_best_fitness
def test_predictor_compute_ratios(fitness_predictor_island):
# init
point_evals_predictor = FULL_TRAINING_DATA_SIZE*TRAINER_POPULATION_SIZE
point_evals_predictor += 2 * point_evals_per_predictor_step()
point_evals_main = 0
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main step
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += 2 * point_evals_per_main_step()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + predictor
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_predictor += point_evals_per_predictor_step()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + 2 predictor
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_predictor += 2 * point_evals_per_predictor_step()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + predictor + trainer update
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_predictor += point_evals_per_predictor_step()
point_evals_predictor += point_evals_per_trainer_update()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + predictor update
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_main += point_evals_per_predictor_update()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
def test_fitness_predictor_island_ages(fitness_predictor_island):
predictor_age = 1
main_age = 0
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
predictor_age += 1
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
predictor_age += 2
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
predictor_age += 1
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
def test_nan_on_predicted_variance_of_trainer(mocker,
fitness_predictor_island):
mocker.patch('bingo.evolutionary_optimizers.'
'fitness_predictor_island.np.var')
fpi_module.np.var.side_effect = OverflowError
island = fitness_predictor_island
trainer = island.population[0]
variance = island._calculate_predictor_variance_of(trainer)
assert np.isnan(variance)
def test_hof_gets_filled(fp_island_and_hof):
fp_island, hof = fp_island_and_hof
fp_island.evolve(1)
assert len(hof) == 5
def test_hof_has_true_fitness(fp_island_and_hof, full_training_data):
fp_island, hof = fp_island_and_hof
true_fitness_function = DistanceToAverage(full_training_data)
fp_island.evolve(1)
for indv in hof:
true_fitness = true_fitness_function(indv)
assert indv.fitness == pytest.approx(true_fitness)
def test_temp_hof_is_cleared_with_predictor_update(fp_island_and_hof, mocker):
fp_island, hof = fp_island_and_hof
mocker.spy(fp_island._hof_w_predicted_fitness, 'clear')
fp_island.evolve(9)
assert fp_island._hof_w_predicted_fitness.clear.call_count == 1
def assert_expected_compute_ratio(fitness_predictor_island, point_evals_main,
point_evals_predictor):
current_ratio = \
fitness_predictor_island._get_predictor_computation_ratio()
np.testing.assert_almost_equal(current_ratio,
point_evals_predictor /
(point_evals_predictor + point_evals_main))
def point_evals_per_predictor_step():
return SUBSET_TRAINING_DATA_SIZE * PREDICTOR_POPULATION_SIZE \
* TRAINER_POPULATION_SIZE
def point_evals_per_main_step():
return SUBSET_TRAINING_DATA_SIZE * MAIN_POPULATION_SIZE
def point_evals_per_trainer_update():
return SUBSET_TRAINING_DATA_SIZE * MAIN_POPULATION_SIZE * \
PREDICTOR_POPULATION_SIZE + FULL_TRAINING_DATA_SIZE + \
point_evals_per_predictor_step()
def point_evals_per_predictor_update():
return point_evals_per_main_step()
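# With the module-level constants above, these helpers evaluate to:
#   point_evals_per_predictor_step()   = 2 * 4 * 4            = 32
#   point_evals_per_main_step()        = 2 * 40               = 80
#   point_evals_per_trainer_update()   = 2 * 40 * 4 + 20 + 32 = 372
#   point_evals_per_predictor_update() = 80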
|
the-stack_106_14572
|
from django.http import HttpResponse
from django.test import RequestFactory
from django.test.utils import override_settings
from kitsune.sumo.tests import TestCase
from kitsune.wiki.decorators import check_simple_wiki_locale
rf = RequestFactory()
@override_settings(SIMPLE_WIKI_LANGUAGES=["es"])
class SimpleWikiDecoratorTests(TestCase):
def test_faq_locale_redirects(self):
@check_simple_wiki_locale
def temp(request):
return HttpResponse("OK")
req = rf.get("/es/products/firefox")
req.LANGUAGE_CODE = "es"
res = temp(req)
self.assertEqual(302, res.status_code)
self.assertEqual("/kb/frequently-asked-questions", res["location"])
def test_non_faq_locale_doesnt_redirect(self):
@check_simple_wiki_locale
def temp(request):
return HttpResponse("OK")
req = rf.get("/de/products/firefox")
req.LANGUAGE_CODE = "de"
res = temp(req)
self.assertEqual(200, res.status_code)
|
the-stack_106_14573
|
from src.full_node.full_node import FullNode
from typing import Callable, List, Optional, Dict
from aiohttp import web
from src.types.header import Header
from src.types.full_block import FullBlock
from src.util.ints import uint32, uint64, uint128
from src.types.sized_bytes import bytes32
from src.util.byte_types import hexstr_to_bytes
from src.consensus.pot_iterations import calculate_min_iters_from_iterations
from src.util.ws_message import create_payload
class FullNodeRpcApi:
def __init__(self, full_node: FullNode):
self.service = full_node
self.service_name = "chia_full_node"
self.cached_blockchain_state: Optional[Dict] = None
def get_routes(self) -> Dict[str, Callable]:
return {
"/get_blockchain_state": self.get_blockchain_state,
"/get_block": self.get_block,
"/get_header_by_height": self.get_header_by_height,
"/get_header": self.get_header,
"/get_unfinished_block_headers": self.get_unfinished_block_headers,
"/get_network_space": self.get_network_space,
"/get_unspent_coins": self.get_unspent_coins,
"/get_heaviest_block_seen": self.get_heaviest_block_seen,
}
async def _state_changed(self, change: str) -> List[str]:
payloads = []
if change == "block":
data = await self.get_latest_block_headers({})
assert data is not None
payloads.append(
create_payload(
"get_latest_block_headers", data, self.service_name, "wallet_ui"
)
)
data = await self.get_blockchain_state({})
assert data is not None
payloads.append(
create_payload(
"get_blockchain_state", data, self.service_name, "wallet_ui"
)
)
return payloads
return []
async def get_blockchain_state(self, request: Dict):
"""
Returns a summary of the node's view of the blockchain.
"""
tips: List[Header] = self.service.blockchain.get_current_tips()
lca: Header = self.service.blockchain.lca_block
sync_mode: bool = self.service.sync_store.get_sync_mode()
difficulty: uint64 = self.service.blockchain.get_next_difficulty(lca)
lca_block = await self.service.block_store.get_block(lca.header_hash)
if lca_block is None:
return None
min_iters: uint64 = self.service.blockchain.get_next_min_iters(lca_block)
ips: uint64 = min_iters // (
self.service.constants["BLOCK_TIME_TARGET"]
/ self.service.constants["MIN_ITERS_PROPORTION"]
)
tip_hashes = []
for tip in tips:
tip_hashes.append(tip.header_hash)
if sync_mode and self.service.sync_peers_handler is not None:
sync_tip_height = len(self.service.sync_store.get_potential_hashes())
sync_progress_height = self.service.sync_peers_handler.fully_validated_up_to
else:
sync_tip_height = 0
sync_progress_height = uint32(0)
if lca.height > 1:
newer_block_hex = lca.header_hash.hex()
older_block_hex = self.service.blockchain.height_to_hash[
max(1, lca.height - 100)
].hex()
space = await self.get_network_space(
{
"newer_block_header_hash": newer_block_hex,
"older_block_header_hash": older_block_hex,
}
)
else:
space = {"space": uint128(0)}
assert space is not None
response: Dict = {
"success": True,
"blockchain_state": {
"tips": tips,
"tip_hashes": tip_hashes,
"lca": lca,
"sync": {
"sync_mode": sync_mode,
"sync_tip_height": sync_tip_height,
"sync_progress_height": sync_progress_height,
},
"difficulty": difficulty,
"ips": ips,
"min_iters": min_iters,
"space": space["space"],
},
}
self.cached_blockchain_state = dict(response["blockchain_state"])
return response
async def get_block(self, request: Dict) -> Optional[Dict]:
if "header_hash" not in request:
return None
header_hash = hexstr_to_bytes(request["header_hash"])
block: Optional[FullBlock] = await self.service.block_store.get_block(
header_hash
)
if block is None:
return None
return {"success": True, "block": block}
async def get_header_by_height(self, request: Dict) -> Optional[Dict]:
if "height" not in request:
return None
height = request["height"]
header_height = uint32(int(height))
header_hash: Optional[bytes32] = self.service.blockchain.height_to_hash.get(
header_height, None
)
if header_hash is None:
return None
header: Header = self.service.blockchain.headers[header_hash]
return {"success": True, "header": header}
async def get_header(self, request: Dict):
if "header_hash" not in request:
return None
header_hash_str = request["header_hash"]
header_hash = hexstr_to_bytes(header_hash_str)
header: Optional[Header] = self.service.blockchain.headers.get(
header_hash, None
)
return {"success": True, "header": header}
async def get_unfinished_block_headers(self, request: Dict) -> Optional[Dict]:
if "height" not in request:
return None
height = request["height"]
response_headers: List[Header] = []
for block in (
await self.service.full_node_store.get_unfinished_blocks()
).values():
if block.height == height:
response_headers.append(block.header)
return {"success": True, "headers": response_headers}
async def get_latest_block_headers(self, request: Dict) -> Optional[Dict]:
headers: Dict[bytes32, Header] = {}
tips = self.service.blockchain.tips
lca_hash = self.service.blockchain.lca_block.header_hash
heights = []
seen_lca = False
for tip in tips:
current = tip
heights.append(current.height + 1)
headers[current.header_hash] = current
i = 0
while True:
# Returns blocks up to the LCA, and at least 10 blocks from the tip
if current.header_hash == lca_hash:
seen_lca = True
if seen_lca and i > 10:
break
if current.height == 0:
break
header: Optional[Header] = self.service.blockchain.headers.get(
current.prev_header_hash, None
)
assert header is not None
headers[header.header_hash] = header
current = header
i += 1
all_unfinished = {}
for h in heights:
unfinished_dict = await self.get_unfinished_block_headers({"height": h})
assert unfinished_dict is not None
for header in unfinished_dict["headers"]:
assert header is not None
all_unfinished[header.header_hash] = header
sorted_headers = [
v
for v in sorted(
headers.values(), key=lambda item: item.height, reverse=True
)
]
sorted_unfinished = [
v
for v in sorted(
all_unfinished.values(), key=lambda item: item.height, reverse=True
)
]
finished_with_meta = []
finished_header_hashes = set()
for header in sorted_headers:
header_hash = header.header_hash
header_dict = header.to_json_dict()
header_dict["data"]["header_hash"] = header_hash
header_dict["data"]["finished"] = True
finished_with_meta.append(header_dict)
finished_header_hashes.add(header_hash)
if self.cached_blockchain_state is None:
await self.get_blockchain_state({})
assert self.cached_blockchain_state is not None
ips = self.cached_blockchain_state["ips"]
unfinished_with_meta = []
for header in sorted_unfinished:
header_hash = header.header_hash
if header_hash in finished_header_hashes:
continue
header_dict = header.to_json_dict()
header_dict["data"]["header_hash"] = header_hash
header_dict["data"]["finished"] = False
prev_header = self.service.blockchain.headers.get(header.prev_header_hash)
if prev_header is not None:
iters = header.data.total_iters - prev_header.data.total_iters
time_add = int(iters / ips)
header_dict["data"]["finish_time"] = header.data.timestamp + time_add
unfinished_with_meta.append(header_dict)
unfinished_with_meta.extend(finished_with_meta)
return {"success": True, "latest_blocks": unfinished_with_meta}
async def get_total_miniters(self, newer_block, older_block) -> Optional[uint64]:
"""
Calculates the sum of min_iters over all blocks after older_block,
up to and including newer_block (older_block itself is not counted).
"""
older_block_parent = await self.service.block_store.get_block(
older_block.prev_header_hash
)
if older_block_parent is None:
return None
older_diff = older_block.weight - older_block_parent.weight
curr_mi = calculate_min_iters_from_iterations(
older_block.proof_of_space,
older_diff,
older_block.proof_of_time.number_of_iterations,
self.service.constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"],
)
# We do not count the min iters in the old block, since it's not included in the range
total_mi: uint64 = uint64(0)
for curr_h in range(older_block.height + 1, newer_block.height + 1):
if (
curr_h % self.service.constants["DIFFICULTY_EPOCH"]
) == self.service.constants["DIFFICULTY_DELAY"]:
curr_b_header_hash = self.service.blockchain.height_to_hash.get(
uint32(int(curr_h))
)
if curr_b_header_hash is None:
return None
curr_b_block = await self.service.block_store.get_block(
curr_b_header_hash
)
if curr_b_block is None or curr_b_block.proof_of_time is None:
return None
curr_parent = await self.service.block_store.get_block(
curr_b_block.prev_header_hash
)
if curr_parent is None:
return None
curr_diff = curr_b_block.weight - curr_parent.weight
curr_mi = calculate_min_iters_from_iterations(
curr_b_block.proof_of_space,
uint64(curr_diff),
curr_b_block.proof_of_time.number_of_iterations,
self.service.constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"],
)
if curr_mi is None:
raise web.HTTPBadRequest()
total_mi = uint64(total_mi + curr_mi)
return total_mi
async def get_network_space(self, request: Dict) -> Optional[Dict]:
"""
Retrieves an estimate of total space validating the chain
between two block header hashes.
"""
if (
"newer_block_header_hash" not in request
or "older_block_header_hash" not in request
):
return None
newer_block_hex = request["newer_block_header_hash"]
older_block_hex = request["older_block_header_hash"]
if newer_block_hex == older_block_hex:
return None
newer_block_bytes = hexstr_to_bytes(newer_block_hex)
older_block_bytes = hexstr_to_bytes(older_block_hex)
newer_block = await self.service.block_store.get_block(newer_block_bytes)
if newer_block is None:
raise web.HTTPNotFound()
older_block = await self.service.block_store.get_block(older_block_bytes)
if older_block is None:
raise web.HTTPNotFound()
delta_weight = newer_block.header.data.weight - older_block.header.data.weight
delta_iters = (
newer_block.header.data.total_iters - older_block.header.data.total_iters
)
total_min_iters = await self.get_total_miniters(newer_block, older_block)
if total_min_iters is None:
raise web.HTTPNotFound()
delta_iters -= total_min_iters
weight_div_iters = delta_weight / delta_iters
tips_adjustment_constant = 0.65
network_space_constant = 2 ** 32 # 2^32
eligible_plots_filter_mult = (
2 ** self.service.constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"]
)
network_space_bytes_estimate = (
weight_div_iters
* network_space_constant
* tips_adjustment_constant
* eligible_plots_filter_mult
)
return {"success": True, "space": uint128(int(network_space_bytes_estimate))}
async def get_unspent_coins(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the unspent coins for a given puzzlehash.
"""
if "puzzle_hash" not in request:
return None
puzzle_hash = hexstr_to_bytes(request["puzzle_hash"])
header_hash = request.get("header_hash", None)
if header_hash is not None:
header_hash = bytes32(hexstr_to_bytes(header_hash))
header = self.service.blockchain.headers.get(header_hash)
else:
header = None
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_puzzle_hash(
puzzle_hash, header
)
return {"success": True, "coin_records": coin_records}
async def get_heaviest_block_seen(self, request: Dict) -> Optional[Dict]:
tips: List[Header] = self.service.blockchain.get_current_tips()
tip_weights = [tip.weight for tip in tips]
i = tip_weights.index(max(tip_weights))
max_tip: Header = tips[i]
if self.service.sync_store.get_sync_mode():
potential_tips = self.service.sync_store.get_potential_tips_tuples()
for _, pot_block in potential_tips:
if pot_block.weight > max_tip.weight:
max_tip = pot_block.header
return {"success": True, "tip": max_tip}
|
the-stack_106_14574
|
import sys
import os
import pickle as pkl
from subprocess import Popen, PIPE
from multiprocessing import Pool, cpu_count
import argparse
from typing import List, Tuple, Dict, Iterator
from collections import namedtuple
from tqdm import tqdm
from decomp.semantics.predpatt import PredPattCorpus
from decomp.semantics.uds import UDSCorpus
sys.path.insert(0, "/home/hltcoe/estengel/miso/")
from miso.data.dataset_readers.decomp.decomp import DecompGraph
from miso.metrics.s_metric.s_metric import S, TEST1
from miso.metrics.s_metric.repr import Triple
from miso.metrics.s_metric import utils
from miso.commands.s_score import compute_args, ComputeTup
# Desired functionality: compute UD parses from text in parallel, concatenate them, create a PredPatt corpus, convert to arbor_graph
global_cmd = "./execute_java.sh {input_path} {output_path}"
input_dir = "/exp/estengel/miso/baselines/inputs"
output_dir = "/exp/estengel/miso/baselines/output"
def uds_worker(tup):
os.chdir("/home/hltcoe/estengel/miso/baseline")
lines, line_id = tup
line_id = str(line_id)
input_path = os.path.join(input_dir, line_id)
#output_path = os.path.join(output_dir, f"{line_id}.conllu")
output_path = output_dir
with open(input_path, "w") as f1:
for line in lines:
f1.write(line.strip() + "\n")
cmd_str = global_cmd.format(input_path = input_path, output_path = output_path)
p = Popen(cmd_str.split(), stdout = PIPE, stderr = PIPE)
out, errs = p.communicate()
with open(os.path.join(output_dir, "worker_outputs", f"{line_id}.err"), "w") as f1:
f1.write(errs.decode("utf8"))
with open(os.path.join(output_dir, "worker_outputs", f"{line_id}.out"), "w") as f1:
f1.write(out.decode("utf8"))
def get_uds_lines(lines, n_cpus):
chunksize = int(len(lines) / n_cpus) + 1
print('chunksize', chunksize)
# chunk lines
chunks = []
curr_chunk = []
for i, line in enumerate(lines):
curr_chunk.append(line)
if len(curr_chunk) == chunksize:
chunks.append(curr_chunk)
curr_chunk = []
# add last chunk
if curr_chunk != []:
chunks.append(curr_chunk)
chunks = [(chunk, i) for i, chunk in enumerate(chunks)]
print(f"Making {n_cpus} workers for {len(chunks)} chunks with {len(chunks[0][0])} lines per job")
pool = Pool(n_cpus)
res = pool.map(uds_worker, chunks)
file_content = []
for i in range(len(chunks)):
with open(os.path.join(output_dir, f"{i}.conllu")) as f1:
conllu_lines = f1.read()
file_content.append((i, conllu_lines))
# sort by chunk id so the output order matches the input order
file_content=sorted(file_content, key = lambda x: x[0])
#join sorted lines into one file
all_lines = "".join([x[1] for x in file_content])
return all_lines
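# Illustrative example of the chunking above: with 20 input lines and
# n_cpus = 8, chunksize = int(20 / 8) + 1 = 3, giving six full chunks of
# three lines plus a final chunk of two, i.e. seven worker jobs in total.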
def get_lines_and_graphs(split):
# circular, but we'll read this from the Decomp corpus
print(f"reading corpus from {split}")
corpus = UDSCorpus(split = split)
lines = []
graphs = {}
for i, (id, graph) in enumerate(corpus.items()):
# get true graph
true_arbor_graph = get_arbor_graph(graph)
# if no semantics in gold, skip everything
if true_arbor_graph is None:
continue
graphs[i] = true_arbor_graph
# get text for prediction
lines.append(graph.sentence)
return lines, graphs
def get_uds_corpus(uds_text, split):
corpus_path = os.path.join(output_dir, f"{split}.conllu")
corpus = UDSCorpus.from_conll(corpus = uds_text, name = split)
return corpus
def get_arbor_graph(pp_graph):
dg = DecompGraph(pp_graph)
__, __, arbor_graph = dg.get_list_node()
return arbor_graph
def compute_smetric(true_graphs: List[DecompGraph],
pred_graphs: List[DecompGraph],
args: namedtuple,
semantics_only: bool):
"""
compute s-score between lists of decomp graphs
"""
print(len(true_graphs), len(pred_graphs))
assert(len(true_graphs) == len(pred_graphs))
total_match_num, total_test_num, total_gold_num = 0, 0, 0
for g1, g2 in tqdm(zip(true_graphs, pred_graphs), total = len(true_graphs)):
instances1, relations1, attributes1 = DecompGraph.get_triples(g1, semantics_only)
instances1 = [Triple(x[1], x[0], x[2]) for x in instances1]
attributes1 = [Triple(x[1], x[0], x[2]) for x in attributes1]
relations1 = [Triple(x[1], x[0], x[2]) for x in relations1]
try:
instances2, relations2, attributes2 = DecompGraph.get_triples(g2, semantics_only)
except AttributeError:
# None predicted
instances2, relations2, attributes2 = [], [], []
instances2 = [Triple(x[1], x[0], x[2]) for x in instances2]
attributes2 = [Triple(x[1], x[0], x[2]) for x in attributes2]
relations2 = [Triple(x[1], x[0], x[2]) for x in relations2]
best_mapping, best_match_num, test_triple_num, gold_triple_num = S.get_best_match(
instances1, attributes1, relations1,
instances2, attributes2, relations2, args)
total_match_num += best_match_num
total_test_num += test_triple_num
total_gold_num += gold_triple_num
precision, recall, best_f_score = utils.compute_f(
total_match_num, total_test_num, total_gold_num)
return precision, recall, best_f_score
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-split", type = str, default = "dev")
parser.add_argument("--precomputed", action = "store_true")
parser.add_argument("--semantics-only", action = "store_true")
parser.add_argument("--drop-syntax", action = "store_true")
parser.add_argument("--nodes", default = 1, type = int)
parser.add_argument("--output-path", type=str)
args = parser.parse_args()
if not args.precomputed:
#n_cpus = cpu_count()
n_cpus = 8
lines, true_graphs = get_lines_and_graphs(args.input_split)
with open(os.path.join(output_dir, f"{args.input_split}.graphs"), "wb") as f1:
pkl.dump(true_graphs, f1)
uds = get_uds_lines(lines, n_cpus)
# pickle true graphs
with open(os.path.join(output_dir, f"{args.input_split}.conllu"), "w") as f1:
f1.write(uds)
else:
with open(os.path.join(output_dir, f"{args.input_split}.conllu")) as f1:
uds = f1.read()
with open(os.path.join(output_dir, f"{args.input_split}.graphs"), "rb") as f1:
true_graphs = pkl.load(f1)
corpus = get_uds_corpus(uds, args.input_split)
pred_graphs = {}
for i, e in enumerate(corpus):
pg = get_arbor_graph(corpus[e])
pred_graphs[i] = pg
true_graphs = [x[1] for x in sorted(true_graphs.items(), key = lambda x: x[0])]
pred_graphs = [x[1] for x in sorted(pred_graphs.items(), key = lambda x: x[0])]
if args.output_path is not None:
with open(args.output_path, "wb") as f1:
zipped = [x for x in zip(true_graphs, pred_graphs)]
pkl.dump(zipped, f1)
c_args = ComputeTup(**compute_args)
p, r, f1 = compute_smetric(true_graphs, pred_graphs, c_args, args.semantics_only)
print(f"precision: {p}, recall: {r}, F1: {f1}")
|
the-stack_106_14575
|
#!/usr/bin/env python3
import sys
import os
import argparse
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import math
def main():
parser = argparse.ArgumentParser(description="Evaluate predicted frequencies.")
parser.add_argument('predictions', type=str, nargs='+', help="prediction files")
parser.add_argument('--voc', dest='voc', type=str, required=True, help="comma-separated list of strains of interest")
parser.add_argument('-o', '--outdir', dest='outdir', required=True)
parser.add_argument('--suffix', dest='suffix', default="", help="add suffix to output figure names")
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
parser.add_argument('-m', dest='min_ab', default=0, type=float, help="minimal abundance (any samples with true abundance below this threshold are skipped; any predictions below this threshold are considered absent)")
parser.add_argument('--no_plots', action='store_true')
parser.add_argument('--output_format', dest='output_format', default='png', help="comma-separated list of desired output formats")
parser.add_argument('--font_size', dest='font_size', default=12, type=int, help="set font size for the plots")
parser.add_argument('--conts_in_meta', action='store_true', help="Enable if contaminants are present in metadata and kallisto index")
args = parser.parse_args()
false_pos_count = 0
false_neg_count = 0
true_pos_count = 0
true_neg_count = 0
err_list = []
variant_set = set()
voc_list = args.voc.split(',')
output_formats = args.output_format.split(',')
# read predictions
for filename in args.predictions:
dir_name = filename.split('/')[-2]
dataset = filename.split('/')[-4]
voc_name = dir_name.split('_')[0]
hcov = dataset.split('_')[-2]
voc_freq = float(dir_name.split('_')[-1].lstrip('ab'))
if voc_name not in voc_list:
continue
elif voc_freq < args.min_ab:
continue
variant_set.add(voc_name)
with open(filename, 'r') as f:
variant_found = False
err_tups = []
positives = []
for line in f:
if line[0] == "#":
continue
[variant, tpm, ab, corrected_ab] = line.rstrip('\n').split('\t')
if variant not in voc_list:
continue
ab = float(ab)
if (args.conts_in_meta):
ab = ab * (100 / voc_freq)
abs_err = abs(ab - 10)
if ab < args.min_ab:
continue
positives.append(variant)
if variant == voc_name:
variant_found = True
err_tups.append((voc_name, voc_freq, abs_err, ab, hcov))
else:
false_pos_count += 1
if args.verbose:
print("False positive: {} predicted at {}% in {}".format(
variant, ab, filename))
if variant_found:
true_pos_count += 1
if len(err_tups) == 1:
err_list.append(err_tups[0])
else:
voc_name = err_tups[0][0]
voc_freq = err_tups[0][1]
ab = sum([x[3] for x in err_tups])
abs_err = abs(ab - voc_freq)
err_list.append((voc_name, voc_freq, abs_err, ab, hcov))
else:
false_neg_count += 1
if args.verbose:
print("VOC not found in {}".format(filename))
# add zero estimate to error list?
# err_list.append((voc_name, voc_freq, voc_freq, 0))
for variant in voc_list:
if variant not in positives and variant != voc_name:
# true negative
true_neg_count += 1
_, f, e, _, h = zip(*err_list)
unique_err_vals = list(set(e))
unique_freq_vals = list(set(f))
unique_hvoc_vals = list(set(h))
unique_hvoc_vals.remove("Other")
unique_hvoc_vals.append("Other")
# compute averages
av_err_list = []
for hcov in unique_hvoc_vals:
for freq in unique_freq_vals:
f = list(filter(lambda x: x[4] == hcov and x[1] == freq, err_list))
_, _, err, ab, _ = zip(*f)
av_err = sum(err) / len(f)
av_ab = sum(ab) / len(f)
av_err_list.append((hcov, freq, av_err, av_ab))
for hcov in unique_hvoc_vals:
f = list(filter(lambda x: x[0] == hcov, av_err_list))
_, _, err, _ = zip(*f)
pct_err = map(lambda x: x/10*100, err)
av = sum(pct_err) / len(f)
print("Average error for {}: {}%".format(hcov, av))
# compute stats
average_rel_err = sum([x[2]/x[1]*100 for x in err_list]) / len(err_list)
average_rel_err_tp = (sum([x[2]/x[1]*100 for x in err_list if x[3] > 0])
/ len(err_list))
# print("average relative error: {}%".format(average_rel_err))
print("average relative error of true positives: {}%".format(
average_rel_err_tp))
print("total # true positives: {}".format(true_pos_count))
print("total # true negatives: {}".format(true_neg_count))
print("total # false positives: {}".format(false_pos_count))
print("total # false negatives: {}".format(false_neg_count))
fpr = save_dev(false_pos_count, (false_pos_count + true_neg_count))
fnr = save_dev(false_neg_count, (false_neg_count + true_pos_count))
recall = save_dev(true_pos_count, (true_pos_count + false_neg_count))
precision = save_dev(true_pos_count, (true_pos_count + false_pos_count))
print("FPR = {}".format(fpr))
print("FNR = {}".format(fnr))
print("Precision = {}".format(precision))
print("Recall = {}\n".format(recall)) # sensitivity
if args.no_plots:
sys.exit()
# sort error tuples by voc frequency
av_err_list.sort(key = lambda x : x[1])
# err_list.sort(key = lambda x : x[1])
# variant_list = sorted(list(variant_set))
# fix color per voc
# colormap = cm.get_cmap('Accent', len(variant_list))
# colors = {voc : colormap((i)/len(variant_list))
# for i, voc in enumerate(variant_list)}
colors = {hcov : cm.tab10((i))
for i, hcov in enumerate(unique_hvoc_vals)}
plt.rcParams.update({'font.size': args.font_size}) # increase font size
plt.figure()
for hcov in unique_hvoc_vals:
freq_values = [x[1] for x in av_err_list if x[0] == hcov] # and x[2] < 10]
err_values = [x[2]/10*100 for x in av_err_list if x[0] == hcov] # and x[2] < 10]
plt.plot(freq_values, err_values, label=hcov, color=colors[hcov])
if (freq_values[0] > min(unique_freq_vals)):
plt.plot(freq_values[0], err_values[0], marker="s", color=colors[hcov], markersize=6)
plt.legend()
plt.grid(which="both", alpha=0.2)
plt.ylim(-5, 105)
plt.xlabel("Total SARS-CoV-2 frequency (%)")
plt.ylabel("Relative prediction error (%)")
# plt.gcf().set_size_inches(4, 3)
plt.tight_layout()
for format in output_formats:
plt.savefig("{}/freq_error_plot{}.{}".format(args.outdir,
args.suffix,
format))
# also plot on log scale
plt.xscale('log')
plt.tight_layout()
for format in output_formats:
plt.savefig("{}/freq_error_plot_logscale{}.{}".format(args.outdir,
args.suffix,
format))
# plot true vs estimated frequencies on a scatterplot
plt.figure()
for hcov in unique_hvoc_vals:
freq_values = [x[1] for x in av_err_list if x[0] == hcov]
est_values = [x[3] for x in av_err_list if x[0] == hcov]
plt.scatter(freq_values, est_values, label=hcov, alpha=0.7,
color=colors[hcov], s=20)
plt.xscale('log')
plt.yscale('log')
plt.xlim(0.7, 150)
plt.ylim(0.7, 150)
# plt.plot([0, 100], [0, 100], 'k-', lw=0.75)
plt.hlines(10, 0, 150, 'k', lw=0.75)
plt.legend(prop={'size': args.font_size}) #ncol=len(variants_list),
plt.grid(which="both", alpha=0.2)
plt.xlabel("Total SARS-CoV-2 frequency (%)")
plt.ylabel("Estimated VoC frequency (%)")
# # Hide the right and top spines
# ax = plt.gca()
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
plt.tight_layout()
for format in output_formats:
plt.savefig("{}/freq_scatter_loglog{}.{}".format(args.outdir,
args.suffix,
format))
return
def save_dev(a: int, b: int):
    """Divide a by b, returning 0 when a is 0 (this also covers the 0/0 case)."""
    if a == 0:
        return 0
    else:
        return a / b
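# Illustrative sanity check for the metrics above (hypothetical counts, not
# taken from any real benchmark run):
#   TP=8, FP=2, TN=90, FN=0
#   FPR       = save_dev(2, 2 + 90) ~= 0.022
#   precision = save_dev(8, 8 + 2)   = 0.8
#   recall    = save_dev(8, 8 + 0)   = 1.0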
if __name__ == "__main__":
sys.exit(main())
|
the-stack_106_14576
|
import json
import os
import uuid
from collections import defaultdict
from random import randint
from unittest.mock import patch
from torchtext.data.datasets_utils import _ParseSQuADQAData
from torchtext.datasets.squad1 import SQuAD1
from torchtext.datasets.squad2 import SQuAD2
from ..common.case_utils import TempDirMixin, zip_equal, get_random_unicode
from ..common.parameterized_utils import nested_params
from ..common.torchtext_test_case import TorchtextTestCase
def _get_mock_json_data():
rand_string = get_random_unicode(10)
mock_json_data = {
"data": [
{
"title": rand_string,
"paragraphs": [
{
"context": rand_string,
"qas": [
{
"answers": [
{
"answer_start": randint(1, 1000),
"text": rand_string,
}
],
"question": rand_string,
"id": uuid.uuid1().hex,
},
],
}
],
}
]
}
return mock_json_data
def _get_mock_dataset(root_dir, base_dir_name):
    """Create mock SQuAD-style JSON files under root_dir/base_dir_name.

    root_dir: directory to the mocked dataset
    Returns the expected parsed samples, keyed by split ("train"/"dev").
    """
base_dir = os.path.join(root_dir, base_dir_name)
os.makedirs(base_dir, exist_ok=True)
if base_dir_name == SQuAD1.__name__:
file_names = ("train-v1.1.json", "dev-v1.1.json")
else:
file_names = ("train-v2.0.json", "dev-v2.0.json")
mocked_data = defaultdict(list)
for file_name in file_names:
txt_file = os.path.join(base_dir, file_name)
with open(txt_file, "w", encoding="utf-8") as f:
mock_json_data = _get_mock_json_data()
f.write(json.dumps(mock_json_data))
split = "train" if "train" in file_name else "dev"
dataset_line = next(
iter(_ParseSQuADQAData([("file_handle", mock_json_data)]))
)
mocked_data[split].append(dataset_line)
return mocked_data
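# Minimal usage sketch (mirrors how the tests below call this helper; each
# dataset_line has whatever shape _ParseSQuADQAData yields for one QA record):
#   mocked = _get_mock_dataset("/tmp/mock_root", SQuAD1.__name__)  # path is illustrative
#   train_samples, dev_samples = mocked["train"], mocked["dev"]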
class TestSQuADs(TempDirMixin, TorchtextTestCase):
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.root_dir = cls.get_base_temp_dir()
cls.patcher = patch(
"torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
)
cls.patcher.start()
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
super().tearDownClass()
@nested_params([SQuAD1, SQuAD2], ["train", "dev"])
def test_squads(self, squad_dataset, split):
expected_samples = _get_mock_dataset(self.root_dir, squad_dataset.__name__)[
split
]
dataset = squad_dataset(root=self.root_dir, split=split)
samples = list(dataset)
for sample, expected_sample in zip_equal(samples, expected_samples):
self.assertEqual(sample, expected_sample)
@nested_params([SQuAD1, SQuAD2], ["train", "dev"])
def test_squads_split_argument(self, squad_dataset, split):
# call `_get_mock_dataset` to create mock dataset files
_ = _get_mock_dataset(self.root_dir, squad_dataset.__name__)
dataset1 = squad_dataset(root=self.root_dir, split=split)
(dataset2,) = squad_dataset(root=self.root_dir, split=(split,))
for d1, d2 in zip_equal(dataset1, dataset2):
self.assertEqual(d1, d2)
|
the-stack_106_14578
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import datetime
import filecmp
import hashlib
import http.client
import os
import platform
import re
import shutil
import subprocess
import sys
import textwrap
import traceback
import urllib.error
import urllib.parse
import urllib.parse
import urllib.request
import xml.etree.ElementTree as ET
import zipfile
from collections import namedtuple
import scriptutil
# This tool expects to find /lucene off the base URL. You
# must have a working gpg, tar, unzip in your path. This has been
# tested on Linux and on Cygwin under Windows 7.
cygwin = platform.system().lower().startswith('cygwin')
cygwinWindowsRoot = os.popen('cygpath -w /').read().strip().replace('\\','/') if cygwin else ''
def unshortenURL(url):
parsed = urllib.parse.urlparse(url)
if parsed[0] in ('http', 'https'):
h = http.client.HTTPConnection(parsed.netloc)
h.request('HEAD', parsed.path)
response = h.getresponse()
if int(response.status/100) == 3 and response.getheader('Location'):
return response.getheader('Location')
return url
# TODO
# - make sure jars exist inside bin release
# - make sure docs exist
reHREF = re.compile('<a href="(.*?)">(.*?)</a>')
# Set to False to avoid re-downloading the packages...
FORCE_CLEAN = True
def getHREFs(urlString):
# Deref any redirects
while True:
url = urllib.parse.urlparse(urlString)
if url.scheme == "http":
h = http.client.HTTPConnection(url.netloc)
elif url.scheme == "https":
h = http.client.HTTPSConnection(url.netloc)
else:
raise RuntimeError("Unknown protocol: %s" % url.scheme)
h.request('HEAD', url.path)
r = h.getresponse()
newLoc = r.getheader('location')
if newLoc is not None:
urlString = newLoc
else:
break
links = []
try:
html = load(urlString)
except:
print('\nFAILED to open url %s' % urlString)
traceback.print_exc()
raise
for subUrl, text in reHREF.findall(html):
fullURL = urllib.parse.urljoin(urlString, subUrl)
links.append((text, fullURL))
return links
def load(urlString):
try:
content = urllib.request.urlopen(urlString).read().decode('utf-8')
except Exception as e:
print('Retrying download of url %s after exception: %s' % (urlString, e))
content = urllib.request.urlopen(urlString).read().decode('utf-8')
return content
def noJavaPackageClasses(desc, file):
with zipfile.ZipFile(file) as z2:
for name2 in z2.namelist():
if name2.endswith('.class') and (name2.startswith('java/') or name2.startswith('javax/')):
raise RuntimeError('%s contains sheisty class "%s"' % (desc, name2))
def decodeUTF8(bytes):
return codecs.getdecoder('UTF-8')(bytes)[0]
MANIFEST_FILE_NAME = 'META-INF/MANIFEST.MF'
NOTICE_FILE_NAME = 'META-INF/NOTICE.txt'
LICENSE_FILE_NAME = 'META-INF/LICENSE.txt'
def checkJARMetaData(desc, jarFile, gitRevision, version):
with zipfile.ZipFile(jarFile, 'r') as z:
for name in (MANIFEST_FILE_NAME, NOTICE_FILE_NAME, LICENSE_FILE_NAME):
try:
# The Python docs state a KeyError is raised ... so this None
# check is just defensive:
if z.getinfo(name) is None:
raise RuntimeError('%s is missing %s' % (desc, name))
except KeyError:
raise RuntimeError('%s is missing %s' % (desc, name))
s = decodeUTF8(z.read(MANIFEST_FILE_NAME))
for verify in (
'Specification-Vendor: The Apache Software Foundation',
'Implementation-Vendor: The Apache Software Foundation',
'Specification-Title: Lucene Search Engine:',
'Implementation-Title: org.apache.lucene',
'X-Compile-Source-JDK: 11',
'X-Compile-Target-JDK: 11',
'Specification-Version: %s' % version,
'X-Build-JDK: 11.',
'Extension-Name: org.apache.lucene'):
if type(verify) is not tuple:
verify = (verify,)
for x in verify:
if s.find(x) != -1:
break
else:
if len(verify) == 1:
raise RuntimeError('%s is missing "%s" inside its META-INF/MANIFEST.MF' % (desc, verify[0]))
else:
raise RuntimeError('%s is missing one of "%s" inside its META-INF/MANIFEST.MF' % (desc, verify))
if gitRevision != 'skip':
# Make sure this matches the version and git revision we think we are releasing:
match = re.search("Implementation-Version: (.+\r\n .+)", s, re.MULTILINE)
if match:
implLine = match.group(1).replace("\r\n ", "")
verifyRevision = '%s %s' % (version, gitRevision)
if implLine.find(verifyRevision) == -1:
raise RuntimeError('%s is missing "%s" inside its META-INF/MANIFEST.MF (wrong git revision?)' % \
(desc, verifyRevision))
else:
raise RuntimeError('%s is missing Implementation-Version inside its META-INF/MANIFEST.MF' % desc)
notice = decodeUTF8(z.read(NOTICE_FILE_NAME))
lucene_license = decodeUTF8(z.read(LICENSE_FILE_NAME))
if LUCENE_LICENSE is None:
raise RuntimeError('BUG in smokeTestRelease!')
if LUCENE_NOTICE is None:
raise RuntimeError('BUG in smokeTestRelease!')
if notice != LUCENE_NOTICE:
raise RuntimeError('%s: %s contents doesn\'t match main NOTICE.txt' % \
(desc, NOTICE_FILE_NAME))
if lucene_license != LUCENE_LICENSE:
raise RuntimeError('%s: %s contents doesn\'t match main LICENSE.txt' % \
(desc, LICENSE_FILE_NAME))
def normSlashes(path):
return path.replace(os.sep, '/')
def checkAllJARs(topDir, gitRevision, version):
print(' verify JAR metadata/identity/no javax.* or java.* classes...')
for root, dirs, files in os.walk(topDir):
normRoot = normSlashes(root)
for file in files:
if file.lower().endswith('.jar'):
if normRoot.endswith('/replicator/lib') and file.startswith('javax.servlet'):
continue
fullPath = '%s/%s' % (root, file)
noJavaPackageClasses('JAR file "%s"' % fullPath, fullPath)
if file.lower().find('lucene') != -1:
checkJARMetaData('JAR file "%s"' % fullPath, fullPath, gitRevision, version)
def checkSigs(urlString, version, tmpDir, isSigned, keysFile):
print(' test basics...')
ents = getDirEntries(urlString)
artifact = None
changesURL = None
mavenURL = None
artifactURL = None
expectedSigs = []
if isSigned:
expectedSigs.append('asc')
expectedSigs.extend(['sha512'])
sigs = []
artifacts = []
for text, subURL in ents:
if text == 'KEYS':
raise RuntimeError('lucene: release dir should not contain a KEYS file - only toplevel /dist/lucene/KEYS is used')
elif text == 'maven/':
mavenURL = subURL
elif text.startswith('changes'):
if text not in ('changes/', 'changes-%s/' % version):
raise RuntimeError('lucene: found %s vs expected changes-%s/' % (text, version))
changesURL = subURL
elif artifact is None:
artifact = text
artifactURL = subURL
expected = 'lucene-%s' % version
if not artifact.startswith(expected):
raise RuntimeError('lucene: unknown artifact %s: expected prefix %s' % (text, expected))
sigs = []
elif text.startswith(artifact + '.'):
sigs.append(text[len(artifact)+1:])
else:
if sigs != expectedSigs:
raise RuntimeError('lucene: artifact %s has wrong sigs: expected %s but got %s' % (artifact, expectedSigs, sigs))
artifacts.append((artifact, artifactURL))
artifact = text
artifactURL = subURL
sigs = []
if sigs != []:
artifacts.append((artifact, artifactURL))
if sigs != expectedSigs:
raise RuntimeError('lucene: artifact %s has wrong sigs: expected %s but got %s' % (artifact, expectedSigs, sigs))
expected = ['lucene-%s-src.tgz' % version,
'lucene-%s.tgz' % version]
actual = [x[0] for x in artifacts]
if expected != actual:
raise RuntimeError('lucene: wrong artifacts: expected %s but got %s' % (expected, actual))
# Set up clean gpg world; import keys file:
gpgHomeDir = '%s/lucene.gpg' % tmpDir
if os.path.exists(gpgHomeDir):
shutil.rmtree(gpgHomeDir)
os.makedirs(gpgHomeDir, 0o700)
run('gpg --homedir %s --import %s' % (gpgHomeDir, keysFile),
'%s/lucene.gpg.import.log' % tmpDir)
if mavenURL is None:
raise RuntimeError('lucene is missing maven')
if changesURL is None:
raise RuntimeError('lucene is missing changes-%s' % version)
testChanges(version, changesURL)
for artifact, urlString in artifacts:
print(' download %s...' % artifact)
scriptutil.download(artifact, urlString, tmpDir, force_clean=FORCE_CLEAN)
verifyDigests(artifact, urlString, tmpDir)
if isSigned:
print(' verify sig')
# Test sig (this is done with a clean brand-new GPG world)
scriptutil.download(artifact + '.asc', urlString + '.asc', tmpDir, force_clean=FORCE_CLEAN)
sigFile = '%s/%s.asc' % (tmpDir, artifact)
artifactFile = '%s/%s' % (tmpDir, artifact)
logFile = '%s/lucene.%s.gpg.verify.log' % (tmpDir, artifact)
run('gpg --homedir %s --display-charset utf-8 --verify %s %s' % (gpgHomeDir, sigFile, artifactFile),
logFile)
# Forward any GPG warnings, except the expected one (since it's a clean world)
with open(logFile) as f:
print("File: %s" % logFile)
for line in f.readlines():
if line.lower().find('warning') != -1 \
and line.find('WARNING: This key is not certified with a trusted signature') == -1:
print(' GPG: %s' % line.strip())
# Test trust (this is done with the real users config)
run('gpg --import %s' % (keysFile),
'%s/lucene.gpg.trust.import.log' % tmpDir)
print(' verify trust')
logFile = '%s/lucene.%s.gpg.trust.log' % (tmpDir, artifact)
run('gpg --display-charset utf-8 --verify %s %s' % (sigFile, artifactFile), logFile)
# Forward any GPG warnings:
with open(logFile) as f:
for line in f.readlines():
if line.lower().find('warning') != -1:
print(' GPG: %s' % line.strip())
def testChanges(version, changesURLString):
print(' check changes HTML...')
changesURL = None
for text, subURL in getDirEntries(changesURLString):
if text == 'Changes.html':
changesURL = subURL
if changesURL is None:
raise RuntimeError('did not see Changes.html link from %s' % changesURLString)
s = load(changesURL)
checkChangesContent(s, version, changesURL, True)
def testChangesText(dir, version):
"Checks all CHANGES.txt under this dir."
for root, dirs, files in os.walk(dir):
# NOTE: O(N) but N should be smallish:
if 'CHANGES.txt' in files:
fullPath = '%s/CHANGES.txt' % root
#print 'CHECK %s' % fullPath
checkChangesContent(open(fullPath, encoding='UTF-8').read(), version, fullPath, False)
reChangesSectionHREF = re.compile('<a id="(.*?)".*?>(.*?)</a>', re.IGNORECASE)
reUnderbarNotDashHTML = re.compile(r'<li>(\s*(LUCENE)_\d\d\d\d+)')
reUnderbarNotDashTXT = re.compile(r'\s+((LUCENE)_\d\d\d\d+)', re.MULTILINE)
def checkChangesContent(s, version, name, isHTML):
currentVersionTuple = versionToTuple(version, name)
if isHTML and s.find('Release %s' % version) == -1:
raise RuntimeError('did not see "Release %s" in %s' % (version, name))
if isHTML:
r = reUnderbarNotDashHTML
else:
r = reUnderbarNotDashTXT
m = r.search(s)
if m is not None:
raise RuntimeError('incorrect issue (_ instead of -) in %s: %s' % (name, m.group(1)))
if s.lower().find('not yet released') != -1:
raise RuntimeError('saw "not yet released" in %s' % name)
if not isHTML:
sub = 'Lucene %s' % version
if s.find(sub) == -1:
# benchmark never seems to include release info:
if name.find('/benchmark/') == -1:
raise RuntimeError('did not see "%s" in %s' % (sub, name))
if isHTML:
# Make sure that a section only appears once under each release,
# and that each release is not greater than the current version
seenIDs = set()
seenText = set()
release = None
for id, text in reChangesSectionHREF.findall(s):
if text.lower().startswith('release '):
release = text[8:].strip()
seenText.clear()
releaseTuple = versionToTuple(release, name)
if releaseTuple > currentVersionTuple:
raise RuntimeError('Future release %s is greater than %s in %s' % (release, version, name))
if id in seenIDs:
raise RuntimeError('%s has duplicate section "%s" under release "%s"' % (name, text, release))
seenIDs.add(id)
if text in seenText:
raise RuntimeError('%s has duplicate section "%s" under release "%s"' % (name, text, release))
seenText.add(text)
reVersion = re.compile(r'(\d+)\.(\d+)(?:\.(\d+))?\s*(-alpha|-beta|final|RC\d+)?\s*(?:\[.*\])?', re.IGNORECASE)
def versionToTuple(version, name):
versionMatch = reVersion.match(version)
if versionMatch is None:
raise RuntimeError('Version %s in %s cannot be parsed' % (version, name))
versionTuple = versionMatch.groups()
while versionTuple[-1] is None or versionTuple[-1] == '':
versionTuple = versionTuple[:-1]
if versionTuple[-1].lower() == '-alpha':
versionTuple = versionTuple[:-1] + ('0',)
elif versionTuple[-1].lower() == '-beta':
versionTuple = versionTuple[:-1] + ('1',)
elif versionTuple[-1].lower() == 'final':
versionTuple = versionTuple[:-2] + ('100',)
elif versionTuple[-1].lower()[:2] == 'rc':
versionTuple = versionTuple[:-2] + (versionTuple[-1][2:],)
return tuple(int(x) if x is not None and x.isnumeric() else x for x in versionTuple)
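# A few illustrative inputs for versionToTuple, derived from the regex above:
#   versionToTuple('9.1.0', 'CHANGES.txt')        -> (9, 1, 0)
#   versionToTuple('4.0.0-alpha', 'CHANGES.txt')  -> (4, 0, 0, 0)
#   versionToTuple('4.0.0-beta', 'CHANGES.txt')   -> (4, 0, 0, 1)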
reUnixPath = re.compile(r'\b[a-zA-Z_]+=(?:"(?:\\"|[^"])*"' + '|(?:\\\\.|[^"\'\\s])*' + r"|'(?:\\'|[^'])*')" \
+ r'|(/(?:\\.|[^"\'\s])*)' \
+ r'|("/(?:\\.|[^"])*")' \
+ r"|('/(?:\\.|[^'])*')")
def unix2win(matchobj):
if matchobj.group(1) is not None: return cygwinWindowsRoot + matchobj.group()
if matchobj.group(2) is not None: return '"%s%s' % (cygwinWindowsRoot, matchobj.group().lstrip('"'))
if matchobj.group(3) is not None: return "'%s%s" % (cygwinWindowsRoot, matchobj.group().lstrip("'"))
return matchobj.group()
def cygwinifyPaths(command):
# The problem: Native Windows applications running under Cygwin can't
# handle Cygwin's Unix-style paths. However, environment variable
# values are automatically converted, so only paths outside of
# environment variable values should be converted to Windows paths.
# Assumption: all paths will be absolute.
if '; gradlew ' in command: command = reUnixPath.sub(unix2win, command)
return command
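# Rough illustration of the substitution above, assuming cygwinWindowsRoot is
# e.g. 'C:/cygwin64': a bare absolute path such as /tmp/smoke/unpack inside a
# '; gradlew ' command becomes C:/cygwin64/tmp/smoke/unpack, while paths inside
# VAR="..." assignments are left alone for Cygwin's own environment conversion.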
def printFileContents(fileName):
# Assume log file was written in system's default encoding, but
# even if we are wrong, we replace errors ... the ASCII chars
# (which is what we mostly care about eg for the test seed) should
# still survive:
txt = codecs.open(fileName, 'r', encoding=sys.getdefaultencoding(), errors='replace').read()
# Encode to our output encoding (likely also system's default
# encoding):
bytes = txt.encode(sys.stdout.encoding, errors='replace')
# Decode back to string and print... we should hit no exception here
# since all errors have been replaced:
print(codecs.getdecoder(sys.stdout.encoding)(bytes)[0])
print()
def run(command, logFile):
if cygwin: command = cygwinifyPaths(command)
if os.system('%s > %s 2>&1' % (command, logFile)):
logPath = os.path.abspath(logFile)
print('\ncommand "%s" failed:' % command)
printFileContents(logFile)
raise RuntimeError('command "%s" failed; see log file %s' % (command, logPath))
def verifyDigests(artifact, urlString, tmpDir):
print(' verify sha512 digest')
sha512Expected, t = load(urlString + '.sha512').strip().split()
if t != '*'+artifact:
raise RuntimeError('SHA512 %s.sha512 lists artifact %s but expected *%s' % (urlString, t, artifact))
s512 = hashlib.sha512()
f = open('%s/%s' % (tmpDir, artifact), 'rb')
while True:
x = f.read(65536)
if len(x) == 0:
break
s512.update(x)
f.close()
sha512Actual = s512.hexdigest()
if sha512Actual != sha512Expected:
raise RuntimeError('SHA512 digest mismatch for %s: expected %s but got %s' % (artifact, sha512Expected, sha512Actual))
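# The .sha512 sidecar checked above is expected to hold a single line of the
# form "<hex digest> *<artifact>", for example (digest shortened):
#   3c5a...9f1e *lucene-9.0.0.tgz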
def getDirEntries(urlString):
if urlString.startswith('file:/') and not urlString.startswith('file://'):
# stupid bogus ant URI
urlString = "file:///" + urlString[6:]
if urlString.startswith('file://'):
path = urlString[7:]
if path.endswith('/'):
path = path[:-1]
if cygwin: # Convert Windows path to Cygwin path
path = re.sub(r'^/([A-Za-z]):/', r'/cygdrive/\1/', path)
l = []
for ent in os.listdir(path):
entPath = '%s/%s' % (path, ent)
if os.path.isdir(entPath):
entPath += '/'
ent += '/'
l.append((ent, 'file://%s' % entPath))
l.sort()
return l
else:
links = getHREFs(urlString)
for i, (text, subURL) in enumerate(links):
if text == 'Parent Directory' or text == '..':
return links[(i+1):]
def unpackAndVerify(java, tmpDir, artifact, gitRevision, version, testArgs):
destDir = '%s/unpack' % tmpDir
if os.path.exists(destDir):
shutil.rmtree(destDir)
os.makedirs(destDir)
os.chdir(destDir)
print(' unpack %s...' % artifact)
unpackLogFile = '%s/lucene-unpack-%s.log' % (tmpDir, artifact)
if artifact.endswith('.tar.gz') or artifact.endswith('.tgz'):
run('tar xzf %s/%s' % (tmpDir, artifact), unpackLogFile)
elif artifact.endswith('.zip'):
run('unzip %s/%s' % (tmpDir, artifact), unpackLogFile)
# make sure it unpacks to proper subdir
l = os.listdir(destDir)
expected = 'lucene-%s' % version
if l != [expected]:
raise RuntimeError('unpack produced entries %s; expected only %s' % (l, expected))
unpackPath = '%s/%s' % (destDir, expected)
verifyUnpacked(java, artifact, unpackPath, gitRevision, version, testArgs)
return unpackPath
LUCENE_NOTICE = None
LUCENE_LICENSE = None
def is_in_list(in_folder, files, indent=4):
for fileName in files:
print("%sChecking %s" % (" "*indent, fileName))
found = False
for f in [fileName, fileName + '.txt', fileName + '.md']:
if f in in_folder:
in_folder.remove(f)
found = True
if not found:
raise RuntimeError('file "%s" is missing' % fileName)
def verifyUnpacked(java, artifact, unpackPath, gitRevision, version, testArgs):
global LUCENE_NOTICE
global LUCENE_LICENSE
os.chdir(unpackPath)
isSrc = artifact.find('-src') != -1
# Check text files in release
print(" %s" % artifact)
in_root_folder = list(filter(lambda x: x[0] != '.', os.listdir(unpackPath)))
in_lucene_folder = []
if isSrc:
in_lucene_folder.extend(os.listdir(os.path.join(unpackPath, 'lucene')))
is_in_list(in_root_folder, ['LICENSE', 'NOTICE', 'README'])
is_in_list(in_lucene_folder, ['JRE_VERSION_MIGRATION', 'CHANGES', 'MIGRATE', 'SYSTEM_REQUIREMENTS'])
else:
is_in_list(in_root_folder, ['LICENSE', 'NOTICE', 'README', 'JRE_VERSION_MIGRATION', 'CHANGES',
'MIGRATE', 'SYSTEM_REQUIREMENTS'])
if LUCENE_NOTICE is None:
LUCENE_NOTICE = open('%s/NOTICE.txt' % unpackPath, encoding='UTF-8').read()
if LUCENE_LICENSE is None:
LUCENE_LICENSE = open('%s/LICENSE.txt' % unpackPath, encoding='UTF-8').read()
# if not isSrc:
# # TODO: we should add verifyModule/verifySubmodule (e.g. analysis) here and recurse through
# expectedJARs = ()
#
# for fileName in expectedJARs:
# fileName += '.jar'
# if fileName not in l:
# raise RuntimeError('lucene: file "%s" is missing from artifact %s' % (fileName, artifact))
# in_root_folder.remove(fileName)
expected_folders = ['analysis', 'backward-codecs', 'benchmark', 'classification', 'codecs', 'core',
'demo', 'expressions', 'facet', 'grouping', 'highlighter', 'join',
'luke', 'memory', 'misc', 'monitor', 'queries', 'queryparser', 'replicator',
'sandbox', 'spatial-extras', 'spatial3d', 'suggest', 'test-framework', 'licenses']
if isSrc:
expected_src_root_files = ['build.gradle', 'buildSrc', 'dev-docs', 'dev-tools', 'gradle', 'gradlew',
'gradlew.bat', 'help', 'lucene', 'settings.gradle', 'versions.lock', 'versions.props']
expected_src_lucene_files = ['build.gradle', 'documentation', 'distribution', 'dev-docs']
is_in_list(in_root_folder, expected_src_root_files)
is_in_list(in_lucene_folder, expected_folders)
is_in_list(in_lucene_folder, expected_src_lucene_files)
if len(in_lucene_folder) > 0:
raise RuntimeError('lucene: unexpected files/dirs in artifact %s lucene/ folder: %s' % (artifact, in_lucene_folder))
else:
is_in_list(in_root_folder, ['bin', 'docs', 'licenses', 'modules', 'modules-test-framework', 'modules-thirdparty'])
if len(in_root_folder) > 0:
raise RuntimeError('lucene: unexpected files/dirs in artifact %s: %s' % (artifact, in_root_folder))
if isSrc:
print(' make sure no JARs/WARs in src dist...')
lines = os.popen('find . -name \\*.jar').readlines()
if len(lines) != 0:
print(' FAILED:')
for line in lines:
print(' %s' % line.strip())
raise RuntimeError('source release has JARs...')
lines = os.popen('find . -name \\*.war').readlines()
if len(lines) != 0:
print(' FAILED:')
for line in lines:
print(' %s' % line.strip())
raise RuntimeError('source release has WARs...')
validateCmd = './gradlew --no-daemon check -p lucene/documentation'
print(' run "%s"' % validateCmd)
java.run_java11(validateCmd, '%s/validate.log' % unpackPath)
print(" run tests w/ Java 11 and testArgs='%s'..." % testArgs)
java.run_java11('./gradlew --no-daemon test %s' % testArgs, '%s/test.log' % unpackPath)
print(" compile jars w/ Java 11")
java.run_java11('./gradlew --no-daemon jar -Dversion.release=%s' % version, '%s/compile.log' % unpackPath)
testDemo(java.run_java11, isSrc, version, '11')
if java.run_java17:
print(" run tests w/ Java 17 and testArgs='%s'..." % testArgs)
java.run_java17('./gradlew --no-daemon test %s' % testArgs, '%s/test.log' % unpackPath)
print(" compile jars w/ Java 17")
java.run_java17('./gradlew --no-daemon jar -Dversion.release=%s' % version, '%s/compile.log' % unpackPath)
testDemo(java.run_java17, isSrc, version, '17')
print(' confirm all releases have coverage in TestBackwardsCompatibility')
confirmAllReleasesAreTestedForBackCompat(version, unpackPath)
else:
checkAllJARs(os.getcwd(), gitRevision, version)
testDemo(java.run_java11, isSrc, version, '11')
if java.run_java17:
testDemo(java.run_java17, isSrc, version, '17')
testChangesText('.', version)
def testDemo(run_java, isSrc, version, jdk):
if os.path.exists('index'):
shutil.rmtree('index') # nuke any index from any previous iteration
print(' test demo with %s...' % jdk)
sep = ';' if cygwin else ':'
if isSrc:
# For source release, use the classpath for each module.
classPath = ['lucene/core/build/libs/lucene-core-%s.jar' % version,
'lucene/demo/build/libs/lucene-demo-%s.jar' % version,
'lucene/analysis/common/build/libs/lucene-analyzers-common-%s.jar' % version,
'lucene/queryparser/build/libs/lucene-queryparser-%s.jar' % version]
cp = sep.join(classPath)
docsDir = 'lucene/core/src'
checkIndexCmd = 'java -ea -cp "%s" org.apache.lucene.index.CheckIndex index' % cp
indexFilesCmd = 'java -cp "%s" -Dsmoketester=true org.apache.lucene.demo.IndexFiles -index index -docs %s' % (cp, docsDir)
searchFilesCmd = 'java -cp "%s" org.apache.lucene.demo.SearchFiles -index index -query lucene' % cp
else:
# For binary release, set up classpath as modules.
cp = "--module-path modules"
docsDir = 'docs'
checkIndexCmd = 'java -ea %s --module lucene.core/org.apache.lucene.index.CheckIndex index' % cp
indexFilesCmd = 'java -Dsmoketester=true %s --module lucene.demo/org.apache.lucene.demo.IndexFiles -index index -docs %s' % (cp, docsDir)
searchFilesCmd = 'java %s --module lucene.demo/org.apache.lucene.demo.SearchFiles -index index -query lucene' % cp
run_java(indexFilesCmd, 'index.log')
run_java(searchFilesCmd, 'search.log')
  reMatchingDocs = re.compile(r'(\d+) total matching documents')
m = reMatchingDocs.search(open('search.log', encoding='UTF-8').read())
if m is None:
raise RuntimeError('lucene demo\'s SearchFiles found no results')
else:
numHits = int(m.group(1))
if numHits < 100:
raise RuntimeError('lucene demo\'s SearchFiles found too few results: %s' % numHits)
print(' got %d hits for query "lucene"' % numHits)
print(' checkindex with %s...' % jdk)
run_java(checkIndexCmd, 'checkindex.log')
s = open('checkindex.log').read()
m = re.search(r'^\s+version=(.*?)$', s, re.MULTILINE)
if m is None:
raise RuntimeError('unable to locate version=NNN output from CheckIndex; see checkindex.log')
actualVersion = m.group(1)
if removeTrailingZeros(actualVersion) != removeTrailingZeros(version):
raise RuntimeError('wrong version from CheckIndex: got "%s" but expected "%s"' % (actualVersion, version))
def removeTrailingZeros(version):
return re.sub(r'(\.0)*$', '', version)
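# removeTrailingZeros strips any run of trailing ".0" components, e.g.:
#   removeTrailingZeros('9.0.0') -> '9'
#   removeTrailingZeros('9.1.0') -> '9.1'
#   removeTrailingZeros('9.1.2') -> '9.1.2'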
def checkMaven(baseURL, tmpDir, gitRevision, version, isSigned, keysFile):
print(' download artifacts')
artifacts = []
artifactsURL = '%s/lucene/maven/org/apache/lucene/' % baseURL
targetDir = '%s/maven/org/apache/lucene' % tmpDir
if not os.path.exists(targetDir):
os.makedirs(targetDir)
crawl(artifacts, artifactsURL, targetDir)
print()
verifyPOMperBinaryArtifact(artifacts, version)
verifyMavenDigests(artifacts)
checkJavadocAndSourceArtifacts(artifacts, version)
verifyDeployedPOMsCoordinates(artifacts, version)
if isSigned:
verifyMavenSigs(tmpDir, artifacts, keysFile)
distFiles = getBinaryDistFiles(tmpDir, version, baseURL)
checkIdenticalMavenArtifacts(distFiles, artifacts, version)
checkAllJARs('%s/maven/org/apache/lucene' % tmpDir, gitRevision, version)
def getBinaryDistFiles(tmpDir, version, baseURL):
distribution = 'lucene-%s.tgz' % version
if not os.path.exists('%s/%s' % (tmpDir, distribution)):
distURL = '%s/lucene/%s' % (baseURL, distribution)
print(' download %s...' % distribution, end=' ')
scriptutil.download(distribution, distURL, tmpDir, force_clean=FORCE_CLEAN)
destDir = '%s/unpack-lucene-getBinaryDistFiles' % tmpDir
if os.path.exists(destDir):
shutil.rmtree(destDir)
os.makedirs(destDir)
os.chdir(destDir)
print(' unpack %s...' % distribution)
unpackLogFile = '%s/unpack-%s-getBinaryDistFiles.log' % (tmpDir, distribution)
run('tar xzf %s/%s' % (tmpDir, distribution), unpackLogFile)
distributionFiles = []
for root, dirs, files in os.walk(destDir):
distributionFiles.extend([os.path.join(root, file) for file in files])
return distributionFiles
def checkJavadocAndSourceArtifacts(artifacts, version):
print(' check for javadoc and sources artifacts...')
for artifact in artifacts:
if artifact.endswith(version + '.jar'):
javadocJar = artifact[:-4] + '-javadoc.jar'
if javadocJar not in artifacts:
raise RuntimeError('missing: %s' % javadocJar)
sourcesJar = artifact[:-4] + '-sources.jar'
if sourcesJar not in artifacts:
raise RuntimeError('missing: %s' % sourcesJar)
def getZipFileEntries(fileName):
entries = []
with zipfile.ZipFile(fileName) as zf:
for zi in zf.infolist():
entries.append(zi.filename)
# Sort by name:
entries.sort()
return entries
def checkIdenticalMavenArtifacts(distFiles, artifacts, version):
print(' verify that Maven artifacts are same as in the binary distribution...')
reJarWar = re.compile(r'%s\.[wj]ar$' % version) # exclude *-javadoc.jar and *-sources.jar
distFilenames = dict()
for file in distFiles:
baseName = os.path.basename(file)
distFilenames[baseName] = file
for artifact in artifacts:
if reJarWar.search(artifact):
artifactFilename = os.path.basename(artifact)
if artifactFilename not in distFilenames:
raise RuntimeError('Maven artifact %s is not present in lucene binary distribution' % artifact)
else:
identical = filecmp.cmp(artifact, distFilenames[artifactFilename], shallow=False)
if not identical:
raise RuntimeError('Maven artifact %s is not identical to %s in lucene binary distribution'
% (artifact, distFilenames[artifactFilename]))
def verifyMavenDigests(artifacts):
print(" verify Maven artifacts' md5/sha1 digests...")
reJarWarPom = re.compile(r'\.(?:[wj]ar|pom)$')
for artifactFile in [a for a in artifacts if reJarWarPom.search(a)]:
if artifactFile + '.md5' not in artifacts:
raise RuntimeError('missing: MD5 digest for %s' % artifactFile)
if artifactFile + '.sha1' not in artifacts:
raise RuntimeError('missing: SHA1 digest for %s' % artifactFile)
with open(artifactFile + '.md5', encoding='UTF-8') as md5File:
md5Expected = md5File.read().strip()
with open(artifactFile + '.sha1', encoding='UTF-8') as sha1File:
sha1Expected = sha1File.read().strip()
md5 = hashlib.md5()
sha1 = hashlib.sha1()
inputFile = open(artifactFile, 'rb')
while True:
bytes = inputFile.read(65536)
if len(bytes) == 0:
break
md5.update(bytes)
sha1.update(bytes)
inputFile.close()
md5Actual = md5.hexdigest()
sha1Actual = sha1.hexdigest()
if md5Actual != md5Expected:
raise RuntimeError('MD5 digest mismatch for %s: expected %s but got %s'
% (artifactFile, md5Expected, md5Actual))
if sha1Actual != sha1Expected:
raise RuntimeError('SHA1 digest mismatch for %s: expected %s but got %s'
% (artifactFile, sha1Expected, sha1Actual))
def getPOMcoordinate(treeRoot):
namespace = '{http://maven.apache.org/POM/4.0.0}'
groupId = treeRoot.find('%sgroupId' % namespace)
if groupId is None:
groupId = treeRoot.find('{0}parent/{0}groupId'.format(namespace))
groupId = groupId.text.strip()
artifactId = treeRoot.find('%sartifactId' % namespace).text.strip()
version = treeRoot.find('%sversion' % namespace)
if version is None:
version = treeRoot.find('{0}parent/{0}version'.format(namespace))
version = version.text.strip()
packaging = treeRoot.find('%spackaging' % namespace)
packaging = 'jar' if packaging is None else packaging.text.strip()
return groupId, artifactId, packaging, version
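# For a minimal POM such as the following (Maven namespace omitted here for
# readability), getPOMcoordinate returns ('org.apache.lucene', 'lucene-core',
# 'jar', '9.0.0'); groupId and version fall back to the <parent> element when
# they are not set directly on the project:
#   <project>
#     <parent><groupId>org.apache.lucene</groupId><version>9.0.0</version></parent>
#     <artifactId>lucene-core</artifactId>
#   </project>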
def verifyMavenSigs(tmpDir, artifacts, keysFile):
print(' verify maven artifact sigs', end=' ')
# Set up clean gpg world; import keys file:
gpgHomeDir = '%s/lucene.gpg' % tmpDir
if os.path.exists(gpgHomeDir):
shutil.rmtree(gpgHomeDir)
os.makedirs(gpgHomeDir, 0o700)
run('gpg --homedir %s --import %s' % (gpgHomeDir, keysFile),
'%s/lucene.gpg.import.log' % tmpDir)
reArtifacts = re.compile(r'\.(?:pom|[jw]ar)$')
for artifactFile in [a for a in artifacts if reArtifacts.search(a)]:
artifact = os.path.basename(artifactFile)
sigFile = '%s.asc' % artifactFile
# Test sig (this is done with a clean brand-new GPG world)
logFile = '%s/lucene.%s.gpg.verify.log' % (tmpDir, artifact)
run('gpg --display-charset utf-8 --homedir %s --verify %s %s' % (gpgHomeDir, sigFile, artifactFile),
logFile)
# Forward any GPG warnings, except the expected one (since it's a clean world)
print_warnings_in_file(logFile)
# Test trust (this is done with the real users config)
run('gpg --import %s' % keysFile,
'%s/lucene.gpg.trust.import.log' % tmpDir)
logFile = '%s/lucene.%s.gpg.trust.log' % (tmpDir, artifact)
run('gpg --display-charset utf-8 --verify %s %s' % (sigFile, artifactFile), logFile)
# Forward any GPG warnings:
print_warnings_in_file(logFile)
sys.stdout.write('.')
print()
def print_warnings_in_file(file):
with open(file) as f:
for line in f.readlines():
if line.lower().find('warning') != -1 \
and line.find('WARNING: This key is not certified with a trusted signature') == -1 \
and line.find('WARNING: using insecure memory') == -1:
print(' GPG: %s' % line.strip())
def verifyPOMperBinaryArtifact(artifacts, version):
print(' verify that each binary artifact has a deployed POM...')
reBinaryJarWar = re.compile(r'%s\.[jw]ar$' % re.escape(version))
for artifact in [a for a in artifacts if reBinaryJarWar.search(a)]:
POM = artifact[:-4] + '.pom'
if POM not in artifacts:
raise RuntimeError('missing: POM for %s' % artifact)
def verifyDeployedPOMsCoordinates(artifacts, version):
"""
verify that each POM's coordinate (drawn from its content) matches
its filepath, and verify that the corresponding artifact exists.
"""
print(" verify deployed POMs' coordinates...")
for POM in [a for a in artifacts if a.endswith('.pom')]:
treeRoot = ET.parse(POM).getroot()
groupId, artifactId, packaging, POMversion = getPOMcoordinate(treeRoot)
POMpath = '%s/%s/%s/%s-%s.pom' \
% (groupId.replace('.', '/'), artifactId, version, artifactId, version)
if not POM.endswith(POMpath):
raise RuntimeError("Mismatch between POM coordinate %s:%s:%s and filepath: %s"
% (groupId, artifactId, POMversion, POM))
# Verify that the corresponding artifact exists
artifact = POM[:-3] + packaging
if artifact not in artifacts:
raise RuntimeError('Missing corresponding .%s artifact for POM %s' % (packaging, POM))
def crawl(downloadedFiles, urlString, targetDir, exclusions=set()):
for text, subURL in getDirEntries(urlString):
if text not in exclusions:
path = os.path.join(targetDir, text)
if text.endswith('/'):
if not os.path.exists(path):
os.makedirs(path)
crawl(downloadedFiles, subURL, path, exclusions)
else:
if not os.path.exists(path) or FORCE_CLEAN:
scriptutil.download(text, subURL, targetDir, quiet=True, force_clean=FORCE_CLEAN)
downloadedFiles.append(path)
sys.stdout.write('.')
def make_java_config(parser, java17_home):
def _make_runner(java_home, version):
print('Java %s JAVA_HOME=%s' % (version, java_home))
if cygwin:
java_home = subprocess.check_output('cygpath -u "%s"' % java_home, shell=True).decode('utf-8').strip()
cmd_prefix = 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % \
(java_home, java_home, java_home)
s = subprocess.check_output('%s; java -version' % cmd_prefix,
shell=True, stderr=subprocess.STDOUT).decode('utf-8')
if s.find(' version "%s' % version) == -1:
parser.error('got wrong version for java %s:\n%s' % (version, s))
def run_java(cmd, logfile):
run('%s; %s' % (cmd_prefix, cmd), logfile)
return run_java
java11_home = os.environ.get('JAVA_HOME')
if java11_home is None:
parser.error('JAVA_HOME must be set')
run_java11 = _make_runner(java11_home, '11')
run_java17 = None
if java17_home is not None:
run_java17 = _make_runner(java17_home, '17')
jc = namedtuple('JavaConfig', 'run_java11 java11_home run_java17 java17_home')
return jc(run_java11, java11_home, run_java17, java17_home)
version_re = re.compile(r'(\d+\.\d+\.\d+(-ALPHA|-BETA)?)')
revision_re = re.compile(r'rev-([a-f\d]+)')
def parse_config():
epilogue = textwrap.dedent('''
Example usage:
python3 -u dev-tools/scripts/smokeTestRelease.py https://dist.apache.org/repos/dist/dev/lucene/lucene-9.0.0-RC1-rev-c7510a0...
''')
description = 'Utility to test a release.'
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--tmp-dir', metavar='PATH',
help='Temporary directory to test inside, defaults to /tmp/smoke_lucene_$version_$revision')
parser.add_argument('--not-signed', dest='is_signed', action='store_false', default=True,
help='Indicates the release is not signed')
parser.add_argument('--local-keys', metavar='PATH',
help='Uses local KEYS file instead of fetching from https://archive.apache.org/dist/lucene/KEYS')
parser.add_argument('--revision',
help='GIT revision number that release was built with, defaults to that in URL')
parser.add_argument('--version', metavar='X.Y.Z(-ALPHA|-BETA)?',
help='Version of the release, defaults to that in URL')
parser.add_argument('--test-java17', metavar='java17_home',
help='Path to Java17 home directory, to run tests with if specified')
parser.add_argument('--download-only', action='store_true', default=False,
help='Only perform download and sha hash check steps')
parser.add_argument('url', help='Url pointing to release to test')
parser.add_argument('test_args', nargs=argparse.REMAINDER,
help='Arguments to pass to gradle for testing, e.g. -Dwhat=ever.')
c = parser.parse_args()
if c.version is not None:
if not version_re.match(c.version):
parser.error('version "%s" does not match format X.Y.Z[-ALPHA|-BETA]' % c.version)
else:
version_match = version_re.search(c.url)
if version_match is None:
parser.error('Could not find version in URL')
c.version = version_match.group(1)
if c.revision is None:
revision_match = revision_re.search(c.url)
if revision_match is None:
parser.error('Could not find revision in URL')
c.revision = revision_match.group(1)
print('Revision: %s' % c.revision)
if c.local_keys is not None and not os.path.exists(c.local_keys):
parser.error('Local KEYS file "%s" not found' % c.local_keys)
c.java = make_java_config(parser, c.test_java17)
if c.tmp_dir:
c.tmp_dir = os.path.abspath(c.tmp_dir)
else:
tmp = '/tmp/smoke_lucene_%s_%s' % (c.version, c.revision)
c.tmp_dir = tmp
i = 1
while os.path.exists(c.tmp_dir):
c.tmp_dir = tmp + '_%d' % i
i += 1
return c
reVersion1 = re.compile(r'\>(\d+)\.(\d+)\.(\d+)(-alpha|-beta)?/\<', re.IGNORECASE)
reVersion2 = re.compile(r'-(\d+)\.(\d+)\.(\d+)(-alpha|-beta)?\.', re.IGNORECASE)
def getAllLuceneReleases():
s = load('https://archive.apache.org/dist/lucene/java')
releases = set()
for r in reVersion1, reVersion2:
for tup in r.findall(s):
if tup[-1].lower() == '-alpha':
tup = tup[:3] + ('0',)
elif tup[-1].lower() == '-beta':
tup = tup[:3] + ('1',)
elif tup[-1] == '':
tup = tup[:3]
else:
raise RuntimeError('failed to parse version: %s' % tup[-1])
releases.add(tuple(int(x) for x in tup))
l = list(releases)
l.sort()
return l
def confirmAllReleasesAreTestedForBackCompat(smokeVersion, unpackPath):
print(' find all past Lucene releases...')
allReleases = getAllLuceneReleases()
#for tup in allReleases:
# print(' %s' % '.'.join(str(x) for x in tup))
testedIndices = set()
os.chdir(unpackPath)
print(' run TestBackwardsCompatibility..')
command = './gradlew --no-daemon test -p lucene/backward-codecs --tests TestBackwardsCompatibility --max-workers=1 ' \
'-Dtests.verbose=true '
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
if p.returncode != 0:
# Not good: the test failed!
raise RuntimeError('%s failed:\n%s' % (command, stdout))
stdout = stdout.decode('utf-8',errors='replace').replace('\r\n','\n')
if stderr is not None:
# Should not happen since we redirected stderr to stdout:
raise RuntimeError('stderr non-empty')
reIndexName = re.compile(r'TEST: index[\s*=\s*](.*?)(-cfs|-nocfs)$', re.MULTILINE)
for name, cfsPart in reIndexName.findall(stdout):
# Fragile: decode the inconsistent naming schemes we've used in TestBWC's indices:
#print('parse name %s' % name)
tup = tuple(name.split('.'))
if len(tup) == 3:
# ok
tup = tuple(int(x) for x in tup)
elif tup == ('4', '0', '0', '1'):
# CONFUSING: this is the 4.0.0-alpha index??
tup = 4, 0, 0, 0
elif tup == ('4', '0', '0', '2'):
# CONFUSING: this is the 4.0.0-beta index??
tup = 4, 0, 0, 1
elif name == '5x-with-4x-segments':
# Mixed version test case; ignore it for our purposes because we only
# tally up the "tests single Lucene version" indices
continue
elif name == '5.0.0.singlesegment':
tup = 5, 0, 0
else:
raise RuntimeError('could not parse version %s' % name)
testedIndices.add(tup)
l = list(testedIndices)
l.sort()
if False:
for release in l:
print(' %s' % '.'.join(str(x) for x in release))
allReleases = set(allReleases)
for x in testedIndices:
if x not in allReleases:
# Curious: we test 1.9.0 index but it's not in the releases (I think it was pulled because of nasty bug?)
if x != (1, 9, 0):
raise RuntimeError('tested version=%s but it was not released?' % '.'.join(str(y) for y in x))
notTested = []
for x in allReleases:
if x not in testedIndices:
releaseVersion = '.'.join(str(y) for y in x)
if releaseVersion in ('1.4.3', '1.9.1', '2.3.1', '2.3.2'):
# Exempt the dark ages indices
continue
if x >= tuple(int(y) for y in smokeVersion.split('.')):
# Exempt versions not less than the one being smoke tested
print(' Backcompat testing not required for release %s because it\'s not less than %s'
% (releaseVersion, smokeVersion))
continue
notTested.append(x)
if len(notTested) > 0:
notTested.sort()
print('Releases that don\'t seem to be tested:')
failed = True
for x in notTested:
print(' %s' % '.'.join(str(y) for y in x))
raise RuntimeError('some releases are not tested by TestBackwardsCompatibility?')
else:
print(' success!')
def main():
c = parse_config()
# Pick <major>.<minor> part of version and require script to be from same branch
scriptVersion = re.search(r'((\d+).(\d+)).(\d+)', scriptutil.find_current_version()).group(1).strip()
if not c.version.startswith(scriptVersion + '.'):
raise RuntimeError('smokeTestRelease.py for %s.X is incompatible with a %s release.' % (scriptVersion, c.version))
print('NOTE: output encoding is %s' % sys.stdout.encoding)
smokeTest(c.java, c.url, c.revision, c.version, c.tmp_dir, c.is_signed, c.local_keys, ' '.join(c.test_args),
downloadOnly=c.download_only)
def smokeTest(java, baseURL, gitRevision, version, tmpDir, isSigned, local_keys, testArgs, downloadOnly=False):
startTime = datetime.datetime.now()
# disable flakey tests for smoke-tester runs:
testArgs = '-Dtests.badapples=false %s' % testArgs
# Tests annotated @Nightly are more resource-intensive but often cover
# important code paths. They're disabled by default to preserve a good
# developer experience, but we enable them for smoke tests where we want good
# coverage.
  testArgs = '-Dtests.nightly=true %s' % testArgs
if FORCE_CLEAN:
if os.path.exists(tmpDir):
raise RuntimeError('temp dir %s exists; please remove first' % tmpDir)
if not os.path.exists(tmpDir):
os.makedirs(tmpDir)
lucenePath = None
print()
print('Load release URL "%s"...' % baseURL)
newBaseURL = unshortenURL(baseURL)
if newBaseURL != baseURL:
print(' unshortened: %s' % newBaseURL)
baseURL = newBaseURL
for text, subURL in getDirEntries(baseURL):
if text.lower().find('lucene') != -1:
lucenePath = subURL
if lucenePath is None:
raise RuntimeError('could not find lucene subdir')
print()
print('Get KEYS...')
if local_keys is not None:
print(" Using local KEYS file %s" % local_keys)
keysFile = local_keys
else:
keysFileURL = "https://archive.apache.org/dist/lucene/KEYS"
print(" Downloading online KEYS file %s" % keysFileURL)
scriptutil.download('KEYS', keysFileURL, tmpDir, force_clean=FORCE_CLEAN)
keysFile = '%s/KEYS' % (tmpDir)
print()
print('Test Lucene...')
checkSigs(lucenePath, version, tmpDir, isSigned, keysFile)
if not downloadOnly:
unpackAndVerify(java, tmpDir, 'lucene-%s.tgz' % version, gitRevision, version, testArgs)
unpackAndVerify(java, tmpDir, 'lucene-%s-src.tgz' % version, gitRevision, version, testArgs)
print()
print('Test Maven artifacts...')
checkMaven(baseURL, tmpDir, gitRevision, version, isSigned, keysFile)
else:
print("\nLucene test done (--download-only specified)")
print('\nSUCCESS! [%s]\n' % (datetime.datetime.now() - startTime))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Keyboard interrupt...exiting')
|
the-stack_106_14580
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" A Line Visual that uses the new shader Function.
"""
from __future__ import division
import numpy as np
#from .visual import Visual
#from ..shader.function import Function, Variable
#from ...import gloo
from vispy.scene.visuals.visual import Visual
from vispy.scene.shaders import ModularProgram, Function, Variable, Varying
from vispy.scene.transforms import STTransform
from vispy import gloo
## Snippet templates (defined as string to force user to create fresh Function)
# Consider these stored in a central location in vispy ...
vertex_template = """
void main() {
}
"""
fragment_template = """
void main() {
}
"""
dash_template = """
float dash(float distance) {
float mod = distance / $dash_len;
mod = mod - int(mod);
return 0.5 * sin(mod*3.141593*2.0) + 0.5;
}
"""
stub4 = Function("vec4 stub4(vec4 value) { return value; }")
stub3 = Function("vec3 stub3(vec3 value) { return value; }")
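# Note on the dash_template snippet above: dash() maps the interpolated
# distance (wired up in DashedLine below as v_distance = gl_Position.x) onto a
# 0..1 sine wave with period $dash_len, so the fragment alpha fades in and out
# along the line, producing the dashed look.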
## Actual code
class Line(Visual):
def __init__(self, parent=None, data=None, color=None):
Visual.__init__(self, parent)
# Create a program
self._program = ModularProgram(vertex_template, fragment_template)
# Define how we are going to specify position and color
self._program.vert['gl_Position'] = '$transform(vec4($position, 1.0))'
self._program.frag['gl_FragColor'] = 'vec4($color, 1.0)'
# Set position data
assert data is not None
vbo = gloo.VertexBuffer(data)
self._program.vert['position'] = vbo
self._program.vert['transform'] = self.transform.shader_map()
# Create some variables related to color. We use a combination
# of these depending on the kind of color being set.
# We predefine them here so that we can re-use VBO and uniforms
vbo = gloo.VertexBuffer(data)
self._color_var = Variable('uniform vec3 color')
self._colors_var = Variable('attribute vec3 color', vbo)
self._color_varying = Varying('v_color')
self.set_color((0, 0, 1))
if color is not None:
self.set_color(color)
@property
def transform(self):
return Visual.transform.fget(self)
# todo: this should probably be handled by base visual class..
@transform.setter
def transform(self, tr):
self._program.vert['transform'] = tr.shader_map()
Visual.transform.fset(self, tr)
def set_data(self, data):
""" Set the vertex data for this line.
"""
vbo = self._program.vert['position'].value
vbo.set_data(data)
def set_color(self, color):
""" Set the color for this line. Color can be specified for the
whole line or per vertex.
When the color is changed from single color to array, the shaders
need to be recompiled, otherwise we only need to reset the
uniform / attribute.
"""
if isinstance(color, tuple):
# Single value (via a uniform)
color = [float(v) for v in color]
assert len(color) == 3
self._color_var.value = color
self._program.frag['color'] = self._color_var
elif isinstance(color, np.ndarray):
# A value per vertex, via a VBO
assert color.shape[1] == 3
self._colors_var.value.set_data(color)
self._program.frag['color'] = self._color_varying
self._program.vert[self._color_varying] = self._colors_var
else:
raise ValueError('Line colors must be Nx3 array or color tuple')
def draw(self, event=None):
gloo.set_state(blend=True, blend_func=('src_alpha', 'one'))
# Draw
self._program.draw('line_strip')
class DashedLine(Line):
""" This takes the Line and modifies the composition of Functions
to create a dashing effect.
"""
def __init__(self, *args, **kwargs):
Line.__init__(self, *args, **kwargs)
dasher = Function(dash_template)
distance = Varying('v_distance', dtype='float')
self._program.frag['gl_FragColor.a'] = dasher(distance)
dasher['dash_len'] = Variable('const float dash_len 0.001')
self._program.vert[distance] = 'gl_Position.x'
## Show the visual
if __name__ == '__main__':
from vispy import app
# vertex positions of data to draw
N = 200
pos = np.zeros((N, 3), dtype=np.float32)
pos[:, 0] = np.linspace(-0.9, 0.9, N)
pos[:, 1] = np.random.normal(size=N, scale=0.2).astype(np.float32)
# color array
color = np.ones((N, 3), dtype=np.float32)
color[:, 0] = np.linspace(0, 1, N)
color[:, 1] = color[::-1, 0]
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, close_keys='escape')
self.line1 = Line(None, pos, (3, 9, 0))
self.line2 = DashedLine(None, pos, color)
self.line2.transform = STTransform(scale=(0.5, 0.5),
translate=(0.4, 0.4))
def on_draw(self, ev):
gloo.clear((0, 0, 0, 1), True)
gloo.set_viewport(0, 0, *self.size)
self.line1.draw()
self.line2.draw()
c = Canvas()
c.show()
timer = app.Timer()
timer.start(0.016)
th = 0.0
@timer.connect
def on_timer(event):
global th
th += 0.01
pos = (np.cos(th) * 0.2 + 0.4, np.sin(th) * 0.2 + 0.4)
c.line2.transform.translate = pos
c.update()
app.run()
|
the-stack_106_14582
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from html.parser import HTMLParser # py3
except ImportError:
from HTMLParser import HTMLParser # py2
from django.forms import widgets
from django.utils.translation import ungettext_lazy, ugettext_lazy as _
from django.utils.text import Truncator
from django.utils.html import format_html
from django.forms.models import ModelForm
from django.forms.fields import IntegerField
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.fields import GlossaryField
from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer
from cmsplugin_cascade.widgets import NumberInputWidget
from .plugin_base import BootstrapPluginBase
from .panel import panel_heading_sizes, PanelTypeWidget
class AccordionForm(ManageChildrenFormMixin, ModelForm):
num_children = IntegerField(min_value=1, initial=1,
widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em !important;'}),
label=_("Panels"),
help_text=_("Number of panels for this panel group."))
class BootstrapAccordionPlugin(TransparentWrapper, BootstrapPluginBase):
name = _("Accordion")
form = AccordionForm
default_css_class = 'panel-group'
require_parent = True
parent_classes = ('BootstrapColumnPlugin',)
direct_child_classes = ('BootstrapAccordionPanelPlugin',)
allow_children = True
render_template = 'cascade/bootstrap3/{}/accordion.html'
fields = ('num_children', 'glossary',)
close_others = GlossaryField(
widgets.CheckboxInput(),
label=_("Close others"),
initial=True,
help_text=_("Open only one panel at a time.")
)
first_is_open = GlossaryField(
widgets.CheckboxInput(),
label=_("First panel open"),
initial=True,
help_text=_("Start with the first panel open.")
)
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapAccordionPlugin, cls).get_identifier(obj)
num_cols = obj.get_num_children()
content = ungettext_lazy('with {0} panel', 'with {0} panels', num_cols).format(num_cols)
return format_html('{0}{1}', identifier, content)
def save_model(self, request, obj, form, change):
wanted_children = int(form.cleaned_data.get('num_children'))
super(BootstrapAccordionPlugin, self).save_model(request, obj, form, change)
self.extend_children(obj, wanted_children, BootstrapAccordionPanelPlugin)
plugin_pool.register_plugin(BootstrapAccordionPlugin)
class BootstrapAccordionPanelPlugin(TransparentContainer, BootstrapPluginBase):
name = _("Accordion Panel")
default_css_class = 'panel-body'
direct_parent_classes = parent_classes = ('BootstrapAccordionPlugin',)
require_parent = True
alien_child_classes = True
render_template = 'cascade/bootstrap3/{}/accordion-panel.html'
glossary_field_order = ('panel_type', 'heading_size', 'panel_title')
panel_type = GlossaryField(
PanelTypeWidget.get_instance(),
label=_("Panel type"),
help_text=_("Display Panel using this style.")
)
heading_size = GlossaryField(
widgets.Select(choices=panel_heading_sizes),
initial='',
label=_("Heading Size")
)
panel_title = GlossaryField(
widgets.TextInput(attrs={'size': 80}),
label=_("Panel Title")
)
class Media:
css = {'all': ('cascade/css/admin/bootstrap.min.css', 'cascade/css/admin/bootstrap-theme.min.css',)}
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapAccordionPanelPlugin, cls).get_identifier(obj)
panel_title = HTMLParser().unescape(obj.glossary.get('panel_title', ''))
panel_title = Truncator(panel_title).words(3, truncate=' ...')
return format_html('{0}{1}', identifier, panel_title)
plugin_pool.register_plugin(BootstrapAccordionPanelPlugin)
|
the-stack_106_14584
|
print('==' * 20)
print(f'{"Goal Scoring Report":^40}')
print('==' * 20)
jogador = dict()
jogador['Nome'] = str(input('Player name: ')).strip().title()
partidas = int(input(f'How many matches did {jogador["Nome"]} play? '))
gols = list()
jogador['Total'] = 0
for x in range(partidas):
    x = int(input(f'How many goals in match {x + 1}: '))
gols.append(x)
jogador['Total'] += x
jogador['Gols'] = gols
print('==' * 20)
for k, v in jogador.items():
print(f'{k}: {v}')
if k == 'Gols':
for p, v in enumerate(jogador['Gols']):
            print(f'In match {p+1}: {v} goal(s)')
|
the-stack_106_14585
|
#!/usr/bin/env python
"""Compute diagnostic report from binary input."""
from operator import itemgetter, ge, lt
import fileinput
import sys
def bit_criteria(report, index, cmp):
return 1 if cmp(sum(map(itemgetter(index), report)), len(report)/2) else 0
def generate_rating(report, rating="oxygen"):
if rating == "oxygen":
cmp = ge
elif rating == "co2":
cmp = lt
else:
sys.exit(1)
    # iterate over bit positions (the width of one report entry), not over the
    # number of report lines
    for index in range(len(report[0])):
bit = bit_criteria(report, index, cmp)
report = list(filter(lambda x: x[index] == bit, report))
if len(report) == 1:
break
return int(''.join(map(str, report[0])),2)
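# Quick illustrative check with the small example report from the puzzle text
# (00100, 11110, 10110, 10111, 10101, 01111, 00111, 11100, 10000, 11001, 00010,
# 01010): the oxygen generator rating comes out to 23 and the CO2 scrubber
# rating to 10, so the product printed below would be 230.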
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: day3_diagnostic_part2.py <input file>")
sys.exit(-1)
report = [list(map(int, line.strip())) for line in fileinput.input(files=sys.argv[1])]
oxygen_generator_reading = generate_rating(report, rating="oxygen")
co2_scrubber_rating = generate_rating(report, rating="co2")
print(oxygen_generator_reading*co2_scrubber_rating)
|
the-stack_106_14587
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for saving/loading training configs."""
import os
import tensorflow.compat.v1 as tf
import yaml
def save_config(config, logdir=None):
"""Save a new configuration by name.
If a logging directory is specified, it will be created and the configuration
will be stored there. Otherwise, a log message will be printed.
Args:
config: Configuration object.
logdir: Location for writing summaries and checkpoints if specified.
Returns:
Configuration object.
"""
if logdir:
# with config.unlocked:
config.logdir = logdir
message = 'Start a new run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
tf.gfile.MakeDirs(config.logdir)
config_path = os.path.join(config.logdir, 'config.yaml')
with tf.gfile.GFile(config_path, 'w') as file_:
yaml.dump(config, file_, default_flow_style=False)
else:
message = (
'Start a new run without storing summaries and checkpoints since no '
'logging directory was specified.')
tf.logging.info(message)
return config
def load_config(logdir):
"""Load a configuration from the log directory.
Args:
logdir: The logging directory containing the configuration file.
Raises:
IOError: The logging directory does not contain a configuration file.
Returns:
Configuration object.
"""
config_path = logdir and os.path.join(logdir, 'config.yaml')
if not config_path or not tf.gfile.Exists(config_path):
message = (
'Cannot resume an existing run since the logging directory does not '
'contain a configuration file.')
raise IOError(message)
with tf.gfile.GFile(config_path, 'r') as file_:
config = yaml.load(file_, Loader=yaml.Loader)  # full loader: the config was dumped as an arbitrary Python object
print('Resume run and write summaries and checkpoints to {}.'.format(
config.logdir))
return config
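# Minimal usage sketch (assumption: `config` is a PyYAML-serializable object and
# the log directory below is made up for illustration):
#
#   config = save_config(config, logdir='/tmp/my_run')  # writes /tmp/my_run/config.yaml
#   config = load_config('/tmp/my_run')                  # later restores the same object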
|
the-stack_106_14589
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import itertools
import os
import sys
import compas_rhino
import compas._os
import compas.plugins
__all__ = [
'install',
'installable_rhino_packages',
'after_rhino_install',
]
def install(version=None, packages=None, clean=False):
"""Install COMPAS for Rhino.
Parameters
----------
version : {'5.0', '6.0', '7.0', '8.0'}, optional
The version number of Rhino.
Default is ``'6.0'``.
packages : list of str, optional
List of packages to install or None to use default package list.
Default is the result of ``installable_rhino_packages``,
which collects all installable packages in the current environment.
clean : bool, optional
If ``True``, this will clean up the entire scripts folder and remove
also existing symlinks that are not importable in the current environment.
Examples
--------
.. code-block:: python
import compas_rhino.install
compas_rhino.install.install('6.0')
.. code-block:: bash
python -m compas_rhino.install -v 6.0
"""
version = compas_rhino._check_rhino_version(version)
# We install COMPAS packages in the scripts folder
# instead of directly as IPy module.
scripts_path = compas_rhino._get_scripts_path(version)
# This is for old installs
ipylib_path = compas_rhino._get_ironpython_lib_path(version)
# Filter the provided list of packages
# If no packages are provided
# this first collects all installable packages from the environment.
packages = _filter_installable_packages(version, packages)
results = []
symlinks_to_install = []
symlinks_to_uninstall = []
exit_code = 0
# check all installable packages
# add the packages that can't be imported from the current env to the list of symlinks to uninstall
# and remove the package name from the list of installable packages
# make a copy of the list to avoid problems with removing items
# note: perhaps this should already happen in the filter function...
for name in packages[:]:
try:
importlib.import_module(name)
except ImportError:
path = os.path.join(scripts_path, name)
symlinks_to_uninstall.append(dict(name=name, link=path))
packages.remove(name)
# Also remove all broken symlinks from the scripts folder
# because ... they're broken!
# If it is an actual folder or a file, leave it alone
# because probably someone put it there on purpose.
for name in os.listdir(scripts_path):
path = os.path.join(scripts_path, name)
if os.path.islink(path):
if not os.path.exists(path):
symlinks_to_uninstall.append(dict(name=name, link=path))
try:
importlib.import_module(name)
except ImportError:
pass
else:
if name not in packages:
packages.append(name)
# If the scripts folder is supposed to be cleaned
# also remove all existing symlinks that cannot be imported
# and reinstall symlinks that can be imported
if clean:
for name in os.listdir(scripts_path):
path = os.path.join(scripts_path, name)
if os.path.islink(path):
if os.path.exists(path):
try:
importlib.import_module(name)
except ImportError:
path = os.path.join(scripts_path, name)
symlinks_to_uninstall.append(dict(name=name, link=path))
else:
if name not in packages:
packages.append(name)
# add all of the packages in the list of installable packages
# to the list of symlinks to uninstall
# and to the list of symlinks to install
for package in packages:
symlink_path = os.path.join(scripts_path, package)
symlinks_to_uninstall.append(dict(name=package, link=symlink_path))
package_path = compas_rhino._get_package_path(importlib.import_module(package))
symlinks_to_install.append(dict(name=package, source_path=package_path, link=symlink_path))
# Handle legacy install location
# This does not always work,
# and especially not in cases where it is not necessary :)
if ipylib_path:
legacy_path = os.path.join(ipylib_path, package)
if os.path.exists(legacy_path):
symlinks_to_uninstall.append(dict(name=package, link=legacy_path))
# -------------------------
# Uninstall first
# -------------------------
symlinks = [link['link'] for link in symlinks_to_uninstall]
uninstall_results = compas._os.remove_symlinks(symlinks)
# Let the user know if some symlinks could not be removed.
for uninstall_data, success in zip(symlinks_to_uninstall, uninstall_results):
if not success:
results.append((uninstall_data['name'], 'ERROR: Cannot remove symlink, try to run as administrator.'))
# Handle legacy bootstrapper
# Again, only if possible...
if ipylib_path:
if not compas_rhino._try_remove_bootstrapper(ipylib_path):
results.append(('compas_bootstrapper', 'ERROR: Cannot remove legacy compas_bootstrapper, try to run as administrator.'))
# -------------------------
# Ready to start installing
# -------------------------
# create new symlinks and register the results
symlinks = [(link['source_path'], link['link']) for link in symlinks_to_install]
install_results = compas._os.create_symlinks(symlinks)
# set the exit code based on the installation results
if not all(install_results):
exit_code = -1
# make a list of installed packages
# based on the installation results
# and update the general results list
installed_packages = []
for install_data, success in zip(symlinks_to_install, install_results):
if success:
installed_packages.append(install_data['name'])
result = 'OK'
else:
result = 'ERROR: Cannot create symlink, try to run as administrator.'
results.append((install_data['name'], result))
# finalize the general results list with info about the bootstrapper
if exit_code == -1:
results.append(('compas_bootstrapper', 'WARNING: One or more packages failed, will not install bootstrapper, try uninstalling first'))
else:
try:
_update_bootstrapper(scripts_path, packages)
results.append(('compas_bootstrapper', 'OK'))
except: # noqa: E722
results.append(('compas_bootstrapper', 'ERROR: Could not create compas_bootstrapper to auto-determine Python environment'))
# output the outcome of the installation process
# perhaps we should print more info here
print('Installing COMPAS packages to Rhino {0} scripts folder:'.format(version))
print('{}\n'.format(scripts_path))
for package, status in results:
print(' {} {}'.format(package.ljust(20), status))
if status != 'OK':
exit_code = -1
if exit_code == 0 and len(installed_packages):
print('\nRunning post-installation steps...\n')
if not _run_post_execution_steps(after_rhino_install(installed_packages)):
exit_code = -1
print('\nInstall completed.')
if exit_code != 0:
sys.exit(exit_code)
def _run_post_execution_steps(steps_generator):
all_steps_succeeded = True
post_execution_errors = []
for result in steps_generator:
if isinstance(result, Exception):
post_execution_errors.append(result)
continue
for item in result:
try:
package, message, success = item
status = 'OK' if success else 'ERROR'
if not success:
all_steps_succeeded = False
print(' {} {}: {}'.format(package.ljust(20), status, message))
except ValueError:
post_execution_errors.append(ValueError('Step ran without errors but result is wrongly formatted: {}'.format(str(item))))
if post_execution_errors:
print('\nOne or more errors occurred:\n')
for error in post_execution_errors:
print(' - {}'.format(repr(error)))
all_steps_succeeded = False
return all_steps_succeeded
@compas.plugins.plugin(category='install', pluggable_name='installable_rhino_packages', tryfirst=True)
def default_installable_rhino_packages():
# While this list could obviously be hard-coded, I think
# eating our own dogfood and using plugins to define this, just like
# any other extension/plugin would, is a better way to ensure consistent behavior.
return ['compas', 'compas_rhino']
@compas.plugins.pluggable(category='install', selector='collect_all')
def installable_rhino_packages():
"""Provide a list of packages to make available inside Rhino.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to automatically
have their packages made available inside Rhino when
COMPAS is installed into it.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def installable_rhino_packages():
... return ['compas_fab']
Returns
-------
:obj:`list` of :obj:`str`
List of package names to make available inside Rhino.
"""
pass
@compas.plugins.pluggable(category='install', selector='collect_all')
def after_rhino_install(installed_packages):
"""Allows extensions to execute actions after install to Rhino is done.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to perform
additional steps after an installation to Rhino has
been completed.
Parameters
----------
installed_packages : :obj:`list` of :obj:`str`
List of packages that have been installed successfully.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def after_rhino_install(installed_packages):
... # Do something after package is installed to Rhino, eg, copy components, etc
... return [('compas_ghpython', 'GH Components installed', True)]
Returns
-------
:obj:`list` of 3-tuple (str, str, bool)
List containing a 3-tuple with component name, message and ``True``/``False`` success flag.
"""
pass
def _update_bootstrapper(install_path, packages):
# Take either the CONDA environment directory or the current Python executable's directory
python_directory = os.environ.get('CONDA_PREFIX', None) or os.path.dirname(sys.executable)
environment_name = os.environ.get('CONDA_DEFAULT_ENV', '')
conda_exe = os.environ.get('CONDA_EXE', '')
compas_bootstrapper = compas_rhino._get_bootstrapper_path(install_path)
bootstrapper_data = compas_rhino._get_bootstrapper_data(compas_bootstrapper)
installed_packages = bootstrapper_data.get('INSTALLED_PACKAGES', [])
installed_packages = list(set(installed_packages + list(packages)))
with open(compas_bootstrapper, 'w') as f:
f.write('ENVIRONMENT_NAME = r"{}"\n'.format(environment_name))
f.write('PYTHON_DIRECTORY = r"{}"\n'.format(python_directory))
f.write('CONDA_EXE = r"{}"\n'.format(conda_exe))
f.write('INSTALLED_PACKAGES = {}'.format(repr(installed_packages)))
def _filter_installable_packages(version, packages):
ghpython_incompatible = False
if compas.OSX and version == 5.0:
ghpython_incompatible = True
if not packages:
# Flatten list of results (resulting from collect_all pluggable)
packages = sorted(set(itertools.chain.from_iterable(installable_rhino_packages())))
elif 'compas_ghpython' in packages and ghpython_incompatible:
print('Skipping installation of compas_ghpython since it\'s not supported for Rhino 5 for Mac')
if ghpython_incompatible:
packages.remove('compas_ghpython')
return packages
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--version',
choices=compas_rhino.SUPPORTED_VERSIONS,
default=compas_rhino.DEFAULT_VERSION,
help="The version of Rhino to install the packages in."
)
parser.add_argument('-p', '--packages', nargs='+', help="The packages to install.")
parser.add_argument('--clean', dest='clean', default=False, action='store_true')
args = parser.parse_args()
install(version=args.version, packages=args.packages, clean=args.clean)
|
the-stack_106_14591
|
from cryptography.fernet import Fernet as fernet
import string
# The Code from the Cryptography branch will be deleted; it is only here for inspiration while making our own function.
# The Code for Fernet is encoded in UTF-8 and as such we should try to create an encoding function first.
def write_key():
# This is a key to our encrypted item
key = fernet.generate_key()
with open('key.key', 'wb') as key_file:
key_file.write(key)
def load_key():
# This loads the key for encryption from the key.key directory
return open('key.key', 'rb').read()
write_key() # this creates the folder for our encryption key
key = load_key() # loads the key for our encrypted function
f = fernet(key)
message = input('Paste data to encrypt.').encode() # this encodes our message in UTF-8.
encrypted = f.encrypt(message) # this encrypts the message to be decrypted later.
decrypted = f.decrypt(encrypted) # this decrypts whatever the input is.
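# Quick sanity check of the round trip above (the token and plaintext are printed
# only for demonstration; in real use you would persist just `encrypted`).
print('Encrypted token:', encrypted.decode())
print('Decrypted text :', decrypted.decode())
assert decrypted == message, 'Fernet round trip failed'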
|
the-stack_106_14592
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobPatchOptions(Model):
"""
Additional parameters for the Job_Patch operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id identifier in the response.
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. If not specified, this
header will be automatically populated with the current system clock
time.
:type ocp_date: datetime
:param if_match: An ETag is specified. Specify this header to perform the
operation only if the resource's ETag is an exact match as specified.
:type if_match: str
:param if_none_match: An ETag is specified. Specify this header to
perform the operation only if the resource's ETag does not match the
specified ETag.
:type if_none_match: str
:param if_modified_since: Specify this header to perform the operation
only if the resource has been modified since the specified date/time.
:type if_modified_since: datetime
:param if_unmodified_since: Specify this header to perform the operation
only if the resource has not been modified since the specified date/time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=None, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
|
the-stack_106_14594
|
from django.conf.urls import include, url
from publisher.views import catalog
from publisher.views import my_publication
from publisher.views import publication
urlpatterns = [
# Publications(s)
url(r'^publish$', catalog.catalog_page),
url(r'^publication/(\d+)$', publication.publication_page),
url(r'^publication/(\d+)/peer_review_modal$', publication.peer_review_modal),
url(r'^publication/(\d+)/save_peer_review$', publication.save_peer_review),
url(r'^publication/(\d+)/delete_peer_review$', publication.delete_peer_review),
# My Publications
url(r'^my_publications$', my_publication.my_publications_page),
url(r'^refresh_publications_table$', my_publication.refresh_publications_table),
url(r'^my_publication_modal$', my_publication.my_publication_modal),
url(r'^save_publication$', my_publication.save_publication),
url(r'^delete_publication$', my_publication.delete_publication),
]
|
the-stack_106_14598
|
# import necessary requirements
import time
import numpy as np
from constants import STARTING_POSITION_OF_AGENT, INF, PROBABILITY_OF_GRID, NUM_ROWS, NUM_COLS, NUM_ITERATIONS, \
ACCURACY_TO_ACHIEVE
from helpers.agent8 import set_random_target, examine_and_propagate_probability, calculate_global_threshold
from helpers.helper import generate_grid_with_probability_p, compute_explored_cells_from_path, \
length_of_path_from_source_to_goal
from src.Agent8 import Agent8 # make changes in Agent8 src
agent = Agent8() # Initialize Agent8
def find_the_target():
"""
Function to find the target
:return: results containing the final sum of each cell's probability of containing the target, the number of examinations, and the number of movements
"""
global p # contains sum of probabilities of each cell
agent.reset() # reset all the attributes of Agent8
target_found = False # used to check whether the target has been found or not
agent_num = 7 # indicates which type of prob to use
result = list() # list that contains final results
# list containing global threshold of executions for different terrain types.
# global_threshold_for_examinations = calculate_global_threshold(ACCURACY_TO_ACHIEVE)
print("Global Threshold", agent.global_threshold)
# print("Global Threshold:", global_threshold_for_examinations)
# loop for find a "reachable" target
while True:
random_maze = generate_grid_with_probability_p(PROBABILITY_OF_GRID) # generate gridworld with full information
target_pos = set_random_target() # setting the target randomly
# loop till target isn't blocked
while random_maze[target_pos[0]][target_pos[1]] != 4:
target_pos = set_random_target() # setting the target randomly
# check if target is reachable by finding the path length to it from the start
if length_of_path_from_source_to_goal(random_maze, STARTING_POSITION_OF_AGENT, target_pos) != INF:
break
# reachable target is set, so now reset all the variables of the Agent8
agent.reset()
# print("Main Here")
print("Global Threshold", agent.global_threshold)
# loop target_found is FALSE
while not target_found:
agent.pre_planning(agent_num) # set the agent.current_estimated_goal attribute
# find path from agent.current_position to agent.current_estimates_goal using A* and set parents dict()
agent.planning(agent.current_estimated_goal)
# loop till the path given by Astar contains agent.current_estimated_goal
while agent.current_estimated_goal not in agent.parents:
# not in path so make it blocked and examine it and propagate the probability to all the cells accordingly.
agent.maze[agent.current_estimated_goal[0]][agent.current_estimated_goal[1]].is_blocked = True
examine_and_propagate_probability(agent.maze, agent.current_position, target_pos,
agent.current_estimated_goal, agent.current_estimated_goal)
agent.pre_planning(agent_num) # set new agent.current_estimated_target
# find path from agent.current_position to new agent.current_estimates_goal using A*
agent.planning(agent.current_estimated_goal)
# out of loop means a "reachable" agent.current_estimated_goal is found
# execute the agent's plan and examine the top k% cells by probability.
target_found = agent.execution(random_maze, target_pos)
p = 0.0
# compute final sum of probabilities
for row in range(NUM_ROWS):
for col in range(NUM_COLS):
p += agent.maze[row][col].probability_of_containing_target
print("Total Probability:", p)
movements = compute_explored_cells_from_path(agent.final_paths)
result.append(p)
result.append(agent.num_examinations)
result.append(movements)
return result
if __name__ == "__main__":
results = find_the_target()
print("Sum of Probability:", results[0])
print("Total examinations:", results[1])
print("Total movements:", results[2])
print("Total cost:", (results[1]+results[2]))
|
the-stack_106_14599
|
import os
import platform
import socket
import subprocess
import time
from .client import Client
from .exceptions import ProxyServerError
class RemoteServer(object):
def __init__(self, host, port):
"""
Initialises a RemoteServer object
:param host: The host of the proxy server.
:param port: The port of the proxy server.
"""
self.host = host
self.port = port
@property
def url(self):
"""
Gets the url that the proxy is running on. This is not the URL clients
should connect to.
"""
return "http://%s:%d" % (self.host, self.port)
def create_proxy(self, params=None):
"""
Gets a client class that allow to set all the proxy details that you
may need to.
:param dict params: Dictionary where you can specify params
like httpProxy and httpsProxy
"""
params = params if params is not None else {}
client = Client(self.url[7:], params)
return client
def _is_listening(self):
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect((self.host, self.port))
socket_.close()
return True
except socket.error:
return False
class Server(RemoteServer):
def __init__(self, path='browsermob-proxy', options=None):
"""
Initialises a Server object
:param str path: Path to the browsermob proxy batch file
:param dict options: Dictionary that can hold the port.
More items will be added in the future.
This defaults to an empty dictionary
"""
options = options if options is not None else {}
path_var_sep = ':'
if platform.system() == 'Windows':
path_var_sep = ';'
if not path.endswith('.bat'):
path += '.bat'
exec_not_on_path = True
for directory in os.environ['PATH'].split(path_var_sep):
if(os.path.isfile(os.path.join(directory, path))):
exec_not_on_path = False
break
if not os.path.isfile(path) and exec_not_on_path:
raise ProxyServerError("Browsermob-Proxy binary couldn't be found "
"in path provided: %s" % path)
self.path = path
self.host = 'localhost'
self.port = options.get('port', 8080)
self.process = None
if platform.system() == 'Darwin':
self.command = ['sh']
else:
self.command = []
self.command += [path, '--port=%s' % self.port]
def start(self, options=None):
"""
This will start the browsermob proxy and then wait until it can
interact with it
:param dict options: Dictionary that can hold the path and filename
of the log file with resp. keys of `log_path` and `log_file`
"""
if options is None:
options = {}
log_path = options.get('log_path', os.getcwd())
log_file = options.get('log_file', 'server.log')
retry_sleep = options.get('retry_sleep', 0.5)
retry_count = options.get('retry_count', 60)
log_path_name = os.path.join(log_path, log_file)
self.log_file = open(log_path_name, 'w')
self.process = subprocess.Popen(self.command,
stdout=self.log_file,
stderr=subprocess.STDOUT)
count = 0
while not self._is_listening():
if self.process.poll():
message = (
"The Browsermob-Proxy server process failed to start. "
"Check {0}"
"for a helpful error message.".format(self.log_file))
raise ProxyServerError(message)
time.sleep(retry_sleep)
count += 1
if count == retry_count:
self.stop()
raise ProxyServerError("Can't connect to Browsermob-Proxy")
def stop(self):
"""
This will stop the process running the proxy
"""
if self.process.poll() is not None:
return
try:
self.process.kill()
self.process.wait()
except AttributeError:
# kill may not be available under windows environment
pass
self.log_file.close()
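# Usage sketch (the binary path below is an assumption about your installation):
#
#   server = Server('/opt/browsermob-proxy/bin/browsermob-proxy', options={'port': 8080})
#   server.start()
#   proxy = server.create_proxy()
#   # ... point a browser at the proxy and capture traffic ...
#   server.stop()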
|
the-stack_106_14600
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="side", parent_name="scatter3d.line.colorbar.title", **kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs
)
|
the-stack_106_14601
|
#!/usr/bin/env python3
import sys
sys.path.extend(["/home/jmark/projects/stirturb/turbubox/tools/%s" % s for s in "bin lib".split()])
import numpy as np
import gausslobatto
import flash, flexi, hopr
import ulz
import interpolate
gamma = 5/3
srcfp = "/mnt/seagate/flexi/stirturb/run-shockcapturing/sim_State_0000000.350000000.h5"
#srcfp = "/home/jmark/dump/data/sim_State_0000002.450000000.h5"
#srcfp = "/mnt/seagate/flexi/stirturb/snapshots/sim_State_0000000.500000000.h5"
with flexi.PeriodicBox(srcfp) as ff:
#dens,momx,momy,momz,ener = ff.get_cons_fv()
dens,velx,vely,velz,pres = ff.get_prims_fv()
npoly = ff.Npoly
xs = gausslobatto.mk_nodes_from_to(-1,1,npoly)
ys = xs
# modal Vandermonde Matrix
Vmodal = np.zeros((npoly+1,npoly+1))
for i in range(0,npoly+1):
for j in range(0,npoly+1):
Vmodal[i,j] = gausslobatto.LegendrePolynomialAndDerivative(j,xs[i])[0]
Wmodal = np.linalg.inv(Vmodal)
modal = interpolate.change_basis(Wmodal, pres)
|
the-stack_106_14602
|
#!/usr/bin/env python
#
# Runs clang-tidy on files based on a `compile_commands.json` file
#
"""
Run clang-tidy in parallel on compile databases.
Example run:
# This prepares the build. NOTE this is `build` not `gen` because the build
# step generates required header files (this can be simplified, if needed,
# to invoke ninja to compile only the generated files)
./scripts/build/build_examples.py --target linux-x64-chip-tool-clang build
# Actually running clang-tidy to check status
./scripts/run-clang-tidy-on-compile-commands.py check
# Run and output a fix yaml
./scripts/run-clang-tidy-on-compile-commands.py --export-fixes out/fixes.yaml check
# Apply the fixes
clang-apply-replacements out/fixes.yaml
"""
import build
import click
import coloredlogs
import glob
import json
import logging
import multiprocessing
import os
import queue
import re
import shlex
import subprocess
import sys
import tempfile
import threading
import traceback
import yaml
class TidyResult:
def __init__(self, path: str, ok: bool):
self.path = path
self.ok = ok
def __repr__(self):
if self.ok:
status = "OK"
else:
status = "FAIL"
return "%s(%s)" % (status, self.path)
def __str__(self):
return self.__repr__()
class ClangTidyEntry:
"""Represents a single entry for running clang-tidy based
on a compile_commands.json item.
"""
def __init__(self, json_entry, gcc_sysroot=None):
# Entries in compile_commands:
# - "directory": location to run the compile
# - "file": a relative path to directory
# - "command": full compilation command
self.directory = json_entry["directory"]
self.file = json_entry["file"]
self.valid = False
self.clang_arguments = []
self.tidy_arguments = []
command = json_entry["command"]
command_items = shlex.split(command)
compiler = os.path.basename(command_items[0])
# Allow gcc/g++ invocations to also be tidied - arguments should be
# compatible and on darwin gcc/g++ is actually a symlink to clang
if compiler in ['clang++', 'clang', 'gcc', 'g++']:
self.valid = True
self.clang_arguments = command_items[1:]
else:
logging.warning(
"Cannot tidy %s - not a clang compile command", self.file)
return
if compiler in ['gcc', 'g++'] and gcc_sysroot:
self.clang_arguments.insert(0, '--sysroot='+gcc_sysroot)
@property
def full_path(self):
return os.path.abspath(os.path.join(self.directory, self.file))
def ExportFixesTo(self, f: str):
self.tidy_arguments.append("--export-fixes")
self.tidy_arguments.append(f)
def SetChecks(self, checks: str):
self.tidy_arguments.append("--checks")
self.tidy_arguments.append(checks)
def Check(self):
logging.debug("Running tidy on %s from %s", self.file, self.directory)
try:
cmd = ["clang-tidy", self.file] + \
self.tidy_arguments + ["--"] + self.clang_arguments
logging.debug("Executing: %r" % cmd)
proc = subprocess.Popen(
cmd,
cwd=self.directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output, err = proc.communicate()
if output:
logging.info("TIDY %s: %s", self.file, output.decode("utf-8"))
if err:
logging.warning("TIDY %s: %s", self.file, err.decode("utf-8"))
if proc.returncode != 0:
if proc.returncode < 0:
logging.error(
"Failed %s with signal %d", self.file, -proc.returncode
)
else:
logging.warning(
"Tidy %s ended with code %d", self.file, proc.returncode
)
return TidyResult(self.full_path, False)
except:
traceback.print_exc()
return TidyResult(self.full_path, False)
return TidyResult(self.full_path, True)
class TidyState:
def __init__(self):
self.successes = 0
self.failures = 0
self.lock = threading.Lock()
self.failed_files = []
def Success(self):
with self.lock:
self.successes += 1
def Failure(self, path: str):
with self.lock:
self.failures += 1
self.failed_files.append(path)
logging.error("Failed to process %s", path)
def find_darwin_gcc_sysroot():
for line in subprocess.check_output('xcodebuild -sdk -version'.split()).decode('utf8').split('\n'):
if not line.startswith('Path: '):
continue
path = line[line.find(': ')+2:]
if not '/MacOSX.platform/' in path:
continue
logging.info("Found %s" % path)
return path
# A hard-coded value that works on default installations
logging.warning("Using default platform sdk path. This may be incorrect.")
return '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
class ClangTidyRunner:
"""Handles running clang-tidy"""
def __init__(self):
self.entries = []
self.state = TidyState()
self.fixes_file = None
self.fixes_temporary_file_dir = None
self.gcc_sysroot = None
if sys.platform == 'darwin':
# Darwin gcc invocation will auto select a system root, however clang requires an explicit path since
# we are using the built-in pigweed clang-tidy.
logging.info(
'Searching for a MacOS system root for gcc invocations...')
self.gcc_sysroot = find_darwin_gcc_sysroot()
logging.info(' Chose: %s' % self.gcc_sysroot)
def AddDatabase(self, compile_commands_json):
database = json.load(open(compile_commands_json))
for entry in database:
item = ClangTidyEntry(entry, self.gcc_sysroot)
if not item.valid:
continue
self.entries.append(item)
def Cleanup(self):
if self.fixes_temporary_file_dir:
all_diagnostics = []
# When running over several files, fixes may be applied to the same
# file over and over again, like 'append override' can result in the
# same override being appended multiple times.
already_seen = set()
for name in glob.iglob(
os.path.join(self.fixes_temporary_file_dir.name, "*.yaml")
):
content = yaml.safe_load(open(name, "r"))
if not content:
continue
diagnostics = content.get("Diagnostics", [])
# Allow all diagnostics for distinct paths to be applied
# at once but never again for future paths
for d in diagnostics:
if d['DiagnosticMessage']['FilePath'] not in already_seen:
all_diagnostics.append(d)
# in the future assume these files were already processed
for d in diagnostics:
already_seen.add(d['DiagnosticMessage']['FilePath'])
if all_diagnostics:
with open(self.fixes_file, "w") as out:
yaml.safe_dump(
{"MainSourceFile": "", "Diagnostics": all_diagnostics}, out
)
else:
open(self.fixes_file, "w").close()
logging.info(
"Cleaning up directory: %r", self.fixes_temporary_file_dir.name
)
self.fixes_temporary_file_dir.cleanup()
self.fixes_temporary_file_dir = None
def ExportFixesTo(self, f):
# use absolute path since running things will change working directories
self.fixes_file = os.path.abspath(f)
self.fixes_temporary_file_dir = tempfile.TemporaryDirectory(
prefix="tidy-", suffix="-fixes"
)
logging.info(
"Storing temporary fix files into %s", self.fixes_temporary_file_dir.name
)
for idx, e in enumerate(self.entries):
e.ExportFixesTo(
os.path.join(
self.fixes_temporary_file_dir.name, "fixes%d.yaml" % (
idx + 1,)
)
)
def SetChecks(self, checks: str):
for e in self.entries:
e.SetChecks(checks)
def FilterEntries(self, f):
for e in self.entries:
if not f(e):
logging.info("Skipping %s in %s", e.file, e.directory)
self.entries = [e for e in self.entries if f(e)]
def CheckThread(self, task_queue):
while True:
entry = task_queue.get()
status = entry.Check()
if status.ok:
self.state.Success()
else:
self.state.Failure(status.path)
task_queue.task_done()
def Check(self):
count = multiprocessing.cpu_count()
task_queue = queue.Queue(count)
for _ in range(count):
t = threading.Thread(target=self.CheckThread, args=(task_queue,))
t.daemon = True
t.start()
for e in self.entries:
task_queue.put(e)
task_queue.join()
logging.info("Successfully processed %d paths", self.state.successes)
logging.info("Failed to process %d paths", self.state.failures)
if self.state.failures:
for name in self.state.failed_files:
logging.warning("Failure reported for %s", name)
return self.state.failures == 0
# Supported log levels, mapping string values required for argument
# parsing into logging constants
__LOG_LEVELS__ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warn": logging.WARN,
"fatal": logging.FATAL,
}
@click.group(chain=True)
@click.option(
"--compile-database",
default=[],
multiple=True,
help="Path to `compile_commands.json` to use for executing clang-tidy.",
)
@click.option(
"--file-include-regex",
default="/(src|examples)/",
help="regular expression to apply to the file paths for running.",
)
@click.option(
"--file-exclude-regex",
# NOTE: if trying '/third_party/' note that a lot of sources are routed through
# paths like `../../examples/chip-tool/third_party/connectedhomeip/src/`
default="/(repo|zzz_generated)/",
help="Regular expression to apply to the file paths for running. Skip overrides includes.",
)
@click.option(
"--log-level",
default="INFO",
type=click.Choice(__LOG_LEVELS__.keys(), case_sensitive=False),
help="Determines the verbosity of script output.",
)
@click.option(
"--no-log-timestamps",
default=False,
is_flag=True,
help="Skip timestaps in log output",
)
@click.option(
"--export-fixes",
default=None,
type=click.Path(),
help="Where to export fixes to apply.",
)
@click.option(
"--checks",
default=None,
type=str,
help="Checks to run (passed in to clang-tidy). If not set the .clang-tidy file is used.",
)
@click.pass_context
def main(
context,
compile_database,
file_include_regex,
file_exclude_regex,
log_level,
no_log_timestamps,
export_fixes,
checks,
):
log_fmt = "%(asctime)s %(levelname)-7s %(message)s"
if no_log_timestamps:
log_fmt = "%(levelname)-7s %(message)s"
coloredlogs.install(level=__LOG_LEVELS__[log_level], fmt=log_fmt)
if not compile_database:
logging.warning(
"Compilation database file not provided. Searching for first item in ./out"
)
compile_database = next(
glob.iglob("./out/**/compile_commands.json", recursive=True)
)
if not compile_database:
raise Exception("Could not find `compile_commands.json` in ./out")
logging.info("Will use %s for compile", compile_database)
context.obj = runner = ClangTidyRunner()
@context.call_on_close
def cleanup():
runner.Cleanup()
for name in compile_database:
runner.AddDatabase(name)
if file_include_regex:
r = re.compile(file_include_regex)
runner.FilterEntries(lambda e: r.search(e.file))
if file_exclude_regex:
r = re.compile(file_exclude_regex)
runner.FilterEntries(lambda e: not r.search(e.file))
if export_fixes:
runner.ExportFixesTo(export_fixes)
if checks:
runner.SetChecks(checks)
for e in context.obj.entries:
logging.info("Will tidy %s", e.full_path)
@main.command("check", help="Run clang-tidy check")
@click.pass_context
def cmd_check(context):
if not context.obj.Check():
sys.exit(1)
@main.command("fix", help="Run check followd by fix")
@click.pass_context
def cmd_fix(context):
runner = context.obj
with tempfile.TemporaryDirectory(prefix="tidy-apply-fixes") as tmpdir:
if not runner.fixes_file:
runner.ExportFixesTo(os.path.join(tmpdir, "fixes.tmp"))
runner.Check()
runner.Cleanup()
if runner.state.failures:
fixes_yaml = os.path.join(tmpdir, "fixes.yaml")
with open(fixes_yaml, "w") as out:
out.write(open(runner.fixes_file, "r").read())
logging.info("Applying fixes in %s", tmpdir)
subprocess.check_call(["clang-apply-replacements", tmpdir])
else:
logging.info("No failures detected, no fixes to apply.")
if __name__ == "__main__":
main()
|
the-stack_106_14603
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test HTTP support.
"""
import base64
import calendar
import random
import hamcrest
from urllib.parse import urlparse, urlunsplit, parse_qs
from urllib.parse import clear_cache # type: ignore[attr-defined]
from unittest import skipIf
from typing import Sequence, Union
from io import BytesIO
from itertools import cycle
from zope.interface import (
provider,
directlyProvides,
providedBy,
)
from zope.interface.verify import verifyObject
from twisted.python.compat import (iterbytes, long, networkString,
unicode, intToBytes)
from twisted.python.components import proxyForInterface
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.web import http, http_headers, iweb
from twisted.web.http import PotentialDataLoss, _DataLoss
from twisted.web.http import _IdentityTransferDecoder
from twisted.internet import address
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionLost, ConnectionDone
from twisted.protocols import loopback
from twisted.test.proto_helpers import (StringTransport, NonStreamingProducer,
EventLoggingObserver)
from twisted.test.test_internet import DummyProducer
from twisted.web.test.requesthelper import (
DummyChannel,
bytesLinearWhitespaceComponents,
sanitizedBytes,
textLinearWhitespaceComponents,
)
from twisted.logger import globalLogPublisher
from ._util import (
assertIsFilesystemTemporary,
)
class _IDeprecatedHTTPChannelToRequestInterfaceProxy(proxyForInterface( # type: ignore[misc] # noqa
http._IDeprecatedHTTPChannelToRequestInterface)):
"""
Proxy L{_IDeprecatedHTTPChannelToRequestInterface}. Used to
assert that the interface matches what L{HTTPChannel} expects.
"""
def _makeRequestProxyFactory(clsToWrap):
"""
Return a callable that proxies instances of C{clsToWrap} via
L{_IDeprecatedHTTPChannelToRequestInterface}.
@param clsToWrap: The class whose instances will be proxied.
@type cls: L{_IDeprecatedHTTPChannelToRequestInterface}
implementer.
@return: A factory that returns
L{_IDeprecatedHTTPChannelToRequestInterface} proxies.
@rtype: L{callable} whose interface matches C{clsToWrap}'s constructor.
"""
def _makeRequestProxy(*args, **kwargs):
instance = clsToWrap(*args, **kwargs)
return _IDeprecatedHTTPChannelToRequestInterfaceProxy(instance)
# For INonQueuedRequestFactory
directlyProvides(_makeRequestProxy, providedBy(clsToWrap))
return _makeRequestProxy
class DummyPullProducerHandler(http.Request):
"""
An HTTP request handler that registers a dummy pull producer to serve the
body.
The owner must call C{finish} to complete the response.
"""
def process(self):
self._actualProducer = NonStreamingProducer(self)
self.setResponseCode(200)
self.registerProducer(self._actualProducer, False)
DummyPullProducerHandlerProxy = _makeRequestProxyFactory(
DummyPullProducerHandler)
class DateTimeTests(unittest.TestCase):
"""Test date parsing functions."""
def testRoundtrip(self):
for i in range(10000):
time = random.randint(0, 2000000000)
timestr = http.datetimeToString(time)
time2 = http.stringToDatetime(timestr)
self.assertEqual(time, time2)
def testStringToDatetime(self):
dateStrings = [
b"Sun, 06 Nov 1994 08:49:37 GMT",
b"06 Nov 1994 08:49:37 GMT",
b"Sunday, 06-Nov-94 08:49:37 GMT",
b"06-Nov-94 08:49:37 GMT",
b"Sunday, 06-Nov-1994 08:49:37 GMT",
b"06-Nov-1994 08:49:37 GMT",
b"Sun Nov 6 08:49:37 1994",
b"Nov 6 08:49:37 1994",
]
dateInt = calendar.timegm((1994, 11, 6, 8, 49, 37, 6, 6, 0))
for dateString in dateStrings:
self.assertEqual(http.stringToDatetime(dateString), dateInt)
self.assertEqual(
http.stringToDatetime(b"Thursday, 29-Sep-16 17:15:29 GMT"),
calendar.timegm((2016, 9, 29, 17, 15, 29, 3, 273, 0)))
class DummyHTTPHandler(http.Request):
def process(self):
self.content.seek(0, 0)
data = self.content.read()
length = self.getHeader(b'content-length')
if length is None:
length = networkString(str(length))
request = b"'''\n" + length + b"\n" + data + b"'''\n"
self.setResponseCode(200)
self.setHeader(b"Request", self.uri)
self.setHeader(b"Command", self.method)
self.setHeader(b"Version", self.clientproto)
self.setHeader(b"Content-Length", intToBytes(len(request)))
self.write(request)
self.finish()
DummyHTTPHandlerProxy = _makeRequestProxyFactory(DummyHTTPHandler)
@provider(iweb.INonQueuedRequestFactory)
class DummyNewHTTPHandler(DummyHTTPHandler):
"""
This is exactly like the DummyHTTPHandler but it takes only one argument
in its constructor, with no default arguments. This exists to test an
alternative code path in L{HTTPChannel}.
"""
def __init__(self, channel):
DummyHTTPHandler.__init__(self, channel)
DummyNewHTTPHandlerProxy = _makeRequestProxyFactory(DummyNewHTTPHandler)
class DelayedHTTPHandler(DummyHTTPHandler):
"""
Like L{DummyHTTPHandler}, but doesn't respond immediately.
"""
def process(self):
pass
def delayedProcess(self):
DummyHTTPHandler.process(self)
DelayedHTTPHandlerProxy = _makeRequestProxyFactory(DelayedHTTPHandler)
class LoopbackHTTPClient(http.HTTPClient):
def connectionMade(self):
self.sendCommand(b"GET", b"/foo/bar")
self.sendHeader(b"Content-Length", 10)
self.endHeaders()
self.transport.write(b"0123456789")
def parametrizeTimeoutMixin(protocol, reactor):
"""
Parametrizes the L{TimeoutMixin} so that it works with whatever reactor is
being used by the test.
@param protocol: A L{_GenericHTTPChannel} or something implementing a
similar interface.
@type protocol: L{_GenericHTTPChannel}
@param reactor: An L{IReactorTime} implementation.
@type reactor: L{IReactorTime}
@return: The C{channel}, with its C{callLater} method patched.
"""
# This is a terrible violation of the abstraction layer of
# _genericHTTPChannelProtocol, but we need to do it because
# policies.TimeoutMixin doesn't accept a reactor on the object.
# See https://twistedmatrix.com/trac/ticket/8488
protocol._channel.callLater = reactor.callLater
return protocol
class ResponseTestMixin:
"""
A mixin that provides a simple means of comparing an actual response string
to an expected response string by performing the minimal parsing.
"""
def assertResponseEquals(self, responses, expected):
"""
Assert that the C{responses} matches the C{expected} responses.
@type responses: C{bytes}
@param responses: The bytes sent in response to one or more requests.
@type expected: C{list} of C{tuple} of C{bytes}
@param expected: The expected values for the responses. Each tuple
element of the list represents one response. Each byte string
element of the tuple is a full header line without delimiter, except
for the last element which gives the full response body.
"""
for response in expected:
expectedHeaders, expectedContent = response[:-1], response[-1]
# Intentionally avoid mutating the inputs here.
expectedStatus = expectedHeaders[0]
expectedHeaders = expectedHeaders[1:]
headers, rest = responses.split(b'\r\n\r\n', 1)
headers = headers.splitlines()
status = headers.pop(0)
self.assertEqual(expectedStatus, status)
self.assertEqual(set(headers), set(expectedHeaders))
content = rest[:len(expectedContent)]
responses = rest[len(expectedContent):]
self.assertEqual(content, expectedContent)
class HTTP1_0Tests(unittest.TestCase, ResponseTestMixin):
requests = (
b"GET / HTTP/1.0\r\n"
b"\r\n"
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.0 200 OK",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.0",
b"Content-Length: 13",
b"'''\nNone\n'''\n")] # type: Union[Sequence[Sequence[bytes]], bytes]
def test_buffer(self):
"""
Send requests over a channel and check responses match what is expected.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DummyHTTPHandlerProxy
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
a.connectionLost(IOError("all one"))
value = b.value()
self.assertResponseEquals(value, self.expected_response)
def test_requestBodyTimeout(self):
"""
L{HTTPChannel} resets its timeout whenever data from a request body is
delivered to it.
"""
clock = Clock()
transport = StringTransport()
protocol = http.HTTPChannel()
protocol.timeOut = 100
protocol.callLater = clock.callLater
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
clock.advance(99)
self.assertFalse(transport.disconnecting)
protocol.dataReceived(b'x')
clock.advance(99)
self.assertFalse(transport.disconnecting)
protocol.dataReceived(b'x')
self.assertEqual(len(protocol.requests), 1)
def test_requestBodyDefaultTimeout(self):
"""
L{HTTPChannel}'s default timeout is 60 seconds.
"""
clock = Clock()
transport = StringTransport()
factory = http.HTTPFactory()
protocol = factory.buildProtocol(None)
protocol = parametrizeTimeoutMixin(protocol, clock)
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
clock.advance(59)
self.assertFalse(transport.disconnecting)
clock.advance(1)
self.assertTrue(transport.disconnecting)
def test_transportForciblyClosed(self):
"""
If a timed out transport doesn't close after 15 seconds, the
L{HTTPChannel} will forcibly close it.
"""
logObserver = EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
clock = Clock()
transport = StringTransport()
factory = http.HTTPFactory()
protocol = factory.buildProtocol(None)
protocol = parametrizeTimeoutMixin(protocol, clock)
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
self.assertFalse(transport.disconnecting)
self.assertFalse(transport.disconnected)
# Force the initial timeout.
clock.advance(60)
self.assertTrue(transport.disconnecting)
self.assertFalse(transport.disconnected)
self.assertEquals(1, len(logObserver))
event = logObserver[0]
self.assertIn("Timing out client: {peer}", event["log_format"])
# Watch the transport get force-closed.
clock.advance(14)
self.assertTrue(transport.disconnecting)
self.assertFalse(transport.disconnected)
clock.advance(1)
self.assertTrue(transport.disconnecting)
self.assertTrue(transport.disconnected)
self.assertEquals(2, len(logObserver))
event = logObserver[1]
self.assertEquals(
"Forcibly timing out client: {peer}",
event["log_format"]
)
def test_transportNotAbortedAfterConnectionLost(self):
"""
If a timed out transport ends up calling C{connectionLost}, it prevents
the force-closure of the transport.
"""
clock = Clock()
transport = StringTransport()
factory = http.HTTPFactory()
protocol = factory.buildProtocol(None)
protocol = parametrizeTimeoutMixin(protocol, clock)
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
self.assertFalse(transport.disconnecting)
self.assertFalse(transport.disconnected)
# Force the initial timeout.
clock.advance(60)
self.assertTrue(transport.disconnecting)
self.assertFalse(transport.disconnected)
# Move forward nearly to the timeout, then fire connectionLost.
clock.advance(14)
protocol.connectionLost(None)
# Check that the transport isn't forcibly closed.
clock.advance(1)
self.assertTrue(transport.disconnecting)
self.assertFalse(transport.disconnected)
def test_transportNotAbortedWithZeroAbortTimeout(self):
"""
If the L{HTTPChannel} has its c{abortTimeout} set to L{None}, it never
aborts.
"""
clock = Clock()
transport = StringTransport()
factory = http.HTTPFactory()
protocol = factory.buildProtocol(None)
protocol._channel.abortTimeout = None
protocol = parametrizeTimeoutMixin(protocol, clock)
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
self.assertFalse(transport.disconnecting)
self.assertFalse(transport.disconnected)
# Force the initial timeout.
clock.advance(60)
self.assertTrue(transport.disconnecting)
self.assertFalse(transport.disconnected)
# Move an absurdly long way just to prove the point.
clock.advance(2**32)
self.assertTrue(transport.disconnecting)
self.assertFalse(transport.disconnected)
def test_connectionLostAfterForceClose(self):
"""
If a timed out transport doesn't close after 15 seconds, the
L{HTTPChannel} will forcibly close it.
"""
clock = Clock()
transport = StringTransport()
factory = http.HTTPFactory()
protocol = factory.buildProtocol(None)
protocol = parametrizeTimeoutMixin(protocol, clock)
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
self.assertFalse(transport.disconnecting)
self.assertFalse(transport.disconnected)
# Force the initial timeout and the follow-on forced closure.
clock.advance(60)
clock.advance(15)
self.assertTrue(transport.disconnecting)
self.assertTrue(transport.disconnected)
# Now call connectionLost on the protocol. This is done by some
# transports, including TCP and TLS. We don't have anything we can
# assert on here: this just must not explode.
protocol.connectionLost(ConnectionDone)
def test_noPipeliningApi(self):
"""
Test that a L{http.Request} subclass with no queued kwarg works as
expected.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DummyHTTPHandlerProxy
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
a.connectionLost(IOError("all done"))
value = b.value()
self.assertResponseEquals(value, self.expected_response)
def test_noPipelining(self):
"""
Test that pipelined requests get buffered, not processed in parallel.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DelayedHTTPHandlerProxy
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
value = b.value()
# So far only one request should have been dispatched.
self.assertEqual(value, b'')
self.assertEqual(1, len(a.requests))
# Now, process each request one at a time.
while a.requests:
self.assertEqual(1, len(a.requests))
request = a.requests[0].original
request.delayedProcess()
value = b.value()
self.assertResponseEquals(value, self.expected_response)
class HTTP1_1Tests(HTTP1_0Tests):
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"\r\n"
b"POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789HEAD / HTTP/1.1\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"'''\nNone\n'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: HEAD",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"")]
class HTTP1_1_close_Tests(HTTP1_0Tests):
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"Connection: close\r\n"
b"\r\n"
b"GET / HTTP/1.0\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.1 200 OK",
b"Connection: close",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"'''\nNone\n'''\n")]
class HTTP0_9Tests(HTTP1_0Tests):
requests = (
b"GET /\r\n")
expected_response = b"HTTP/1.1 400 Bad Request\r\n\r\n"
def assertResponseEquals(self, response, expectedResponse):
self.assertEqual(response, expectedResponse)
def test_noPipelining(self):
raise unittest.SkipTest("HTTP/0.9 not supported")
class PipeliningBodyTests(unittest.TestCase, ResponseTestMixin):
"""
Tests that multiple pipelined requests with bodies are correctly buffered.
"""
requests = (
b"POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789"
)
expectedResponses = [
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n")]
def test_noPipelining(self):
"""
Test that pipelined requests get buffered, not processed in parallel.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DelayedHTTPHandlerProxy
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
value = b.value()
# So far only one request should have been dispatched.
self.assertEqual(value, b'')
self.assertEqual(1, len(a.requests))
# Now, process each request one at a time.
while a.requests:
self.assertEqual(1, len(a.requests))
request = a.requests[0].original
request.delayedProcess()
value = b.value()
self.assertResponseEquals(value, self.expectedResponses)
def test_pipeliningReadLimit(self):
"""
When pipelined requests are received, we will optimistically continue
receiving data up to a specified limit, then pause the transport.
@see: L{http.HTTPChannel._optimisticEagerReadSize}
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DelayedHTTPHandlerProxy
a.makeConnection(b)
underLimit = a._optimisticEagerReadSize // len(self.requests)
for x in range(1, underLimit + 1):
a.dataReceived(self.requests)
self.assertEqual(b.producerState, 'producing',
'state was {state!r} after {x} iterations'
.format(state=b.producerState, x=x))
a.dataReceived(self.requests)
self.assertEquals(b.producerState, 'paused')
class ShutdownTests(unittest.TestCase):
"""
Tests that connections can be shut down by L{http.Request} objects.
"""
class ShutdownHTTPHandler(http.Request):
"""
An HTTP handler that just immediately calls loseConnection.
"""
def process(self):
self.loseConnection()
request = (
b"POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789"
)
def test_losingConnection(self):
"""
Calling L{http.Request.loseConnection} causes the transport to be
disconnected.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = _makeRequestProxyFactory(self.ShutdownHTTPHandler)
a.makeConnection(b)
a.dataReceived(self.request)
# The transport should have been shut down.
self.assertTrue(b.disconnecting)
# No response should have been written.
value = b.value()
self.assertEqual(value, b'')
class SecurityTests(unittest.TestCase):
"""
Tests that L{http.Request.isSecure} correctly takes the transport into
account.
"""
def test_isSecure(self):
"""
Calling L{http.Request.isSecure} when the channel is backed with a
secure transport will return L{True}.
"""
b = DummyChannel.SSL()
a = http.HTTPChannel()
a.makeConnection(b)
req = http.Request(a)
self.assertTrue(req.isSecure())
def test_notSecure(self):
"""
Calling L{http.Request.isSecure} when the channel is not backed with a
secure transport will return L{False}.
"""
b = DummyChannel.TCP()
a = http.HTTPChannel()
a.makeConnection(b)
req = http.Request(a)
self.assertFalse(req.isSecure())
def test_notSecureAfterFinish(self):
"""
After a request is finished, calling L{http.Request.isSecure} will
always return L{False}.
"""
b = DummyChannel.SSL()
a = http.HTTPChannel()
a.makeConnection(b)
req = http.Request(a)
a.requests.append(req)
req.setResponseCode(200)
req.finish()
self.assertFalse(req.isSecure())
class GenericHTTPChannelTests(unittest.TestCase):
"""
Tests for L{http._genericHTTPChannelProtocol}, a L{HTTPChannel}-alike which
can handle different HTTP protocol channels.
"""
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"Connection: close\r\n"
b"\r\n"
b"GET / HTTP/1.0\r\n"
b"\r\n")
def _negotiatedProtocolForTransportInstance(self, t):
"""
Run a request using the specific instance of a transport. Returns the
negotiated protocol string.
"""
a = http._genericHTTPChannelProtocolFactory(b'')
a.requestFactory = DummyHTTPHandlerProxy
a.makeConnection(t)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
a.connectionLost(IOError("all done"))
return a._negotiatedProtocol
@skipIf(not http.H2_ENABLED, "HTTP/2 support not present")
def test_h2CancelsH11Timeout(self):
"""
When the transport is switched to H2, the HTTPChannel timeouts are
cancelled.
"""
clock = Clock()
a = http._genericHTTPChannelProtocolFactory(b'')
a.requestFactory = DummyHTTPHandlerProxy
# Set the original timeout to be 100s
a.timeOut = 100
a.callLater = clock.callLater
b = StringTransport()
b.negotiatedProtocol = b'h2'
a.makeConnection(b)
# We've made the connection, but we actually check if we've negotiated
# H2 when data arrives. Right now, the HTTPChannel will have set up a
# single delayed call.
hamcrest.assert_that(
clock.getDelayedCalls(),
hamcrest.contains(
hamcrest.has_property(
"cancelled",
hamcrest.equal_to(False),
),
),
)
h11Timeout = clock.getDelayedCalls()[0]
# We give it the HTTP data, and it switches out for H2.
a.dataReceived(b'')
self.assertEqual(a._negotiatedProtocol, b'h2')
# The first delayed call is cancelled, and H2 creates a new one for its
# own timeouts.
self.assertTrue(h11Timeout.cancelled)
hamcrest.assert_that(
clock.getDelayedCalls(),
hamcrest.contains(
hamcrest.has_property(
"cancelled",
hamcrest.equal_to(False),
),
),
)
def test_protocolUnspecified(self):
"""
If the transport has no support for protocol negotiation (no
negotiatedProtocol attribute), HTTP/1.1 is assumed.
"""
b = StringTransport()
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'http/1.1')
def test_protocolNone(self):
"""
If the transport has no support for protocol negotiation (returns None
for negotiatedProtocol), HTTP/1.1 is assumed.
"""
b = StringTransport()
b.negotiatedProtocol = None
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'http/1.1')
def test_http11(self):
"""
If the transport reports that HTTP/1.1 is negotiated, that's what's
negotiated.
"""
b = StringTransport()
b.negotiatedProtocol = b'http/1.1'
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'http/1.1')
@skipIf(not http.H2_ENABLED, "HTTP/2 support not present")
def test_http2_present(self):
"""
If the transport reports that HTTP/2 is negotiated and HTTP/2 is
present, that's what's negotiated.
"""
b = StringTransport()
b.negotiatedProtocol = b'h2'
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'h2')
@skipIf(http.H2_ENABLED, "HTTP/2 support present")
def test_http2_absent(self):
"""
If the transport reports that HTTP/2 is negotiated and HTTP/2 is not
present, an error is encountered.
"""
b = StringTransport()
b.negotiatedProtocol = b'h2'
self.assertRaises(
ValueError,
self._negotiatedProtocolForTransportInstance,
b,
)
def test_unknownProtocol(self):
"""
If the transport reports that a protocol other than HTTP/1.1 or HTTP/2
is negotiated, an error occurs.
"""
b = StringTransport()
b.negotiatedProtocol = b'smtp'
self.assertRaises(
AssertionError,
self._negotiatedProtocolForTransportInstance,
b,
)
def test_factory(self):
"""
The C{factory} attribute is taken from the inner channel.
"""
a = http._genericHTTPChannelProtocolFactory(b'')
a._channel.factory = b"Foo"
self.assertEqual(a.factory, b"Foo")
def test_GenericHTTPChannelPropagatesCallLater(self):
"""
If C{callLater} is patched onto the L{http._GenericHTTPChannelProtocol}
then we need to propagate it through to the backing channel.
"""
clock = Clock()
factory = http.HTTPFactory(reactor=clock)
protocol = factory.buildProtocol(None)
self.assertEqual(protocol.callLater, clock.callLater)
self.assertEqual(protocol._channel.callLater, clock.callLater)
@skipIf(not http.H2_ENABLED, "HTTP/2 support not present")
def test_genericHTTPChannelCallLaterUpgrade(self):
"""
If C{callLater} is patched onto the L{http._GenericHTTPChannelProtocol}
then we need to propagate it across onto a new backing channel after
upgrade.
"""
clock = Clock()
factory = http.HTTPFactory(reactor=clock)
protocol = factory.buildProtocol(None)
self.assertEqual(protocol.callLater, clock.callLater)
self.assertEqual(protocol._channel.callLater, clock.callLater)
transport = StringTransport()
transport.negotiatedProtocol = b'h2'
protocol.requestFactory = DummyHTTPHandler
protocol.makeConnection(transport)
# Send a byte to make it think the handshake is done.
protocol.dataReceived(b'P')
self.assertEqual(protocol.callLater, clock.callLater)
self.assertEqual(protocol._channel.callLater, clock.callLater)
@skipIf(not http.H2_ENABLED, "HTTP/2 support not present")
def test_unregistersProducer(self):
"""
The L{_GenericHTTPChannelProtocol} will unregister its proxy channel
from the transport if upgrade is negotiated.
"""
transport = StringTransport()
transport.negotiatedProtocol = b'h2'
genericProtocol = http._genericHTTPChannelProtocolFactory(b'')
genericProtocol.requestFactory = DummyHTTPHandlerProxy
genericProtocol.makeConnection(transport)
originalChannel = genericProtocol._channel
# We expect the transport to have the underlying channel registered as
# a producer.
self.assertIs(transport.producer, originalChannel)
# Force the upgrade.
genericProtocol.dataReceived(b'P')
# The transport should not have the original channel as its
# producer...
self.assertIsNot(transport.producer, originalChannel)
# ...it should have the new H2 channel as its producer
self.assertIs(transport.producer, genericProtocol._channel)
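# The upgrade path exercised above hinges entirely on the transport
# advertising the negotiated protocol; a minimal sketch (HTTP/2 support is
# assumed to be installed, as in the skipIf guard above):
#
#     transport = StringTransport()
#     transport.negotiatedProtocol = b'h2'
#     proto = http._genericHTTPChannelProtocolFactory(b'')
#     proto.requestFactory = DummyHTTPHandlerProxy
#     proto.makeConnection(transport)
#     proto.dataReceived(b'P')        # first byte triggers the channel swap
#     proto._negotiatedProtocol       # b'h2'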
class HTTPLoopbackTests(unittest.TestCase):
expectedHeaders = {b'request': b'/foo/bar',
b'command': b'GET',
b'version': b'HTTP/1.0',
b'content-length': b'21'}
numHeaders = 0
gotStatus = 0
gotResponse = 0
gotEndHeaders = 0
def _handleStatus(self, version, status, message):
self.gotStatus = 1
self.assertEqual(version, b"HTTP/1.0")
self.assertEqual(status, b"200")
def _handleResponse(self, data):
self.gotResponse = 1
self.assertEqual(data, b"'''\n10\n0123456789'''\n")
def _handleHeader(self, key, value):
self.numHeaders = self.numHeaders + 1
self.assertEqual(self.expectedHeaders[key.lower()], value)
def _handleEndHeaders(self):
self.gotEndHeaders = 1
self.assertEqual(self.numHeaders, 4)
def testLoopback(self):
server = http.HTTPChannel()
server.requestFactory = DummyHTTPHandlerProxy
client = LoopbackHTTPClient()
client.handleResponse = self._handleResponse
client.handleHeader = self._handleHeader
client.handleEndHeaders = self._handleEndHeaders
client.handleStatus = self._handleStatus
d = loopback.loopbackAsync(server, client)
d.addCallback(self._cbTestLoopback)
return d
def _cbTestLoopback(self, ignored):
if not (self.gotStatus and self.gotResponse and self.gotEndHeaders):
raise RuntimeError(
"didn't get all callbacks {}".format(
[self.gotStatus, self.gotResponse, self.gotEndHeaders],
)
)
del self.gotEndHeaders
del self.gotResponse
del self.gotStatus
del self.numHeaders
def _prequest(**headers):
"""
Make a request with the given request headers for the persistence tests.
"""
request = http.Request(DummyChannel(), False)
for headerName, v in headers.items():
request.requestHeaders.setRawHeaders(networkString(headerName), v)
return request
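# A sketch of how the helper above feeds the persistence tests that follow:
#
#     request = _prequest(connection=[b"close"])
#     channel = http.HTTPChannel()
#     channel.checkPersistence(request, b"HTTP/1.1")      # False: explicit close
#     channel.checkPersistence(_prequest(), b"HTTP/1.1")  # True by default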
class PersistenceTests(unittest.TestCase):
"""
Tests for persistent HTTP connections.
"""
def setUp(self):
self.channel = http.HTTPChannel()
self.request = _prequest()
def test_http09(self):
"""
After being used for an I{HTTP/0.9} request, the L{HTTPChannel} is not
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/0.9")
self.assertFalse(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http10(self):
"""
After being used for an I{HTTP/1.0} request, the L{HTTPChannel} is not
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/1.0")
self.assertFalse(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http11(self):
"""
After being used for an I{HTTP/1.1} request, the L{HTTPChannel} is
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/1.1")
self.assertTrue(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http11Close(self):
"""
After being used for an I{HTTP/1.1} request with a I{Connection: Close}
header, the L{HTTPChannel} is not persistent.
"""
request = _prequest(connection=[b"close"])
persist = self.channel.checkPersistence(request, b"HTTP/1.1")
self.assertFalse(persist)
self.assertEqual(
[(b"Connection", [b"close"])],
list(request.responseHeaders.getAllRawHeaders()))
class IdentityTransferEncodingTests(TestCase):
"""
Tests for L{_IdentityTransferDecoder}.
"""
def setUp(self):
"""
Create an L{_IdentityTransferDecoder} with callbacks hooked up so that
calls to them can be inspected.
"""
self.data = []
self.finish = []
self.contentLength = 10
self.decoder = _IdentityTransferDecoder(
self.contentLength, self.data.append, self.finish.append)
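# For orientation, the happy path the tests below exercise, compressed into
# one sketch (content length of 10, as in setUp):
#
#     data, finish = [], []
#     decoder = _IdentityTransferDecoder(10, data.append, finish.append)
#     decoder.dataReceived(b'0123456789extra')
#     data     # [b'0123456789']  -- exactly the declared body
#     finish   # [b'extra']       -- whatever followed the body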
def test_exactAmountReceived(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called with a byte string
with length equal to the content length passed to
L{_IdentityTransferDecoder}'s initializer, the data callback is invoked
with that string and the finish callback is invoked with a zero-length
string.
"""
self.decoder.dataReceived(b'x' * self.contentLength)
self.assertEqual(self.data, [b'x' * self.contentLength])
self.assertEqual(self.finish, [b''])
def test_shortStrings(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called multiple times
with byte strings which, when concatenated, are as long as the content
length provided, the data callback is invoked with each string and the
finish callback is invoked only after the second call.
"""
self.decoder.dataReceived(b'x')
self.assertEqual(self.data, [b'x'])
self.assertEqual(self.finish, [])
self.decoder.dataReceived(b'y' * (self.contentLength - 1))
self.assertEqual(self.data, [b'x', b'y' * (self.contentLength - 1)])
self.assertEqual(self.finish, [b''])
def test_longString(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called with a byte string
with length greater than the provided content length, only the prefix
of that string up to the content length is passed to the data callback
and the remainder is passed to the finish callback.
"""
self.decoder.dataReceived(b'x' * self.contentLength + b'y')
self.assertEqual(self.data, [b'x' * self.contentLength])
self.assertEqual(self.finish, [b'y'])
def test_rejectDataAfterFinished(self):
"""
If data is passed to L{_IdentityTransferDecoder.dataReceived} after the
finish callback has been invoked, C{RuntimeError} is raised.
"""
failures = []
def finish(bytes):
try:
decoder.dataReceived(b'foo')
except:
failures.append(Failure())
decoder = _IdentityTransferDecoder(5, self.data.append, finish)
decoder.dataReceived(b'x' * 4)
self.assertEqual(failures, [])
decoder.dataReceived(b'y')
failures[0].trap(RuntimeError)
self.assertEqual(
str(failures[0].value),
"_IdentityTransferDecoder cannot decode data after finishing")
def test_unknownContentLength(self):
"""
If L{_IdentityTransferDecoder} is constructed with L{None} for the
content length, it passes all data delivered to it through to the data
callback.
"""
data = []
finish = []
decoder = _IdentityTransferDecoder(None, data.append, finish.append)
decoder.dataReceived(b'x')
self.assertEqual(data, [b'x'])
decoder.dataReceived(b'y')
self.assertEqual(data, [b'x', b'y'])
self.assertEqual(finish, [])
def _verifyCallbacksUnreferenced(self, decoder):
"""
Check the decoder's data and finish callbacks and make sure they are
None in order to help avoid reference cycles.
"""
self.assertIdentical(decoder.dataCallback, None)
self.assertIdentical(decoder.finishCallback, None)
def test_earlyConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} raises L{_DataLoss} if it is
called and the content length is known but not enough bytes have been
delivered.
"""
self.decoder.dataReceived(b'x' * (self.contentLength - 1))
self.assertRaises(_DataLoss, self.decoder.noMoreData)
self._verifyCallbacksUnreferenced(self.decoder)
def test_unknownContentLengthConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} calls the finish callback and
raises L{PotentialDataLoss} if it is called and the content length is
unknown.
"""
body = []
finished = []
decoder = _IdentityTransferDecoder(None, body.append, finished.append)
self.assertRaises(PotentialDataLoss, decoder.noMoreData)
self.assertEqual(body, [])
self.assertEqual(finished, [b''])
self._verifyCallbacksUnreferenced(decoder)
def test_finishedConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} does not raise any exception if
it is called when the content length is known and that many bytes have
been delivered.
"""
self.decoder.dataReceived(b'x' * self.contentLength)
self.decoder.noMoreData()
self._verifyCallbacksUnreferenced(self.decoder)
class ChunkedTransferEncodingTests(unittest.TestCase):
"""
Tests for L{_ChunkedTransferDecoder}, which turns a byte stream encoded
using HTTP I{chunked} C{Transfer-Encoding} back into the original byte
stream.
"""
def test_decoding(self):
"""
L{_ChunkedTransferDecoder.dataReceived} decodes chunked-encoded data
and passes the result to the specified callback.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'3\r\nabc\r\n5\r\n12345\r\n')
p.dataReceived(b'a\r\n0123456789\r\n')
self.assertEqual(L, [b'abc', b'12345', b'0123456789'])
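# For reference, the chunked wire format decoded above is
# "<hex length>\r\n<chunk bytes>\r\n" repeated, terminated by a zero-length
# chunk; a one-chunk sketch using the same constructor:
#
#     received = []
#     decoder = http._ChunkedTransferDecoder(received.append,
#                                            lambda extra: None)
#     decoder.dataReceived(b'5\r\nhello\r\n0\r\n\r\n')
#     received   # [b'hello']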
def test_short(self):
"""
L{_ChunkedTransferDecoder.dataReceived} decodes chunks broken up and
delivered in multiple calls.
"""
L = []
finished = []
p = http._ChunkedTransferDecoder(L.append, finished.append)
for s in iterbytes(b'3\r\nabc\r\n5\r\n12345\r\n0\r\n\r\n'):
p.dataReceived(s)
self.assertEqual(L, [b'a', b'b', b'c', b'1', b'2', b'3', b'4', b'5'])
self.assertEqual(finished, [b''])
def test_newlines(self):
"""
L{_ChunkedTransferDecoder.dataReceived} doesn't treat CR LF pairs
embedded in chunk bodies specially.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'2\r\n\r\n\r\n')
self.assertEqual(L, [b'\r\n'])
def test_extensions(self):
"""
L{_ChunkedTransferDecoder.dataReceived} disregards chunk-extension
fields.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'3; x-foo=bar\r\nabc\r\n')
self.assertEqual(L, [b'abc'])
def test_finish(self):
"""
L{_ChunkedTransferDecoder.dataReceived} interprets a zero-length
chunk as the end of the chunked data stream and calls the completion
callback.
"""
finished = []
p = http._ChunkedTransferDecoder(None, finished.append)
p.dataReceived(b'0\r\n\r\n')
self.assertEqual(finished, [b''])
def test_extra(self):
"""
L{_ChunkedTransferDecoder.dataReceived} passes any bytes which come
after the terminating zero-length chunk to the completion callback.
"""
finished = []
p = http._ChunkedTransferDecoder(None, finished.append)
p.dataReceived(b'0\r\n\r\nhello')
self.assertEqual(finished, [b'hello'])
def test_afterFinished(self):
"""
L{_ChunkedTransferDecoder.dataReceived} raises C{RuntimeError} if it
is called after it has seen the last chunk.
"""
p = http._ChunkedTransferDecoder(None, lambda bytes: None)
p.dataReceived(b'0\r\n\r\n')
self.assertRaises(RuntimeError, p.dataReceived, b'hello')
def test_earlyConnectionLose(self):
"""
L{_ChunkedTransferDecoder.noMoreData} raises L{_DataLoss} if it is
called and the end of the last trailer has not yet been received.
"""
parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
parser.dataReceived(b'0\r\n\r')
exc = self.assertRaises(_DataLoss, parser.noMoreData)
self.assertEqual(
str(exc),
"Chunked decoder in 'TRAILER' state, still expecting more data "
"to get to 'FINISHED' state.")
def test_finishedConnectionLose(self):
"""
L{_ChunkedTransferDecoder.noMoreData} does not raise any exception if
it is called after the terminal zero length chunk is received.
"""
parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
parser.dataReceived(b'0\r\n\r\n')
parser.noMoreData()
def test_reentrantFinishedNoMoreData(self):
"""
L{_ChunkedTransferDecoder.noMoreData} can be called from the finished
callback without raising an exception.
"""
errors = []
successes = []
def finished(extra):
try:
parser.noMoreData()
except:
errors.append(Failure())
else:
successes.append(True)
parser = http._ChunkedTransferDecoder(None, finished)
parser.dataReceived(b'0\r\n\r\n')
self.assertEqual(errors, [])
self.assertEqual(successes, [True])
class ChunkingTests(unittest.TestCase, ResponseTestMixin):
strings = [b"abcv", b"", b"fdfsd423", b"Ffasfas\r\n",
b"523523\n\rfsdf", b"4234"]
def testChunks(self):
for s in self.strings:
chunked = b''.join(http.toChunk(s))
self.assertEqual((s, b''), http.fromChunk(chunked))
self.assertRaises(ValueError, http.fromChunk, b'-5\r\nmalformed!\r\n')
def testConcatenatedChunks(self):
chunked = b''.join([b''.join(http.toChunk(t)) for t in self.strings])
result = []
buffer = b""
for c in iterbytes(chunked):
buffer = buffer + c
try:
data, buffer = http.fromChunk(buffer)
result.append(data)
except ValueError:
pass
self.assertEqual(result, self.strings)
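# http.toChunk and http.fromChunk are inverses; a sketch of the round trip
# that the byte-by-byte loop above performs incrementally:
#
#     wire = b''.join(http.toChunk(b'payload'))   # b'7\r\npayload\r\n'
#     http.fromChunk(wire)                        # (b'payload', b'')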
def test_chunkedResponses(self):
"""
Test that the L{HTTPChannel} correctly chunks responses when needed.
"""
trans = StringTransport()
channel = http.HTTPChannel()
channel.makeConnection(trans)
req = http.Request(channel, False)
req.setResponseCode(200)
req.clientproto = b"HTTP/1.1"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
req.write(b'World!')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.1 200 OK",
b"Test: lemur",
b"Transfer-Encoding: chunked",
b"5\r\nHello\r\n6\r\nWorld!\r\n")])
def runChunkedRequest(self, httpRequest, requestFactory=None,
chunkSize=1):
"""
Execute a web request based on plain text content, chunking
the request payload.
This is a stripped-down, chunking version of ParsingTests.runRequest.
"""
channel = http.HTTPChannel()
if requestFactory:
channel.requestFactory = _makeRequestProxyFactory(requestFactory)
httpRequest = httpRequest.replace(b"\n", b"\r\n")
header, body = httpRequest.split(b"\r\n\r\n", 1)
transport = StringTransport()
channel.makeConnection(transport)
channel.dataReceived(header+b"\r\n\r\n")
for pos in range(len(body)//chunkSize+1):
if channel.transport.disconnecting:
break
channel.dataReceived(b"".join(
http.toChunk(body[pos*chunkSize:(pos+1)*chunkSize])))
channel.dataReceived(b"".join(http.toChunk(b"")))
channel.connectionLost(IOError("all done"))
return channel
def test_multipartFormData(self):
"""
Test that chunked uploads are actually processed into args.
This is essentially a copy of ParsingTests.test_multipartFormData,
just with chunking put in.
This fails as of twisted version 18.9.0 because of bug #9678.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.write(b"done")
self.finish()
req = b'''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Transfer-Encoding: chunked
--AaB03x
Content-Type: text/plain
Content-Disposition: form-data; name="text"
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
channel = self.runChunkedRequest(req, MyRequest, chunkSize=5)
self.assertEqual(channel.transport.value(),
b"HTTP/1.0 200 OK\r\n\r\ndone")
self.assertEqual(len(processed), 1)
self.assertEqual(processed[0].args, {b"text": [b"abasdfg"]})
class ParsingTests(unittest.TestCase):
"""
Tests for protocol parsing in L{HTTPChannel}.
"""
def setUp(self):
self.didRequest = False
def runRequest(self, httpRequest, requestFactory=None, success=True,
channel=None):
"""
Execute a web request based on plain text content.
@param httpRequest: Content for the request which is processed. Each
L{"\n"} will be replaced with L{"\r\n"}.
@type httpRequest: C{bytes}
@param requestFactory: 2-argument callable returning a Request.
@type requestFactory: C{callable}
@param success: Value to compare against I{self.didRequest}.
@type success: C{bool}
@param channel: Channel instance over which the request is processed.
@type channel: L{HTTPChannel}
@return: Returns the channel used for processing the request.
@rtype: L{HTTPChannel}
"""
if not channel:
channel = http.HTTPChannel()
if requestFactory:
channel.requestFactory = _makeRequestProxyFactory(requestFactory)
httpRequest = httpRequest.replace(b"\n", b"\r\n")
transport = StringTransport()
channel.makeConnection(transport)
# one byte at a time, to stress it.
for byte in iterbytes(httpRequest):
if channel.transport.disconnecting:
break
channel.dataReceived(byte)
channel.connectionLost(IOError("all done"))
if success:
self.assertTrue(self.didRequest)
else:
self.assertFalse(self.didRequest)
return channel
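# Typical use of the helper above by the tests that follow, as a sketch
# (MyRequest stands in for whatever request class the individual test
# defines):
#
#     channel = self.runRequest(b"GET / HTTP/1.0\n\n", MyRequest)
#     channel.transport.value()        # raw response bytes written back
#     channel.transport.disconnecting  # whether the channel hung up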
def assertRequestRejected(self, requestLines):
"""
Execute an HTTP request and assert that it is rejected with a 400 Bad
Request response and disconnection.
@param requestLines: Plain text lines of the request. These lines will
be joined with newlines to form the HTTP request that is processed.
@type requestLines: C{list} of C{bytes}
"""
httpRequest = b"\n".join(requestLines)
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
channel = self.runRequest(httpRequest, MyRequest, success=False)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n",
)
self.assertTrue(channel.transport.disconnecting)
self.assertEqual(processed, [])
def test_invalidNonAsciiMethod(self):
"""
When the client sends an invalid HTTP method containing
non-ASCII characters, an HTTP 400 'Bad Request' status is returned.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
badRequestLine = b"GE\xc2\xa9 / HTTP/1.1\r\n\r\n"
channel = self.runRequest(badRequestLine, MyRequest, 0)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
self.assertEqual(processed, [])
def test_basicAuth(self):
"""
L{HTTPChannel} provides username and password information supplied in
an I{Authorization} header to the L{Request} which makes it available
via its C{getUser} and C{getPassword} methods.
"""
requests = []
class Request(http.Request):
def process(self):
self.credentials = (self.getUser(), self.getPassword())
requests.append(self)
for u, p in [(b"foo", b"bar"), (b"hello", b"there:z")]:
s = base64.b64encode(b":".join((u, p)))
f = b"GET / HTTP/1.0\nAuthorization: Basic " + s + b"\n\n"
self.runRequest(f, Request, 0)
req = requests.pop()
self.assertEqual((u, p), req.credentials)
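# The Authorization header built above is just "Basic " followed by
# base64(b"user:password"); for illustration:
#
#     base64.b64encode(b"foo:bar")   # b'Zm9vOmJhcg=='
#     # -> b"GET / HTTP/1.0\r\nAuthorization: Basic Zm9vOmJhcg==\r\n\r\n"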
def test_headers(self):
"""
Headers received by L{HTTPChannel} in a request are made available to
the L{Request}.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
requestLines = [
b"GET / HTTP/1.0",
b"Foo: bar",
b"baz: Quux",
b"baz: quux",
b"",
b""]
self.runRequest(b'\n'.join(requestLines), MyRequest, 0)
[request] = processed
self.assertEqual(
request.requestHeaders.getRawHeaders(b'foo'), [b'bar'])
self.assertEqual(
request.requestHeaders.getRawHeaders(b'bAz'), [b'Quux', b'quux'])
def test_headersMultiline(self):
"""
Line folded headers are handled by L{HTTPChannel} by replacing each
fold with a single space by the time they are made available to the
L{Request}. Any leading whitespace in the folded lines of the header
value is preserved.
See RFC 7230 section 3.2.4.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
requestLines = [
b"GET / HTTP/1.0",
b"nospace: ",
b" nospace\t",
b"space:space",
b" space",
b"spaces: spaces",
b" spaces",
b" spaces",
b"tab: t",
b"\ta",
b"\tb",
b"",
b"",
]
self.runRequest(b"\n".join(requestLines), MyRequest, 0)
[request] = processed
# All leading and trailing whitespace is stripped from the
# header-value.
self.assertEqual(
request.requestHeaders.getRawHeaders(b"nospace"),
[b"nospace"],
)
self.assertEqual(
request.requestHeaders.getRawHeaders(b"space"),
[b"space space"],
)
self.assertEqual(
request.requestHeaders.getRawHeaders(b"spaces"),
[b"spaces spaces spaces"],
)
self.assertEqual(
request.requestHeaders.getRawHeaders(b"tab"),
[b"t \ta \tb"],
)
def test_tooManyHeaders(self):
"""
C{HTTPChannel} enforces a limit of C{HTTPChannel.maxHeaders} on the
number of headers received per request.
"""
requestLines = [b"GET / HTTP/1.0"]
for i in range(http.HTTPChannel.maxHeaders + 2):
requestLines.append(networkString("%s: foo" % (i,)))
requestLines.extend([b"", b""])
self.assertRequestRejected(requestLines)
def test_invalidContentLengthHeader(self):
"""
If a I{Content-Length} header with a non-integer value is received,
a 400 (Bad Request) response is sent to the client and the connection
is closed.
"""
self.assertRequestRejected([
b"GET / HTTP/1.0",
b"Content-Length: x",
b"",
b"",
])
def test_invalidHeaderNoColon(self):
"""
If a header without a colon is received, a 400 (Bad Request) response
is sent to the client and the connection is closed.
"""
self.assertRequestRejected([
b"GET / HTTP/1.0",
b"HeaderName ",
b"",
b"",
])
def test_invalidHeaderOnlyColon(self):
"""
C{HTTPChannel} rejects a request with an empty header name (i.e.
nothing before the colon). A 400 (Bad Request) response is
generated and the connection is closed.
"""
self.assertRequestRejected([
b"GET / HTTP/1.0",
b": foo",
b"",
b"",
])
def test_invalidHeaderWhitespaceBeforeColon(self):
"""
C{HTTPChannel} rejects a request containing a header with whitespace
between the header name and the colon, as required by RFC 7230 section
3.2.4. A 400 (Bad Request) response is generated and the connection is
closed.
"""
self.assertRequestRejected([
b"GET / HTTP/1.0",
b"HeaderName : foo",
b"",
b"",
])
def test_headerLimitPerRequest(self):
"""
C{HTTPChannel} enforces the limit of C{HTTPChannel.maxHeaders} per
request so that headers received in an earlier request do not count
towards the limit when processing a later request.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
self.patch(http.HTTPChannel, 'maxHeaders', 1)
requestLines = [
b"GET / HTTP/1.1",
b"Foo: bar",
b"",
b"",
b"GET / HTTP/1.1",
b"Bar: baz",
b"",
b""]
channel = self.runRequest(b"\n".join(requestLines), MyRequest, 0)
[first, second] = processed
self.assertEqual(first.getHeader(b'foo'), b'bar')
self.assertEqual(second.getHeader(b'bar'), b'baz')
self.assertEqual(
channel.transport.value(),
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n'
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n')
def test_headersTooBigInitialCommand(self):
"""
C{HTTPChannel} enforces a limit of C{HTTPChannel.totalHeadersSize}
on the size of headers received per request, starting from the
initial command line.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
channel = http.HTTPChannel()
channel.totalHeadersSize = 10
httpRequest = b'GET /path/longer/than/10 HTTP/1.1\n'
channel = self.runRequest(
httpRequest=httpRequest,
requestFactory=MyRequest,
channel=channel,
success=False
)
self.assertEqual(processed, [])
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_headersTooBigOtherHeaders(self):
"""
C{HTTPChannel} enforces a limit of C{HTTPChannel.totalHeadersSize}
on the size of headers received per request, counting the request
line together with all header lines.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
channel = http.HTTPChannel()
channel.totalHeadersSize = 40
httpRequest = (
b'GET /less/than/40 HTTP/1.1\n'
b'Some-Header: less-than-40\n'
)
channel = self.runRequest(
httpRequest=httpRequest,
requestFactory=MyRequest,
channel=channel, success=False
)
self.assertEqual(processed, [])
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_headersTooBigPerRequest(self):
"""
C{HTTPChannel} enforces the total header size limit per individual
request; the counter is reset at the end of each request.
"""
class SimpleRequest(http.Request):
def process(self):
self.finish()
channel = http.HTTPChannel()
channel.totalHeadersSize = 60
channel.requestFactory = SimpleRequest
httpRequest = (
b'GET / HTTP/1.1\n'
b'Some-Header: total-less-than-60\n'
b'\n'
b'GET / HTTP/1.1\n'
b'Some-Header: less-than-60\n'
b'\n'
)
channel = self.runRequest(
httpRequest=httpRequest, channel=channel, success=False)
self.assertEqual(
channel.transport.value(),
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n'
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n'
)
def testCookies(self):
"""
Test cookie parsing and reading.
"""
httpRequest = b'''\
GET / HTTP/1.0
Cookie: rabbit="eat carrot"; ninja=secret; spam="hey 1=1!"
'''
cookies = {}
testcase = self
class MyRequest(http.Request):
def process(self):
for name in [b'rabbit', b'ninja', b'spam']:
cookies[name] = self.getCookie(name)
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(
cookies, {
b'rabbit': b'"eat carrot"',
b'ninja': b'secret',
b'spam': b'"hey 1=1!"'})
def testGET(self):
httpRequest = b'''\
GET /?key=value&multiple=two+words&multiple=more%20words&empty= HTTP/1.0
'''
method = []
args = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
args.extend([
self.args[b"key"],
self.args[b"empty"],
self.args[b"multiple"]])
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b"GET"])
self.assertEqual(
args, [[b"value"], [b""], [b"two words", b"more words"]])
def test_extraQuestionMark(self):
"""
While only a single '?' is allowed in a URL, several other servers
accept multiple and pass everything after the first one through as
part of the query arguments. Test that we emulate this behavior.
"""
httpRequest = b'GET /foo?bar=?&baz=quux HTTP/1.0\n\n'
method = []
path = []
args = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
path.append(self.path)
args.extend([self.args[b'bar'], self.args[b'baz']])
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b'GET'])
self.assertEqual(path, [b'/foo'])
self.assertEqual(args, [[b'?'], [b'quux']])
def test_formPOSTRequest(self):
"""
The request body of a I{POST} request with a I{Content-Type} header
of I{application/x-www-form-urlencoded} is parsed according to that
content type and made available in the C{args} attribute of the
request object. The original bytes of the request may still be read
from the C{content} attribute.
"""
query = 'key=value&multiple=two+words&multiple=more%20words&empty='
httpRequest = networkString('''\
POST / HTTP/1.0
Content-Length: %d
Content-Type: application/x-www-form-urlencoded
%s''' % (len(query), query))
method = []
args = []
content = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
args.extend([
self.args[b'key'], self.args[b'empty'],
self.args[b'multiple']])
content.append(self.content.read())
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b"POST"])
self.assertEqual(
args, [[b"value"], [b""], [b"two words", b"more words"]])
# Reading from the content file-like must produce the entire request
# body.
self.assertEqual(content, [networkString(query)])
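# The urlencoded body above is decoded with essentially the same rules as
# http.parse_qs; for illustration (a sketch, not an extra assertion):
#
#     http.parse_qs(b'key=value&multiple=two+words&multiple=more%20words&empty=',
#                   keep_blank_values=1)
#     # {b'key': [b'value'], b'empty': [b''],
#     #  b'multiple': [b'two words', b'more words']}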
def test_multipartProcessingFailure(self):
"""
When the multipart processing fails, the client gets a 400 Bad Request.
"""
# The parsing failure is caused by the UTF-8 boundary -- the spec
# says it must be ASCII.
req = b'''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=\xe2\x98\x83
Content-Length: 103
--\xe2\x98\x83
Content-Type: text/plain
Content-Length: 999999999999999999999999999999999999999999999999999999999999999
Content-Transfer-Encoding: quoted-printable
abasdfg
--\xe2\x98\x83--
'''
channel = self.runRequest(req, http.Request, success=False)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_multipartFormData(self):
"""
If the request has a Content-Type of C{multipart/form-data}, and the
form data is parseable, the form arguments will be added to the
request's args.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.write(b"done")
self.finish()
req = b'''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Content-Length: 149
--AaB03x
Content-Type: text/plain
Content-Disposition: form-data; name="text"
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
channel = self.runRequest(req, MyRequest, success=False)
self.assertEqual(channel.transport.value(),
b"HTTP/1.0 200 OK\r\n\r\ndone")
self.assertEqual(len(processed), 1)
self.assertEqual(processed[0].args, {b"text": [b"abasdfg"]})
def test_multipartFileData(self):
"""
If the request has a Content-Type of C{multipart/form-data},
and the form data is parseable and contains files, the file
portions will be added to the request's args.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.write(b"done")
self.finish()
body = b"""-----------------------------738837029596785559389649595
Content-Disposition: form-data; name="uploadedfile"; filename="test"
Content-Type: application/octet-stream
abasdfg
-----------------------------738837029596785559389649595--
"""
req = '''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=---------------------------738837029596785559389649595
Content-Length: ''' + str(len(body.replace(b"\n", b"\r\n"))) + '''
'''
channel = self.runRequest(req.encode('ascii') + body, MyRequest,
success=False)
self.assertEqual(channel.transport.value(),
b"HTTP/1.0 200 OK\r\n\r\ndone")
self.assertEqual(len(processed), 1)
self.assertEqual(processed[0].args, {b"uploadedfile": [b"abasdfg"]})
def test_chunkedEncoding(self):
"""
If a request uses the I{chunked} transfer encoding, the request body is
decoded accordingly before it is made available on the request.
"""
httpRequest = b'''\
GET / HTTP/1.0
Content-Type: text/plain
Transfer-Encoding: chunked
6
Hello,
14
spam,eggs spam spam
0
'''
path = []
method = []
content = []
decoder = []
testcase = self
class MyRequest(http.Request):
def process(self):
content.append(self.content)
content.append(self.content.read())
# Don't let it close the original content object. We want to
# inspect it later.
self.content = BytesIO()
method.append(self.method)
path.append(self.path)
decoder.append(self.channel._transferDecoder)
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
# We took responsibility for closing this when we replaced the request
# attribute, above.
self.addCleanup(content[0].close)
assertIsFilesystemTemporary(self, content[0])
self.assertEqual(content[1], b'Hello, spam,eggs spam spam')
self.assertEqual(method, [b'GET'])
self.assertEqual(path, [b'/'])
self.assertEqual(decoder, [None])
def test_malformedChunkedEncoding(self):
"""
If a request uses the I{chunked} transfer encoding, but provides an
invalid chunk length value, the request fails with a 400 error.
"""
# See test_chunkedEncoding for the correct form of this request.
httpRequest = b'''\
GET / HTTP/1.1
Content-Type: text/plain
Transfer-Encoding: chunked
MALFORMED_LINE_THIS_SHOULD_BE_'6'
Hello,
14
spam,eggs spam spam
0
'''
didRequest = []
class MyRequest(http.Request):
def process(self):
# This request should fail, so this should never be called.
didRequest.append(True)
channel = self.runRequest(httpRequest, MyRequest, success=False)
self.assertFalse(didRequest, "Request.process called")
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
def test_basicAuthException(self):
"""
A L{Request} that throws an exception processing basic authorization
logs an error and uses an empty username and password.
"""
logObserver = EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
requests = []
class Request(http.Request):
def process(self):
self.credentials = (self.getUser(), self.getPassword())
requests.append(self)
u = b"foo"
p = b"bar"
s = base64.b64encode(b":".join((u, p)))
f = b"GET / HTTP/1.0\nAuthorization: Basic " + s + b"\n\n"
self.patch(base64, 'b64decode', lambda x: [])
self.runRequest(f, Request, 0)
req = requests.pop()
self.assertEqual((b'', b''), req.credentials)
self.assertEquals(1, len(logObserver))
event = logObserver[0]
f = event["log_failure"]
self.assertIsInstance(f.value, AttributeError)
self.flushLoggedErrors(AttributeError)
def test_duplicateContentLengths(self):
"""
A request which includes multiple C{content-length} headers
fails with a 400 response without calling L{Request.process}.
"""
self.assertRequestRejected([
b'GET /a HTTP/1.1',
b'Content-Length: 56',
b'Content-Length: 0',
b'Host: host.invalid',
b'',
b'',
])
def test_duplicateContentLengthsWithPipelinedRequests(self):
"""
Two pipelined requests, the first of which includes multiple
C{content-length} headers, trigger a 400 response without
calling L{Request.process}.
"""
self.assertRequestRejected([
b'GET /a HTTP/1.1',
b'Content-Length: 56',
b'Content-Length: 0',
b'Host: host.invalid',
b'',
b'',
b'GET /a HTTP/1.1',
b'Host: host.invalid',
b'',
b'',
])
def test_contentLengthAndTransferEncoding(self):
"""
A request that includes both C{content-length} and
C{transfer-encoding} headers fails with a 400 response without
calling L{Request.process}.
"""
self.assertRequestRejected([
b'GET /a HTTP/1.1',
b'Transfer-Encoding: chunked',
b'Content-Length: 0',
b'Host: host.invalid',
b'',
b'',
])
def test_contentLengthAndTransferEncodingWithPipelinedRequests(self):
"""
Two pipelined requests, the first of which includes both
C{content-length} and C{transfer-encoding} headers, trigger a
400 response without calling L{Request.process}.
"""
self.assertRequestRejected([
b'GET /a HTTP/1.1',
b'Transfer-Encoding: chunked',
b'Content-Length: 0',
b'Host: host.invalid',
b'',
b'',
b'GET /a HTTP/1.1',
b'Host: host.invalid',
b'',
b'',
])
def test_unknownTransferEncoding(self):
"""
A request whose C{transfer-encoding} header includes a value
other than C{chunked} or C{identity} fails with a 400 response
without calling L{Request.process}.
"""
self.assertRequestRejected([
b'GET /a HTTP/1.1',
b'Transfer-Encoding: unknown',
b'Host: host.invalid',
b'',
b'',
])
def test_transferEncodingIdentity(self):
"""
A request with a valid C{content-length} and a
C{transfer-encoding} whose value is C{identity} succeeds.
"""
body = []
class SuccessfulRequest(http.Request):
processed = False
def process(self):
body.append(self.content.read())
self.setHeader(b'content-length', b'0')
self.finish()
request = b'''\
GET / HTTP/1.1
Host: host.invalid
Content-Length: 2
Transfer-Encoding: identity
ok
'''
channel = self.runRequest(request, SuccessfulRequest, False)
self.assertEqual(body, [b'ok'])
self.assertEqual(
channel.transport.value(),
b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n',
)
class QueryArgumentsTests(unittest.TestCase):
def testParseqs(self):
self.assertEqual(
parse_qs(b"a=b&d=c;+=f"),
http.parse_qs(b"a=b&d=c;+=f"))
self.assertRaises(
ValueError, http.parse_qs, b"blah", strict_parsing=True)
self.assertEqual(
parse_qs(b"a=&b=c", keep_blank_values=1),
http.parse_qs(b"a=&b=c", keep_blank_values=1))
self.assertEqual(
parse_qs(b"a=&b=c"),
http.parse_qs(b"a=&b=c"))
def test_urlparse(self):
"""
For a given URL, L{http.urlparse} should behave the same as L{urlparse},
except it should always return C{bytes}, never text.
"""
def urls():
for scheme in (b'http', b'https'):
for host in (b'example.com',):
for port in (None, 100):
for path in (b'', b'path'):
if port is not None:
host = host + b':' + networkString(str(port))
yield urlunsplit((scheme, host, path, b'', b''))
def assertSameParsing(url, decode):
"""
Verify that C{url} is parsed into the same objects by both
L{http.urlparse} and L{urlparse}.
"""
urlToStandardImplementation = url
if decode:
urlToStandardImplementation = url.decode('ascii')
# stdlib urlparse will give back whatever type we give it. To be
# able to compare the values meaningfully, if it gives back unicode,
# convert all the values to bytes.
standardResult = urlparse(urlToStandardImplementation)
if isinstance(standardResult.scheme, unicode):
# The choice of encoding is basically irrelevant. The values
# are all in ASCII. UTF-8 is, of course, the correct choice.
expected = (standardResult.scheme.encode('utf-8'),
standardResult.netloc.encode('utf-8'),
standardResult.path.encode('utf-8'),
standardResult.params.encode('utf-8'),
standardResult.query.encode('utf-8'),
standardResult.fragment.encode('utf-8'))
else:
expected = (standardResult.scheme,
standardResult.netloc,
standardResult.path,
standardResult.params,
standardResult.query,
standardResult.fragment)
scheme, netloc, path, params, query, fragment = http.urlparse(url)
self.assertEqual(
(scheme, netloc, path, params, query, fragment), expected)
self.assertIsInstance(scheme, bytes)
self.assertIsInstance(netloc, bytes)
self.assertIsInstance(path, bytes)
self.assertIsInstance(params, bytes)
self.assertIsInstance(query, bytes)
self.assertIsInstance(fragment, bytes)
# With caching, unicode then str
clear_cache()
for url in urls():
assertSameParsing(url, True)
assertSameParsing(url, False)
# With caching, str then unicode
clear_cache()
for url in urls():
assertSameParsing(url, False)
assertSameParsing(url, True)
# Without caching
for url in urls():
clear_cache()
assertSameParsing(url, True)
clear_cache()
assertSameParsing(url, False)
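# A sketch of the bytes-only contract being verified above, for an
# ASCII-only URL:
#
#     http.urlparse(b'http://example.com/path')
#     # components come back as (b'http', b'example.com', b'/path', b'', b'', b''),
#     # every one a bytes instance regardless of caching state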
def test_urlparseRejectsUnicode(self):
"""
L{http.urlparse} should reject unicode input early.
"""
self.assertRaises(TypeError, http.urlparse, u'http://example.org/path')
class ClientDriver(http.HTTPClient):
def handleStatus(self, version, status, message):
self.version = version
self.status = status
self.message = message
class ClientStatusParsingTests(unittest.TestCase):
def testBaseline(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201 foo')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'foo')
def testNoMessage(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'')
def testNoMessage_trailingSpace(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201 ')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'')
class RequestTests(unittest.TestCase, ResponseTestMixin):
"""
Tests for L{http.Request}
"""
def _compatHeadersTest(self, oldName, newName):
"""
Verify that each of two different attributes which are associated with
the same state properly reflect changes made through the other.
This is used to test that the C{headers}/C{responseHeaders} and
C{received_headers}/C{requestHeaders} pairs interact properly.
"""
req = http.Request(DummyChannel(), False)
getattr(req, newName).setRawHeaders(b"test", [b"lemur"])
self.assertEqual(getattr(req, oldName)[b"test"], b"lemur")
setattr(req, oldName, {b"foo": b"bar"})
self.assertEqual(
list(getattr(req, newName).getAllRawHeaders()),
[(b"Foo", [b"bar"])])
setattr(req, newName, http_headers.Headers())
self.assertEqual(getattr(req, oldName), {})
def test_getHeader(self):
"""
L{http.Request.getHeader} returns the value of the named request
header.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur"])
self.assertEqual(req.getHeader(b"test"), b"lemur")
def test_getRequestHostname(self):
"""
L{http.Request.getRequestHostname} returns the hostname portion of the
request, based on the C{Host:} header.
"""
req = http.Request(DummyChannel(), False)
def check(header, expectedHost):
req.requestHeaders.setRawHeaders(b"host", [header])
self.assertEqual(req.getRequestHostname(), expectedHost)
check(b"example.com", b"example.com")
check(b"example.com:8443", b"example.com")
check(b"192.168.1.1", b"192.168.1.1")
check(b"192.168.1.1:19289", b"192.168.1.1")
check(b"[2607:f0d0:1002:51::4]",
b"2607:f0d0:1002:51::4")
check(b"[2607:f0d0:1002:0051:0000:0000:0000:0004]",
b"2607:f0d0:1002:0051:0000:0000:0000:0004")
check(b"[::1]", b"::1")
check(b"[::1]:8080", b"::1")
check(b"[2607:f0d0:1002:51::4]:9443", b"2607:f0d0:1002:51::4")
def test_getHeaderReceivedMultiples(self):
"""
When there are multiple values for a single request header,
L{http.Request.getHeader} returns the last value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur", b"panda"])
self.assertEqual(req.getHeader(b"test"), b"panda")
def test_getHeaderNotFound(self):
"""
L{http.Request.getHeader} returns L{None} when asked for the value of a
request header which is not present.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.getHeader(b"test"), None)
def test_getAllHeaders(self):
"""
L{http.Request.getAllHeaders} returns a C{dict} mapping all request
header names to their corresponding values.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur"])
self.assertEqual(req.getAllHeaders(), {b"test": b"lemur"})
def test_getAllHeadersNoHeaders(self):
"""
L{http.Request.getAllHeaders} returns an empty C{dict} if there are no
request headers.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.getAllHeaders(), {})
def test_getAllHeadersMultipleHeaders(self):
"""
When there are multiple values for a single request header,
L{http.Request.getAllHeaders} returns only the last value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur", b"panda"])
self.assertEqual(req.getAllHeaders(), {b"test": b"panda"})
def test_setResponseCode(self):
"""
L{http.Request.setResponseCode} takes a status code and causes it to be
used as the response status.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.setResponseCode(201)
req.write(b'')
self.assertEqual(
channel.transport.written.getvalue().splitlines()[0],
b"(no clientproto yet) 201 Created")
def test_setResponseCodeAndMessage(self):
"""
L{http.Request.setResponseCode} takes a status code and a message and
causes them to be used as the response status.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.setResponseCode(202, b"happily accepted")
req.write(b'')
self.assertEqual(
channel.transport.written.getvalue().splitlines()[0],
b'(no clientproto yet) 202 happily accepted')
def test_setResponseCodeAndMessageNotBytes(self):
"""
L{http.Request.setResponseCode} accepts C{bytes} for the message
parameter and raises L{TypeError} if passed anything else.
"""
channel = DummyChannel()
req = http.Request(channel, False)
self.assertRaises(TypeError, req.setResponseCode,
202, u"not happily accepted")
def test_setResponseCodeAcceptsIntegers(self):
"""
L{http.Request.setResponseCode} accepts C{int} for the code parameter
and raises L{TypeError} if passed anything else.
"""
req = http.Request(DummyChannel(), False)
req.setResponseCode(1)
self.assertRaises(TypeError, req.setResponseCode, "1")
def test_setResponseCodeAcceptsLongIntegers(self):
"""
L{http.Request.setResponseCode} accepts C{long} for the code
parameter.
"""
req = http.Request(DummyChannel(), False)
req.setResponseCode(long(1))
def test_setLastModifiedNeverSet(self):
"""
When no previous value was set and no 'if-modified-since' value was
requested, L{http.Request.setLastModified} takes a timestamp in seconds
since the epoch and sets the request's lastModified attribute.
"""
req = http.Request(DummyChannel(), False)
req.setLastModified(42)
self.assertEqual(req.lastModified, 42)
def test_setLastModifiedUpdate(self):
"""
If the supplied timestamp is later than the lastModified attribute's
value, L{http.Request.setLastModified} updates the lastModified
attribute.
"""
req = http.Request(DummyChannel(), False)
req.setLastModified(0)
req.setLastModified(1)
self.assertEqual(req.lastModified, 1)
def test_setLastModifiedIgnore(self):
"""
If the supplied timestamp occurs earlier than the current lastModified
attribute, L{http.Request.setLastModified} ignores it.
"""
req = http.Request(DummyChannel(), False)
req.setLastModified(1)
req.setLastModified(0)
self.assertEqual(req.lastModified, 1)
def test_setLastModifiedCached(self):
"""
If the resource is older than the if-modified-since date in the request
header, L{http.Request.setLastModified} returns L{http.CACHED}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'02 Jan 1970 00:00:00 GMT']
)
result = req.setLastModified(42)
self.assertEqual(result, http.CACHED)
def test_setLastModifiedNotCached(self):
"""
If the resource is newer than the if-modified-since date in the request
header, L{http.Request.setLastModified} returns L{None}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'01 Jan 1970 00:00:00 GMT']
)
result = req.setLastModified(1000000)
self.assertEqual(result, None)
def test_setLastModifiedTwiceNotCached(self):
"""
When L{http.Request.setLastModified} is called multiple times, the
highest supplied value is honored. If that value is higher than the
if-modified-since date in the request header, the method returns None.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'01 Jan 1970 00:00:01 GMT']
)
req.setLastModified(1000000)
result = req.setLastModified(0)
self.assertEqual(result, None)
def test_setLastModifiedTwiceCached(self):
"""
When L{http.Request.setLastModified} is called multiple times, the
highest supplied value is honored. If that value is lower than the
if-modified-since date in the request header, the method returns
L{http.CACHED}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'01 Jan 1999 00:00:01 GMT']
)
req.setLastModified(1)
result = req.setLastModified(0)
self.assertEqual(result, http.CACHED)
def test_setHost(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should not be added because it is the default.
"""
req = http.Request(DummyChannel(), False)
req.setHost(b"example.com", 80)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com"])
def test_setHostSSL(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should not be added because it is the default.
"""
d = DummyChannel()
d.transport = DummyChannel.SSL()
req = http.Request(d, False)
req.setHost(b"example.com", 443)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com"])
def test_setHostNonDefaultPort(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should be added because it is not the default.
"""
req = http.Request(DummyChannel(), False)
req.setHost(b"example.com", 81)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com:81"])
def test_setHostSSLNonDefaultPort(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should be added because it is not the default.
"""
d = DummyChannel()
d.transport = DummyChannel.SSL()
req = http.Request(d, False)
req.setHost(b"example.com", 81)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com:81"])
def test_setHeader(self):
"""
L{http.Request.setHeader} sets the value of the given response header.
"""
req = http.Request(DummyChannel(), False)
req.setHeader(b"test", b"lemur")
self.assertEqual(req.responseHeaders.getRawHeaders(b"test"), [b"lemur"])
def _checkCookie(self, expectedCookieValue, *args, **kwargs):
"""
Call L{http.Request.addCookie} with C{*args} and C{**kwargs}, and check
that the cookie value is equal to C{expectedCookieValue}.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.addCookie(*args, **kwargs)
self.assertEqual(req.cookies[0], expectedCookieValue)
# Write nothing to make it produce the headers
req.write(b"")
writtenLines = channel.transport.written.getvalue().split(b"\r\n")
# There should be one Set-Cookie header
addCookieLines = [x for x in writtenLines
if x.startswith(b"Set-Cookie")]
self.assertEqual(len(addCookieLines), 1)
self.assertEqual(addCookieLines[0],
b"Set-Cookie: " + expectedCookieValue)
def test_addCookieWithMinimumArgumentsUnicode(self):
"""
L{http.Request.addCookie} adds a new cookie to be sent with the
response, and can be called with just a key and a value. L{unicode}
arguments are encoded using UTF-8.
"""
expectedCookieValue = b"foo=bar"
self._checkCookie(expectedCookieValue, u"foo", u"bar")
def test_addCookieWithAllArgumentsUnicode(self):
"""
L{http.Request.addCookie} adds a new cookie to be sent with the
response. L{unicode} arguments are encoded using UTF-8.
"""
expectedCookieValue = (
b"foo=bar; Expires=Fri, 31 Dec 9999 23:59:59 GMT; "
b"Domain=.example.com; Path=/; Max-Age=31536000; "
b"Comment=test; Secure; HttpOnly")
self._checkCookie(expectedCookieValue,
u"foo", u"bar", expires=u"Fri, 31 Dec 9999 23:59:59 GMT",
domain=u".example.com", path=u"/", max_age=u"31536000",
comment=u"test", secure=True, httpOnly=True)
def test_addCookieWithMinimumArgumentsBytes(self):
"""
L{http.Request.addCookie} adds a new cookie to be sent with the
response, and can be called with just a key and a value. L{bytes}
arguments are not decoded.
"""
expectedCookieValue = b"foo=bar"
self._checkCookie(expectedCookieValue, b"foo", b"bar")
def test_addCookieWithAllArgumentsBytes(self):
"""
L{http.Request.addCookie} adds a new cookie to be sent with the
response. L{bytes} arguments are not decoded.
"""
expectedCookieValue = (
b"foo=bar; Expires=Fri, 31 Dec 9999 23:59:59 GMT; "
b"Domain=.example.com; Path=/; Max-Age=31536000; "
b"Comment=test; Secure; HttpOnly")
self._checkCookie(
expectedCookieValue,
b"foo", b"bar", expires=b"Fri, 31 Dec 9999 23:59:59 GMT",
domain=b".example.com", path=b"/", max_age=b"31536000",
comment=b"test", secure=True, httpOnly=True)
def test_addCookieSanitization(self):
"""
L{http.Request.addCookie} replaces linear whitespace and
semicolons with single spaces.
"""
def cookieValue(key, value):
return b'='.join([key, value])
arguments = [('expires', b'Expires'),
('domain', b'Domain'),
('path', b'Path'),
('max_age', b'Max-Age'),
('comment', b'Comment')]
inputsAndOutputs = list(
zip(textLinearWhitespaceComponents +
bytesLinearWhitespaceComponents,
cycle([sanitizedBytes])))
inputsAndOutputs = [
["Foo; bar", b"Foo bar"],
[b"Foo; bar", b"Foo bar"],
]
for inputValue, outputValue in inputsAndOutputs:
self._checkCookie(cookieValue(outputValue, outputValue),
inputValue, inputValue)
for argument, parameter in arguments:
expected = b"; ".join([
cookieValue(outputValue, outputValue),
cookieValue(parameter, outputValue),
])
self._checkCookie(expected, inputValue, inputValue,
**{argument: inputValue})
def test_addCookieSameSite(self):
"""
L{http.Request.addCookie} supports a C{sameSite} argument.
"""
self._checkCookie(
b"foo=bar; SameSite=lax", b"foo", b"bar", sameSite="lax")
self._checkCookie(
b"foo=bar; SameSite=lax", b"foo", b"bar", sameSite="Lax")
self._checkCookie(
b"foo=bar; SameSite=strict", b"foo", b"bar", sameSite="strict")
self.assertRaises(
ValueError,
self._checkCookie,
b"", b"foo", b"bar", sameSite="anything-else")
def test_firstWrite(self):
"""
For an HTTP 1.0 request, L{http.Request.write} sends an HTTP 1.0
Response-Line and whatever response headers are set.
"""
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: lemur",
b"Hello")])
def test_firstWriteHTTP11Chunked(self):
"""
For an HTTP 1.1 request, L{http.Request.write} sends an HTTP 1.1
Response-Line, whatever response headers are set, and uses chunked
encoding for the response body.
"""
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.1"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
req.write(b'World!')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.1 200 OK",
b"Test: lemur",
b"Transfer-Encoding: chunked",
b"5\r\nHello\r\n6\r\nWorld!\r\n")])
def test_firstWriteLastModified(self):
"""
For an HTTP 1.0 request for a resource with a known last modified time,
L{http.Request.write} sends an HTTP Response-Line, whatever response
headers are set, and a last-modified header with that time.
"""
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.lastModified = 0
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: lemur",
b"Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT",
b"Hello")]
)
def test_lastModifiedAlreadyWritten(self):
"""
If the last-modified header already exists in the L{http.Request}
response headers, the lastModified attribute is ignored and a message
is logged.
"""
logObserver = EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.lastModified = 1000000000
req.responseHeaders.setRawHeaders(
b"last-modified",
[b"Thu, 01 Jan 1970 00:00:00 GMT"]
)
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT",
b"Hello")])
self.assertEquals(1, len(logObserver))
event = logObserver[0]
self.assertEquals(
"Warning: last-modified specified both in"
" header list and lastModified attribute.",
event["log_format"]
)
def test_receivedCookiesDefault(self):
"""
L{http.Request.received_cookies} defaults to an empty L{dict}.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.received_cookies, {})
def test_parseCookies(self):
"""
L{http.Request.parseCookies} extracts cookies from C{requestHeaders}
and adds them to C{received_cookies}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'test="lemur"; test2="panda"'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_parseCookiesMultipleHeaders(self):
"""
L{http.Request.parseCookies} can extract cookies from multiple Cookie
headers.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'test="lemur"', b'test2="panda"'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_parseCookiesNoCookie(self):
"""
L{http.Request.parseCookies} can be called on a request without a
cookie header.
"""
req = http.Request(DummyChannel(), False)
req.parseCookies()
self.assertEqual(req.received_cookies, {})
def test_parseCookiesEmptyCookie(self):
"""
L{http.Request.parseCookies} can be called on a request with an
empty cookie header.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [])
req.parseCookies()
self.assertEqual(req.received_cookies, {})
def test_parseCookiesIgnoreValueless(self):
"""
L{http.Request.parseCookies} ignores cookies which don't have a
value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo; bar; baz;'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {})
def test_parseCookiesEmptyValue(self):
"""
L{http.Request.parseCookies} parses cookies with an empty value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo='])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b''})
def test_parseCookiesRetainRightSpace(self):
"""
L{http.Request.parseCookies} leaves trailing whitespace in the
cookie value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo=bar '])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b'bar '})
def test_parseCookiesStripLeftSpace(self):
"""
L{http.Request.parseCookies} strips leading whitespace in the
cookie key.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b' foo=bar'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b'bar'})
def test_parseCookiesContinueAfterMalformedCookie(self):
"""
L{http.Request.parseCookies} parses valid cookies set before or
after malformed cookies.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'12345; test="lemur"; 12345; test2="panda"; 12345'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_connectionLost(self):
"""
L{http.Request.connectionLost} closes L{Request.content} and drops the
reference to the L{HTTPChannel} to assist with garbage collection.
"""
req = http.Request(DummyChannel(), False)
# Cause Request.content to be created at all.
req.gotLength(10)
# Grab a reference to content in case the Request drops it later on.
content = req.content
# Put some bytes into it
req.handleContentChunk(b"hello")
# Then something goes wrong and content should get closed.
req.connectionLost(Failure(ConnectionLost("Finished")))
self.assertTrue(content.closed)
self.assertIdentical(req.channel, None)
def test_registerProducerTwiceFails(self):
"""
Calling L{Request.registerProducer} when a producer is already
registered raises ValueError.
"""
req = http.Request(DummyChannel(), False)
req.registerProducer(DummyProducer(), True)
self.assertRaises(
ValueError, req.registerProducer, DummyProducer(), True)
def test_registerProducerWhenNotQueuedRegistersPushProducer(self):
"""
Calling L{Request.registerProducer} with an IPushProducer when the
request is not queued registers the producer as a push producer on the
request's transport.
"""
req = http.Request(DummyChannel(), False)
producer = DummyProducer()
req.registerProducer(producer, True)
self.assertEqual([(producer, True)], req.transport.producers)
def test_registerProducerWhenNotQueuedRegistersPullProducer(self):
"""
Calling L{Request.registerProducer} with an IPullProducer when the
request is not queued registers the producer as a pull producer on the
request's transport.
"""
req = http.Request(DummyChannel(), False)
producer = DummyProducer()
req.registerProducer(producer, False)
self.assertEqual([(producer, False)], req.transport.producers)
def test_connectionLostNotification(self):
"""
L{Request.connectionLost} triggers all finish notification Deferreds
and cleans up per-request state.
"""
d = DummyChannel()
request = http.Request(d, True)
finished = request.notifyFinish()
request.connectionLost(Failure(ConnectionLost("Connection done")))
self.assertIdentical(request.channel, None)
return self.assertFailure(finished, ConnectionLost)
def test_finishNotification(self):
"""
L{Request.finish} triggers all finish notification Deferreds.
"""
request = http.Request(DummyChannel(), False)
finished = request.notifyFinish()
# Force the request to have a non-None content attribute. This is
# probably a bug in Request.
request.gotLength(1)
request.finish()
return finished
def test_writeAfterFinish(self):
"""
Calling L{Request.write} after L{Request.finish} has been called results
in a L{RuntimeError} being raised.
"""
request = http.Request(DummyChannel(), False)
finished = request.notifyFinish()
# Force the request to have a non-None content attribute. This is
# probably a bug in Request.
request.gotLength(1)
request.write(b'foobar')
request.finish()
self.assertRaises(RuntimeError, request.write, b'foobar')
return finished
def test_finishAfterConnectionLost(self):
"""
Calling L{Request.finish} after L{Request.connectionLost} has been
called results in a L{RuntimeError} being raised.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.connectionLost(Failure(ConnectionLost("The end.")))
self.assertRaises(RuntimeError, req.finish)
def test_writeAfterConnectionLost(self):
"""
Calling L{Request.write} after L{Request.connectionLost} has been
called does not raise an exception. L{RuntimeError} will be raised
when finish is called on the request.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.connectionLost(Failure(ConnectionLost("The end.")))
req.write(b'foobar')
self.assertRaises(RuntimeError, req.finish)
def test_reprUninitialized(self):
"""
L{Request.__repr__} returns the class name, object address, and
dummy-place holder values when used on a L{Request} which has not yet
been initialized.
"""
request = http.Request(DummyChannel(), False)
self.assertEqual(
repr(request),
'<Request at 0x%x method=(no method yet) uri=(no uri yet) '
'clientproto=(no clientproto yet)>' % (id(request),))
def test_reprInitialized(self):
"""
L{Request.__repr__} returns, as a L{str}, the class name, object
address, and the method, uri, and client protocol of the HTTP request
it represents. The string is in the form::
<Request at ADDRESS method=METHOD uri=URI clientproto=PROTOCOL>
"""
request = http.Request(DummyChannel(), False)
request.clientproto = b'HTTP/1.0'
request.method = b'GET'
request.uri = b'/foo/bar'
self.assertEqual(
repr(request),
'<Request at 0x%x method=GET uri=/foo/bar '
'clientproto=HTTP/1.0>' % (id(request),))
def test_reprSubclass(self):
"""
Subclasses of L{Request} inherit a C{__repr__} implementation which
includes the subclass's name in place of the string C{"Request"}.
"""
class Otherwise(http.Request):
pass
request = Otherwise(DummyChannel(), False)
self.assertEqual(
repr(request),
'<Otherwise at 0x%x method=(no method yet) uri=(no uri yet) '
'clientproto=(no clientproto yet)>' % (id(request),))
def test_unregisterNonQueuedNonStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a non-queued non-streaming
producer from the request and the request's transport.
"""
req = http.Request(DummyChannel(), False)
req.transport = StringTransport()
req.registerProducer(DummyProducer(), False)
req.unregisterProducer()
self.assertEqual((None, None), (req.producer, req.transport.producer))
def test_unregisterNonQueuedStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a non-queued streaming
producer from the request and the request's transport.
"""
req = http.Request(DummyChannel(), False)
req.transport = StringTransport()
req.registerProducer(DummyProducer(), True)
req.unregisterProducer()
self.assertEqual((None, None), (req.producer, req.transport.producer))
def test_finishProducesLog(self):
"""
L{http.Request.finish} will call the channel's factory to produce a log
message.
"""
factory = http.HTTPFactory()
factory.timeOut = None
factory._logDateTime = "sometime"
factory._logDateTimeCall = True
factory.startFactory()
factory.logFile = BytesIO()
proto = factory.buildProtocol(None)
val = [
b"GET /path HTTP/1.1\r\n",
b"\r\n\r\n"
]
trans = StringTransport()
proto.makeConnection(trans)
for x in val:
proto.dataReceived(x)
proto._channel.requests[0].finish()
# A log message should be written out
self.assertIn(b'sometime "GET /path HTTP/1.1"',
factory.logFile.getvalue())
def test_requestBodyTimeoutFromFactory(self):
"""
        L{HTTPChannel} times out whenever data from a request body is not
delivered to it in time, even when it gets built from a L{HTTPFactory}.
"""
clock = Clock()
factory = http.HTTPFactory(timeout=100, reactor=clock)
factory.startFactory()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol = parametrizeTimeoutMixin(protocol, clock)
# Confirm that the timeout is what we think it is.
self.assertEqual(protocol.timeOut, 100)
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
clock.advance(99)
self.assertFalse(transport.disconnecting)
clock.advance(2)
self.assertTrue(transport.disconnecting)
def test_finishCleansConnection(self):
"""
L{http.Request.finish} will notify the channel that it is finished, and
will put the transport back in the producing state so that the reactor
can close the connection.
"""
factory = http.HTTPFactory()
factory.timeOut = None
factory._logDateTime = "sometime"
factory._logDateTimeCall = True
factory.startFactory()
factory.logFile = BytesIO()
proto = factory.buildProtocol(None)
proto._channel._optimisticEagerReadSize = 0
val = [
b"GET /path HTTP/1.1\r\n",
b"\r\n\r\n"
]
trans = StringTransport()
proto.makeConnection(trans)
self.assertEqual(trans.producerState, 'producing')
for x in val:
proto.dataReceived(x)
proto.dataReceived(b'GET ') # just a few extra bytes to exhaust the
# optimistic buffer size
self.assertEqual(trans.producerState, 'paused')
proto._channel.requests[0].finish()
self.assertEqual(trans.producerState, 'producing')
def test_provides_IDeprecatedHTTPChannelToRequestInterface(self):
"""
L{http.Request} provides
L{http._IDeprecatedHTTPChannelToRequestInterface}, which
defines the interface used by L{http.HTTPChannel}.
"""
req = http.Request(DummyChannel(), False)
verifyObject(http._IDeprecatedHTTPChannelToRequestInterface, req)
def test_eq(self):
"""
A L{http.Request} is equal to itself.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req, req)
def test_ne(self):
"""
A L{http.Request} is not equal to another object.
"""
req = http.Request(DummyChannel(), False)
self.assertNotEqual(req, http.Request(DummyChannel(), False))
def test_hashable(self):
"""
A L{http.Request} is hashable.
"""
req = http.Request(DummyChannel(), False)
hash(req)
def test_eqWithNonRequest(self):
"""
A L{http.Request} on the left hand side of an equality
comparison to an instance that is not a L{http.Request} hands
the comparison off to that object's C{__eq__} implementation.
"""
eqCalls = []
class _NotARequest:
def __eq__(self, other: object) -> bool:
eqCalls.append(other)
return True
req = http.Request(DummyChannel(), False)
self.assertEqual(req, _NotARequest())
self.assertEqual(eqCalls, [req])
def test_neWithNonRequest(self):
"""
A L{http.Request} on the left hand side of an inequality
comparison to an instance that is not a L{http.Request} hands
the comparison off to that object's C{__ne__} implementation.
"""
eqCalls = []
class _NotARequest:
def __ne__(self, other: object) -> bool:
eqCalls.append(other)
return True
req = http.Request(DummyChannel(), False)
self.assertNotEqual(req, _NotARequest())
self.assertEqual(eqCalls, [req])
def test_finishProducerStillRegistered(self):
"""
A RuntimeError is logged if a producer is still registered
when an L{http.Request} is finished.
"""
logObserver = EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
request = http.Request(DummyChannel(), False)
request.registerProducer(DummyProducer(), True)
request.finish()
self.assertEquals(1, len(logObserver))
event = logObserver[0]
f = event["log_failure"]
self.assertIsInstance(f.value, RuntimeError)
self.flushLoggedErrors(RuntimeError)
def test_getClientIPWithIPv4(self):
"""
L{http.Request.getClientIP} returns the host part of the
client's address when connected over IPv4.
"""
request = http.Request(
            DummyChannel(peer=address.IPv4Address("TCP", "127.0.0.1", 12344)))
self.assertEqual(request.getClientIP(), "127.0.0.1")
def test_getClientIPWithIPv6(self):
"""
L{http.Request.getClientIP} returns the host part of the
client's address when connected over IPv6.
"""
request = http.Request(
DummyChannel(peer=address.IPv6Address("TCP", "::1", 12344)))
self.assertEqual(request.getClientIP(), "::1")
def test_getClientIPWithNonTCPPeer(self):
"""
L{http.Request.getClientIP} returns L{None} for the client's
IP address when connected over a non-TCP transport.
"""
request = http.Request(
DummyChannel(peer=address.UNIXAddress("/path/to/socket")))
self.assertEqual(request.getClientIP(), None)
def test_getClientAddress(self):
"""
L{http.Request.getClientAddress} returns the client's address
as an L{IAddress} provider.
"""
client = address.UNIXAddress("/path/to/socket")
request = http.Request(DummyChannel(peer=client))
self.assertIs(request.getClientAddress(), client)
class MultilineHeadersTests(unittest.TestCase):
"""
Tests to exercise handling of multiline headers by L{HTTPClient}. RFCs 1945
(HTTP 1.0) and 2616 (HTTP 1.1) state that HTTP message header fields can
span multiple lines if each extra line is preceded by at least one space or
horizontal tab.
"""
def setUp(self):
"""
Initialize variables used to verify that the header-processing functions
are getting called.
"""
self.handleHeaderCalled = False
self.handleEndHeadersCalled = False
# Dictionary of sample complete HTTP header key/value pairs, including
# multiline headers.
expectedHeaders = {b'Content-Length': b'10',
b'X-Multiline' : b'line-0\tline-1',
b'X-Multiline2' : b'line-2 line-3'}
def ourHandleHeader(self, key, val):
"""
Dummy implementation of L{HTTPClient.handleHeader}.
"""
self.handleHeaderCalled = True
self.assertEqual(val, self.expectedHeaders[key])
def ourHandleEndHeaders(self):
"""
Dummy implementation of L{HTTPClient.handleEndHeaders}.
"""
self.handleEndHeadersCalled = True
def test_extractHeader(self):
"""
A header isn't processed by L{HTTPClient.extractHeader} until it is
confirmed in L{HTTPClient.lineReceived} that the header has been
received completely.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
c.lineReceived(b'Content-Length: 10')
self.assertIdentical(c.length, None)
self.assertFalse(self.handleHeaderCalled)
self.assertFalse(self.handleEndHeadersCalled)
# Signal end of headers.
c.lineReceived(b'')
self.assertTrue(self.handleHeaderCalled)
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.length, 10)
def test_noHeaders(self):
"""
An HTTP request with no headers will not cause any calls to
L{handleHeader} but will cause L{handleEndHeaders} to be called on
L{HTTPClient} subclasses.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
# Signal end of headers.
c.lineReceived(b'')
self.assertFalse(self.handleHeaderCalled)
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
def test_multilineHeaders(self):
"""
L{HTTPClient} parses multiline headers by buffering header lines until
an empty line or a line that does not start with whitespace hits
lineReceived, confirming that the header has been received completely.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
c.lineReceived(b'X-Multiline: line-0')
self.assertFalse(self.handleHeaderCalled)
# Start continuing line with a tab.
c.lineReceived(b'\tline-1')
c.lineReceived(b'X-Multiline2: line-2')
# The previous header must be complete, so now it can be processed.
self.assertTrue(self.handleHeaderCalled)
# Start continuing line with a space.
c.lineReceived(b' line-3')
c.lineReceived(b'Content-Length: 10')
# Signal end of headers.
c.lineReceived(b'')
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.length, 10)
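# A hedged, standalone sketch of the header folding exercised above. The raw
# header block is invented; unfolding appends any line beginning with a space
# or tab (including that whitespace) to the previous header line, which is why
# expectedHeaders contains b'line-0\tline-1' and b'line-2 line-3'.
def _exampleUnfoldHeaders():
    """Unfold a sample folded header block into a name/value dict."""
    rawLines = [
        b"X-Multiline: line-0",
        b"\tline-1",
        b"X-Multiline2: line-2",
        b" line-3",
        b"Content-Length: 10",
    ]
    unfolded = []
    for line in rawLines:
        if line.startswith((b" ", b"\t")) and unfolded:
            # Continuation line: keep its leading whitespace when joining.
            unfolded[-1] += line
        else:
            unfolded.append(line)
    return dict(line.split(b": ", 1) for line in unfolded)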
class Expect100ContinueServerTests(unittest.TestCase, ResponseTestMixin):
"""
Test that the HTTP server handles 'Expect: 100-continue' header correctly.
The tests in this class all assume a simplistic behavior where user code
cannot choose to deny a request. Once ticket #288 is implemented and user
code can run before the body of a POST is processed this should be
extended to support overriding this behavior.
"""
def test_HTTP10(self):
"""
HTTP/1.0 requests do not get 100-continue returned, even if 'Expect:
100-continue' is included (RFC 2616 10.1.1).
"""
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyHTTPHandlerProxy
channel.makeConnection(transport)
channel.dataReceived(b"GET / HTTP/1.0\r\n")
channel.dataReceived(b"Host: www.example.com\r\n")
channel.dataReceived(b"Content-Length: 3\r\n")
channel.dataReceived(b"Expect: 100-continue\r\n")
channel.dataReceived(b"\r\n")
self.assertEqual(transport.value(), b"")
channel.dataReceived(b"abc")
self.assertResponseEquals(
transport.value(),
[(b"HTTP/1.0 200 OK",
b"Command: GET",
b"Content-Length: 13",
b"Version: HTTP/1.0",
b"Request: /",
b"'''\n3\nabc'''\n")])
def test_expect100ContinueHeader(self):
"""
If a HTTP/1.1 client sends a 'Expect: 100-continue' header, the server
responds with a 100 response code before handling the request body, if
any. The normal resource rendering code will then be called, which
will send an additional response code.
"""
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyHTTPHandlerProxy
channel.makeConnection(transport)
channel.dataReceived(b"GET / HTTP/1.1\r\n")
channel.dataReceived(b"Host: www.example.com\r\n")
channel.dataReceived(b"Expect: 100-continue\r\n")
channel.dataReceived(b"Content-Length: 3\r\n")
# The 100 continue response is not sent until all headers are
# received:
self.assertEqual(transport.value(), b"")
channel.dataReceived(b"\r\n")
# The 100 continue response is sent *before* the body is even
# received:
self.assertEqual(transport.value(), b"HTTP/1.1 100 Continue\r\n\r\n")
channel.dataReceived(b"abc")
response = transport.value()
self.assertTrue(
response.startswith(b"HTTP/1.1 100 Continue\r\n\r\n"))
response = response[len(b"HTTP/1.1 100 Continue\r\n\r\n"):]
self.assertResponseEquals(
response,
[(b"HTTP/1.1 200 OK",
b"Command: GET",
b"Content-Length: 13",
b"Version: HTTP/1.1",
b"Request: /",
b"'''\n3\nabc'''\n")])
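# A hedged sketch of the wire exchange the class above tests; the byte strings
# are illustrative only. An HTTP/1.1 client sending "Expect: 100-continue"
# first receives an interim "100 Continue" response and then the final
# response, while an HTTP/1.0 client never receives the interim response.
_exampleExpectRequest = (
    b"GET / HTTP/1.1\r\n"
    b"Host: www.example.com\r\n"
    b"Expect: 100-continue\r\n"
    b"Content-Length: 3\r\n"
    b"\r\n"
)
_exampleInterimResponse = b"HTTP/1.1 100 Continue\r\n\r\n"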
def sub(keys, d):
"""
Create a new dict containing only a subset of the items of an existing
dict.
@param keys: An iterable of the keys which will be added (with values from
C{d}) to the result.
@param d: The existing L{dict} from which to copy items.
@return: The new L{dict} with keys given by C{keys} and values given by the
corresponding values in C{d}.
@rtype: L{dict}
"""
return dict([(k, d[k]) for k in keys])
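# A tiny, hedged illustration of sub() above; the dictionary is arbitrary
# example data, not taken from the tests.
def _exampleSub():
    """Return {"a": 1}, demonstrating key-subsetting with sub()."""
    return sub(["a"], {"a": 1, "b": 2})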
class DeprecatedRequestAttributesTests(unittest.TestCase):
"""
Tests for deprecated attributes of L{twisted.web.http.Request}.
"""
def test_getClientIP(self):
"""
L{Request.getClientIP} is deprecated in favor of
L{Request.getClientAddress}.
"""
request = http.Request(
DummyChannel(peer=address.IPv6Address("TCP", "127.0.0.1", 12345)))
request.gotLength(0)
request.requestReceived(b"GET", b"/", b"HTTP/1.1")
request.getClientIP()
warnings = self.flushWarnings(
offendingFunctions=[self.test_getClientIP])
self.assertEqual(1, len(warnings))
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.getClientIP was deprecated "
"in Twisted 18.4.0; please use getClientAddress instead")},
sub(["category", "message"], warnings[0]))
def test_noLongerQueued(self):
"""
L{Request.noLongerQueued} is deprecated, as we no longer process
requests simultaneously.
"""
channel = DummyChannel()
request = http.Request(channel)
request.noLongerQueued()
warnings = self.flushWarnings(
offendingFunctions=[self.test_noLongerQueued])
self.assertEqual(1, len(warnings))
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.noLongerQueued was deprecated "
"in Twisted 16.3.0")},
sub(["category", "message"], warnings[0]))
class ChannelProductionTests(unittest.TestCase):
"""
Tests for the way HTTPChannel manages backpressure.
"""
request = (
b'GET / HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'\r\n'
)
def buildChannelAndTransport(self, transport, requestFactory):
"""
Setup a L{HTTPChannel} and a transport and associate them.
@param transport: A transport to back the L{HTTPChannel}
@param requestFactory: An object that can construct L{Request} objects.
@return: A tuple of the channel and the transport.
"""
transport = transport
channel = http.HTTPChannel()
channel.requestFactory = _makeRequestProxyFactory(requestFactory)
channel.makeConnection(transport)
return channel, transport
def test_HTTPChannelIsAProducer(self):
"""
L{HTTPChannel} registers itself as a producer with its transport when a
connection is made.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DummyHTTPHandler
)
self.assertEqual(transport.producer, channel)
self.assertTrue(transport.streaming)
def test_HTTPChannelUnregistersSelfWhenCallingLoseConnection(self):
"""
L{HTTPChannel} unregisters itself when it has loseConnection called.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DummyHTTPHandler
)
channel.loseConnection()
self.assertIs(transport.producer, None)
self.assertIs(transport.streaming, None)
def test_HTTPChannelRejectsMultipleProducers(self):
"""
If two producers are registered on a L{HTTPChannel} without the first
being unregistered, a L{RuntimeError} is thrown.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DummyHTTPHandler
)
channel.registerProducer(DummyProducer(), True)
self.assertRaises(
RuntimeError, channel.registerProducer, DummyProducer(), True
)
def test_HTTPChannelCanUnregisterWithNoProducer(self):
"""
If there is no producer, the L{HTTPChannel} can still have
C{unregisterProducer} called.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DummyHTTPHandler
)
channel.unregisterProducer()
self.assertIs(channel._requestProducer, None)
def test_HTTPChannelStopWithNoRequestOutstanding(self):
"""
If there is no request producer currently registered, C{stopProducing}
does nothing.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DummyHTTPHandler
)
channel.unregisterProducer()
self.assertIs(channel._requestProducer, None)
def test_HTTPChannelStopRequestProducer(self):
"""
If there is a request producer registered with L{HTTPChannel}, calling
C{stopProducing} causes that producer to be stopped as well.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DelayedHTTPHandler
)
# Feed a request in to spawn a Request object, then grab it.
channel.dataReceived(self.request)
request = channel.requests[0].original
# Register a dummy producer.
producer = DummyProducer()
request.registerProducer(producer, True)
# The dummy producer is currently unpaused.
self.assertEqual(producer.events, [])
# The transport now stops production. This stops the request producer.
channel.stopProducing()
self.assertEqual(producer.events, ['stop'])
def test_HTTPChannelPropagatesProducingFromTransportToTransport(self):
"""
When L{HTTPChannel} has C{pauseProducing} called on it by the transport
it will call C{pauseProducing} on the transport. When unpaused, the
L{HTTPChannel} will call C{resumeProducing} on its transport.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DummyHTTPHandler
)
# The transport starts in producing state.
self.assertEqual(transport.producerState, 'producing')
# Pause producing. The transport should now be paused as well.
channel.pauseProducing()
self.assertEqual(transport.producerState, 'paused')
# Resume producing. The transport should be unpaused.
channel.resumeProducing()
self.assertEqual(transport.producerState, 'producing')
def test_HTTPChannelPropagatesPausedProductionToRequest(self):
"""
If a L{Request} object has registered itself as a producer with a
L{HTTPChannel} object, and the L{HTTPChannel} object is paused, both
the transport and L{Request} objects get paused.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DelayedHTTPHandler
)
channel._optimisticEagerReadSize = 0
# Feed a request in to spawn a Request object, then grab it.
channel.dataReceived(self.request)
# A little extra data to pause the transport.
channel.dataReceived(b'123')
request = channel.requests[0].original
# Register a dummy producer.
producer = DummyProducer()
request.registerProducer(producer, True)
# Note that the transport is paused while it waits for a response.
# The dummy producer, however, is unpaused.
self.assertEqual(transport.producerState, 'paused')
self.assertEqual(producer.events, [])
# The transport now pauses production. This causes the producer to be
# paused. The transport stays paused.
channel.pauseProducing()
self.assertEqual(transport.producerState, 'paused')
self.assertEqual(producer.events, ['pause'])
# The transport has become unblocked and resumes production. This
# unblocks the dummy producer, but leaves the transport blocked.
channel.resumeProducing()
self.assertEqual(transport.producerState, 'paused')
self.assertEqual(producer.events, ['pause', 'resume'])
# Unregister the producer and then complete the response. Because the
# channel is not paused, the transport now gets unpaused.
request.unregisterProducer()
request.delayedProcess()
self.assertEqual(transport.producerState, 'producing')
def test_HTTPChannelStaysPausedWhenRequestCompletes(self):
"""
If a L{Request} object completes its response while the transport is
paused, the L{HTTPChannel} does not resume the transport.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DelayedHTTPHandler
)
channel._optimisticEagerReadSize = 0
# Feed a request in to spawn a Request object, then grab it.
channel.dataReceived(self.request)
channel.dataReceived(b'extra') # exceed buffer size to pause the
# transport.
request = channel.requests[0].original
# Register a dummy producer.
producer = DummyProducer()
request.registerProducer(producer, True)
# Note that the transport is paused while it waits for a response.
# The dummy producer, however, is unpaused.
self.assertEqual(transport.producerState, 'paused')
self.assertEqual(producer.events, [])
# The transport now pauses production. This causes the producer to be
# paused. The transport stays paused.
channel.pauseProducing()
self.assertEqual(transport.producerState, 'paused')
self.assertEqual(producer.events, ['pause'])
# Unregister the producer and then complete the response. Because the
# channel is still paused, the transport stays paused
request.unregisterProducer()
request.delayedProcess()
self.assertEqual(transport.producerState, 'paused')
# At this point the channel is resumed, and so is the transport.
channel.resumeProducing()
self.assertEqual(transport.producerState, 'producing')
def test_HTTPChannelToleratesDataWhenTransportPaused(self):
"""
If the L{HTTPChannel} has paused the transport, it still tolerates
receiving data, and does not attempt to pause the transport again.
"""
class NoDoublePauseTransport(StringTransport):
"""
A version of L{StringTransport} that fails tests if it is paused
while already paused.
"""
def pauseProducing(self):
if self.producerState == 'paused':
raise RuntimeError("Transport was paused twice!")
StringTransport.pauseProducing(self)
# Confirm that pausing a NoDoublePauseTransport twice fails.
transport = NoDoublePauseTransport()
transport.pauseProducing()
self.assertRaises(RuntimeError, transport.pauseProducing)
channel, transport = self.buildChannelAndTransport(
NoDoublePauseTransport(), DummyHTTPHandler
)
# The transport starts in producing state.
self.assertEqual(transport.producerState, 'producing')
# Pause producing. The transport should now be paused as well.
channel.pauseProducing()
self.assertEqual(transport.producerState, 'paused')
# Write in a request, even though the transport is paused.
channel.dataReceived(self.request)
# The transport is still paused, but we have tried to write the
# response out.
self.assertEqual(transport.producerState, 'paused')
self.assertTrue(transport.value().startswith(b'HTTP/1.1 200 OK\r\n'))
# Resume producing. The transport should be unpaused.
channel.resumeProducing()
self.assertEqual(transport.producerState, 'producing')
def test_HTTPChannelToleratesPullProducers(self):
"""
        If the L{HTTPChannel} has an L{IPullProducer} registered with it, it can
adapt that producer into an L{IPushProducer}.
"""
channel, transport = self.buildChannelAndTransport(
StringTransport(), DummyPullProducerHandler
)
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyPullProducerHandlerProxy
channel.makeConnection(transport)
channel.dataReceived(self.request)
request = channel.requests[0].original
responseComplete = request._actualProducer.result
def validate(ign):
responseBody = transport.value().split(b'\r\n\r\n', 1)[1]
expectedResponseBody = (
b'1\r\n0\r\n'
b'1\r\n1\r\n'
b'1\r\n2\r\n'
b'1\r\n3\r\n'
b'1\r\n4\r\n'
b'1\r\n5\r\n'
b'1\r\n6\r\n'
b'1\r\n7\r\n'
b'1\r\n8\r\n'
b'1\r\n9\r\n'
)
self.assertEqual(responseBody, expectedResponseBody)
return responseComplete.addCallback(validate)
def test_HTTPChannelUnregistersSelfWhenTimingOut(self):
"""
L{HTTPChannel} unregisters itself when it times out a connection.
"""
clock = Clock()
transport = StringTransport()
channel = http.HTTPChannel()
# Patch the channel's callLater method.
channel.timeOut = 100
channel.callLater = clock.callLater
channel.makeConnection(transport)
# Tick the clock forward almost to the timeout.
clock.advance(99)
self.assertIs(transport.producer, channel)
self.assertIs(transport.streaming, True)
# Fire the timeout.
clock.advance(1)
self.assertIs(transport.producer, None)
self.assertIs(transport.streaming, None)
class HTTPChannelSanitizationTests(unittest.SynchronousTestCase):
"""
Test that L{HTTPChannel} sanitizes its output.
"""
def test_writeHeadersSanitizesLinearWhitespace(self):
"""
L{HTTPChannel.writeHeaders} removes linear whitespace from the
list of header names and values it receives.
"""
for component in bytesLinearWhitespaceComponents:
transport = StringTransport()
channel = http.HTTPChannel()
channel.makeConnection(transport)
channel.writeHeaders(
version=b"HTTP/1.1",
code=b"200",
reason=b"OK",
headers=[(component, component)])
sanitizedHeaderLine = b": ".join([
sanitizedBytes, sanitizedBytes,
]) + b'\r\n'
self.assertEqual(
transport.value(),
b"\r\n".join([
b"HTTP/1.1 200 OK",
sanitizedHeaderLine,
b'',
]))
class HTTPClientSanitizationTests(unittest.SynchronousTestCase):
"""
Test that L{http.HTTPClient} sanitizes its output.
"""
def test_sendHeaderSanitizesLinearWhitespace(self):
"""
L{HTTPClient.sendHeader} replaces linear whitespace in its
header keys and values with a single space.
"""
for component in bytesLinearWhitespaceComponents:
transport = StringTransport()
client = http.HTTPClient()
client.makeConnection(transport)
client.sendHeader(component, component)
self.assertEqual(
transport.value().splitlines(),
[b": ".join([sanitizedBytes, sanitizedBytes])]
)
|
the-stack_106_14604
|
"""A set of Python Classes for connecting to and interacting with a VOSpace
service.
Connections to VOSpace are made using a SSL X509 certificat which is
stored in a .pem file.
"""
#from contextlib import nested
import copy
import errno
import fnmatch
import hashlib
import requests
from requests.exceptions import HTTPError
import html2text
import logging
import mimetypes
import os
import re
import stat
import string
import sys
import time
#import urllib
#import urlparse
from xml.etree import ElementTree
from copy import deepcopy
from NodeCache import NodeCache
from __version__ import version
import netrc
try:
_unicode = unicode
except NameError:
try:
_unicode = str
except NameError:
# If Python is built without Unicode support, the unicode type
# will not exist. Fake one.
class Unicode(object):
pass
        _unicode = Unicode
try:
from contextlib import nested # Python 2
except ImportError:
from contextlib import ExitStack, contextmanager
@contextmanager
def nested(*contexts):
"""
Reimplementation of nested in python 3.
"""
with ExitStack() as stack:
for ctx in contexts:
stack.enter_context(ctx)
yield contexts
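# A minimal usage sketch of the nested() shim above, assuming only the
# standard library; the file names are hypothetical. nested() accepts several
# context managers and enters them in order, mirroring the removed Python 2
# contextlib.nested behaviour.
def _example_nested_usage(path_a="a.txt", path_b="b.txt"):
    """Open two (hypothetical) files at once via nested()."""
    with nested(open(path_a, "w"), open(path_b, "w")) as (file_a, file_b):
        file_a.write("first context\n")
        file_b.write("second context\n")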
try:
import ConfigParser # Python 2
from urllib import splittag, splitquery, urlencode
from urlparse import parse_qs, urlparse
from cStringIO import StringIO
import httplib as http_client
except ImportError:
import configparser as ConfigParser # Python 3
from urllib.parse import splittag, splitquery, urlencode
from urllib.parse import parse_qs, urlparse
from io import StringIO
import http.client as http_client
http_client.HTTPConnection.debuglevel = 0 #1
logger = logging.getLogger('vos')
logger.setLevel(logging.ERROR)
if sys.version_info[:2] > (2, 6):
logger.addHandler(logging.NullHandler())
BUFSIZE = 8388608 # Size of read/write buffer
MAX_RETRY_DELAY = 128 # maximum delay between retries
DEFAULT_RETRY_DELAY = 30 # start delay between retries when Try_After not sent by server.
MAX_RETRY_TIME = 900 # maximum time for retries before giving up...
CONNECTION_TIMEOUT = 30 # seconds before HTTP connection should drop, should be less than DAEMON timeout in vofs
VOSPACE_ARCHIVE = os.getenv("VOSPACE_ARCHIVE", "vospace")
#HEADER_DELEG_TOKEN = 'X-CADC-DelegationToken'
HEADER_DELEG_TOKEN = 'X-DL-AuthToken'
HEADER_CONTENT_LENGTH = 'X-CADC-Content-Length'
HEADER_PARTIAL_READ = 'X-CADC-Partial-Read'
CONNECTION_COUNTER = 0
CADC_GMS_PREFIX = ''
requests.packages.urllib3.disable_warnings()
logging.getLogger("requests").setLevel(logging.WARNING)
def convert_vospace_time_to_seconds(str_date):
"""A convenience method that takes a string from a vospace time field and converts it to seconds since epoch.
:param str_date: string to parse into a VOSpace time
:type str_date: str
    :return: the time in seconds since the epoch for the provided string date
    :rtype: float
"""
right = str_date.rfind(":") + 3
mtime = time.mktime(time.strptime(str_date[0:right], '%Y-%m-%dT%H:%M:%S'))
return mtime - time.mktime(time.gmtime()) + time.mktime(time.localtime())
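# A small, hedged illustration of the conversion above. The timestamp is an
# invented example in the ISO-like form VOSpace returns; the return value is
# seconds since the epoch (a float), shifted from UT to local time exactly as
# the function does.
def _example_convert_vospace_time():
    """Convert a sample VOSpace date string to seconds since the epoch."""
    sample = "2001-01-01T12:00:00.000"
    return convert_vospace_time_to_seconds(sample)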
def compute_md5(filename, block_size=BUFSIZE):
"""
Given a file compute the MD5 of that file.
:param filename: name of file to open and compute MD5 for.
:type filename: str
:param block_size: size of read blocks to stream through MD5 calculator.
:type block_size: int
    :return: the MD5 digest as a hex string
    :rtype: str
"""
md5 = hashlib.md5()
    with open(filename, 'rb') as r:
while True:
buf = r.read(block_size)
if len(buf) == 0:
break
md5.update(buf)
return md5.hexdigest()
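# A hedged sketch showing compute_md5 in use. The file is a temporary one
# created only for the example; block_size is left at the default BUFSIZE.
def _example_compute_md5():
    """Write a small temporary file and return its MD5 hex digest."""
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b"hello vospace")
        name = tmp.name
    try:
        return compute_md5(name)
    finally:
        os.remove(name)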
class URLParser(object):
""" Parse out the structure of a URL.
There is a difference between the 2.5 and 2.7 version of the
urlparse.urlparse command, so here I roll my own...
"""
def __init__(self, url):
self.scheme = None
self.netloc = None
self.args = None
self.path = None
        m = re.match("(^(?P<scheme>[a-zA-Z]*):)?(//(?P<netloc>(?P<server>[^!~]*)[!~](?P<service>[^/]*)))?"
                     r"(?P<path>/?[^?]*)?(?P<args>\?.*)?", url)
self.scheme = m.group('scheme')
self.netloc = m.group('netloc')
self.server = m.group('server')
self.service = m.group('service')
self.path = (m.group('path') is not None and m.group('path')) or ''
self.args = (m.group('args') is not None and m.group('args')) or ''
def __str__(self):
return "[scheme: %s, netloc: %s, path: %s]" % (self.scheme,
self.netloc, self.path)
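# A hedged illustration of URLParser on a VOSpace-style URI. The URI is
# invented; it simply shows which pieces land in scheme, server, service
# (the netloc split on '!'), path and args according to the regex above.
def _example_urlparser():
    """Parse a sample vos:// URI and return its components."""
    parts = URLParser("vos://cadc.nrc.ca!vospace/mydir/myfile.fits?view=data")
    return parts.scheme, parts.server, parts.service, parts.path, parts.args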
class Connection(object):
"""Class to hold and act on the X509 certificate"""
def __init__(self, vospace_certfile=None, vospace_token=None, http_debug=False):
"""Setup the Certificate for later usage
vospace_certfile -- where to store the certificate, if None then
${HOME}/.ssl or a temporary filename
vospace_token -- token string (alternative to vospace_certfile)
http_debug -- set True to generate debug statements
The user must supply a valid certificate or connection will be 'anonymous'.
"""
self.http_debug = http_debug
# tokens trump certs. We should only ever have token or certfile
# set in order to avoid confusion.
self.vospace_certfile = None
self.vospace_token = vospace_token
if self.vospace_token is None:
# allow anonymous access if no certfile specified
if vospace_certfile is not None and not os.access(vospace_certfile, os.F_OK):
logger.warning(
"Could not access certificate at {0}. Reverting to anonymous.".format(vospace_certfile))
vospace_certfile = None
self.vospace_certfile = vospace_certfile
# create a requests session object that all requests will be made via.
session = requests.Session()
if self.vospace_certfile is not None:
session.cert = (self.vospace_certfile, self.vospace_certfile)
if self.vospace_certfile is None: # MJG look at this in operation
try:
auth = netrc.netrc().authenticators(EndPoints.VOSPACE_WEBSERVICE)
if auth is not None:
session.auth = (auth[0], auth[2])
except:
pass
if self.vospace_token is not None:
session.headers.update({HEADER_DELEG_TOKEN: self.vospace_token})
user_agent = 'vos ' + version
if "vofs" in sys.argv[0]:
user_agent = 'vofs ' + version
session.headers.update({"User-Agent": user_agent})
assert isinstance(session, requests.Session)
self.session = session
def get_connection(self, url=None):
"""Create an HTTPSConnection object and return. Uses the client
certificate if None given.
:param url: a VOSpace uri
"""
if url is not None:
raise OSError(errno.ENOSYS, "Connections are no longer set per URL.")
return self.session
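# A minimal, hedged sketch of constructing a Connection. The certificate path
# is hypothetical; with no certificate or token the session falls back to
# anonymous (or .netrc) access, as described in __init__ above.
def _example_connection(certfile=None):
    """Build a Connection (e.g. certfile="~/.ssl/cadcproxy.pem", hypothetical)
    and return its underlying requests.Session."""
    conn = Connection(vospace_certfile=certfile)
    return conn.get_connection()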
class Node(object):
"""A VOSpace node"""
IVOAURL = "ivo://ivoa.net/vospace/core"
VOSNS = "http://www.ivoa.net/xml/VOSpace/v2.0"
XSINS = "http://www.w3.org/2001/XMLSchema-instance"
TYPE = '{%s}type' % XSINS
NODES = '{%s}nodes' % VOSNS
NODE = '{%s}node' % VOSNS
PROTOCOL = '{%s}protocol' % VOSNS
PROPERTIES = '{%s}properties' % VOSNS
PROPERTY = '{%s}property' % VOSNS
ACCEPTS = '{%s}accepts' % VOSNS
PROVIDES = '{%s}provides' % VOSNS
ENDPOINT = '{%s}endpoint' % VOSNS
TARGET = '{%s}target' % VOSNS
DATA_NODE = "vos:DataNode"
LINK_NODE = "vos:LinkNode"
CONTAINER_NODE = "vos:ContainerNode"
def __init__(self, node, node_type=None, properties=None, subnodes=None):
"""Create a Node object based on the DOM passed to the init method
if node is a string then create a node named node of nodeType with
properties
"""
self.uri = None
self.name = None
self.target = None
self.groupread = None
self.groupwrite = None
self.is_public = None
self.type = None
self.props = {}
self.attr = {}
self.xattr = {}
self._node_list = None
self._endpoints = None
if not subnodes:
subnodes = []
if not properties:
properties = {}
if node_type is None:
node_type = Node.DATA_NODE
        if type(node) == _unicode or type(node) == str:
node = self.create(node, node_type, properties, subnodes=subnodes)
if node is None:
raise LookupError("no node found or created?")
self.node = node
self.node.set('xmlns:vos', self.VOSNS)
self.update()
def __eq__(self, node):
if not isinstance(node, Node):
return False
return self.props == node.props
@property
def endpoints(self):
if not self._endpoints:
self._endpoints = EndPoints(self.uri)
return self._endpoints
    def update(self):
        """Update the convenience links of this node as we update the xml file"""
self.type = self.node.get(Node.TYPE)
if self.type is None:
# logger.debug("Node type unknown, no node created")
return None
if self.type == "vos:LinkNode":
self.target = self.node.findtext(Node.TARGET)
self.uri = self.node.get('uri')
self.name = os.path.basename(self.uri)
for propertiesNode in self.node.findall(Node.PROPERTIES):
self.set_props(propertiesNode)
self.is_public = False
if self.props.get('ispublic', 'false') == 'true':
self.is_public = True
logger.debug("{0} {1} -> {2}".format(self.uri, self.endpoints.islocked, self.props))
self.groupwrite = self.props.get('groupwrite', '')
self.groupread = self.props.get('groupread', '')
logger.debug("Setting file attributes via setattr")
self.setattr()
logger.debug("Setting file x-attributes via setxattr")
self.setxattr()
def set_property(self, key, value):
"""Create a key/value pair Node.PROPERTY element.
:param key: the property key
:param value: the property value
"""
properties = self.node.find(Node.PROPERTIES)
uri = "%s#%s" % (Node.IVOAURL, key)
ElementTree.SubElement(properties, Node.PROPERTY,
attrib={'uri': uri, 'readOnly': 'false'}).text = value
def __str__(self):
"""Convert the Node to a string representation of the Node"""
class Dummy(object):
pass
data = []
file_handle = Dummy()
file_handle.write = data.append
ElementTree.ElementTree(self.node).write(file_handle) # MJG , encoding="UTF-8")
return "".join(data)
def setattr(self, attr=None):
"""return / augment a dictionary of attributes associated with the Node
These attributes are determined from the node on VOSpace.
:param attr: the dictionary that holds the attributes
"""
if not attr:
attr = {}
# Get the flags for file mode settings.
self.attr = {}
# Only one date provided by VOSpace, so use this as all possible dates.
access_time = time.time()
if not self.props.get('date', None):
modified_time = access_time
else:
# mktime is expecting a localtime but we're sending a UT date, so
# some correction will be needed
modified_time = convert_vospace_time_to_seconds(self.props.get('date'))
self.attr['st_ctime'] = attr.get('st_ctime', modified_time)
self.attr['st_mtime'] = attr.get('st_mtime', modified_time)
self.attr['st_atime'] = access_time
# set the MODE by or'ing together all flags from stat
st_mode = 0
st_nlink = 1
if self.type == 'vos:ContainerNode':
st_mode |= stat.S_IFDIR
st_nlink = max(2, len(self.get_info_list()) + 2)
# if getInfoList length is < 0 we have a problem elsewhere, so above hack solves that problem.
elif self.type == 'vos:LinkNode':
st_mode |= stat.S_IFLNK
else:
st_mode |= stat.S_IFREG
self.attr['st_nlink'] = st_nlink
# Set the OWNER permissions: all vospace Nodes have read/write/execute by owner
st_mode |= stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
# Set the GROUP permissions
# MJG if self.props.get('groupwrite', "NONE") != "NONE":
if 'groupwrite' in self.props and self.props.get('groupwrite') is not None:
st_mode |= stat.S_IWGRP
# if self.props.get('groupread', "NONE") != "NONE":
if 'groupread' in self.props and self.props.get('groupread') is not None:
st_mode |= stat.S_IRGRP
st_mode |= stat.S_IXGRP
# Set the OTHER permissions
if self.props.get('ispublic', 'false') == 'true':
# If you can read the file then you can execute too.
# Public does NOT mean writeable. EVER
st_mode |= stat.S_IROTH | stat.S_IXOTH
self.attr['st_mode'] = attr.get('st_mode', st_mode)
# We set the owner and group bits to be those of the currently running process.
# This is a hack since we don't have an easy way to figure these out.
# TODO Come up with a better approach to uid setting
self.attr['st_uid'] = attr.get('st_uid', os.getuid())
        self.attr['st_gid'] = attr.get('st_gid', os.getgid())
st_size = int(self.props.get('length', 0))
self.attr['st_size'] = st_size > 0 and st_size or 0
        self.attr['st_blocks'] = self.attr['st_size'] // 512
def setxattr(self, attrs=None):
"""Initialize the extended attributes using the Node properties that are not part of the core set.
:param attrs: An input list of attributes being sent from an external source, not supported.
"""
if attrs is not None:
raise OSError(errno.ENOSYS, "No externally set extended Attributes for vofs yet.")
for key in self.props:
if key in Client.vosProperties:
continue
self.xattr[key] = self.props[key]
return
def chwgrp(self, group):
"""Set the groupwrite value to group for this node
:param group: the uri of he group to give write access to.
:type group: str
"""
logger.debug("Setting groups to: {0}".format(group))
if group is not None and len(group.split()) > 3:
raise AttributeError("Exceeded max of 4 write groups: {0}<-".format(group.split()))
self.groupwrite = group
return self.change_prop('groupwrite', group)
def chrgrp(self, group):
"""Set the groupread value to group for this node
:param group: the uri of the group to give read access to.
:type group: str
"""
if group is not None and len(group.split()) > 3:
raise AttributeError("Exceeded max of 4 read groups: {0}<-".format(group))
self.groupread = group
return self.change_prop('groupread', group)
def set_public(self, value):
"""
:param value: should the is_public flag be set? (true/false)
:type value: str
"""
return self.change_prop('ispublic', value)
@staticmethod
    def fix_prop(prop):
        """Check if prop is a well-formed uri and if not then make it into one
        :param prop: the property to expand into an IVOA uri value for a property.
        :rtype: str
"""
        (url, tag) = splittag(prop)
if tag is None and url in ['title',
'creator',
'subject',
'description',
'publisher',
'contributer',
'date',
'type',
'format',
'identifier',
'source',
'language',
'relation',
'coverage',
'rights',
'availableSpace',
'groupread',
'groupwrite',
'publicread',
'quota',
'length',
'MD5',
'mtime',
'ctime',
'ispublic']:
tag = url
url = Node.IVOAURL
prop = url + "#" + tag
parts = URLParser(url)
if parts.path is None or tag is None:
raise ValueError("Invalid VOSpace property uri: {0}".format(prop))
return prop
@staticmethod
def set_prop():
"""Build the XML for a given node"""
raise NotImplementedError('No set prop.')
def change_prop(self, key, value):
"""Change the node property 'key' to 'value'.
:param key: The property key to update
:type key: str
:param value: The value to give that property.
:type value: str,None
:return True/False depending on if the property value was updated.
"""
# TODO split into 'set' and 'delete'
uri = self.fix_prop(key)
changed = False
found = False
properties = self.node.findall(Node.PROPERTIES)
for props in properties:
for prop in props.findall(Node.PROPERTY):
if uri != prop.attrib.get('uri', None):
continue
found = True
if prop.attrib.get('text', None) == value:
break
changed = True
if value is None:
# this is actually a delete property
prop.attrib['xsi:nil'] = 'true'
prop.attrib["xmlns:xsi"] = Node.XSINS
prop.text = ""
self.props[self.get_prop_name(uri)] = None
else:
prop.text = value
if found:
return changed
# must not have had this kind of property already, so set value
property_node = ElementTree.SubElement(properties[0], Node.PROPERTY)
property_node.attrib['readOnly'] = "false"
property_node.attrib['uri'] = uri
property_node.text = value
self.props[self.get_prop_name(uri)] = value
return changed
def chmod(self, mode):
"""Set the MODE of this Node...
translates unix style MODE to voSpace and updates the properties...
This function is quite limited. We can make a file publicly
readable and we can turn on/off group read/write permissions,
that's all.
:param mode: a stat MODE bit
"""
changed = 0
if mode & stat.S_IROTH:
changed += self.set_public('true')
else:
changed += self.set_public('false')
if mode & stat.S_IRGRP:
changed += self.chrgrp(self.groupread)
else:
changed += self.chrgrp('')
if mode & stat.S_IWGRP:
changed += self.chwgrp(self.groupwrite)
else:
changed += self.chwgrp('')
# logger.debug("%d -> %s" % (changed, changed>0))
return changed > 0
def create(self, uri, node_type="vos:DataNode", properties=None, subnodes=None):
"""Build the XML needed to represent a VOSpace node returns an ElementTree representation of the XML
:param uri: The URI for this node.
:type uri: str
:param node_type: the type of VOSpace node, likely one of vos:DataNode, vos:ContainerNode, vos:LinkNode
:type node_type: str
:param properties: a dictionary of the node properties, keys should be single words from the IVOA list
:type properties: dict
:param subnodes: Any children to attach to this node, only valid for vos:ContainerNode
:type subnodes: [Node]
"""
if not subnodes:
subnodes = []
elif node_type != 'vos:ContainerNode':
raise ValueError("Only Container Nodes can have subnodes")
if not properties:
properties = {}
endpoints = EndPoints(uri)
# Build the root node called 'node'
node = ElementTree.Element("node")
node.attrib["xmlns"] = Node.VOSNS
node.attrib["xmlns:vos"] = Node.VOSNS
node.attrib[Node.TYPE] = node_type
node.attrib["uri"] = uri
# create a properties section
if 'type' not in properties:
properties['type'] = mimetypes.guess_type(uri)[0]
properties_node = ElementTree.SubElement(node, Node.PROPERTIES)
for prop in properties.keys():
property_node = ElementTree.SubElement(properties_node, Node.PROPERTY)
property_node.attrib['readOnly'] = "false"
property_node.attrib["uri"] = self.fix_prop(prop)
if properties[prop] is None:
# Setting the property value to None indicates that this is actually a delete
property_node.attrib['xsi:nil'] = 'true'
property_node.attrib["xmlns:xsi"] = Node.XSINS
property_node.text = ""
elif len(str(properties[prop])) > 0:
property_node.text = properties[prop]
# That's it for link nodes...
if node_type == "vos:LinkNode":
return node
# create accepts
accepts = ElementTree.SubElement(node, Node.ACCEPTS)
ElementTree.SubElement(accepts, "view").attrib['uri'] = \
"%s#%s" % (Node.IVOAURL, "defaultview")
provides = ElementTree.SubElement(node, Node.PROVIDES)
ElementTree.SubElement(provides, "view").attrib['uri'] = \
"%s#%s" % (Node.IVOAURL, 'defaultview')
ElementTree.SubElement(provides, "view").attrib['uri'] = \
"%s#%s" % (endpoints.core, 'rssview')
# Only DataNode can have a dataview...
if node_type == "vos:DataNode":
ElementTree.SubElement(provides, "view").attrib['uri'] = \
"%s#%s" % (endpoints.core, 'dataview')
# if this is a container node then add directory contents
if node_type == "vos:ContainerNode":
node_list = ElementTree.SubElement(node, Node.NODES)
for sub_node in subnodes:
node_list.append(sub_node.node)
return node
def isdir(self):
"""Check if target is a container Node"""
# logger.debug(self.type)
if self.type == "vos:ContainerNode":
return True
return False
def islink(self):
"""Check if target is a link Node"""
# logger.debug(self.type)
if self.type == "vos:LinkNode":
return True
return False
@property
def is_locked(self):
return self.islocked()
@is_locked.setter
def is_locked(self, lock):
if lock == self.is_locked:
return
self.change_prop(self.endpoints.islocked, lock and "true" or "false")
def islocked(self):
"""Check if target state is locked for update/delete."""
return self.props.get(self.endpoints.islocked, "false") == "true"
def get_info(self):
"""Organize some information about a node and return as dictionary"""
date = convert_vospace_time_to_seconds(self.props['date'])
        creator = re.search('CN=([^,]*)',
                            self.props.get('creator', 'CN=unknown_000,')
                            ).groups()[0].replace(' ', '_').lower()
perm = []
for i in range(10):
perm.append('-')
perm[1] = 'r'
perm[2] = 'w'
if self.type == "vos:ContainerNode":
perm[0] = 'd'
if self.type == "vos:LinkNode":
perm[0] = 'l'
if self.props.get('ispublic', "false") == "true":
perm[-3] = 'r'
perm[-2] = '-'
# write_group = self.props.get('groupwrite', 'NONE') # MJG
write_group = self.props.get('groupwrite', '') # MJG
if write_group != '':
perm[5] = 'w'
# read_group = self.props.get('groupread', 'NONE')
read_group = self.props.get('groupread', '')
if read_group != '':
perm[4] = 'r'
is_locked = self.props.get(self.endpoints.islocked, "false")
        return {"permissions": ''.join(perm),
"creator": creator,
"readGroup": read_group,
"writeGroup": write_group,
"isLocked": is_locked,
"size": float(self.props.get('length', 0)),
"date": date,
"target": self.target}
@property
def node_list(self):
"""Get a list of all the nodes held to by a ContainerNode return a
list of Node objects"""
if self._node_list is None:
self._node_list = []
for nodesNode in self.node.findall(Node.NODES):
for nodeNode in nodesNode.findall(Node.NODE):
self.add_child(nodeNode)
return self._node_list
def add_child(self, child_element_tree):
"""
Add a child node to a node list.
:param child_element_tree: a node to add as a child.
:type child_element_tree: ElementTree
:return: Node
"""
child_node = Node(child_element_tree)
self.node_list.append(child_node)
return child_node
def clear_properties(self):
logger.debug("clearing properties")
properties_node_list = self.node.findall(Node.PROPERTIES)
for properties_node in properties_node_list:
for property_node in properties_node.findall(Node.PROPERTY):
key = self.get_prop_name(property_node.get('uri'))
if key in self.props:
del self.props[key]
properties_node.remove(property_node)
logger.debug("done clearing properties")
return
def get_info_list(self):
"""
        :rtype: [(Node, dict)]
        :return: a list of tuples containing the (NodeName, Info) about the node and its children
"""
info = {}
for node in self.node_list:
info[node.name] = node.get_info()
if self.type == "vos:DataNode":
info[self.name] = self.get_info()
return info.items()
    def set_props(self, props):
        """Populate this Node's props dictionary from the PROPERTY sub-elements of the given xml element.
        :param props: the xml PROPERTIES element whose PROPERTY values are read into self.props.
"""
for property_node in props.findall(Node.PROPERTY):
self.props[self.get_prop_name(property_node.get('uri'))] = self.get_prop_value(property_node)
return
@staticmethod
def get_prop_name(prop):
"""parse the property uri and get the name of the property (strips off the url and just returns the tag)
if this is an IVOA property, otherwise sends back the entry uri.
:param prop: the uri of the property to get the name of.
"""
        (url, prop_name) = splittag(prop)
if url == Node.IVOAURL:
return prop_name
return prop
@staticmethod
def get_prop_value(prop):
"""Pull out the value part of PROPERTY Element.
:param prop: an XML Element that represents a Node PROPERTY.
"""
return prop.text
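# A hedged sketch of building a Node from scratch. The URI and property value
# are invented; when given a string, Node.__init__ calls create() to assemble
# the VOSpace XML shown above and then update() to populate props and attr.
def _example_node():
    """Create an in-memory DataNode for an invented URI and return it."""
    return Node("vos://cadc.nrc.ca!vospace/mydir/myfile.fits",
                node_type=Node.DATA_NODE,
                properties={"ispublic": "true"})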
class VOFile(object):
"""
A class for managing http connections
Attributes:
maxRetries - maximum number of retries when transient errors encountered.
When set too high (as the default value is) the number of
retries are time limited (max 15min)
maxRetryTime - maximum time to retry for when transient errors are
encountered
"""
errnos = {404: errno.ENOENT,
401: errno.EACCES,
409: errno.EEXIST,
423: errno.EPERM,
408: errno.EAGAIN}
# ## if we get one of these codes, retry the command... ;-(
retryCodes = (503, 408, 504, 412)
def __init__(self, url_list, connector, method, size=None,
follow_redirect=True, byte_range=None, possible_partial_read=False):
# MJG: Fix URLs for non-GET calls
if method != 'GET' and '?' in url_list:
url_list = url_list[: url_list.rindex('?')]
self.closed = True
assert isinstance(connector, Connection)
self.connector = connector
self.httpCon = None
self.timeout = -1
self.size = size
self.md5sum = None
self.totalFileSize = None
self.maxRetries = 10000
self.maxRetryTime = MAX_RETRY_TIME
self.url = None
self.method = None
# TODO
# Make all the calls to open send a list of URLs
# this should be redone during a cleanup. Basically, a GET might
# result in multiple URLs (list of URLs) but VOFile is also used to
# retrieve schema files and other info.
# All the calls should pass a list of URLs. Make sure that we
# make a deep copy of the input list so that we don't
# accidentally modify the caller's copy.
if isinstance(url_list, list):
self.URLs = deepcopy(url_list)
else:
self.URLs = [url_list]
self.urlIndex = 0
self.followRedirect = follow_redirect
self._fpos = 0
# initial values for retry parameters
self.currentRetryDelay = DEFAULT_RETRY_DELAY
self.totalRetryDelay = 0
self.retries = 0
self.fileSize = None
self.request = None
self.resp = None
self.trans_encode = None
# open the connection
self._fobj = None
self.open(self.URLs[self.urlIndex], method, byte_range=byte_range, possible_partial_read=possible_partial_read)
def tell(self):
return self._fpos
def seek(self, offset, loc=os.SEEK_SET):
if loc == os.SEEK_CUR:
self._fpos += offset
elif loc == os.SEEK_SET:
self._fpos = offset
elif loc == os.SEEK_END:
self._fpos = int(self.size) - offset
return
@staticmethod
def flush():
"""
Flush is a NO OP in VOFile: only really flush on close.
@return:
"""
return
def close(self):
"""close the connection."""
if not self.closed:
try:
if self.trans_encode is not None:
self.httpCon.send('0\r\n\r\n')
logger.debug("End of document sent.")
logger.debug("getting response.")
self.resp = self.connector.session.send(self.request)
logger.debug("checking response status.")
self.checkstatus()
finally:
self.closed = True
return self.closed
    def checkstatus(self, codes=(200, 201, 202, 206, 302, 303, 503, 416,
                                 402, 408, 412, 504)):
"""check the response status. If the status code doesn't match a value from the codes list then
raise an Exception.
:param codes: a list of http status_codes that are NOT failures but require some additional action.
"""
msgs = {404: "Node Not Found",
401: "Not Authorized",
409: "Conflict",
423: "Locked",
408: "Connection Timeout"}
logger.debug("status %d for URL %s" % (self.resp.status_code, self.url))
if self.resp.status_code not in codes:
logger.debug("Got status code: %s for %s" %
(self.resp.status_code, self.url))
msg = self.resp.content
if msg is not None:
msg = html2text.html2text(msg, self.url).strip().replace('\n', ' ')
logger.debug("Error message: {0}".format(msg))
if self.resp.status_code in VOFile.errnos.keys() or (msg is not None and "Node is busy" in msg):
                if (msg is None or len(msg) == 0) and self.resp.status_code in msgs:
msg = msgs[self.resp.status_code]
if (self.resp.status_code == 401 and
self.connector.vospace_certfile is None and
self.connector.session.auth is None and self.connector.vospace_token is None):
msg += " using anonymous access "
exception = OSError(VOFile.errnos.get(self.resp.status_code, self.resp.status_code), msg)
if self.resp.status_code == 500 and "read-only" in msg:
exception = OSError(errno.EPERM, "VOSpace in read-only mode.")
raise exception
# Get the file size. We use this HEADER-CONTENT-LENGTH as a
# fallback to work around a server-side Java bug that limits
# 'Content-Length' to a signed 32-bit integer (~2 gig files)
try:
self.size = int(self.resp.headers.get("Content-Length", self.resp.headers.get(HEADER_CONTENT_LENGTH, 0)))
except ValueError:
self.size = 0
if self.resp.status_code == 200:
self.md5sum = self.resp.headers.get("Content-MD5", None)
self.totalFileSize = self.size
return True
def open(self, url, method="GET", byte_range=None, possible_partial_read=False):
"""Open a connection to the given URL
        :param url: The URL to be opened
:type url: str
:param method: HTTP Method to use on open (PUT/GET/POST)
:type method: str
        :param byte_range: The range of bytes to read. This is in open so we can set the header parameter.
:type byte_range: str
:param possible_partial_read: Sometimes we kill during read, this tells the server that isn't an error.
:type possible_partial_read: bool
"""
logger.debug("Opening %s (%s)" % (url, method))
self.url = url
self.method = method
request = requests.Request(self.method, url)
self.trans_encode = None
# Try to send a content length hint if this is a PUT.
# otherwise send as a chunked PUT
if method in ["PUT"]:
try:
self.size = int(self.size)
request.headers.update({"Content-Length": self.size,
HEADER_CONTENT_LENGTH: self.size})
except TypeError:
self.size = None
self.trans_encode = "chunked"
elif method in ["POST", "DELETE"]:
self.size = None
self.trans_encode = "chunked"
if method in ["PUT", "POST", "DELETE"]:
content_type = "text/xml"
# Workaround for UWS library issues MJG
if 'sync' in url or 'transfer' in url:
content_type = 'application/x-www-form-urlencoded'
if method == "PUT":
ext = os.path.splitext(urllib.splitquery(url)[0])[1]
                if ext in ['.fz', '.fits', '.fit']:
content_type = 'application/fits'
else:
content_type = mimetypes.guess_type(url)[0]
                    if content_type is None:
                        content_type = "text/xml"  # MJG
if content_type is not None:
request.headers.update({"Content-type": content_type})
if byte_range is not None and method == "GET":
request.headers.update({"Range": byte_range})
request.headers.update({"Accept": "*/*",
"Expect": "100-continue"})
# set header if a partial read is possible
if possible_partial_read and method == "GET":
request.headers.update({HEADER_PARTIAL_READ: "true"})
try:
self.request = self.connector.session.prepare_request(request)
except Exception as ex:
logger.error(str(ex))
def get_file_info(self):
"""Return information harvested from the HTTP header"""
return self.totalFileSize, self.md5sum
    def read(self, size=None, return_response=False):
"""return size bytes from the connection response
:param size: number of bytes to read from the file.
"""
if self.resp is None:
try:
logger.debug("Initializing read by sending request: {0}".format(self.request))
self.resp = self.connector.session.send(self.request, stream=True)
self.checkstatus()
except Exception as ex:
logger.debug("Error on read: {0}".format(ex))
raise ex
if self.resp is None:
raise OSError(errno.EFAULT, "No response from VOServer")
read_error = None
if self.resp.status_code == 416:
return ""
# check the most likely response first
if self.resp.status_code == 200 or self.resp.status_code == 206:
if return_response:
return self.resp
else:
buff = self.resp.raw.read(size)
                size = size if (size is not None and size < len(buff)) else len(buff)
# logger.debug("Sending back {0} bytes".format(size))
return buff[:size]
elif self.resp.status_code == 303 or self.resp.status_code == 302:
url = self.resp.headers.get('Location', None)
logger.debug("Got redirect URL: {0}".format(url))
self.url = url
if not url:
raise OSError(errno.ENOENT,
"Got 303 on {0} but no Location value in header? [{1}]".format(self.url,
self.resp.content),
self.url)
if self.followRedirect:
# We open this new URL without the byte range and partial read as we are following a service
# redirect and that service redirect is to the object that satisfies the original request.
                # TODO separate out making the transfer request and reading the response content.
self.open(url, "GET")
# logger.debug("Following redirected URL: %s" % (URL))
return self.read(size)
else:
# logger.debug("Got url:%s from redirect but not following" %
# (self.url))
return self.url
elif self.resp.status_code in VOFile.retryCodes:
# Note: 404 (File Not Found) might be returned when:
# 1. file deleted or replaced
# 2. file migrated from cache
# 3. hardware failure on storage node
# For 3. it is necessary to try the other URLs in the list
# otherwise this the failed URL might show up even after the
# caller tries to re-negotiate the transfer.
# For 1. and 2., calls to the other URLs in the list might or
# might not succeed.
if self.urlIndex < len(self.URLs) - 1:
# go to the next URL
self.urlIndex += 1
self.open(self.URLs[self.urlIndex], "GET")
return self.read(size)
else:
self.URLs.pop(self.urlIndex) # remove url from list
if len(self.URLs) == 0:
# no more URLs to try...
if read_error is not None:
raise read_error
if self.resp.status_code == 404:
raise OSError(errno.ENOENT, self.url)
else:
raise OSError(errno.EIO,
"unexpected server response %s (%d)" %
(self.resp.reason, self.resp.status_code), self.url)
if self.urlIndex < len(self.URLs):
self.open(self.URLs[self.urlIndex], "GET")
return self.read(size)
# start from top of URLs with a delay
self.urlIndex = 0
logger.error("Servers busy {0} for {1}".format(self.resp.status_code, self.URLs))
msg = self.resp.content
if msg is not None:
msg = html2text.html2text(msg, self.url).strip()
else:
msg = "No Message Sent"
logger.error("Message from VOSpace {0}: {1}".format(self.url, msg))
try:
# see if there is a Retry-After in the head...
ras = int(self.resp.headers.get("Retry-After", 5))
except ValueError:
ras = self.currentRetryDelay
if (self.currentRetryDelay * 2) < MAX_RETRY_DELAY:
self.currentRetryDelay *= 2
else:
self.currentRetryDelay = MAX_RETRY_DELAY
if ((self.retries < self.maxRetries) and
(self.totalRetryDelay < self.maxRetryTime)):
logger.error("Retrying in {0} seconds".format(ras))
self.totalRetryDelay += ras
self.retries += 1
time.sleep(int(ras))
self.open(self.URLs[self.urlIndex], "GET")
return self.read(size)
else:
raise OSError(self.resp.status_code,
"failed to connect to server after multiple attempts {0} {1}".format(self.resp.reason,
self.resp.status_code),
self.url)
@staticmethod
def write(buf):
"""write buffer to the connection
:param buf: string to write to the file.
"""
raise OSError(errno.ENOSYS, "Direct write to a VOSpaceFile is not supported, use copy instead.")
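# A minimal sketch of driving VOFile directly, assuming an argument-free
# Connection() is acceptable for anonymous access; in normal use Client.open()
# negotiates the URL list and builds the VOFile for you.
def _example_vofile_get(url):
    vofile = VOFile([url], Connection(), method="GET")
    content = vofile.read()
    vofile.close()
    return content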
class EndPoints(object):
CADC_SERVER = 'www.canfar.phys.uvic.ca'
# NOAO_TEST_SERVER = "dldemo.datalab.noirlab.edu:8080/vospace-2.0"
NOAO_TEST_SERVER = "dldb1.datalab.noirlab.edu:8080/vospace-2.0"
LOCAL_TEST_SERVER = 'localhost:8080/vospace-2.0'
DEFAULT_VOSPACE_URI = 'datalab.noao.edu!vospace'
# DEFAULT_VOSPACE_URI = 'nvo.caltech!vospace'
VOSPACE_WEBSERVICE = os.getenv('VOSPACE_WEBSERVICE', None)
VOServers = {'cadc.nrc.ca!vospace': CADC_SERVER,
'cadc.nrc.ca~vospace': CADC_SERVER,
'datalab.noao.edu!vospace': NOAO_TEST_SERVER,
'datalab.noao.edu~vospace': NOAO_TEST_SERVER,
'nvo.caltech!vospace': LOCAL_TEST_SERVER,
'nvo.caltech~vospace': LOCAL_TEST_SERVER
}
VODataView = {'cadc.nrc.ca!vospace': 'ivo://cadc.nrc.ca/vospace',
'cadc.nrc.ca~vospace': 'ivo://cadc.nrc.ca/vospace',
'datalab.noao.edu!vospace': 'ivo://datalab.noao.edu/vospace',
'datalab.noao.edu~vospace': 'ivo://datalab.noao.edu/vospace',
'nvo.caltech!vospace': 'ivo://nvo.caltech/vospace',
'nvo.caltech~vospace': 'ivo://nvo.caltech/vospace'}
# VONodes = "vospace/nodes"
# VOProperties = {NOAO_TEST_SERVER: "/vospace",
# CADC_SERVER: "/vospace/nodeprops",
# LOCAL_TEST_SERVER: "/vospace"}
# VOTransfer = {NOAO_TEST_SERVER: '/vospace/sync',
# CADC_SERVER: '/vospace/synctrans',
# LOCAL_TEST_SERVER: '/vospace/sync'}
VONodes = "nodes"
VOProperties = {NOAO_TEST_SERVER: "",
CADC_SERVER: "nodeprops",
LOCAL_TEST_SERVER: ""}
VOTransfer = {NOAO_TEST_SERVER: 'sync',
CADC_SERVER: 'synctrans',
LOCAL_TEST_SERVER: 'sync'}
def __init__(self, uri, basic_auth=False):
"""
        Based on the URI return the various server endpoints that will be
associated with this uri.
:param uri:
"""
self.service = basic_auth and 'vospace/auth' or 'vospace'
self.uri_parts = URLParser(uri)
@property
def netloc(self):
return self.uri_parts.netloc
@property
def properties(self):
return "{0}/{1}/{2}".format(self.server, self.service, EndPoints.VOProperties.get(self.server))
@property
def uri(self):
return "ivo://{0}".format(self.netloc).replace("!", "/").replace("~", "/")
@property
def view(self):
return "{0}/view".format(self.uri)
@property
def cutout(self):
return "ivo://{0}/{1}#{2}".format(self.uri_parts.server, 'view', 'cutout')
@property
def core(self):
return "{0}/core".format(self.uri)
@property
def islocked(self):
return "{0}#islocked".format(self.core)
@property
def server(self):
"""
:return: The network location of the VOSpace server.
"""
return (EndPoints.VOSPACE_WEBSERVICE is not None and EndPoints.VOSPACE_WEBSERVICE or
EndPoints.VOServers.get(self.netloc, None))
@property
def transfer(self):
"""
The transfer service endpoint.
:return: service location of the transfer service.
:rtype: str
"""
if self.server in EndPoints.VOTransfer:
end_point = EndPoints.VOTransfer[self.server]
else:
end_point = "/vospace/auth/synctrans"
return "{0}/{1}/{2}".format(self.server, self.service, end_point)
@property
def nodes(self):
"""
:return: The Node service endpoint.
"""
return "{0}/{1}/{2}".format(self.server, self.service, EndPoints.VONodes)
class Client(object):
"""The Client object does the work"""
VO_HTTPGET_PROTOCOL = 'ivo://ivoa.net/vospace/core#httpget'
VO_HTTPPUT_PROTOCOL = 'ivo://ivoa.net/vospace/core#httpput'
VO_HTTPSGET_PROTOCOL = 'ivo://ivoa.net/vospace/core#httpsget'
VO_HTTPSPUT_PROTOCOL = 'ivo://ivoa.net/vospace/core#httpsput'
DWS = '/data/pub/'
# reserved vospace properties, not to be used for extended property setting
vosProperties = ["description", "type", "encoding", "MD5", "length",
"creator", "date", "groupread", "groupwrite", "ispublic"]
VOSPACE_CERTFILE = os.getenv("VOSPACE_CERTFILE", None)
if VOSPACE_CERTFILE is None:
for certfile in ['cadcproxy.pem', 'vospaceproxy.pem']:
certpath = os.path.join(os.getenv("HOME", "."), '.ssl')
certfilepath = os.path.join(certpath, certfile)
if os.access(certfilepath, os.R_OK):
VOSPACE_CERTFILE = certfilepath
break
def __init__(self, vospace_certfile=None, root_node=None, conn=None,
transfer_shortcut=False, http_debug=False,
secure_get=False, vospace_token=None):
"""This could/should be expanded to set various defaults
:param vospace_certfile: x509 proxy certificate file location. Overrides certfile in conn.
:type vospace_certfile: str
:param vospace_token: token string (alternative to vospace_certfile)
:type vospace_token: str
:param root_node: the base of the VOSpace for uri references.
:type root_node: str
:param conn: a connection pool object for this Client
:type conn: Session
        :param transfer_shortcut: if True then just assume the data web service urls
:type transfer_shortcut: bool
:param http_debug: turn on http debugging.
:type http_debug: bool
:param secure_get: Use HTTPS: ie. transfer contents of files using SSL encryption.
:type secure_get: bool
"""
if not isinstance(conn, Connection):
vospace_certfile = vospace_certfile is None and Client.VOSPACE_CERTFILE or vospace_certfile
conn = Connection(vospace_certfile=vospace_certfile,
vospace_token=vospace_token,
http_debug=http_debug)
if conn.vospace_certfile:
logger.debug("Using certificate file: {0}".format(vospace_certfile))
if conn.vospace_token:
logger.debug("Using vospace token: " + conn.vospace_token)
vospace_certfile = conn.vospace_certfile
# Set the protocol
if vospace_certfile is None:
self.protocol = "http"
else:
self.protocol = "https"
self.conn = conn
self.rootNode = root_node
self.nodeCache = NodeCache()
self.transfer_shortcut = transfer_shortcut
self.secure_get = secure_get
return
def glob(self, pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, file names starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
:param pathname: path to glob.
"""
return list(self.iglob(pathname))
def iglob(self, pathname):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames
starting with a dot are special cases that are not matched by '*' and '?' patterns.
:param pathname: path to run glob against.
:type pathname: str
"""
dirname, basename = os.path.split(pathname)
if not self.has_magic(pathname):
if basename:
self.get_node(pathname)
yield pathname
else:
# Patterns ending with a slash should match only directories
if self.iglob(dirname):
yield pathname
return
if not dirname:
for name in self.glob1(self.rootNode, basename):
yield name
return
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname and self.has_magic(dirname):
dirs = self.iglob(dirname)
else:
dirs = [dirname]
if self.has_magic(basename):
glob_in_dir = self.glob1
else:
glob_in_dir = self.glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(self, dirname, pattern):
"""
:param dirname: name of the directory to look for matches in.
:type dirname: str
:param pattern: pattern to match directory contents names against
:type pattern: str
:return:
"""
if not dirname:
dirname = self.rootNode
if isinstance(pattern, _unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or sys.getdefaultencoding())
try:
names = self.listdir(dirname, force=True)
except os.error:
return []
if not pattern.startswith('.'):
names = filter(lambda x: not x.startswith('.'), names)
return fnmatch.filter(names, pattern)
def glob0(self, dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if self.isdir(dirname):
return [basename]
else:
if self.access(os.path.join(dirname, basename)):
return [basename]
else:
raise OSError(errno.EACCES, "Permission denied: {0}".format(os.path.join(dirname, basename)))
return []
magic_check = re.compile('[*?[]')
@classmethod
def has_magic(cls, s):
return cls.magic_check.search(s) is not None
# @logExceptions()
def copy(self, source, destination, send_md5=False):
"""copy from source to destination.
One of source or destination must be a vospace location and the other must be a local location.
:param source: The source file to send to VOSpace or the VOSpace node to retrieve
:type source: str
:param destination: The VOSpace location to put the file to or the local destination.
:type destination: str
:param send_md5: Should copy send back the md5 of the destination file or just the size?
:type send_md5: bool
"""
# TODO: handle vospace to vospace copies.
success = False
destination_size = None
destination_md5 = None
source_md5 = None
get_node_url_retried = False
if source[0:4] == "vos:":
check_md5 = False
match = re.search("([^\[\]]*)(\[.*\])$", source)
if match is not None:
view = 'cutout'
source = match.group(1)
cutout = match.group(2)
else:
view = 'data'
cutout = None
check_md5 = True
source_md5 = self.get_node(source).props.get('MD5', 'd41d8cd98f00b204e9800998ecf8427e')
get_urls = self.get_node_url(source, method='GET', cutout=cutout, view=view)
while not success:
# If there are no urls available, drop through to full negotiation if that wasn't already tried
if len(get_urls) == 0:
if self.transfer_shortcut and not get_node_url_retried:
get_urls = self.get_node_url(source, method='GET', cutout=cutout, view=view,
full_negotiation=True)
# remove the first one as we already tried that one.
get_urls.pop(0)
get_node_url_retried = True
else:
break
get_url = get_urls.pop(0)
try:
response = self.conn.session.get(get_url, timeout=(2, 5), stream=True)
source_md5 = response.headers.get('Content-MD5', source_md5)
response.raise_for_status()
with open(destination, 'w') as fout:
for chunk in response.iter_content(chunk_size=512 * 1024):
if chunk:
fout.write(chunk)
fout.flush()
destination_size = os.stat(destination).st_size
if check_md5:
destination_md5 = compute_md5(destination)
logger.debug("{0} {1}".format(source_md5, destination_md5))
assert destination_md5 == source_md5
success = True
except Exception as ex:
logging.debug("Failed to GET {0}".format(get_url))
logging.debug("Got error {0}".format(ex))
continue
else:
source_md5 = compute_md5(source)
put_urls = self.get_node_url(destination, 'PUT')
while not success:
if len(put_urls) == 0:
if self.transfer_shortcut and not get_node_url_retried:
put_urls = self.get_node_url(destination, method='PUT', full_negotiation=True)
# remove the first one as we already tried that one.
put_urls.pop(0)
get_node_url_retried = True
else:
break
put_url = put_urls.pop(0)
try:
with open(source, 'r') as fin:
self.conn.session.put(put_url, data=fin)
node = self.get_node(destination, limit=0, force=True)
destination_md5 = node.props.get('MD5', 'd41d8cd98f00b204e9800998ecf8427e')
assert destination_md5 == source_md5
except Exception as ex:
logging.debug("FAILED to PUT to {0}".format(put_url))
logging.debug("Got error: {0}".format(ex))
continue
success = True
break
if not success:
raise OSError(errno.EFAULT, "Failed copying {0} -> {1}".format(source, destination))
return send_md5 and destination_md5 or destination_size
def fix_uri(self, uri):
"""given a uri check if the authority part is there and if it isn't
then add the vospace authority
:param uri: The string that should be parsed into a proper URI, if possible.
"""
parts = URLParser(uri)
# TODO
# implement support for local files (parts.scheme=None
# and self.rootNode=None
if parts.scheme is None:
if self.rootNode is not None:
uri = self.rootNode + uri
else:
return uri
parts = URLParser(uri)
if parts.scheme != "vos":
            # Just pass this back, I don't know how to fix...
return uri
# Check that path name compiles with the standard
logger.debug("Got value of args: {0}".format(parts.args))
if parts.args is not None and parts.args != "":
uri = parse_qs(urlparse(parts.args).query).get('link', None)[0]
logger.debug("Got uri: {0}".format(uri))
if uri is not None:
return self.fix_uri(uri)
# Check for 'cutout' syntax values.
path = re.match("(?P<filename>[^\[]*)(?P<ext>(\[\d*:?\d*\])?"
"(\[\d*:?\d*,?\d*:?\d*\])?)", parts.path)
filename = os.path.basename(path.group('filename'))
if not re.match("^[_\-\(\)=\+!,;:@&\*\$\.\w~]*$", filename):
raise OSError(errno.EINVAL, "Illegal vospace container name",
filename)
path = path.group('filename')
# insert the default VOSpace server if none given
host = parts.netloc
if not host or host == '':
host = EndPoints.DEFAULT_VOSPACE_URI
path = os.path.normpath(path).strip('/')
uri = "{0}://{1}/{2}{3}".format(parts.scheme, host, path, parts.args)
logger.debug("Returning URI: {0}".format(uri))
return uri
def get_node(self, uri, limit=0, force=False):
"""connect to VOSpace and download the definition of VOSpace node
:param uri: -- a voSpace node in the format vos:/VOSpaceName/nodeName
:type uri: str
:param limit: -- load children nodes in batches of limit
:type limit: int, None
:param force: force getting the node from the service, rather than returning a cached version.
:return: The VOSpace Node
:rtype: Node
"""
logger.debug("Getting node {0}".format(uri))
uri = self.fix_uri(uri)
node = None
if not force and uri in self.nodeCache:
node = self.nodeCache[uri]
if node is None:
logger.debug("Getting node {0} from ws".format(uri))
with self.nodeCache.watch(uri) as watch:
# If this is vospace URI then we can request the node info
# using the uri directly, but if this a URL then the metadata
# comes from the HTTP header.
if uri.startswith('vos:'):
vo_fobj = self.open(uri, os.O_RDONLY, limit=limit)
vo_xml_string = vo_fobj.read()
xml_file = StringIO(vo_xml_string)
xml_file.seek(0)
dom = ElementTree.parse(xml_file)
node = Node(dom.getroot())
elif uri.startswith('http'):
header = self.open(None, url=uri, mode=os.O_RDONLY, head=True)
header.read()
logger.debug("Got http headers: {0}".format(header.resp.headers))
properties = {'type': header.resp.headers.get('Content-type', 'txt'),
'date': time.strftime(
'%Y-%m-%dT%H:%M:%S GMT',
time.strptime(header.resp.headers.get('Date', None),
'%a, %d %b %Y %H:%M:%S GMT')),
'groupwrite': None,
'groupread': None,
'ispublic': URLParser(uri).scheme == 'https' and 'true' or 'false',
'length': header.resp.headers.get('Content-Length', 0)}
node = Node(node=uri, node_type=Node.DATA_NODE, properties=properties)
logger.debug(str(node))
else:
raise OSError(2, "Bad URI {0}".format(uri))
watch.insert(node)
# IF THE CALLER KNOWS THEY DON'T NEED THE CHILDREN THEY
# CAN SET LIMIT=0 IN THE CALL Also, if the number of nodes
                # on the first call was less than 500, we likely got them
# all during the init
if limit != 0 and node.isdir() and len(node.node_list) > 500:
next_uri = None
while next_uri != node.node_list[-1].uri:
next_uri = node.node_list[-1].uri
xml_file = StringIO(self.open(uri, os.O_RDONLY, next_uri=next_uri, limit=limit).read())
xml_file.seek(0)
next_page = Node(ElementTree.parse(xml_file).getroot())
if len(next_page.node_list) > 0 and next_uri == next_page.node_list[0].uri:
next_page.node_list.pop(0)
node.node_list.extend(next_page.node_list)
for childNode in node.node_list:
with self.nodeCache.watch(childNode.uri) as childWatch:
childWatch.insert(childNode)
return node
def get_node_url(self, uri, method='GET', view=None, limit=0, next_uri=None, cutout=None, full_negotiation=None):
"""Split apart the node string into parts and return the correct URL for this node.
:param uri: The VOSpace uri to get an associated url for.
:type uri: str
:param method: What will this URL be used to do: 'GET' the node, 'PUT' or 'POST' to the node or 'DELETE' it
:type method: str
:param view: If this is a 'GET' which view of the node should the URL provide.
:type view: str
:param limit: If this is a container how many of the children should be returned? (None - Unlimited)
:type limit: int, None
:param next_uri: When getting a container we make repeated calls until all 'limit' children returned. next_uri
tells the service what was the last child uri retrieved in the previous call.
:type next_uri: str
:param cutout: The cutout pattern to apply to the file at the service end: applies to view='cutout' only.
:type cutout: str
:param full_negotiation: Should we use the transfer UWS or do a GET and follow the redirect.
:type full_negotiation: bool
"""
uri = self.fix_uri(uri)
if view in ['data', 'cutout'] and method == 'GET':
node = self.get_node(uri, limit=0)
if node.islink():
target = node.node.findtext(Node.TARGET)
logger.debug("%s is a link to %s" % (node.uri, target))
if target is None:
raise OSError(errno.ENOENT, "No target for link")
parts = URLParser(target)
if parts.scheme != "vos":
# This is not a link to another VOSpace node so lets just return the target as the url
url = target
if cutout is not None:
url = "{0}?cutout={1}".format(target, cutout)
logger.debug("Line 3.1.2")
logger.debug("Returning URL: {0}".format(url))
return [url]
logger.debug("Getting URLs for: {0}".format(target))
return self.get_node_url(target, method=method, view=view, limit=limit, next_uri=next_uri,
cutout=cutout,
full_negotiation=full_negotiation)
logger.debug("Getting URL for: " + str(uri))
parts = URLParser(uri)
if parts.scheme.startswith('http'):
return [uri]
endpoints = EndPoints(uri, basic_auth=self.conn.session.auth is not None)
# see if we have a VOSpace server that goes with this URI in our look up list
if endpoints.server is None:
# Since we don't know how to get URLs for this server we should just return the uri.
return uri
# full_negotiation is an override, so it can be used to force either shortcut (false) or full negotiation (true)
if full_negotiation is not None:
do_shortcut = not full_negotiation
else:
do_shortcut = self.transfer_shortcut
do_shortcut = False # MJG
if not do_shortcut and method == 'GET' and view in ['data', 'cutout']:
return self._get(uri, view=view, cutout=cutout)
if not do_shortcut and method == 'PUT':
return self._put(uri)
if (view == "cutout" and cutout is None) or (cutout is not None and view != "cutout"):
raise ValueError("For cutout, must specify a view=cutout and for view=cutout must specify cutout")
if method == 'GET' and view not in ['data', 'cutout']:
# This is a request for the URL of the Node, which returns an XML document that describes the node.
fields = {}
# MJG: No limit keyword on URLs
# if limit is not None:
# fields['limit'] = limit
if view is not None:
fields['view'] = view
if next_uri is not None:
fields['uri'] = next_uri
data = ""
if len(fields) > 0:
data = "?" + urllib.urlencode(fields)
url = "%s://%s/%s%s" % (self.protocol,
endpoints.nodes,
parts.path.strip('/'),
data)
logger.debug("URL: %s (%s)" % (url, method))
return url
# This is the shortcut. We do a GET request on the service with the parameters sent as arguments.
direction = {'GET': 'pullFromVoSpace', 'PUT': 'pushToVoSpace'}
# On GET override the protocol to be http (faster) unless a secure_get is requested.
protocol = {
'GET': {'https': (self.secure_get and Client.VO_HTTPSGET_PROTOCOL) or Client.VO_HTTPGET_PROTOCOL,
'http': Client.VO_HTTPGET_PROTOCOL},
'PUT': {'https': Client.VO_HTTPSPUT_PROTOCOL,
'http': Client.VO_HTTPPUT_PROTOCOL}}
# build the url for that will request the url that provides access to the node.
url = "%s://%s" % (self.protocol, endpoints.transfer)
logger.debug("URL: %s" % url)
args = {
'TARGET': uri,
'DIRECTION': direction[method],
'PROTOCOL': protocol[method][self.protocol],
'view': view}
if cutout is not None:
args['cutout'] = cutout
params = urllib.urlencode(args)
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
response = self.conn.session.get(url, params=params, headers=headers, allow_redirects=False)
assert isinstance(response, requests.Response)
logging.debug("Transfer Server said: {0}".format(response.content))
if response.status_code == 303:
# Normal case is a redirect
url = response.headers.get('Location', None)
elif response.status_code == 404:
# The file doesn't exist
raise OSError(errno.ENOENT, response.content, url)
elif response.status_code == 409:
raise OSError(errno.EREMOTE, response.content, url)
elif response.status_code == 413:
raise OSError(errno.E2BIG, response.content, url)
else:
logger.debug("Reverting to full negotiation")
return self.get_node_url(uri,
method=method,
view=view,
full_negotiation=True,
limit=limit,
next_uri=next_uri,
cutout=cutout)
logger.debug("Sending short cut url: {0}".format(url))
return [url]
def link(self, src_uri, link_uri):
"""Make link_uri point to src_uri.
:param src_uri: the existing resource, either a vospace uri or a http url
:type src_uri: str
:param link_uri: the vospace node to create that will be a link to src_uri
:type link_uri: str
"""
link_uri = self.fix_uri(link_uri)
src_uri = self.fix_uri(src_uri)
# if the link_uri points at an existing directory then we try and make a link into that directory
if self.isdir(link_uri):
link_uri = os.path.join(link_uri, os.path.basename(src_uri))
with nested(self.nodeCache.volatile(src_uri), self.nodeCache.volatile(link_uri)):
link_node = Node(link_uri, node_type="vos:LinkNode")
ElementTree.SubElement(link_node.node, "target").text = src_uri
data = str(link_node)
size = len(data)
# MJG
print(data)
url = self.get_node_url(link_uri)
logger.debug("Got linkNode URL: {0}".format(url))
self.conn.session.put(url, data=data, headers={'size': size, 'Content-type': 'text/xml'})
def move(self, src_uri, destination_uri):
"""Move src_uri to destination_uri. If destination_uri is a containerNode then move src_uri into destination_uri
:param src_uri: the VOSpace node to be moved.
:type src_uri: str
:param destination_uri: the VOSpace location to move to.
:type destination_uri: str
:return did the move succeed?
:rtype bool
"""
src_uri = self.fix_uri(src_uri)
destination_uri = self.fix_uri(destination_uri)
with nested(self.nodeCache.volatile(src_uri), self.nodeCache.volatile(destination_uri)):
return self.transfer(src_uri, destination_uri, view='move')
def _get(self, uri, view="defaultview", cutout=None):
with self.nodeCache.volatile(uri):
return self.transfer(uri, "pullFromVoSpace", view, cutout)
def _put(self, uri):
with self.nodeCache.volatile(uri):
return self.transfer(uri, "pushToVoSpace", view="defaultview")
def transfer(self, uri, direction, view=None, cutout=None):
"""Build the transfer XML document
:param direction: is this a pushToVoSpace or a pullFromVoSpace ?
:param uri: the uri to transfer from or to VOSpace.
:param view: which view of the node (data/default/cutout/etc.) is being transferred
:param cutout: a special parameter added to the 'cutout' view request. e.g. '[0][1:10,1:10]'
"""
endpoints = EndPoints(uri, basic_auth=self.conn.session.auth is not None)
protocol = {"pullFromVoSpace": "{0}get".format(self.protocol),
"pushToVoSpace": "{0}put".format(self.protocol)}
transfer_xml = ElementTree.Element("vos:transfer")
transfer_xml.attrib['xmlns:vos'] = Node.VOSNS
ElementTree.SubElement(transfer_xml, "vos:target").text = uri
ElementTree.SubElement(transfer_xml, "vos:direction").text = direction
if view == 'move':
ElementTree.SubElement(transfer_xml, "vos:keepBytes").text = "false"
else:
if view == 'defaultview' or view == 'data': # MJG - data view not supported
ElementTree.SubElement(transfer_xml, "vos:view").attrib['uri'] = "ivo://ivoa.net/vospace/core#defaultview"
elif view is not None:
vos_view = ElementTree.SubElement(transfer_xml, "vos:view")
vos_view.attrib['uri'] = endpoints.view + "#{0}".format(view)
if cutout is not None and view == 'cutout':
param = ElementTree.SubElement(vos_view, "vos:param")
param.attrib['uri'] = endpoints.cutout
param.text = cutout
protocol_element = ElementTree.SubElement(transfer_xml, "vos:protocol")
protocol_element.attrib['uri'] = "{0}#{1}".format(Node.IVOAURL, protocol[direction])
logging.debug(ElementTree.tostring(transfer_xml))
url = "{0}://{1}".format(self.protocol,
endpoints.transfer)
logging.debug("Sending to : {}".format(url))
data = ElementTree.tostring(transfer_xml)
resp = self.conn.session.post(url,
data=data,
allow_redirects=False,
headers={'Content-type': 'application/x-www-form-urlencoded'}) # 'text/xml'}) # MJG
logging.debug("{0}".format(resp))
logging.debug("{0}".format(resp.content))
if resp.status_code != 303 and resp.status_code != 302: # MJG
raise OSError(resp.status_code, "Failed to get transfer service response.")
transfer_url = resp.headers.get('Location', None)
if self.conn.session.auth is not None and "auth" not in transfer_url:
transfer_url = transfer_url.replace('/vospace/', '/vospace/auth/')
logging.debug("Got back from transfer URL: %s" % transfer_url)
# For a move this is the end of the transaction.
if view == 'move':
return not self.get_transfer_error(transfer_url, uri)
# for get or put we need the protocol value
xfer_resp = self.conn.session.get(transfer_url, allow_redirects=False)
xfer_url = xfer_resp.headers.get('Location', transfer_url) # MJG
if self.conn.session.auth is not None and "auth" not in xfer_url:
xfer_url = xfer_url.replace('/vospace/', '/vospace/auth/')
xml_string = self.conn.session.get(xfer_url).content
logging.debug("Transfer Document: %s" % xml_string)
transfer_document = ElementTree.fromstring(xml_string)
logging.debug("XML version: {0}".format(ElementTree.tostring(transfer_document)))
all_protocols = transfer_document.findall(Node.PROTOCOL)
if all_protocols is None or not len(all_protocols) > 0:
return self.get_transfer_error(transfer_url, uri)
result = []
for protocol in all_protocols:
for node in protocol.findall(Node.ENDPOINT):
result.append(node.text)
# if this is a connection to the 'rc' server then we reverse the
# urllist to test the fail-over process
if endpoints.server.startswith('rc'):
result.reverse()
return result
def get_transfer_error(self, url, uri):
"""Follow a transfer URL to the Error message
:param url: The URL of the transfer request that had the error.
:param uri: The uri that we were trying to transfer (get or put).
"""
error_codes = {'NodeNotFound': errno.ENOENT,
'RequestEntityTooLarge': errno.E2BIG,
'PermissionDenied': errno.EACCES,
'OperationNotSupported': errno.EOPNOTSUPP,
'InternalFault': errno.EFAULT,
'ProtocolNotSupported': errno.EPFNOSUPPORT,
'ViewNotSupported': errno.ENOSYS,
'InvalidArgument': errno.EINVAL,
'InvalidURI': errno.EFAULT,
'TransferFailed': errno.EIO,
'DuplicateNode.': errno.EEXIST,
'NodeLocked': errno.EPERM}
job_url = str.replace(url, "/results/transferDetails", "")
try:
phase_url = job_url + "/phase"
sleep_time = 1
roller = ('\\', '-', '/', '|', '\\', '-', '/', '|')
phase = VOFile(phase_url, self.conn, method="GET",
follow_redirect=False).read()
# do not remove the line below. It is used for testing
logging.debug("Job URL: " + job_url + "/phase")
while phase in ['PENDING', 'QUEUED', 'EXECUTING', 'UNKNOWN']:
# poll the job. Sleeping time in between polls is doubling
# each time until it gets to 32sec
total_time_slept = 0
if sleep_time <= 32:
sleep_time *= 2
slept = 0
if logger.getEffectiveLevel() == logging.INFO:
while slept < sleep_time:
sys.stdout.write("\r%s %s" % (phase,
roller[total_time_slept % len(roller)]))
sys.stdout.flush()
slept += 1
total_time_slept += 1
time.sleep(1)
sys.stdout.write("\r \n")
else:
time.sleep(sleep_time)
phase = self.conn.session.get(phase_url, allow_redirects=False).content
logging.debug("Async transfer Phase for url %s: %s " % (url, phase))
except KeyboardInterrupt:
# abort the job when receiving a Ctrl-C/Interrupt from the client
logging.error("Received keyboard interrupt")
self.conn.session.post(job_url + "/phase",
allow_redirects=False,
data="PHASE=ABORT",
headers={"Content-type": 'application/x-www-form-urlencoded'}) # MJG
raise KeyboardInterrupt
status = VOFile(phase_url, self.conn, method="GET",
follow_redirect=False).read()
logger.debug("Phase: {0}".format(status))
if status in ['COMPLETED']:
return False
if status in ['HELD', 'SUSPENDED', 'ABORTED']:
# re-queue the job and continue to monitor for completion.
            raise OSError(errno.EFAULT, "UWS status: {0}".format(status))
error_url = job_url + "/error"
error_message = self.conn.session.get(error_url).content
logger.debug("Got transfer error {0} on URI {1}".format(error_message, uri))
# Check if the error was that the link type is unsupported and try and follow that link.
target = re.search("Unsupported link target:(?P<target> .*)$", error_message)
if target is not None:
return target.group('target').strip()
raise OSError(error_codes.get(error_message, errno.EFAULT),
"{0}: {1}".format(uri, error_message))
def open(self, uri, mode=os.O_RDONLY, view=None, head=False, url=None,
limit=None, next_uri=None, size=None, cutout=None, byte_range=None,
full_negotiation=False, possible_partial_read=False):
"""Create a VOFile connection to the specified uri or url.
:rtype : VOFile
:param uri: The uri of the VOSpace resource to create a connection to, override by specifying url
:type uri: str, None
:param mode: The mode os.O_RDONLY or os.O_WRONLY to open the connection with.
:type mode: bit
:param view: The view of the VOSpace resource, one of: default, data, cutout
:type view: str, None
:param head: Just return the http header of this request.
:type head: bool
:param url: Ignore the uri (ie don't look up the url using get_node_url) and just connect to this url
:type url: str, None
:param limit: limit response from vospace to this many child nodes. relevant for containerNode type
:type limit: int, None
:param next_uri: The uri of the last child node returned by a previous request on a containerNode
:type next_uri: str, None
:param size: The size of file to expect or be put to VOSpace
:type size: int, None
:param cutout: The cutout pattern to use during a get
:type cutout: str, None
:param byte_range: The range of bytes to request, rather than getting the entire file.
:type byte_range: str, None
:param full_negotiation: force this interaction to use the full UWS interaction to get the url for the resource
:type full_negotiation: bool
:param possible_partial_read:
"""
# sometimes this is called with mode from ['w', 'r']
# really that's an error, but I thought I'd just accept those are
# os.O_RDONLY
if type(mode) == str:
mode = os.O_RDONLY
# the url of the connection depends if we are 'getting', 'putting' or
# 'posting' data
method = None
if mode == os.O_RDONLY:
method = "GET"
elif mode & (os.O_WRONLY | os.O_CREAT):
method = "PUT"
elif mode & os.O_APPEND:
method = "POST"
elif mode & os.O_TRUNC:
method = "DELETE"
if head:
method = "HEAD"
if not method:
raise OSError(errno.EOPNOTSUPP, "Invalid access mode", mode)
if uri is not None and view in ['data', 'cutout']:
# Check if this is a target node.
try:
node = self.get_node(uri)
if node.type == "vos:LinkNode":
target = node.node.findtext(Node.TARGET)
logger.debug("%s is a link to %s" % (node.uri, target))
if target is None:
raise OSError(errno.ENOENT, "No target for link")
else:
parts = URLParser(target)
if parts.scheme == 'vos':
# This is a link to another VOSpace node so lets open that instead.
return self.open(target, mode, view, head, url, limit,
next_uri, size, cutout, byte_range)
else:
# A target external link
# TODO Need a way of passing along authentication.
if cutout is not None:
target = "{0}?cutout={1}".format(target, cutout)
return VOFile([target],
self.conn,
method=method,
size=size,
byte_range=byte_range,
possible_partial_read=possible_partial_read)
except OSError as e:
if e.errno in [2, 404]:
pass
else:
raise e
if url is None:
url = self.get_node_url(uri, method=method, view=view,
limit=limit, next_uri=next_uri, cutout=cutout,
full_negotiation=full_negotiation)
if url is None:
raise OSError(errno.EREMOTE)
return VOFile(url, self.conn, method=method, size=size, byte_range=byte_range,
possible_partial_read=possible_partial_read)
def add_props(self, node):
"""Given a node structure do a POST of the XML to the VOSpace to
update the node properties
Makes a new copy of current local state, then gets a copy of what's on the server and
then updates server with differences.
:param node: the Node object to add some properties to.
"""
new_props = copy.deepcopy(node.props)
old_props = self.get_node(node.uri, force=True).props
for prop in old_props:
if prop in new_props and old_props[prop] == new_props[prop] and old_props[prop] is not None:
del (new_props[prop])
node.node = node.create(node.uri, node_type=node.type,
properties=new_props)
# Now write these new properties to the node location.
url = self.get_node_url(node.uri, method='GET')
data = str(node)
size = len(data)
self.conn.session.post(url,
headers={'size': size, 'Content-type': 'text/xml'},
data=data) # MJG
def create(self, node):
"""
Create a (Container/Link/Data) Node on the VOSpace server.
:param node: the Node that we are going to create on the server.
        :type node: Node
"""
url = self.get_node_url(node.uri, method='PUT')
data = str(node)
size = len(data)
self.conn.session.put(url, data=data, headers={'size': size, 'Content-type': 'text/xml'})
return True
def update(self, node, recursive=False):
"""Updates the node properties on the server. For non-recursive
updates, node's properties are updated on the server. For
recursive updates, node should only contain the properties to
be changed in the node itself as well as all its children.
:param node: the node to update.
:param recursive: should this update be applied to all children? (True/False)
"""
# Let's do this update using the async transfer method
url = self.get_node_url(node.uri)
endpoints = node.endpoints
if recursive:
property_url = "{0}://{1}".format(self.protocol, endpoints.properties)
logger.debug("prop URL: {0}".format(property_url))
try:
resp = self.conn.session.post(property_url,
allow_redirects=False,
data=str(node),
headers={'Content-type': 'text/xml'})
except Exception as ex:
logger.error(str(ex))
raise ex
if resp is None:
raise OSError(errno.EFAULT, "Failed to connect VOSpace")
logger.debug("Got prop-update response: {0}".format(resp.content))
transfer_url = resp.headers.get('Location', None)
logger.debug("Got job status redirect: {0}".format(transfer_url))
# logger.debug("Got back %s from $Client.VOPropertiesEndPoint " % (con))
# Start the job
self.conn.session.post(transfer_url + "/phase",
allow_redirects=False,
data="PHASE=RUN",
headers={'Content-type': "application/x-www-form-urlencoded"}) # MJG
self.get_transfer_error(transfer_url, node.uri)
else:
resp = self.conn.session.post(url,
data=str(node),
allow_redirects=False,
headers={'Content-type': 'text/xml'}) # MJG
logger.debug("update response: {0}".format(resp.content))
return 0
def mkdir(self, uri):
"""
Create a ContainerNode on the service. Raise OSError(EEXIST) if the container exists.
:param uri: The URI of the ContainerNode to create on the service.
:type uri: str
"""
uri = self.fix_uri(uri)
node = Node(uri, node_type="vos:ContainerNode")
url = self.get_node_url(uri)
try:
if '?' in url: url = url[: url.rindex('?')] # MJG
self.conn.session.headers['Content-type'] = 'text/xml' # MJG
response = self.conn.session.put(url, data=str(node))
response.raise_for_status()
except HTTPError as http_error:
if http_error.response.status_code != 409:
raise http_error
else:
raise OSError(errno.EEXIST, 'ContainerNode {0} already exists'.format(uri))
def delete(self, uri):
"""Delete the node
:param uri: The (Container/Link/Data)Node to delete from the service.
"""
uri = self.fix_uri(uri)
logger.debug("delete {0}".format(uri))
with self.nodeCache.volatile(uri):
url = self.get_node_url(uri, method='GET')
response = self.conn.session.delete(url)
response.raise_for_status()
def get_info_list(self, uri):
"""Retrieve a list of tuples of (NodeName, Info dict)
:param uri: the Node to get info about.
"""
info_list = {}
uri = self.fix_uri(uri)
logger.debug(str(uri))
node = self.get_node(uri, limit=None)
logger.debug(str(node))
while node.type == "vos:LinkNode":
uri = node.target
try:
node = self.get_node(uri, limit=None)
except Exception as e:
logger.error(str(e))
break
for thisNode in node.node_list:
info_list[thisNode.name] = thisNode.get_info()
if node.type in ["vos:DataNode", "vos:LinkNode"]:
info_list[node.name] = node.get_info()
return info_list.items()
def listdir(self, uri, force=False):
"""
Walk through the directory structure a la os.walk.
Setting force=True will make sure no cached results are used.
Follows LinksNodes to their destination location.
:param force: don't use cached values, retrieve from service.
:param uri: The ContainerNode to get a listing of.
:rtype [str]
"""
# logger.debug("getting a listing of %s " % (uri))
names = []
logger.debug(str(uri))
node = self.get_node(uri, limit=None, force=force)
while node.type == "vos:LinkNode":
uri = node.target
# logger.debug(uri)
node = self.get_node(uri, limit=None, force=force)
for thisNode in node.node_list:
names.append(thisNode.name)
return names
def _node_type(self, uri):
"""
Recursively follow links until the base Node is found.
:param uri: the VOSpace uri to recursively get the type of.
:return: the type of Node
:rtype: str
"""
node = self.get_node(uri, limit=0)
while node.type == "vos:LinkNode":
uri = node.target
if uri[0:4] == "vos:":
node = self.get_node(uri, limit=0)
else:
return "vos:DataNode"
return node.type
def isdir(self, uri):
"""Check to see if the given uri is or is a link to containerNode.
:param uri: a VOSpace Node URI to test.
:rtype: bool
"""
try:
return self._node_type(uri) == "vos:ContainerNode"
except OSError as ex:
if ex.errno == errno.ENOENT:
return False
raise ex
def isfile(self, uri):
"""
Check if the given uri is or is a link to a DataNode
:param uri: the VOSpace Node URI to test.
:rtype: bool
"""
try:
return self._node_type(uri) == "vos:DataNode"
except OSError as ex:
if ex.errno == errno.ENOENT:
return False
raise ex
def access(self, uri, mode=os.O_RDONLY):
"""Test if the give VOSpace uri can be accessed in the way requested.
:param uri: a VOSpace location.
:param mode: os.O_RDONLY
"""
return isinstance(self.open(uri, mode=mode), VOFile)
def status(self, uri, code=None):
"""
Check to see if this given uri points at a containerNode.
This is done by checking the view=data header and seeing if you
get an error.
:param uri: the VOSpace (Container/Link/Data)Node to check access status on.
:param code: NOT SUPPORTED.
"""
        if code is not None:
raise OSError(errno.ENOSYS, "Use of 'code' option values no longer supported.")
self.get_node(uri)
return True
def get_job_status(self, url):
""" Returns the status of a job
:param url: the URL of the UWS job to get status of.
:rtype: str
"""
return VOFile(url, self.conn, method="GET", follow_redirect=False).read()
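# A brief usage sketch of the Client defined above, assuming token-based
# credentials are already arranged; the token value, container name and local
# path are illustrative only.
def _example_client_session():
    client = Client(vospace_token="example-token")
    if not client.isdir("vos:mydata/images"):
        client.mkdir("vos:mydata/images")
    # Push a local file up, then list the container to confirm it arrived.
    client.copy("/tmp/frame001.fits", "vos:mydata/images/frame001.fits")
    return client.listdir("vos:mydata/images", force=True)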
|
the-stack_106_14610
|
import argparse
import os
import panda3d.core as p3d
CONFIG_DATA = """
assimp-gen-normals true
bam-texture-mode unchanged
"""
p3d.load_prc_file_data('', CONFIG_DATA)
def make_texpath_relative(node, srcdir, converted_textures):
geomnode = node.node()
for idx, renderstate in enumerate(geomnode.get_geom_states()):
texattrib = renderstate.get_attrib(p3d.TextureAttrib)
if texattrib:
for texstage in texattrib.get_on_stages():
texture = texattrib.get_on_texture(texstage)
if texture in converted_textures:
continue
texture.filename = os.path.relpath(texture.filename, srcdir)
converted_textures.add(texture)
renderstate = renderstate.set_attrib(texattrib)
geomnode.set_geom_state(idx, renderstate)
def main():
parser = argparse.ArgumentParser(
description='A tool for creating BAM files from Panda3D supported file formats'
)
parser.add_argument('src', type=str, help='source path')
parser.add_argument('dst', type=str, help='destination path')
args = parser.parse_args()
src = p3d.Filename.from_os_specific(os.path.abspath(args.src))
dst = p3d.Filename.from_os_specific(os.path.abspath(args.dst))
dst.make_dir()
loader = p3d.Loader.get_global_ptr()
options = p3d.LoaderOptions()
options.flags |= p3d.LoaderOptions.LF_no_cache
scene = p3d.NodePath(loader.load_sync(src, options))
# Update texture paths
converted_textures = set()
for node in scene.find_all_matches('**/+GeomNode'):
make_texpath_relative(node, src.get_dirname(), converted_textures)
scene.write_bam_file(dst)
if __name__ == '__main__':
main()
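# A short sketch of calling this converter from another script, assuming it is
# saved as 'bam_tool.py' and that 'assets/ship.egg' is a model Panda3D can
# load; both names are placeholders.
def _example_convert():
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "bam_tool.py",
                           "assets/ship.egg", "build/ship.bam"])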
|
the-stack_106_14611
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import sys
import traceback
import splunktalib.splunk_platform as sp
from splunktalib.common import log
def _parse_modinput_configs(root, outer_block, inner_block):
"""
    When splunkd spawns the modinput script to do a config check or run, the configuration is fed in as:
<?xml version="1.0" encoding="UTF-8"?>
<input>
<server_host>localhost.localdomain</server_host>
<server_uri>https://127.0.0.1:8089</server_uri>
<session_key>xxxyyyzzz</session_key>
<checkpoint_dir>ckpt_dir</checkpoint_dir>
<configuration>
<stanza name="snow://alm_asset">
<param name="duration">60</param>
<param name="host">localhost.localdomain</param>
<param name="index">snow</param>
<param name="priority">10</param>
</stanza>
...
</configuration>
</input>
    When a user creates a stanza through a data input on the WebUI, it is fed in as:
<?xml version="1.0" encoding="UTF-8"?>
<items>
<server_host>localhost.localdomain</server_host>
<server_uri>https://127.0.0.1:8089</server_uri>
<session_key>xxxyyyzzz</session_key>
<checkpoint_dir>ckpt_dir</checkpoint_dir>
<item name="abc">
<param name="duration">60</param>
<param name="exclude"></param>
<param name="host">localhost.localdomain</param>
<param name="index">snow</param>
<param name="priority">10</param>
</item>
</items>
"""
confs = root.getElementsByTagName(outer_block)
if not confs:
log.logger.error("Invalid config, missing %s section", outer_block)
        raise Exception("Invalid config, missing {} section".format(outer_block))
configs = []
stanzas = confs[0].getElementsByTagName(inner_block)
for stanza in stanzas:
config = {}
stanza_name = stanza.getAttribute("name")
if not stanza_name:
log.logger.error("Invalid config, missing name")
raise Exception("Invalid config, missing name")
config["name"] = stanza_name
params = stanza.getElementsByTagName("param")
for param in params:
name = param.getAttribute("name")
if (
name
and param.firstChild
and param.firstChild.nodeType == param.firstChild.TEXT_NODE
):
config[name] = param.firstChild.data
configs.append(config)
return configs
def parse_modinput_configs(config_str):
"""
    @config_str: modinput XML configuration fed by splunkd
@return: meta_config and stanza_config
"""
import defusedxml.minidom as xdm
meta_configs = {
"server_host": None,
"server_uri": None,
"session_key": None,
"checkpoint_dir": None,
}
root = xdm.parseString(config_str)
doc = root.documentElement
for tag in meta_configs.keys():
nodes = doc.getElementsByTagName(tag)
if not nodes:
log.logger.error("Invalid config, missing %s section", tag)
            raise Exception("Invalid config, missing {} section".format(tag))
if nodes[0].firstChild and nodes[0].firstChild.nodeType == nodes[0].TEXT_NODE:
meta_configs[tag] = nodes[0].firstChild.data
else:
            log.logger.error("Invalid config, expect text node")
            raise Exception("Invalid config, expect text node")
if doc.nodeName == "input":
configs = _parse_modinput_configs(doc, "configuration", "stanza")
else:
configs = _parse_modinput_configs(root, "items", "item")
return meta_configs, configs
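# A small sketch of what parse_modinput_configs() returns for the splunkd-style
# feed shown in the docstring above; the stanza name and parameter values are
# illustrative only.
def _example_parse():
    config_str = """<?xml version="1.0" encoding="UTF-8"?>
<input>
  <server_host>localhost.localdomain</server_host>
  <server_uri>https://127.0.0.1:8089</server_uri>
  <session_key>xxxyyyzzz</session_key>
  <checkpoint_dir>ckpt_dir</checkpoint_dir>
  <configuration>
    <stanza name="snow://alm_asset">
      <param name="duration">60</param>
      <param name="index">snow</param>
    </stanza>
  </configuration>
</input>"""
    meta, stanzas = parse_modinput_configs(config_str)
    # meta carries server_host/server_uri/session_key/checkpoint_dir;
    # stanzas is a list of dicts, one per <stanza>, keyed by param name.
    return meta["checkpoint_dir"], stanzas[0]["name"], stanzas[0]["duration"]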
def get_modinput_configs_from_cli(modinput, modinput_stanza=None):
"""
@modinput: modinput name
@modinput_stanza: modinput stanza name, for multiple instance only
"""
assert modinput
splunkbin = sp.get_splunk_bin()
cli = [splunkbin, "cmd", "splunkd", "print-modinput-config", modinput]
if modinput_stanza:
cli.append(modinput_stanza)
out, err = subprocess.Popen(
cli, stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
if err:
log.logger.error("Failed to get modinput configs with error: %s", err)
return None, None
else:
return parse_modinput_configs(out)
def get_modinput_config_str_from_stdin():
"""
    Get modinput from stdin which is fed by splunkd
"""
try:
return sys.stdin.read(5000)
except Exception:
log.logger.error(traceback.format_exc())
raise
def get_modinput_configs_from_stdin():
config_str = get_modinput_config_str_from_stdin()
return parse_modinput_configs(config_str)
|
the-stack_106_14612
|
import unittest
from bot.utils.message_cache import MessageCache
from tests.helpers import MockMessage
# noinspection SpellCheckingInspection
class TestMessageCache(unittest.TestCase):
"""Tests for the MessageCache class in the `bot.utils.caching` module."""
def test_first_append_sets_the_first_value(self):
"""Test if the first append adds the message to the first cell."""
cache = MessageCache(maxlen=10)
message = MockMessage()
cache.append(message)
self.assertEqual(cache[0], message)
def test_append_adds_in_the_right_order(self):
"""Test if two appends are added in the same order if newest_first is False, or in reverse order otherwise."""
messages = [MockMessage(), MockMessage()]
cache = MessageCache(maxlen=10, newest_first=False)
for msg in messages:
cache.append(msg)
self.assertListEqual(messages, list(cache))
cache = MessageCache(maxlen=10, newest_first=True)
for msg in messages:
cache.append(msg)
self.assertListEqual(messages[::-1], list(cache))
def test_appending_over_maxlen_removes_oldest(self):
"""Test if three appends to a 2-cell cache leave the two newest messages."""
cache = MessageCache(maxlen=2)
messages = [MockMessage() for _ in range(3)]
for msg in messages:
cache.append(msg)
self.assertListEqual(messages[1:], list(cache))
def test_appending_over_maxlen_with_newest_first_removes_oldest(self):
"""Test if three appends to a 2-cell cache leave the two newest messages if newest_first is True."""
cache = MessageCache(maxlen=2, newest_first=True)
messages = [MockMessage() for _ in range(3)]
for msg in messages:
cache.append(msg)
self.assertListEqual(messages[:0:-1], list(cache))
def test_pop_removes_from_the_end(self):
"""Test if a pop removes the right-most message."""
cache = MessageCache(maxlen=3)
messages = [MockMessage() for _ in range(3)]
for msg in messages:
cache.append(msg)
msg = cache.pop()
self.assertEqual(msg, messages[-1])
self.assertListEqual(messages[:-1], list(cache))
def test_popleft_removes_from_the_beginning(self):
"""Test if a popleft removes the left-most message."""
cache = MessageCache(maxlen=3)
messages = [MockMessage() for _ in range(3)]
for msg in messages:
cache.append(msg)
msg = cache.popleft()
self.assertEqual(msg, messages[0])
self.assertListEqual(messages[1:], list(cache))
def test_clear(self):
"""Test if a clear makes the cache empty."""
cache = MessageCache(maxlen=5)
messages = [MockMessage() for _ in range(3)]
for msg in messages:
cache.append(msg)
cache.clear()
self.assertListEqual(list(cache), [])
self.assertEqual(len(cache), 0)
def test_get_message_returns_the_message(self):
"""Test if get_message returns the cached message."""
cache = MessageCache(maxlen=5)
message = MockMessage(id=1234)
cache.append(message)
self.assertEqual(cache.get_message(1234), message)
def test_get_message_returns_none(self):
"""Test if get_message returns None for an ID of a non-cached message."""
cache = MessageCache(maxlen=5)
message = MockMessage(id=1234)
cache.append(message)
self.assertIsNone(cache.get_message(4321))
def test_update_replaces_old_element(self):
"""Test if an update replaced the old message with the same ID."""
cache = MessageCache(maxlen=5)
message = MockMessage(id=1234)
cache.append(message)
message = MockMessage(id=1234)
cache.update(message)
self.assertIs(cache.get_message(1234), message)
self.assertEqual(len(cache), 1)
def test_contains_returns_true_for_cached_message(self):
"""Test if contains returns True for an ID of a cached message."""
cache = MessageCache(maxlen=5)
message = MockMessage(id=1234)
cache.append(message)
self.assertIn(1234, cache)
def test_contains_returns_false_for_non_cached_message(self):
"""Test if contains returns False for an ID of a non-cached message."""
cache = MessageCache(maxlen=5)
message = MockMessage(id=1234)
cache.append(message)
self.assertNotIn(4321, cache)
def test_indexing(self):
"""Test if the cache returns the correct messages by index."""
cache = MessageCache(maxlen=5)
messages = [MockMessage() for _ in range(5)]
for msg in messages:
cache.append(msg)
for current_loop in range(-5, 5):
with self.subTest(current_loop=current_loop):
self.assertEqual(cache[current_loop], messages[current_loop])
def test_bad_index_raises_index_error(self):
"""Test if the cache raises IndexError for invalid indices."""
cache = MessageCache(maxlen=5)
messages = [MockMessage() for _ in range(3)]
test_cases = (-10, -4, 3, 4, 5)
for msg in messages:
cache.append(msg)
for current_loop in test_cases:
with self.subTest(current_loop=current_loop):
with self.assertRaises(IndexError):
cache[current_loop]
def test_slicing_with_unfilled_cache(self):
"""Test if slicing returns the correct messages if the cache is not yet fully filled."""
sizes = (5, 10, 55, 101)
slices = (
slice(None), slice(2, None), slice(None, 2), slice(None, None, 2), slice(None, None, 3), slice(-1, 2),
slice(-1, 3000), slice(-3, -1), slice(-10, 3), slice(-10, 4, 2), slice(None, None, -1), slice(None, 3, -2),
slice(None, None, -3), slice(-1, -10, -2), slice(-3, -7, -1)
)
for size in sizes:
cache = MessageCache(maxlen=size)
messages = [MockMessage() for _ in range(size // 3 * 2)]
for msg in messages:
cache.append(msg)
for slice_ in slices:
with self.subTest(current_loop=(size, slice_)):
self.assertListEqual(cache[slice_], messages[slice_])
def test_slicing_with_overfilled_cache(self):
"""Test if slicing returns the correct messages if the cache was appended with more messages it can contain."""
sizes = (5, 10, 55, 101)
slices = (
slice(None), slice(2, None), slice(None, 2), slice(None, None, 2), slice(None, None, 3), slice(-1, 2),
slice(-1, 3000), slice(-3, -1), slice(-10, 3), slice(-10, 4, 2), slice(None, None, -1), slice(None, 3, -2),
slice(None, None, -3), slice(-1, -10, -2), slice(-3, -7, -1)
)
for size in sizes:
cache = MessageCache(maxlen=size)
messages = [MockMessage() for _ in range(size * 3 // 2)]
for msg in messages:
cache.append(msg)
messages = messages[size // 2:]
for slice_ in slices:
with self.subTest(current_loop=(size, slice_)):
self.assertListEqual(cache[slice_], messages[slice_])
def test_length(self):
"""Test if len returns the correct number of items in the cache."""
cache = MessageCache(maxlen=5)
for current_loop in range(10):
with self.subTest(current_loop=current_loop):
self.assertEqual(len(cache), min(current_loop, 5))
cache.append(MockMessage())
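# --- Illustrative sketch (not a test) -------------------------------------------
# A hedged usage example of the behaviour exercised above, reusing the same
# MessageCache/MockMessage helpers: with maxlen=2 the oldest message is evicted,
# and newest_first=True makes iteration yield the most recent message first.
def _example_cache_usage():
    cache = MessageCache(maxlen=2, newest_first=True)
    first, second, third = MockMessage(), MockMessage(), MockMessage()
    for message in (first, second, third):
        cache.append(message)
    assert list(cache) == [third, second]  # `first` was evicted
    return cache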
|
the-stack_106_14613
|
from opendust.opendust import DustParticle
from opendust.opendust import PlasmaParametersInSIUnitsFieldDriven
from opendust.opendust import SimulatioParametersInSIUnits
from opendust.opendust import OutputParameters
from opendust.opendust import OpenDust
###############################################
### 1. Define plasma parameters in SI units ###
###############################################
T_e = 30000 # electron temperature (K)
T_n = 300 # neutral gas temperature (K)
n_inf = 3.57167962497e15 # ion concentration (1/m^3)
m_i = 1.673557e-27 # H+-ion mass (kg)
w_c = 1.65562835e08 # ion-neutral collision frequency (s^-1)
E = 2.72066448e04 # electric field (V/m)
distributionType = "fieldDriven"
plasmaParametersInSIUnits = PlasmaParametersInSIUnitsFieldDriven(
T_n, T_e, n_inf, E, w_c, m_i
)
plasmaParametersInSIUnits.printParameters()
###################################################
### 2. Define simulation parameters in SI units ###
###################################################
R = 3 * plasmaParametersInSIUnits.r_D_e
H = 6 * plasmaParametersInSIUnits.r_D_e
N = int(2 ** 19)
n = 50000
d_t = 1e-11
simulationParametersInSIUnits = SimulatioParametersInSIUnits(
R, H, N, n, d_t, plasmaParametersInSIUnits
)
simulationParametersInSIUnits.printParameters()
###################################
### 3. Define output parameters ###
###################################
directory = "/home/avtimofeev/opendust/data/Patacchini2008/Animation/"
nOutput = 500
nFileOutput = 500
csvOutputFileName = directory + "csv/trajectory"
xyzOutputFileName = directory + "trajectory.xyz"
restartFileName = directory + "RESTART"
outputParameters = OutputParameters(
nOutput, nFileOutput, csvOutputFileName, xyzOutputFileName, restartFileName
)
################################
### 4. Define dust particles ###
################################
r = 1e-05 # radius of dust particles (m)
q = 200000 * plasmaParametersInSIUnits.e # charge of dust particles
chargeCalculationMethod = "given" # charge calculation method
x_1, y_1, z_1, r_1, q_1 = 0, 0, -1 * plasmaParametersInSIUnits.r_D_e, r, q
dustParticle1 = DustParticle(x_1, y_1, z_1, r_1, chargeCalculationMethod, q_1)
dustParticles = [dustParticle1]
############################################################
### 5. Create OpenDust class object and start simulation ###
############################################################
openDust = OpenDust(
plasmaParametersInSIUnits,
simulationParametersInSIUnits,
outputParameters,
dustParticles,
distributionType,
)
openDust.simulate(deviceIndex = "0,1,2", cutOff = False)
##################
### 6. Analyze ###
##################
forceIonsOrbitZ = openDust.dustParticles[0].forceIonsOrbit
forceIonsCollectZ = openDust.dustParticles[0].forceIonsCollect
q = openDust.dustParticles[0].q
t = openDust.t
f = open(directory+"force.txt","w")
for i in range(n):
f.write(
"{}\t{}\t{}\n".format(
t[i],
forceIonsOrbitZ[i][2],
forceIonsCollectZ[i][2],
)
)
f.close()
meanForceIonsOrbitZ = 0
meanForceIonsCollectZ = 0
iterator = 0
for i in range(n):
if i > 25000:
iterator += 1
meanForceIonsOrbitZ += forceIonsOrbitZ[i][2]
meanForceIonsCollectZ += forceIonsCollectZ[i][2]
meanForceIonsOrbitZ /= iterator
meanForceIonsCollectZ /= iterator
print("Mean force from ions orbits = {}".format(meanForceIonsOrbitZ))
print("Mean force from collected ions = {}".format(meanForceIonsCollectZ))
print("Mean force from ions = {}".format(meanForceIonsOrbitZ+meanForceIonsCollectZ))
f = open(directory+"charge.txt","w")
for i in range(n):
f.write(
"{}\t{}\n".format(
t[i],
q[i]
)
)
f.close()
|
the-stack_106_14615
|
import argparse
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (AveragePooling2D, BatchNormalization, Conv2D, Dense, MaxPool2D)
from tensorflow.keras.losses import Reduction, SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import SGD
import nni
from nni.nas.tensorflow.mutables import LayerChoice, InputChoice
from nni.nas.tensorflow.classic_nas import get_and_apply_next_architecture
tf.get_logger().setLevel('ERROR')
class Net(Model):
def __init__(self):
super().__init__()
self.conv1 = LayerChoice([
Conv2D(6, 3, padding='same', activation='relu'),
Conv2D(6, 5, padding='same', activation='relu'),
])
self.pool = MaxPool2D(2)
self.conv2 = LayerChoice([
Conv2D(16, 3, padding='same', activation='relu'),
Conv2D(16, 5, padding='same', activation='relu'),
])
self.conv3 = Conv2D(16, 1)
self.skipconnect = InputChoice(n_candidates=2, n_chosen=1)
self.bn = BatchNormalization()
self.gap = AveragePooling2D(2)
self.fc1 = Dense(120, activation='relu')
self.fc2 = Dense(84, activation='relu')
self.fc3 = Dense(10)
def call(self, x):
bs = x.shape[0]
t = self.conv1(x)
x = self.pool(t)
x0 = self.conv2(x)
x1 = self.conv3(x0)
x0 = self.skipconnect([x0, None])
if x0 is not None:
x1 += x0
x = self.pool(self.bn(x1))
x = self.gap(x)
x = tf.reshape(x, [bs, -1])
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def loss(model, x, y, training):
# training=training is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
y_ = model(x, training=training)
return loss_object(y_true=y, y_pred=y_)
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets, training=True)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
def train(net, train_dataset, optimizer, num_epochs):
train_loss_results = []
train_accuracy_results = []
for epoch in range(num_epochs):
epoch_loss_avg = tf.keras.metrics.Mean()
epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
for x, y in train_dataset:
loss_value, grads = grad(net, x, y)
optimizer.apply_gradients(zip(grads, net.trainable_variables))
epoch_loss_avg.update_state(loss_value)
epoch_accuracy.update_state(y, net(x, training=True))
train_loss_results.append(epoch_loss_avg.result())
train_accuracy_results.append(epoch_accuracy.result())
if epoch % 1 == 0:
print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch,
epoch_loss_avg.result(),
epoch_accuracy.result()))
def test(model, test_dataset):
test_accuracy = tf.keras.metrics.Accuracy()
for (x, y) in test_dataset:
# training=False is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
logits = model(x, training=False)
prediction = tf.argmax(logits, axis=1, output_type=tf.int32)
test_accuracy(prediction, y)
print("Test set accuracy: {:.3%}".format(test_accuracy.result()))
return test_accuracy.result()
if __name__ == '__main__':
# Training settings
    parser = argparse.ArgumentParser(description='TensorFlow CIFAR-10 NAS Example')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
args, _ = parser.parse_known_args()
cifar10 = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
split = int(len(x_train) * 0.9)
dataset_train = tf.data.Dataset.from_tensor_slices((x_train[:split], y_train[:split])).batch(64)
dataset_valid = tf.data.Dataset.from_tensor_slices((x_train[split:], y_train[split:])).batch(64)
dataset_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(64)
net = Net()
get_and_apply_next_architecture(net)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
train(net, dataset_train, optimizer, args.epochs)
acc = test(net, dataset_test)
nni.report_final_result(acc.numpy())
|
the-stack_106_14616
|
from torch.utils.data import Dataset
import torch
import random
import numpy as np
from collections import defaultdict
class LogDataset(Dataset):
def __init__(self, log_corpus, time_corpus, vocab, seq_len, encoding="utf-8", on_memory=True, predict_mode=False, mask_ratio=0.15):
"""
        :param log_corpus: log sessions/lines (sequences of log event tokens)
        :param time_corpus: time intervals aligned with log_corpus
        :param vocab: log event vocabulary including pad, unk, mask ...
        :param seq_len: max sequence length
        :param encoding: text encoding
        :param on_memory: whether the corpus is kept in memory
        :param predict_mode: if True, always use the mask token when masking
        :param mask_ratio: fraction of tokens to mask (default 0.15)
"""
self.vocab = vocab
self.seq_len = seq_len
self.on_memory = on_memory
self.encoding = encoding
self.predict_mode = predict_mode
self.log_corpus = log_corpus
self.time_corpus = time_corpus
self.corpus_lines = len(log_corpus)
self.mask_ratio = mask_ratio
def __len__(self):
return self.corpus_lines
def __getitem__(self, idx):
k, t = self.log_corpus[idx], self.time_corpus[idx]
k_masked, k_label, t_masked, t_label = self.random_item(k, t)
# [CLS] tag = SOS tag, [SEP] tag = EOS tag
k = [self.vocab.sos_index] + k_masked
k_label = [self.vocab.pad_index] + k_label
# k_label = [self.vocab.sos_index] + k_label
t = [0] + t_masked
t_label = [self.vocab.pad_index] + t_label
return k, k_label, t, t_label
# k, k_label, t, t_label = k[:100], k_label[:100], t[:100], t_label[:100]
# padding = [self.vocab.pad_index for _ in range(100 - len(k))]
# k.extend(padding), k_label.extend(padding), t.extend(padding), t_label.extend(
# padding)
# output = {"bert_input": torch.tensor(k, dtype=torch.long),
# "bert_label": torch.tensor(k_label, dtype=torch.long),
# "time_input": torch.tensor(t, dtype=torch.float),
# "time_label": torch.tensor(t_label, dtype=torch.float)
# }
# return output
def random_item(self, k, t):
tokens = list(k)
output_label = []
time_intervals = list(t)
time_label = []
for i, token in enumerate(tokens):
time_int = time_intervals[i]
prob = random.random()
            # replace a fraction (mask_ratio, default 15%) of tokens in the sequence with the mask token
if prob < self.mask_ratio:
# raise AttributeError("no mask in visualization")
if self.predict_mode:
tokens[i] = self.vocab.mask_index
output_label.append(self.vocab.stoi.get(token, self.vocab.unk_index))
time_label.append(time_int)
time_intervals[i] = 0
continue
prob /= self.mask_ratio
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = self.vocab.mask_index
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.randrange(len(self.vocab))
# 10% randomly change token to current token
else:
tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
output_label.append(self.vocab.stoi.get(token, self.vocab.unk_index))
time_intervals[i] = 0 # time mask value = 0
time_label.append(time_int)
else:
tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
output_label.append(0)
time_label.append(0)
return tokens, output_label, time_intervals, time_label
def collate_fn(self, batch, percentile=100, dynamical_pad=True):
lens = [len(seq[0]) for seq in batch]
# find the max len in each batch
if dynamical_pad:
# dynamical padding
seq_len = int(np.percentile(lens, percentile))
if self.seq_len is not None:
seq_len = min(seq_len, self.seq_len)
else:
# fixed length padding
seq_len = self.seq_len
#print("collate_fn seq_len", seq_len)
output = defaultdict(list)
for seq in batch:
bert_input = seq[0][:seq_len]
bert_label = seq[1][:seq_len]
time_input = seq[2][:seq_len]
time_label = seq[3][:seq_len]
padding = [self.vocab.pad_index for _ in range(seq_len - len(bert_input))]
bert_input.extend(padding), bert_label.extend(padding), time_input.extend(padding), time_label.extend(
padding)
time_input = np.array(time_input)[:, np.newaxis]
output["bert_input"].append(bert_input)
output["bert_label"].append(bert_label)
output["time_input"].append(time_input)
output["time_label"].append(time_label)
output["bert_input"] = torch.tensor(output["bert_input"], dtype=torch.long)
output["bert_label"] = torch.tensor(output["bert_label"], dtype=torch.long)
output["time_input"] = torch.tensor(output["time_input"], dtype=torch.float)
output["time_label"] = torch.tensor(output["time_label"], dtype=torch.float)
return output
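# --- Illustrative sketch (not part of the original module) ----------------------
# A hedged example of wiring LogDataset into a torch DataLoader; the `vocab`
# object and the tokenized corpora are assumed to exist and to expose the same
# attributes used above (sos_index, pad_index, mask_index, unk_index, stoi).
def _example_build_loader(log_corpus, time_corpus, vocab):
    from torch.utils.data import DataLoader
    dataset = LogDataset(log_corpus, time_corpus, vocab, seq_len=512, mask_ratio=0.15)
    return DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.collate_fn)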
|
the-stack_106_14617
|
import sys
import pytest # type: ignore
from helpers import mock_legacy_venv, run_pipx_cli
def test_reinstall(pipx_temp_env, capsys):
assert not run_pipx_cli(["install", "pycowsay"])
assert not run_pipx_cli(["reinstall", "--python", sys.executable, "pycowsay"])
def test_reinstall_nonexistent(pipx_temp_env, capsys):
assert run_pipx_cli(["reinstall", "--python", sys.executable, "nonexistent"])
assert "Nothing to reinstall for nonexistent" in capsys.readouterr().out
@pytest.mark.parametrize("metadata_version", [None, "0.1"])
def test_reinstall_legacy_venv(pipx_temp_env, capsys, metadata_version):
assert not run_pipx_cli(["install", "pycowsay"])
mock_legacy_venv("pycowsay", metadata_version=metadata_version)
assert not run_pipx_cli(["reinstall", "--python", sys.executable, "pycowsay"])
def test_reinstall_suffix(pipx_temp_env, capsys):
suffix = "_x"
assert not run_pipx_cli(["install", "pycowsay", f"--suffix={suffix}"])
assert not run_pipx_cli(
["reinstall", "--python", sys.executable, f"pycowsay{suffix}"]
)
@pytest.mark.parametrize("metadata_version", ["0.1"])
def test_reinstall_suffix_legacy_venv(pipx_temp_env, capsys, metadata_version):
suffix = "_x"
assert not run_pipx_cli(["install", "pycowsay", f"--suffix={suffix}"])
mock_legacy_venv(f"pycowsay{suffix}", metadata_version=metadata_version)
assert not run_pipx_cli(
["reinstall", "--python", sys.executable, f"pycowsay{suffix}"]
)
|
the-stack_106_14618
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OSDisk(Model):
"""Settings for the operating system disk of the virtual machine.
:param caching: The type of caching to enable for the OS disk. Values are:
none - The caching mode for the disk is not enabled.
readOnly - The caching mode for the disk is read only.
readWrite - The caching mode for the disk is read and write.
The default value for caching is none. For information about the caching
options see:
https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.
Possible values include: 'none', 'readOnly', 'readWrite'
:type caching: str or ~azure.batch.models.CachingType
"""
_attribute_map = {
'caching': {'key': 'caching', 'type': 'CachingType'},
}
def __init__(self, caching=None):
self.caching = caching
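# Illustrative sketch (an assumption, not part of the generated client code): the
# documented caching values are 'none', 'readOnly' and 'readWrite'.
def _example_os_disk():
    return OSDisk(caching='readWrite')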
|
the-stack_106_14620
|
# -*- coding: utf-8 -*-
# Copyright CERN since 2013
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
ACCOUNT_LENGTH = 25
ACCOUNT = {"description": "Account name",
"type": "string",
"pattern": "^[a-z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
"type": "array",
"items": ACCOUNT,
"minItems": 0,
"maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
"type": "string",
"enum": ["USER", "GROUP", "SERVICE"]}
ACTIVITY = {"description": "Activity name",
"type": "string",
"enum": ["Data Brokering", "Data Consolidation", "Data rebalancing",
"Debug", "Express", "Functional Test", "Functional Test XrootD",
"Functional Test WebDAV", "Group Subscriptions",
"Production Input", "Production Output",
"Analysis Input", "Analysis Output", "Staging",
"T0 Export", "T0 Tape", "Upload/Download (Job)",
"Upload/Download (User)", "User Subscriptions",
"Globus Online Test", "Data Challenge"]}
SCOPE_LENGTH = 25
SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
R_SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "\\w"}
NAME_LENGTH = 250
NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": "^[A-Za-z0-9][A-Za-z0-9\\.\\-\\_]{1,%s}$" % NAME_LENGTH}
R_NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": "\\w"}
LOCKED = {"description": "Rule locked status",
"type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
"type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
"type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
"type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
"type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
"type": "boolean"}
RSE = {"description": "RSE name",
"type": "string",
"pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
"type": "string",
"pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
"type": "string",
"pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
REPLICA_STATE = {"description": "Replica state",
"type": "string",
"enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
DATE = {"description": "Date",
"type": "string",
"pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
"type": "string",
"enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
"type": ["string", "null"],
"enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
"type": ["string", "null"],
"enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
"type": ["string", "null"],
"maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
"type": ["string", "null"],
"maxLength": 3999}
BYTES = {"description": "Size in bytes",
"type": "integer"}
ADLER32 = {"description": "adler32",
"type": "string",
"pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
"type": ["string", "null"]}
MD5 = {"description": "md5",
"type": "string",
"pattern": "^[a-fA-F\\d]{32}$"}
UUID = {"description": "Universally Unique Identifier (UUID)",
"type": "string",
"pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
"type": "object",
"properties": {"guid": UUID},
"additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
"type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
"type": ["boolean", "null"]}
RULE = {"description": "Replication rule",
"type": "object",
"properties": {"dids": {"type": "array"},
"account": ACCOUNT,
"copies": COPIES,
"rse_expression": RSE_EXPRESSION,
"grouping": GROUPING,
"weight": WEIGHT,
"lifetime": RULE_LIFETIME,
"locked": LOCKED,
"subscription_id": SUBSCRIPTION_ID,
"source_replica_expression": SOURCE_REPLICA_EXPRESSION,
"activity": ACTIVITY,
"notify": NOTIFY,
"purge_replicas": PURGE_REPLICAS,
"ignore_availability": IGNORE_AVAILABILITY,
"comment": COMMENT,
"ask_approval": ASK_APPROVAL,
"asynchronous": ASYNCHRONOUS,
"delay_injection": DELAY_INJECTION,
"priority": PRIORITY,
'split_container': SPLIT_CONTAINER,
'meta': METADATA},
"required": ["dids", "copies", "rse_expression"],
"additionalProperties": False}
RULES = {"description": "Array of replication rules",
"type": "array",
"items": RULE,
"minItems": 1,
"maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
"type": "string",
"enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": COLLECTION_TYPE,
"meta": META,
"rules": RULES},
"required": ["scope", "name", "type"],
"additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
"type": "array",
"items": COLLECTION,
"minItems": 1,
"maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
"type": "array",
"additionalProperties": True}
R_DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": R_SCOPE,
"name": R_NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": DID,
"minItems": 1,
"maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": R_DID,
"minItems": 1,
"maxItems": 1000}
ATTACHMENT = {"description": "Attachement",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"rse": {"description": "RSE name",
"type": ["string", "null"],
"pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
"dids": DIDS},
"required": ["dids"],
"additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
"type": "array",
"items": ATTACHMENT,
"minItems": 1,
"maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
"properties": {"datatype": {"type": "array"},
"prod_step": {"type": "array"},
"stream_name": {"type": "array"},
"project": {"type": "array"},
"scope": {"type": "array"},
"pattern": {"type": "string"},
"excluded_pattern": {"type": "string"},
"group": {"type": "string"},
"provenance": {"type": "string"},
"account": ACCOUNTS,
"grouping": {"type": "string"},
"split_rule": {"type": "boolean"}}}
ADD_REPLICA_FILE = {"description": "add replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"bytes": BYTES,
"adler32": ADLER32},
"required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
"type": "array",
"items": ADD_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
"type": "object",
"properties": {"files": ADD_REPLICA_FILES,
"rse": RSE,
"lifetime": LIFETIME,
"operation": {"enum": ["add_replicas"]}},
"required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME},
"required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
"type": "array",
"items": DELETE_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
"type": "object",
"properties": {"files": DELETE_REPLICA_FILES,
"rse": RSE,
"operation": {"enum": ["delete_replicas"]}},
"required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
"properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
"type": "string",
"pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
DISTANCE = {"description": "RSE distance",
"type": "object",
"properties": {
"src_rse_id": {"type": "string"},
"dest_rse_id": {"type": "string"},
"ranking": {"type": "integer"}
},
"required": ["src_rse_id", "dest_rse_id", "ranking"],
"additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
"type": "object",
"properties": {
"rses": {
"type": "object"
},
"distances": {
"type": "object"
}
}}
VO = {"description": "VO tag",
"type": "string",
"pattern": "^([a-zA-Z_\\-.0-9]{3})?$"}
SCHEMAS = {'account': ACCOUNT,
'account_type': ACCOUNT_TYPE,
'activity': ACTIVITY,
'name': NAME,
'r_name': R_NAME,
'rse': RSE,
'rse_attribute': RSE_ATTRIBUTE,
'scope': SCOPE,
'r_scope': R_SCOPE,
'did': DID,
'did_filters': DID_FILTERS,
'r_did': R_DID,
'dids': DIDS,
'rule': RULE,
'r_dids': R_DIDS,
'collection': COLLECTION,
'collections': COLLECTIONS,
'attachment': ATTACHMENT,
'attachments': ATTACHMENTS,
'subscription_filter': SUBSCRIPTION_FILTER,
'cache_add_replicas': CACHE_ADD_REPLICAS,
'cache_delete_replicas': CACHE_DELETE_REPLICAS,
'account_attribute': ACCOUNT_ATTRIBUTE,
'import': IMPORT,
'vo': VO}
def validate_schema(name, obj):
"""
Validate object against json schema
:param name: The json schema name.
:param obj: The object to validate.
"""
try:
if obj:
validate(obj, SCHEMAS.get(name, {}))
except ValidationError as error: # NOQA, pylint: disable=W0612
raise InvalidObject("Problem validating %(name)s : %(error)s" % locals())
|
the-stack_106_14621
|
"""Prerender common images used in the application."""
from memegen.settings import ProductionConfig
from memegen.factory import create_app
from memegen.domain import Text
def run():
app = create_app(ProductionConfig)
with app.app_context():
options = []
for template in app.template_service.all():
for text in [Text("_"), template.sample_text]:
for watermark in ["", "memegen.link"]:
options.append((template, text, watermark))
print(f"Generating {len(options)} sample images...")
for template, text, watermark in options:
app.image_service.create(template, text, watermark=watermark)
if __name__ == '__main__':
run()
|
the-stack_106_14622
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from copy import deepcopy
import mock
import pytest
from .common import FIXTURES_PATH
from .utils import mocked_perform_request
def test_flatten_json(check):
check = check({})
with open(os.path.join(FIXTURES_PATH, 'nginx_plus_in.json')) as f:
parsed = check.parse_json(f.read())
parsed.sort()
with open(os.path.join(FIXTURES_PATH, 'nginx_plus_out.python')) as f:
expected = eval(f.read())
# Check that the parsed test data is the same as the expected output
assert parsed == expected
def test_flatten_json_timestamp(check):
check = check({})
assert (
check.parse_json(
"""
{"timestamp": "2018-10-23T12:12:23.123212Z"}
"""
)
== [('nginx.timestamp', 1540296743, [], 'gauge')]
)
def test_plus_api(check, instance, aggregator):
instance = deepcopy(instance)
instance['use_plus_api'] = True
check = check(instance)
check._perform_request = mock.MagicMock(side_effect=mocked_perform_request)
check.check(instance)
total = 0
for m in aggregator.metric_names:
total += len(aggregator.metrics(m))
assert total == 1180
def test_nest_payload(check):
check = check({})
keys = ["foo", "bar"]
payload = {"key1": "val1", "key2": "val2"}
result = check._nest_payload(keys, payload)
expected = {"foo": {"bar": payload}}
assert result == expected
@pytest.mark.parametrize(
'test_case, extra_config, expected_http_kwargs',
[
(
"legacy auth config",
{'user': 'legacy_foo', 'password': 'legacy_bar'},
{'auth': ('legacy_foo', 'legacy_bar')},
),
("new auth config", {'username': 'new_foo', 'password': 'new_bar'}, {'auth': ('new_foo', 'new_bar')}),
("legacy ssl config True", {'ssl_validation': True}, {'verify': True}),
("legacy ssl config False", {'ssl_validation': False}, {'verify': False}),
],
)
def test_config(check, instance, test_case, extra_config, expected_http_kwargs):
instance = deepcopy(instance)
instance.update(extra_config)
c = check(instance)
with mock.patch('datadog_checks.base.utils.http.requests') as r:
r.get.return_value = mock.MagicMock(status_code=200, content='{}')
c.check(instance)
http_wargs = dict(
auth=mock.ANY, cert=mock.ANY, headers=mock.ANY, proxies=mock.ANY, timeout=mock.ANY, verify=mock.ANY
)
http_wargs.update(expected_http_kwargs)
r.get.assert_called_with('http://localhost:8080/nginx_status', **http_wargs)
|
the-stack_106_14624
|
from deap import base
from deap import creator
from deap import tools
import random
import numpy
import matplotlib.pyplot as plt
import seaborn as sns
import hyperparameter_tuning_genetic_test
import elitism
# boundaries for ADABOOST parameters:
# "n_estimators": 1..100
# "learning_rate": 0.01..100
# "algorithm": 0, 1
# [n_estimators, learning_rate, algorithm]:
BOUNDS_LOW = [ 1, 0.01, 0]
BOUNDS_HIGH = [100, 1.00, 1]
NUM_OF_PARAMS = len(BOUNDS_HIGH)
# Genetic Algorithm constants:
POPULATION_SIZE = 20
P_CROSSOVER = 0.9 # probability for crossover
P_MUTATION = 0.5 # probability for mutating an individual
MAX_GENERATIONS = 5
HALL_OF_FAME_SIZE = 5
CROWDING_FACTOR = 20.0 # crowding factor for crossover and mutation
# set the random seed:
RANDOM_SEED = 42
random.seed(RANDOM_SEED)
# create the classifier accuracy test class:
test = hyperparameter_tuning_genetic_test.HyperparameterTuningGenetic(None)
toolbox = base.Toolbox()
# define a single objective, maximizing fitness strategy:
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
# create the Individual class based on list:
creator.create("Individual", list, fitness=creator.FitnessMax)
# define the hyperparameter attributes individually:
for i in range(NUM_OF_PARAMS):
# "hyperparameter_0", "hyperparameter_1", ...
toolbox.register("hyperparameter_" + str(i),
random.uniform,
BOUNDS_LOW[i],
BOUNDS_HIGH[i])
# create a tuple containing an attribute generator for each param searched:
hyperparameters = ()
for i in range(NUM_OF_PARAMS):
hyperparameters = hyperparameters + \
(toolbox.__getattribute__("hyperparameter_" + str(i)),)
# create the individual operator to fill up an Individual instance:
toolbox.register("individualCreator",
tools.initCycle,
creator.Individual,
hyperparameters,
n=1)
# create the population operator to generate a list of individuals:
toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
# fitness calculation
def classificationAccuracy(individual):
return test.getAccuracy(individual),
toolbox.register("evaluate", classificationAccuracy)
# genetic operators:
toolbox.register("select", tools.selTournament, tournsize=2)
toolbox.register("mate",
tools.cxSimulatedBinaryBounded,
low=BOUNDS_LOW,
up=BOUNDS_HIGH,
eta=CROWDING_FACTOR)
toolbox.register("mutate",
tools.mutPolynomialBounded,
low=BOUNDS_LOW,
up=BOUNDS_HIGH,
eta=CROWDING_FACTOR,
indpb=1.0 / NUM_OF_PARAMS)
# Genetic Algorithm flow:
def main():
# create initial population (generation 0):
population = toolbox.populationCreator(n=POPULATION_SIZE)
# prepare the statistics object:
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("max", numpy.max)
stats.register("avg", numpy.mean)
# define the hall-of-fame object:
hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
# perform the Genetic Algorithm flow with hof feature added:
population, logbook = elitism.eaSimpleWithElitism(population,
toolbox,
cxpb=P_CROSSOVER,
mutpb=P_MUTATION,
ngen=MAX_GENERATIONS,
stats=stats,
halloffame=hof,
verbose=True)
# print best solution found:
print("- Best solution is: ")
print("params = ", test.formatParams(hof.items[0]))
print("Accuracy = %1.5f" % hof.items[0].fitness.values[0])
# extract statistics:
maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
# plot statistics:
sns.set_style("whitegrid")
plt.plot(maxFitnessValues, color='red')
plt.plot(meanFitnessValues, color='green')
plt.xlabel('Generation')
plt.ylabel('Max / Average Fitness')
plt.title('Max and Average fitness over Generations')
plt.savefig("img_hyperparameter_01.png")
if __name__ == "__main__":
main()
|
the-stack_106_14625
|
from convoys import autograd_scipy_monkeypatch # NOQA
import autograd
from autograd_gamma import gammainc
from deprecated.sphinx import deprecated
import emcee
import numpy
from scipy.special import gammaincinv
from autograd.scipy.special import expit, gammaln
from autograd.numpy import isnan, exp, dot, log, sum
import progressbar
import scipy.optimize
import warnings
__all__ = ['Exponential',
'Weibull',
'Gamma',
'GeneralizedGamma']
def generalized_gamma_loss(x, X, B, T, W, fix_k, fix_p,
hierarchical, flavor, callback=None):
k = exp(x[0]) if fix_k is None else fix_k
p = exp(x[1]) if fix_p is None else fix_p
log_sigma_alpha = x[2]
log_sigma_beta = x[3]
a = x[4]
b = x[5]
n_features = int((len(x)-6)/2)
alpha = x[6:6+n_features]
beta = x[6+n_features:6+2*n_features]
lambd = exp(dot(X, alpha)+a)
# PDF: p*lambda^(k*p) / gamma(k) * t^(k*p-1) * exp(-(x*lambda)^p)
log_pdf = log(p) + (k*p) * log(lambd) - gammaln(k) \
+ (k*p-1) * log(T) - (T*lambd)**p
cdf = gammainc(k, (T*lambd)**p)
if flavor == 'logistic': # Log-likelihood with sigmoid
c = expit(dot(X, beta)+b)
LL_observed = log(c) + log_pdf
LL_censored = log((1 - c) + c * (1 - cdf))
elif flavor == 'linear': # L2 loss, linear
c = dot(X, beta)+b
LL_observed = -(1 - c)**2 + log_pdf
LL_censored = -(c*cdf)**2
LL_data = sum(
W * B * LL_observed +
W * (1 - B) * LL_censored, 0)
if hierarchical:
# Hierarchical model with sigmas ~ invgamma(1, 1)
LL_prior_a = -4*log_sigma_alpha - 1/exp(log_sigma_alpha)**2 \
- dot(alpha, alpha) / (2*exp(log_sigma_alpha)**2) \
- n_features*log_sigma_alpha
LL_prior_b = -4*log_sigma_beta - 1/exp(log_sigma_beta)**2 \
- dot(beta, beta) / (2*exp(log_sigma_beta)**2) \
- n_features*log_sigma_beta
LL = LL_prior_a + LL_prior_b + LL_data
else:
LL = LL_data
if isnan(LL):
return -numpy.inf
if callback is not None:
callback(LL)
return LL
class RegressionModel(object):
pass
class GeneralizedGamma(RegressionModel):
''' Generalization of Gamma, Weibull, and Exponential
:param mcmc: boolean, defaults to False. Whether to use MCMC to
sample from the posterior so that a confidence interval can be
estimated later (see :meth:`predict`).
:param hierarchical: boolean denoting whether we have a (Normal) prior
on the alpha and beta parameters to regularize. The variance of
the normal distribution is in itself assumed to be an inverse
gamma distribution (1, 1).
    :param flavor: defaults to logistic. If set to 'linear', then a
linear model is fit, where the beta params will be completely
additive. This creates a much more interpretable model, with some
minor loss of accuracy.
:param ci: boolean, deprecated alias for `mcmc`.
This mostly follows the `Wikipedia article
<https://en.wikipedia.org/wiki/Generalized_gamma_distribution>`_, although
our notation is slightly different. Also see `this paper
<http://data.princeton.edu/pop509/ParametricSurvival.pdf>`_ for an overview.
**Shape of the probability function**
The cumulative density function is:
:math:`F(t) = P(k, (t\\lambda)^p)`
where :math:`P(a, x) = \\gamma(a, x) / \\Gamma(a)` is the lower regularized
incomplete gamma function.
:math:`\\gamma(a, x)` is the incomplete gamma function and :math:`\\Gamma(a)`
is the standard gamma function.
The probability density function is:
:math:`f(t) = p\\lambda^{kp} t^{kp-1} \\exp(-(t\\lambda)^p) / \\Gamma(k)`
**Modeling conversion rate**
Since our goal is to model the conversion rate, we assume the conversion
rate converges to a final value
:math:`c = \\sigma(\\mathbf{\\beta^Tx} + b)`
where :math:`\\sigma(z) = 1/(1+e^{-z})` is the sigmoid function,
:math:`\\mathbf{\\beta}` is an unknown vector we are solving for (with
    corresponding intercept :math:`b`), and :math:`\\mathbf{x}` is the
feature vector (inputs).
We also assume that the rate parameter :math:`\\lambda` is determined by
:math:`\\lambda = exp(\\mathbf{\\alpha^Tx} + a)`
where :math:`\\mathrm{\\alpha}` is another unknown vector we are
trying to solve for (with corresponding intercept :math:`a`).
We also assume that the :math:`\\mathbf{\\alpha}, \\mathbf{\\beta}`
vectors have a normal distribution
:math:`\\alpha_i \\sim \\mathcal{N}(0, \\sigma_{\\alpha})`,
:math:`\\beta_i \\sim \\mathcal{N}(0, \\sigma_{\\beta})`
where hyperparameters :math:`\\sigma_{\\alpha}^2, \\sigma_{\\beta}^2`
are drawn from an inverse gamma distribution
:math:`\\sigma_{\\alpha}^2 \\sim \\text{inv-gamma}(1, 1)`,
:math:`\\sigma_{\\beta}^2 \\sim \\text{inv-gamma}(1, 1)`
**List of parameters**
The full model fits vectors :math:`\\mathbf{\\alpha, \\beta}` and scalars
:math:`a, b, k, p, \\sigma_{\\alpha}, \\sigma_{\\beta}`.
**Likelihood and censorship**
For entries that convert, the contribution to the likelihood is simply
the probability density given by the probability distribution function
:math:`f(t)` times the final conversion rate :math:`c`.
For entries that *did not* convert, there is two options. Either the
entry will never convert, which has probability :math:`1-c`. Or,
it will convert at some later point that we have not observed yet,
with probability given by the cumulative density function
:math:`F(t)`.
**Solving the optimization problem**
    To find the MAP (maximum a posteriori) estimate, we use `scipy.optimize.minimize
    <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize>`_
    with the SLSQP method.
If `mcmc == True`, then `emcee <http://dfm.io/emcee/current/>`_ is used
to sample from the full posterior in order to generate uncertainty
estimates for all parameters.
'''
def __init__(self, mcmc=False, fix_k=None, fix_p=None, hierarchical=True,
flavor='logistic', ci=None):
self._mcmc = mcmc
self._fix_k = fix_k
self._fix_p = fix_p
self._hierarchical = hierarchical
self._flavor = flavor
if ci is not None:
            warnings.warn('The `ci` argument is deprecated in 0.2.1 in favor '
                          'of `mcmc`.', DeprecationWarning)
self._mcmc = ci
def fit(self, X, B, T, W=None):
'''Fits the model.
:param X: numpy matrix of shape :math:`k \\cdot n`
:param B: numpy vector of shape :math:`n`
:param T: numpy vector of shape :math:`n`
:param W: (optional) numpy vector of shape :math:`n`
'''
if W is None:
W = numpy.ones(len(X))
X, B, T, W = (Z if type(Z) == numpy.ndarray else numpy.array(Z)
for Z in (X, B, T, W))
keep_indexes = (T > 0) & (B >= 0) & (B <= 1) & (W >= 0)
if sum(keep_indexes) < X.shape[0]:
n_removed = X.shape[0] - sum(keep_indexes)
warnings.warn('Warning! Removed %d/%d entries from inputs where '
'T <= 0 or B not 0/1 or W < 0' % (n_removed, len(X)))
X, B, T, W = (Z[keep_indexes] for Z in (X, B, T, W))
n_features = X.shape[1]
        # scipy.optimize and emcee force the parameters to be a vector:
# (log k, log p, log sigma_alpha, log sigma_beta,
# a, b, alpha_1...alpha_k, beta_1...beta_k)
# Generalized Gamma is a bit sensitive to the starting point!
x0 = numpy.zeros(6+2*n_features)
x0[0] = +1 if self._fix_k is None else log(self._fix_k)
x0[1] = -1 if self._fix_p is None else log(self._fix_p)
args = (X, B, T, W, self._fix_k, self._fix_p,
self._hierarchical, self._flavor)
# Set up progressbar and callback
bar = progressbar.ProgressBar(widgets=[
progressbar.Variable('loss', width=15, precision=9), ' ',
progressbar.BouncingBar(), ' ',
progressbar.Counter(width=6),
' [', progressbar.Timer(), ']'])
def callback(LL, value_history=[]):
value_history.append(LL)
bar.update(len(value_history), loss=LL)
# Define objective and use automatic differentiation
f = lambda x: -generalized_gamma_loss(x, *args, callback=callback)
jac = autograd.grad(lambda x: -generalized_gamma_loss(x, *args))
# Find the maximum a posteriori of the distribution
res = scipy.optimize.minimize(f, x0, jac=jac, method='SLSQP',
options={'maxiter': 9999})
if not res.success:
raise Exception('Optimization failed with message: %s' %
res.message)
result = {'map': res.x}
# TODO: should not use fixed k/p as search parameters
if self._fix_k:
result['map'][0] = log(self._fix_k)
if self._fix_p:
result['map'][1] = log(self._fix_p)
# Make sure we're in a local minimum
gradient = jac(result['map'])
gradient_norm = numpy.dot(gradient, gradient)
if gradient_norm >= 1e-2 * len(X):
warnings.warn('Might not have found a local minimum! '
'Norm of gradient is %f' % gradient_norm)
# Let's sample from the posterior to compute uncertainties
if self._mcmc:
dim, = res.x.shape
n_walkers = 5*dim
sampler = emcee.EnsembleSampler(
nwalkers=n_walkers,
ndim=dim,
log_prob_fn=generalized_gamma_loss,
args=args,
)
mcmc_initial_noise = 1e-3
p0 = [result['map'] + mcmc_initial_noise * numpy.random.randn(dim)
for i in range(n_walkers)]
n_burnin = 100
n_steps = int(numpy.ceil(2000. / n_walkers))
n_iterations = n_burnin + n_steps
bar = progressbar.ProgressBar(max_value=n_iterations, widgets=[
progressbar.Percentage(), ' ', progressbar.Bar(),
' %d walkers [' % n_walkers,
progressbar.AdaptiveETA(), ']'])
for i, _ in enumerate(sampler.sample(p0, iterations=n_iterations)):
bar.update(i+1)
result['samples'] = sampler.chain[:, n_burnin:, :] \
.reshape((-1, dim)).T
if self._fix_k:
result['samples'][0, :] = log(self._fix_k)
if self._fix_p:
result['samples'][1, :] = log(self._fix_p)
self.params = {k: {
'k': exp(data[0]),
'p': exp(data[1]),
'a': data[4],
'b': data[5],
'alpha': data[6:6+n_features].T,
'beta': data[6+n_features:6+2*n_features].T,
} for k, data in result.items()}
def _predict(self, params, x, t):
lambd = exp(dot(x, params['alpha'].T) + params['a'])
if self._flavor == 'logistic':
c = expit(dot(x, params['beta'].T) + params['b'])
elif self._flavor == 'linear':
c = dot(x, params['beta'].T) + params['b']
M = c * gammainc(
params['k'],
(t*lambd)**params['p'])
return M
def predict_posteriori(self, x, t):
''' Returns the trace samples generated via the MCMC steps.
Requires the model to be fit with `mcmc == True`.'''
x = numpy.array(x)
t = numpy.array(t)
assert self._mcmc
params = self.params['samples']
t = numpy.expand_dims(t, -1)
return self._predict(params, x, t)
def predict_ci(self, x, t, ci=0.8):
'''Works like :meth:`predict` but produces a confidence interval.
Requires the model to be fit with `ci = True`. The return value
will contain one more dimension than for :meth:`predict`, and
the last dimension will have size 3, containing the mean, the
lower bound of the confidence interval, and the upper bound of
the confidence interval.
'''
M = self.predict_posteriori(x, t)
y = numpy.mean(M, axis=-1)
y_lo = numpy.percentile(M, (1-ci)*50, axis=-1)
y_hi = numpy.percentile(M, (1+ci)*50, axis=-1)
return numpy.stack((y, y_lo, y_hi), axis=-1)
def predict(self, x, t):
'''Returns the value of the cumulative distribution function
for a fitted model (using the maximum a posteriori estimate).
:param x: feature vector (or matrix)
:param t: time
'''
params = self.params['map']
x = numpy.array(x)
t = numpy.array(t)
return self._predict(params, x, t)
def rvs(self, x, n_curves=1, n_samples=1, T=None):
''' Samples values from this distribution
T is optional and means we already observed non-conversion until T
'''
assert self._mcmc # Need to be fit with MCMC
if T is None:
T = numpy.zeros((n_curves, n_samples))
else:
assert T.shape == (n_curves, n_samples)
        B = numpy.zeros((n_curves, n_samples), dtype=bool)
C = numpy.zeros((n_curves, n_samples))
params = self.params['samples']
for i, j in enumerate(numpy.random.randint(len(params['k']),
size=n_curves)):
k = params['k'][j]
p = params['p'][j]
lambd = exp(dot(x, params['alpha'][j]) + params['a'][j])
c = expit(dot(x, params['beta'][j]) + params['b'][j])
z = numpy.random.uniform(size=(n_samples,))
cdf_now = c * gammainc(
k,
numpy.multiply.outer(T[i], lambd)**p) # why is this outer?
adjusted_z = cdf_now + (1 - cdf_now) * z
B[i] = (adjusted_z < c)
y = adjusted_z / c
w = gammaincinv(k, y)
# x = (t * lambd)**p
C[i] = w**(1./p) / lambd
C[i][~B[i]] = 0
return B, C
@deprecated(version='0.2.0',
reason='Use :meth:`predict` or :meth:`predict_ci` instead.')
def cdf(self, x, t, ci=False):
'''Returns the predicted values.'''
if ci:
return self.predict_ci(x, t)
else:
return self.predict(x, t)
@deprecated(version='0.2.0',
reason='Use :meth:`predict_posteriori` instead.')
def cdf_posteriori(self, x, t):
'''Returns the a posterior distribution of the predicted values.'''
return self.predict_posteriori(x, t)
class Exponential(GeneralizedGamma):
''' Specialization of :class:`.GeneralizedGamma` where :math:`k=1, p=1`.
The cumulative density function is:
:math:`F(t) = 1 - \\exp(-t\\lambda)`
The probability density function is:
:math:`f(t) = \\lambda\\exp(-t\\lambda)`
The exponential distribution is the most simple distribution.
From a conversion perspective, you can interpret it as having
two competing final states where the probability of transitioning
from the initial state to converted or dead is constant.
See documentation for :class:`GeneralizedGamma`.'''
def __init__(self, *args, **kwargs):
kwargs.update(dict(fix_k=1, fix_p=1))
super(Exponential, self).__init__(*args, **kwargs)
class Weibull(GeneralizedGamma):
''' Specialization of :class:`.GeneralizedGamma` where :math:`k=1`.
The cumulative density function is:
:math:`F(t) = 1 - \\exp(-(t\\lambda)^p)`
The probability density function is:
:math:`f(t) = p\\lambda(t\\lambda)^{p-1}\\exp(-(t\\lambda)^p)`
See documentation for :class:`GeneralizedGamma`.'''
def __init__(self, *args, **kwargs):
kwargs.update(dict(fix_k=1))
super(Weibull, self).__init__(*args, **kwargs)
class Gamma(GeneralizedGamma):
''' Specialization of :class:`.GeneralizedGamma` where :math:`p=1`.
The cumulative density function is:
:math:`F(t) = P(k, t\\lambda)`
where :math:`P(a, x) = \\gamma(a, x) / \\Gamma(a)` is the lower regularized
incomplete gamma function.
The probability density function is:
:math:`f(t) = \\lambda^k t^{k-1} \\exp(-x\\lambda) / \\Gamma(k)`
See documentation for :class:`GeneralizedGamma`.'''
def __init__(self, *args, **kwargs):
kwargs.update(dict(fix_p=1))
super(Gamma, self).__init__(*args, **kwargs)
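# --- Illustrative sketch (not part of the original module) ----------------------
# A hedged, synthetic end-to-end example of the fit/predict flow described in the
# GeneralizedGamma docstring; the data below is made up purely for illustration.
def _example_weibull_fit():
    numpy.random.seed(0)
    X = numpy.ones((200, 1))                            # single constant feature
    T = numpy.random.exponential(scale=2.0, size=200)   # observation times
    B = numpy.random.rand(200) < 0.7                    # ~70% of entries converted
    model = Weibull()
    model.fit(X, B, T)
    return model.predict([1.], 5.0)                     # P(conversion within t=5)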
|
the-stack_106_14627
|
# -*- coding: utf-8 -*-
"""Core of box engine package"""
import os
import shutil
import uuid
import string
from subprocess import CalledProcessError
import vagrant
from workbox import model
from workbox.lib.helpers import get_vagrantfiles_base_folder, get_free_port, get_server_load_value
class BoxEngine(object):
"""Helper class for work with boxes"""
@staticmethod
def get_all_boxes():
"""
Get all boxes from db
Returns:
Collection of all boxes
"""
return model.Box.get_all_boxes()
@staticmethod
def get_all_user_boxes(user_id):
"""
Get all user boxes from db
Args:
user_id (int): user id in db
Returns:
Collection of all boxes
"""
return model.Box.get_all_user_boxes(user_id)
@staticmethod
def get_box_by_id(box_id):
"""
Get box with given box_id
Args:
box_id (int): box_id in db
Returns:
model.Box or None
"""
return model.Box.get_by_box_id(box_id)
@staticmethod
def is_author(user_name, box_id):
"""
        Detect whether the user is the author of the box
Args:
user_name (string): user name
box_id (int): id of box
Raises:
IndexError: no box with given box_id
Returns:
True if user is author, otherwise - False
"""
return model.Box.is_author(user_name, box_id)
@staticmethod
def update_vagrantfile(box_id, vagrantfile_data):
"""
Update Vagrantfile of box with given box_id
Args:
box_id (int): id of box
vagrantfile_data (string): text data of vagrantfile
Raises:
IndexError: no box with given box_id
EnvironmentError: vagrantfile was removed
"""
box = BoxEngine.get_box_by_id(box_id)
if box is None:
raise IndexError("Виртуальная среда #" + str(box_id) + " не найдена")
file_path = os.path.join(box.vagrantfile_path, 'Vagrantfile')
if not os.path.exists(file_path):
raise EnvironmentError("Vagrantfile был удален (#" + str(box_id) + ")")
with open(file_path, 'wb') as v_file:
v_file.write(vagrantfile_data)
try:
model.Box.update_datetime_of_modify(box_id)
except IndexError:
raise
@staticmethod
def create_box_from_vagrantfile(box_name, user_name, vagrantfile_data):
"""
        Create box from given Vagrantfile text
Args:
box_name (string): name of box
user_name (string): user name
vagrantfile_data (string): text data of vagrantfile
Returns:
Id of created box
"""
port = None
if '#FPRT#' in vagrantfile_data:
while True:
port = get_free_port()
if model.Box.is_port_free(port):
vagrantfile_data = string.replace(vagrantfile_data, '#FPRT#', str(port))
break
vagrantfile_path = BoxEngine._create_vagrantfile(vagrantfile_data)
box_id = model.Box.add_new_box(user_name, box_name, port, vagrantfile_path)
return box_id
@staticmethod
def create_box_from_parameters(box_name, user_name, vagrantfile_data):
"""
        Create box from given parameters
Args:
box_name (string): name of box
user_name (string): user name
vagrantfile_data (string): text data of vagrantfile
"""
return BoxEngine.create_box_from_vagrantfile(box_name, user_name, vagrantfile_data)
@staticmethod
def update_all_boxes_status():
"""
Update status of all boxes
Raises:
EnvironmentError: vagrant failed
"""
boxes = BoxEngine.get_all_boxes()
status = None
for box in boxes:
try:
vagrant_box = vagrant.Vagrant(box.vagrantfile_path)
status = vagrant_box.status()
except CalledProcessError:
raise EnvironmentError(
"Не удалось выполнить 'vagrant status' (#" + str(box.box_id) + ")")
except OSError:
raise EnvironmentError(
"Не удалось выполнить 'vagrant status' (проблема с доступом к Vagrantfile) (#"
+ str(box.box_id) + ")")
if status is not None and len(status) > 0 and status[0].state is not None:
if box.status == 'started' and status[0].state != 'running':
model.Box.change_status(box.box_id, 'stopped')
if box.status != 'started' and status[0].state == 'running':
model.Box.change_status(box.box_id, 'started')
@staticmethod
def update_all_user_boxes_status(user_id):
"""
Update status of user boxes
Args:
user_id (int): user id in db
Raises:
EnvironmentError: vagrant failed
"""
boxes = BoxEngine.get_all_user_boxes(user_id)
for box in boxes:
try:
vagrant_box = vagrant.Vagrant(box.vagrantfile_path)
status = vagrant_box.status()
except CalledProcessError:
raise EnvironmentError(
"Не удалось выполнить 'vagrant status' (#" + str(box.box_id) + ")")
except OSError:
raise EnvironmentError(
"Не удалось выполнить 'vagrant status' (проблема с доступом к Vagrantfile) (#"
+ str(box.box_id) + ")")
if status is not None and len(status) > 0 and status[0].state is not None:
if box.status == 'started' and status[0].state != 'running':
model.Box.change_status(box.box_id, 'stopped')
if box.status != 'started' and status[0].state == 'running':
model.Box.change_status(box.box_id, 'started')
@staticmethod
def start_box(box_id):
"""
Start box
Args:
box_id (int): id of box
Raises:
EnvironmentError: vagrant failed
IndexError: no box with given box_id
"""
box = BoxEngine.get_box_by_id(box_id)
if box is None:
raise IndexError("Виртуальная среда #" + str(box_id) + " не найдена")
try:
vagrant_box = vagrant.Vagrant(box.vagrantfile_path)
vagrant_box.up()
except CalledProcessError:
raise EnvironmentError("Не удалось выполнить 'vagrant up' (#" + str(box_id) + ")")
except OSError:
raise EnvironmentError(
"Не удалось выполнить 'vagrant up' (проблема с доступом к Vagrantfile) (#"
+ str(box_id) + ")")
model.Box.change_status(box_id, 'started')
@staticmethod
def stop_box(box_id):
"""
Stop box
Args:
box_id (int): id of box
Raises:
IndexError: no box with given box_id
EnvironmentError: vagrant failed
"""
box = BoxEngine.get_box_by_id(box_id)
if box is None:
raise IndexError("Виртуальная среда #" + str(box_id) + " не найдена")
try:
vagrant_box = vagrant.Vagrant(box.vagrantfile_path)
vagrant_box.destroy()
except CalledProcessError:
raise EnvironmentError("Не удалось выполнить 'vagrant destroy' (#" + str(box_id) + ")")
except OSError:
raise EnvironmentError(
"Не удалось выполнить 'vagrant destroy' (проблема с доступом к Vagrantfile) (#"
+ str(box_id) + ")")
model.Box.change_status(box_id, 'stopped')
@staticmethod
def copy_box(user_name, copied_box_id):
"""
Copy box from box with given box_id
Args:
user_name (string): user name
copied_box_id (int): id of copied box
Returns:
Id of created box
Raises:
IndexError: no box with given box_id
EnvironmentError: vagrantfile was removed
"""
copied_box = BoxEngine.get_box_by_id(copied_box_id)
if copied_box is None:
raise IndexError("Виртуальная среда #" + str(copied_box_id) + " не найдена")
port = None
        if copied_box.port is not None:
while True:
port = get_free_port()
if model.Box.is_port_free(port):
break
file_path = os.path.join(copied_box.vagrantfile_path, 'Vagrantfile')
if not os.path.exists(file_path):
raise EnvironmentError("Vagrantfile был удален (#" + str(copied_box_id) + ")")
with open(file_path, 'r') as v_file:
vagrantfile_path = BoxEngine._create_vagrantfile(v_file.read())
box_id = model.Box.add_new_box(user_name, copied_box.name, port, vagrantfile_path)
return box_id
@staticmethod
def delete_box(box_id):
"""
Delete box with given box_id
Args:
box_id (int): id of box
Raises:
IndexError: no box with given box_id
"""
box = BoxEngine.get_box_by_id(box_id)
if box is None:
raise IndexError("Виртуальная среда #" + str(box_id) + " не найдена")
if box.status == 'started':
BoxEngine.stop_box(box_id)
vagrantfile_path = box.vagrantfile_path
model.Box.delete_box(box_id)
BoxEngine._delete_vagrantfile(vagrantfile_path)
@staticmethod
def get_vagrantfile_data(box_id):
"""
Return vagrantfile data of box
Args:
box_id (int): id of box
Returns:
Vagrantfile data
Raises:
IndexError: no box with given box_id
EnvironmentError: vagrantfile was removed
"""
box = BoxEngine.get_box_by_id(box_id)
if box is None:
raise IndexError("Виртуальная среда #" + str(box_id) + " не найдена")
file_path = os.path.join(box.vagrantfile_path, 'Vagrantfile')
if not os.path.exists(file_path):
raise EnvironmentError("Vagrantfile был удален (#" + str(box_id) + ")")
with open(file_path, 'r') as v_file:
return v_file.read()
@staticmethod
def get_server_load_value():
"""
Get server load value
Returns:
Server load value (int between 0 and 100)
"""
return get_server_load_value()
@staticmethod
def get_number_of_user_boxes(user_id):
"""
        Get the number of the user's boxes from the db, grouped by status
Args:
user_id (int): user id in db
Returns:
            Dict with counts of the user's boxes by status ('created', 'started', 'stopped')
"""
my_boxes = {}
my_boxes['created'] = model.Box.get_number_of_user_boxes(user_id, 'created')
my_boxes['started'] = model.Box.get_number_of_user_boxes(user_id, 'started')
my_boxes['stopped'] = model.Box.get_number_of_user_boxes(user_id, 'stopped')
return my_boxes
@staticmethod
def get_number_of_all_boxes():
"""
Get number of all boxes from db
Returns:
            Dict with counts of all boxes by status ('created', 'started', 'stopped')
"""
all_boxes = {}
all_boxes['created'] = model.Box.get_number_of_all_boxes('created')
all_boxes['started'] = model.Box.get_number_of_all_boxes('started')
all_boxes['stopped'] = model.Box.get_number_of_all_boxes('stopped')
return all_boxes
@staticmethod
def _create_vagrantfile(vagrantfile_data):
"""
Create Vagrantfile and return its path
Args:
vagrantfile_data (string): text data of vagrantfile
Returns:
Path to created vagrantfile
"""
directory = str(get_vagrantfiles_base_folder()) + str(uuid.uuid4())
if not os.path.exists(directory):
os.makedirs(directory)
file_path = os.path.join(directory, 'Vagrantfile')
temp_file_path = file_path + '~'
        # vagrantfile_data is a str (see docstring), so write in text mode
        with open(temp_file_path, 'w') as output_file:
            output_file.write(vagrantfile_data)
os.rename(temp_file_path, file_path)
return directory
@staticmethod
def _delete_vagrantfile(vagrantfile_dir):
"""
Delete Vagrantfile from disk
Args:
vagrantfile_dir (string): path to vagrantfile
"""
if os.path.exists(vagrantfile_dir):
shutil.rmtree(vagrantfile_dir)
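# --- Hedged usage sketch (added for illustration; not part of the original
# module). It assumes the model layer and Vagrant are configured and that a
# box with id 1 already exists in the db; the user name 'alice' and the id are
# made up, and the function is never called automatically.
def _example_box_lifecycle():
    new_box_id = BoxEngine.copy_box('alice', 1)   # clone box #1 for user 'alice'
    BoxEngine.start_box(new_box_id)               # runs 'vagrant up'
    BoxEngine.stop_box(new_box_id)                # runs 'vagrant destroy'
    BoxEngine.delete_box(new_box_id)              # removes the db record and the Vagrantfile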
|
the-stack_106_14630
|
# Ultroid - UserBot
# Copyright (C) 2021-2022 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# Please read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
#
# Ported by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
from os import remove
from random import choice
from telethon.tl.functions.users import GetFullUserRequest
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_delete, edit_or_reply, ice_cmd
from userbot.utils.misc import create_quotly
from .carbon import all_col
@ice_cmd(pattern="q( (.*)|$)")
async def quotly(event):
match = event.pattern_match.group(1).strip()
if not event.is_reply:
        return await edit_delete(event, "**Please reply to a message**")
msg = await edit_or_reply(event, "`Processing...`")
reply = await event.get_reply_message()
replied_to, reply_ = None, None
if match:
spli_ = match.split(maxsplit=1)
if (spli_[0] in ["r", "reply"]) or (
spli_[0].isdigit() and int(spli_[0]) in range(1, 21)
):
if spli_[0].isdigit():
if not event.client._bot:
reply_ = await event.client.get_messages(
event.chat_id,
min_id=event.reply_to_msg_id - 1,
reverse=True,
limit=int(spli_[0]),
)
else:
id_ = reply.id
reply_ = []
for msg_ in range(id_, id_ + int(spli_[0])):
msh = await event.client.get_messages(event.chat_id, ids=msg_)
if msh:
reply_.append(msh)
else:
replied_to = await reply.get_reply_message()
try:
match = spli_[1]
except IndexError:
match = None
user = None
if not reply_:
reply_ = reply
if match:
match = match.split(maxsplit=1)
if match:
if match[0].startswith("@") or match[0].isdigit():
try:
match_ = await event.client(GetFullUserRequest(match[0]))
user = await event.client.get_entity(match_)
except ValueError:
pass
match = match[1] if len(match) == 2 else None
else:
match = match[0]
if match == "random":
match = choice(all_col)
try:
file = await create_quotly(reply_, bg=match, reply=replied_to, sender=user)
except Exception as er:
return await msg.edit(f"**ERROR:** `{er}`")
message = await reply.reply("Quotly by Ice-Userbot", file=file)
remove(file)
await msg.delete()
return message
CMD_HELP.update(
{
"quotly": f"**Plugin : **`quotly`\
\n\n • **Syntax :** `{cmd}q`\
    \n • **Function : **Turn the replied message into a sticker with a random background.\
    \n\n • **Syntax :** `{cmd}q` <number>\
    \n • **Function : **Turn the given number of messages into a sticker.\
    \n\n • **Syntax :** `{cmd}q` <color>\
    \n • **Function : **Turn the message into a sticker with the given background color.\
    \n\n • **Syntax :** `{cmd}q` <username>\
    \n • **Function : **Turn the message into a sticker quoting the given Telegram username.\
"
}
)
|
the-stack_106_14631
|
#!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import time
import ntplib
import base64
import json
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
        @param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
        @throws InstrumentDriverException if there is a problem with the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
        @param sorted Return a sorted JSON dict; useful for testing, but slow,
            so don't enable it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
        Build values of a parsed structure. Just the values are built
        so that a child class can override this method, but call it with
        super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function):
"""
Encode a value using the encoding function, if it fails store the error in a queue
"""
encoded_val = None
try:
encoded_val = encoding_function(value)
except Exception as e:
log.error("Data particle error encoding. Name:%s Value:%s", name, value)
self._encoding_errors.append({name: value})
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
    This class is a common data particle for generating data particles of raw
    data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
raise SampleException("raw data not a complete port agent packet. missing %s" % param)
payload = None
length = None
type = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
type = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: type},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result
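# --- Hedged usage sketch (added for illustration; not part of the original
# module). It shows the intended subclassing pattern described above: set
# _data_particle_type and override _build_parsed_values(). The stream name and
# the 'temperature' field are made up.
class _ExampleTemperatureParticle(DataParticle):
    _data_particle_type = 'example_temperature'
    def _build_parsed_values(self):
        # raw_data is assumed to be a single float reading for this sketch
        return [self._encode_value('temperature', self.raw_data, float)]
# particle = _ExampleTemperatureParticle(21.5,
#                                        port_timestamp=ntplib.system_to_ntp_time(time.time()),
#                                        preferred_timestamp=DataParticleKey.PORT_TIMESTAMP)
# particle.generate()  # -> JSON string with pkt_format_id, stream_name, values, ...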
|
the-stack_106_14634
|
import pytest
from dsa.challenges.queue_with_stacks.queue_with_stacks import (
Node,
Stack,
PseudoQueue
)
# Test instances
def test_Node_exists():
assert Node("test")
def test_Stack_exists():
assert Stack()
def test_PseudoQueue_exists():
assert PseudoQueue()
# Queue Tests
def test_PseudoQueue_enqueue_one_item():
my_queue = PseudoQueue()
my_queue.enqueue(1)
actual = str(my_queue.storage1)
expected = "[1] -> NULL"
assert actual == expected
def test_PseudoQueue_enqueue_multiple_items():
my_queue = PseudoQueue()
my_queue.enqueue(1)
my_queue.enqueue(2)
my_queue.enqueue(3)
actual = str(my_queue.storage1)
expected = "[3] -> [2] -> [1] -> NULL"
assert actual == expected
def test_PseudoQueue_dequeue_one_item():
my_queue = PseudoQueue()
my_queue.enqueue(1)
my_queue.dequeue()
actual = str(my_queue.storage1)
expected = "NULL"
assert actual == expected
def test_PseudoQueue_dequeue_multiple_items():
my_queue = PseudoQueue()
my_queue.enqueue(1)
my_queue.enqueue(2)
my_queue.enqueue(3)
my_queue.dequeue()
my_queue.dequeue()
actual = str(my_queue.storage1)
expected = "[3] -> NULL"
assert actual == expected
|
the-stack_106_14635
|
import tkinter.ttk as ttk
from bdsolve.solver.genetic import Player
from bdsolve.ui.common import ArrayView, default_style
class LearnUI:
def __init__(self):
self.p = Player()
self.t = ttk.tkinter.Tk()
self.t.title('bdsolve - Learning GUI')
self.t.config(bd=0, relief='flat')
self.t.geometry('750x500')
self.t.grid_columnconfigure(0, weight=2)
self.t.grid_columnconfigure(1, weight=1)
self.t.grid_rowconfigure(0, weight=1)
self.s = default_style(self.t)
self.vars = [
ttk.tkinter.StringVar(self.t) for _ in range(5)]
self.ivars = [
ttk.tkinter.IntVar(self.t) for _ in range(2)]
self.f1 = ttk.Frame(
self.t, borderwidth=6, relief='ridge',
padding='0.3i', style='A.TFrame')
self.f1.grid(row=0, column=0, sticky='nswe')
self.f1.grid_columnconfigure(0, weight=1)
self.f1.grid_rowconfigure(0, weight=5)
self.f1.grid_rowconfigure(1, weight=1)
self.f2 = ttk.Frame(
self.t, borderwidth=6, relief='ridge',
padding='0.3i', style='B.TFrame')
self.f2.grid(row=0, column=1, sticky='nswe')
self.f2.grid_columnconfigure(0, weight=1)
self.f2_1 = ttk.Labelframe(self.f2, padding='0.1i', text='Controls')
self.f2_1.grid(row=0, column=0, sticky='nwse')
self.b1 = ttk.Button(
self.f2_1, text='Learn',
command=lambda: [
self.ivars[0].set(1),
self.run()
])
self.b1.pack()
self.b2 = ttk.Button(
self.f2_1, text='Stop',
command=lambda: [
self.ivars[0].set(0)
])
self.b2.pack()
ttk.Separator(self.f2_1, orient='horizontal').pack()
self.sb = ttk.Spinbox(
self.f2_1, from_=10, to=1010, increment=100, width=5)
self.sb.pack()
self.sb.set(110)
self.f2_2 = ttk.Labelframe(self.f2, padding='0.1i', text='Statistics')
self.f2_2.grid(row=1, column=0, sticky='nwse')
self.l1 = ttk.Label(self.f2_2, textvariable=self.vars[0])
self.l1.pack()
self.l2 = ttk.Label(self.f2_2, textvariable=self.vars[1])
self.l2.pack()
self.l3 = ttk.Label(self.f2_2, textvariable=self.vars[2])
self.l3.pack()
self.l4 = ttk.Label(self.f2_2, textvariable=self.vars[3])
self.l4.pack()
self.l5 = ttk.Label(self.f2_2, textvariable=self.vars[4])
self.l5.pack()
self.aw = ArrayView(self.f1, self.p.board.board, 40)
self.aw.canvas.grid(row=0, column=0, sticky='')
self.pr = ttk.Progressbar(
self.f1, orient='horizontal', mode='determinate',
maximum=self.p.g.pop_count-1, variable=self.ivars[1])
self.pr.grid(row=1, column=0, sticky='swe')
self.t.mainloop()
def run(self):
self.p.learn()
self.aw.update()
self.ivars[1].set(self.p.g.pop_num)
self.vars[0].set(f'Score: {self.p.score}')
self.vars[1].set(f'Avg score: {self.p.avg_score}')
self.vars[2].set(f'Hi score: {self.p.hi_score}')
self.vars[3].set(f'Current: {self.p.g.pop_num+1}/{self.p.g.pop_count}')
self.vars[4].set(f'Generation: {self.p.g.gen_num}')
if self.ivars[0].get():
self.t.after(int(self.sb.get()), self.run)
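# --- Hedged usage note (added for illustration; not part of the original
# module): constructing LearnUI() builds the window and immediately blocks in
# Tk's mainloop, so a launcher script would simply call it.
# if __name__ == '__main__':
#     LearnUI()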
|
the-stack_106_14636
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sortedListToBST(self, head: ListNode) -> TreeNode:
        if head is None:
            return None
        elif head.next is None:
return TreeNode(head.val)
        # Fast/slow pointer technique: slow ends up at the middle node, front is the node just before it
front, slow, fast = head, head, head
while fast.next and fast.next.next:
front = slow
slow, fast = slow.next, fast.next.next
fast, front.next = slow.next, None
root = TreeNode(slow.val)
if head != slow:
root.left = self.sortedListToBST(head)
root.right = self.sortedListToBST(fast)
return root
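# --- Hedged usage sketch (added for illustration; assumes the ListNode/TreeNode
# definitions shown in the comments above are provided by the judge harness).
# def _example():
#     nodes = [ListNode(v) for v in (-10, -3, 0, 5, 9)]
#     for a, b in zip(nodes, nodes[1:]):
#         a.next = b
#     root = Solution().sortedListToBST(nodes[0])
#     return root.val  # 0 -- the middle element of the list becomes the root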
|
the-stack_106_14637
|
import logging
import pathlib
import urllib.parse
from flask import current_app
from quetzal.app.api.exceptions import QuetzalException
logger = logging.getLogger(__name__)
def upload(filename, content, location):
""" Save a file on a local filesystem.
Implements the *upload* mechanism of the local file storage backend.
Parameters
----------
filename: str
Filename where the file will be saved. It can include a relative path.
content: file-like
Contents of the file.
location: str
URL where the file will be saved. The `filename` parameter will be
relative to this parameter.
Returns
-------
url: str
URL to the uploaded file. Its format will be ``file://absolute/path/to/file``.
path_obj: :py:class:`pathlib.Path`
Path object where the file was saved.
Raises
------
quetzal.app.api.exceptions.QuetzalException
When the location is the global data directory. This is not permitted.
"""
logger.debug('Saving local file %s at %s', filename, location)
# Verification that the upload does not change the global data directory
data_dir = pathlib.Path(current_app.config['QUETZAL_FILE_DATA_DIR']).resolve()
target_dir = pathlib.Path(urllib.parse.urlparse(location).path).resolve()
if str(target_dir).startswith(str(data_dir)):
raise QuetzalException('Cannot upload directly to global data directory')
# Rewind the file descriptor to the beginning of file in case there was a
# read operation before
content.seek(0)
# Create target directory if needed
target_path = target_dir / filename
target_path.parent.mkdir(parents=True, exist_ok=True)
filename = str(target_path.resolve())
# Save the contents
content.save(filename)
return f'file://{filename}', target_path
def set_permissions(file_obj, owner):
logger.debug('File permissions on file local storage does not do anything')
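# --- Hedged usage sketch (added for illustration; not part of the original
# module). It assumes an active Flask app context with QUETZAL_FILE_DATA_DIR
# configured and a werkzeug FileStorage-like `content` object (it must support
# .seek() and .save(), as used by upload() above).
# url, path_obj = upload('raw/readings.csv', content, 'file:///tmp/quetzal-workspace')
# # url -> 'file:///tmp/quetzal-workspace/raw/readings.csv' (resolved path may differ)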
|
the-stack_106_14640
|
from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(
name='ebook-convert-helper',
author='Arbaaz Laskar',
author_email="[email protected]",
description="A helper cli for calibre's ebook-convert CLI which is used to convert all files in an directory into another format.",
long_description=long_description,
long_description_content_type="text/markdown",
version="0.3.2",
license='Apache License',
url="https://github.com/arzkar/calibre-ebook-convert-helper",
packages=find_packages(
include=['ebook_convert_helper', 'ebook_convert_helper.*']),
include_package_data=True,
install_requires=[
'tqdm>=4.62.3',
'colorama>=0.4.4'
],
entry_points='''
[console_scripts]
ebook-convert-helper=ebook_convert_helper.cli:main
''',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
|
the-stack_106_14642
|
import base58
def isValidPublicAddress(address: str) -> bool:
"""Check if address is a valid NEO address"""
valid = False
if len(address) == 34 and address[0] == 'A':
try:
base58.b58decode_check(address.encode())
valid = True
except ValueError:
# checksum mismatch
valid = False
return valid
|
the-stack_106_14643
|
#!/usr/bin/env python3
"""File Processing Engine.
This is a generic file processing engine that sets up a watch folder and waits
for files/directories to be copied to it. Any added directories are also watched
(if recursive is set) but any added files are processed using one of its built-in
file handler classes.
Current built in file handlers:
1) Copy files/directory
2) Import CSV file to MySQL database table.
3) Import CSV file to SQLite database table.
4) SFTP copy files/directory to an SSH server.
usage: fpe.py [-h] [-n NAME] file
Process files copied into watch folder with a custom handler.
positional arguments:
  file                  Configuration file
optional arguments:
-h, --help show this help message and exit
-n NAME, --name NAME File handler name
"""
from handlerfactory import create_event_handler
import sys
import os
import time
import configparser
import logging
import argparse
from watchdog.observers import Observer
__author__ = "Rob Tizzard"
__copyright__ = "Copyright 20018"
__credits__ = ["Rob Tizzard"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Rob Tizzard"
__email__ = "[email protected]"
__status__ = "Pre-Alpha"
def get_config_section(config, section_name):
"""Get configuration file section and return dictionary for it"""
config_section = {}
for option in config.options(section_name):
try:
config_section[option] = config.get(section_name, option)
            # Automatically set any boolean values (don't use getboolean)
if config_section[option] in ('True', 'False'):
config_section[option] = config_section[option] == 'True'
except Exception as e:
logging.error('Error on option {}.\n{}'.format(option, e))
config_section[option] = None
# Save away section name for use
config_section['name'] = section_name
return config_section
def load_config(arguments):
"""Load configuration file and set logging parameters"""
try:
# Read in config file
config = configparser.ConfigParser()
config.read(arguments.file)
# Default logging parameters
logging_params = {'level': logging.INFO,
'format': '%(asctime)s:%(message)s'}
# Read in any logging options, merge with default and
# remove logging section
if 'Logging' in config.sections():
logging_params.update(get_config_section(config, 'Logging'))
# If level passed in then convert to int.
            if not isinstance(logging_params['level'], int):
logging_params['level'] = int(logging_params['level'])
logging_params.pop('name')
config.remove_section('Logging')
logging.basicConfig(**logging_params) # Set logging options
# If handler name set then remove all others from config
# leaving the config empty if the handler doesn't exist
if arguments.name is not None:
if not config.has_section(arguments.name):
                logging.info('Error: Non-existent file handler {}.'.
format(arguments.name))
for section in config.sections():
if section != arguments.name:
config.remove_section(section)
except Exception as e:
logging.error(e)
sys.exit(1)
return config
def load_arguments():
"""Load and parse command line arguments"""
parser = argparse.ArgumentParser(description='Process files copied into watch folder with a custom handler.')
parser.add_argument('file', help='Configration file')
parser.add_argument('-n', '--name', help="File handler name")
arguments = parser.parse_args()
if not os.path.exists(arguments.file):
        print('Error: Non-existent config file passed to FPE.')
sys.exit(1)
return arguments
def create_observer(config, handler_name):
"""Create file handler attach to an observer and start watching."""
try:
# Default values for optional fields
handler_section = {'recursive': False,
'deletesource': True}
# Merge config with default values and create handler
handler_section.update(get_config_section(config, handler_name))
file_handler = create_event_handler(handler_section)
except Exception as e:
logging.error(e)
observer = None
else:
# Create observer with file handler and start watching
if file_handler is not None:
observer = Observer()
observer.schedule(file_handler, file_handler.watch_folder,
recursive=file_handler.recursive)
observer.start()
else:
observer = None
return observer
def observe_folders(observers_list):
"""Run observers until user quits (eg.Control-C)"""
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
# Stop all observers
for observer in observers_list:
observer.stop()
finally:
# Wait for all observer threads to stop
for observer in observers_list:
observer.join()
########################
# FPE Main Entry Point #
########################
def fpe():
"""Main program entry point"""
arguments = load_arguments()
config = load_config(arguments)
logging.info('File Processing Engine Started.')
observers_list = []
# Loop through config sections creating file observers
for handler_name in config.sections():
observer = create_observer(config, handler_name)
if observer is not None:
observers_list.append(observer)
# If list not empty observer folders
if observers_list:
observe_folders(observers_list)
else:
logging.error('Error: No file handlers configured.')
logging.info('File Processing Engine Stopped.')
if __name__ == '__main__':
fpe()
|
the-stack_106_14644
|
#! /usr/bin/env python
# David Cournapeau
# Last Change: Wed Nov 05 07:00 PM 2008 J
from __future__ import division, print_function, absolute_import
import os.path
import warnings
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
TestCase, run_module_suite, assert_raises, assert_allclose, assert_equal,
assert_)
from scipy.cluster.vq import (kmeans, kmeans2, py_vq, py_vq2, vq, whiten,
ClusterError)
from scipy.cluster import _vq
# Optional:
# import modules that are located in the same directory as this file.
DATAFILE1 = os.path.join(os.path.dirname(__file__), "data.txt")
# Global data
X = np.array([[3.0, 3], [4, 3], [4, 2],
[9, 2], [5, 1], [6, 2], [9, 4],
[5, 2], [5, 4], [7, 4], [6, 5]])
CODET1 = np.array([[3.0000, 3.0000],
[6.2000, 4.0000],
[5.8000, 1.8000]])
CODET2 = np.array([[11.0/3, 8.0/3],
[6.7500, 4.2500],
[6.2500, 1.7500]])
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
class TestWhiten(TestCase):
def test_whiten(self):
desired = np.array([[5.08738849, 2.97091878],
[3.19909255, 0.69660580],
[4.51041982, 0.02640918],
[4.38567074, 0.95120889],
[2.32191480, 1.63195503]])
for tp in np.array, np.matrix:
obs = tp([[0.98744510, 0.82766775],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_allclose(whiten(obs), desired, rtol=1e-5)
def test_whiten_zero_std(self):
desired = np.array([[0., 1.0, 2.86666544],
[0., 1.0, 1.32460034],
[0., 1.0, 3.74382172]])
for tp in np.array, np.matrix:
obs = tp([[0., 1., 0.74109533],
[0., 1., 0.34243798],
[0., 1., 0.96785929]])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_allclose(whiten(obs), desired, rtol=1e-5)
assert_equal(len(w), 1)
assert_(issubclass(w[-1].category, RuntimeWarning))
def test_whiten_not_finite(self):
for tp in np.array, np.matrix:
for bad_value in np.nan, np.inf, -np.inf:
obs = tp([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_raises(ValueError, whiten, obs)
class TestVq(TestCase):
def test_py_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
label1 = py_vq(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
def test_py_vq2(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
label1 = py_vq2(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
def test_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
label1, dist = _vq.vq(tp(X), tp(initc))
assert_array_equal(label1, LABEL1)
tlabel1, tdist = vq(tp(X), tp(initc))
# def test_py_vq_1d(self):
# """Test special rank 1 vq algo, python implementation."""
# data = X[:, 0]
# initc = data[:3]
# a, b = _py_vq_1d(data, initc)
# ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
# assert_array_equal(a, ta)
# assert_array_equal(b, tb)
def test_vq_1d(self):
# Test special rank 1 vq algo, python implementation.
data = X[:, 0]
initc = data[:3]
a, b = _vq.vq(data, initc)
ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
assert_array_equal(a, ta)
assert_array_equal(b, tb)
def test__vq_sametype(self):
a = np.array([1.0, 2.0], dtype=np.float64)
b = a.astype(np.float32)
assert_raises(TypeError, _vq.vq, a, b)
def test__vq_invalid_type(self):
a = np.array([1, 2], dtype=np.int)
assert_raises(TypeError, _vq.vq, a, a)
def test_vq_large_nfeat(self):
X = np.random.rand(20, 20)
code_book = np.random.rand(3, 20)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
X = X.astype(np.float32)
code_book = code_book.astype(np.float32)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
def test_vq_large_features(self):
X = np.random.rand(10, 5) * 1000000
code_book = np.random.rand(2, 5) * 1000000
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
class TestKMean(TestCase):
def test_large_features(self):
        # Generate a data set with large values, and run kmeans on it
        # (regression test for ticket 1077).
d = 300
n = 100
m1 = np.random.randn(d)
m2 = np.random.randn(d)
x = 10000 * np.random.randn(n, d) - 20000 * m1
y = 10000 * np.random.randn(n, d) + 20000 * m2
data = np.empty((x.shape[0] + y.shape[0], d), np.double)
data[:x.shape[0]] = x
data[x.shape[0]:] = y
kmeans(data, 2)
def test_kmeans_simple(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
code1 = kmeans(tp(X), tp(initc), iter=1)[0]
assert_array_almost_equal(code1, CODET2)
def test_kmeans_lost_cluster(self):
# This will cause kmean to have a cluster with no points.
data = np.fromfile(DATAFILE1, sep=", ")
data = data.reshape((200, 2))
initk = np.array([[-1.8127404, -0.67128041],
[2.04621601, 0.07401111],
[-2.31149087,-0.05160469]])
kmeans(data, initk)
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
kmeans2(data, initk, missing='warn')
assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
def test_kmeans2_simple(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
assert_array_almost_equal(code1, CODET1)
assert_array_almost_equal(code2, CODET2)
def test_kmeans2_rank1(self):
data = np.fromfile(DATAFILE1, sep=", ")
data = data.reshape((200, 2))
data1 = data[:, 0]
initc = data1[:3]
code = initc.copy()
kmeans2(data1, code, iter=1)[0]
kmeans2(data1, code, iter=2)[0]
def test_kmeans2_rank1_2(self):
data = np.fromfile(DATAFILE1, sep=", ")
data = data.reshape((200, 2))
data1 = data[:, 0]
kmeans2(data1, 2, iter=1)
def test_kmeans2_init(self):
data = np.fromfile(DATAFILE1, sep=", ")
data = data.reshape((200, 2))
kmeans2(data, 3, minit='points')
kmeans2(data[:, :1], 3, minit='points') # special case (1-D)
# minit='random' can give warnings, filter those
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="One of the clusters is empty. Re-run")
kmeans2(data, 3, minit='random')
kmeans2(data[:, :1], 3, minit='random') # special case (1-D)
def test_kmeans2_empty(self):
# Regression test for gh-1032.
assert_raises(ValueError, kmeans2, [], 2)
def test_kmeans_0k(self):
# Regression test for gh-1073: fail when k arg is 0.
assert_raises(ValueError, kmeans, X, 0)
assert_raises(ValueError, kmeans2, X, 0)
assert_raises(ValueError, kmeans2, X, np.array([]))
if __name__ == "__main__":
run_module_suite()
|
the-stack_106_14645
|
#!/usr/bin/python
import sys,os
#Raw log file content
log = []
#line -> call chain
cc = {}
#lines that are inst events, record the line num.
insts = []
#To match the vim, we change 0-based to 1-based
def println(ln,s):
print('%-*d: %s' % (10,ln+1,s))
def print_cc(ln,c):
println(ln,'##CC: ' + '->'.join(c))
def print_inst(ln,s):
println(ln,'--INST: ' + s)
def print_match(ln,s):
println(ln,' ' + s)
#decide whether a line is an inst visit/update event.
def is_inst_line(i):
global log
    #A line is an inst line if it contains "!dbg" and the line N lines above it starts with one of these prefixes (N is the offset paired with each prefix).
pref = [
'AliasAnalysisVisitor::visit', 0,
'TaintAnalysisVisitor::visit', 0,
'updatePointsToObjects for', 0,
'TaintUtils::updateTaintInfo() for', 0,
'*********fetchPointsToObjects', 1,
'*********FieldAccess', 1,
]
if i < 0 or i >= len(log) or log[i].find('!dbg') < 0:
return False
for k in range(0,len(pref),2):
off = pref[k+1]
if i-off >= 0 and log[i-off].startswith(pref[k]):
return True
return False
def inst_analyze():
global log,insts
for i in range(len(log)):
if is_inst_line(i):
insts.append(i)
#TODO: add context lines to the identified obj lines when appropriate.
def obj_slice(k):
global log,cc,insts
    #For each tuple entry t in ctx, if the matched obj line contains t[1], then we will try to include the context
    #up to the line containing t[0] and down to the line containing t[2].
    #To be safe and conservative, we will not include context lines that are outside the current inst scope.
ctx = [
('updateFieldPointsTo() for', 'updateFieldPointsTo', 'After updates'),
('createEmbObj(): host type', 'createEmbObj', 'createEmbObj(): the embedded obj created'),
]
cc_index = sorted(list(cc))
cur_cc = 0
cur_in = -1
next_in = -1
i = 0
while i < len(log):
if log[i].find(k) >= 0:
#Print the post-inst visit context of the previous matched line if needed.
if cur_in > -1 and cur_in + 1 < len(insts) and i >= insts[cur_in + 1]:
print_inst(insts[cur_in + 1],log[insts[cur_in + 1]])
cur_in += 1
#First print the cc history to this matched line.
while cur_cc < len(cc_index) and i >= cc_index[cur_cc]:
print_cc(cc_index[cur_cc],cc[cc_index[cur_cc]])
cur_cc += 1
#Then print the nearest previous inst visit.
j = cur_in
while j + 1 < len(insts) and i >= insts[j+1]:
j += 1
if j != cur_in:
cur_in = j
print_inst(insts[j],log[insts[j]])
#INVARIANT: 'cur_in' is the nearest previous inst visit of the current matched obj line.
#Print the matched obj line w/ necessary contexts.
has_ctx = False
#Current inst scope
ui = (0 if cur_in < 0 else insts[cur_in])
di = (insts[cur_in+1] if cur_in+1 < len(insts) else len(log))
for t in ctx:
if log[i].find(t[1]) >= 0:
#Identify the start and the end of the context.
up = down = i
while t[0] and up > ui and log[up].find(t[0]) < 0:
up -= 1
while t[2] and down < di and log[down].find(t[2]) < 0:
down += 1
#print '-----------------' + 'ui:' + str(ui) + ' di:' + str(di) + ' up:' + str(up) + ' down:' + str(down) + ' i:' + str(i)
#Printing..
for m in range(up,down+1):
print_match(m,log[m])
i = down
has_ctx = True
break
if not has_ctx:
print_match(i,log[i])
i += 1
def tag_slice(k):
pass
#Analyze the call chain at each line in the log.
def cc_analyze():
global log,cc
cur_cc = []
ln = 0
for l in log:
#E.g. line format:
#[TIMING] Start func(5) snd_seq_pool_new: Thu Feb 13 13:50:05 2020
#[TIMING] End func(5) snd_seq_pool_new in: 1.122699e+01s
if l.startswith('[TIMING] Start func'):
tks = l.split(' ')
if len(tks) < 4:
cur_cc.append('!ERR')
else:
cur_cc.append(tks[3][:-1])
cc[ln] = list(cur_cc)
elif l.startswith('[TIMING] End func'):
if len(cur_cc) > 0:
cur_cc.pop()
cc[ln] = list(cur_cc)
ln += 1
def cc_slice():
global cc
for i in sorted(list(cc)):
print_cc(i,cc[i])
#The log file is in general very large, so this script slices out only the information of interest (e.g. all events related to a certain object)
#and outputs it in a well-organized, readable format to make debugging easier.
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: ./log_slicer.py log_file key_type(tag/obj) key(ID)')
else:
#First read in the log file.
with open(sys.argv[1],'r') as f:
for l in f:
log.append(l[:-1])
#Preliminary callchain analysis
cc_analyze()
if len(sys.argv) < 4:
cc_slice()
else:
inst_analyze()
k = sys.argv[3]
if sys.argv[2] == 'tag':
tag_slice(k)
elif sys.argv[2] == 'obj':
obj_slice(k)
else:
#By default perform a callchain analysis for each line in the log.
cc_slice()
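# --- Hedged invocation examples (added for illustration; the log file name and
# the object key below are made up -- the key is simply a substring matched
# against log lines):
#   ./log_slicer.py analysis.log                 -> call-chain slice of every line
#   ./log_slicer.py analysis.log obj OBJ_0x1234  -> all events mentioning that object key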
|
the-stack_106_14646
|
import ClientConstants as CC
import ClientDefaults
import ClientGUIListBoxes
import collections
import HydrusConstants as HC
import os
import random
import TestConstants
import time
import unittest
import wx
import HydrusGlobals as HG
def DoClick( click, panel, do_delayed_ok_afterwards = False ):
wx.QueueEvent( panel, click )
if do_delayed_ok_afterwards:
HG.test_controller.CallLaterWXSafe( panel, 1, PressKeyOnFocusedWindow, wx.WXK_RETURN )
wx.YieldIfNeeded()
time.sleep( 0.1 )
def GetAllClickableIndices( panel ):
click = wx.MouseEvent( wx.wxEVT_LEFT_DOWN )
current_y = 5
click.SetX( 10 )
click.SetY( current_y )
all_clickable_indices = {}
while panel._GetIndexUnderMouse( click ) is not None:
index = panel._GetIndexUnderMouse( click )
if index not in all_clickable_indices:
all_clickable_indices[ index ] = current_y
current_y += 5
click.SetY( current_y )
return all_clickable_indices
def PressKey( window, key ):
window.SetFocus()
uias = wx.UIActionSimulator()
uias.Char( key )
def PressKeyOnFocusedWindow( key ):
uias = wx.UIActionSimulator()
uias.Char( key )
class TestListBoxes( unittest.TestCase ):
def test_listbox_colour_options( self ):
frame = TestConstants.TestFrame()
try:
initial_namespace_colours = { 'series' : ( 153, 101, 21 ), '' : ( 0, 111, 250 ), None : ( 114, 160, 193 ), 'creator' : ( 170, 0, 0 ) }
panel = ClientGUIListBoxes.ListBoxTagsColourOptions( frame, initial_namespace_colours )
frame.SetPanel( panel )
self.assertEqual( panel.GetNamespaceColours(), initial_namespace_colours )
#
new_namespace_colours = dict( initial_namespace_colours )
new_namespace_colours[ 'character' ] = ( 0, 170, 0 )
colour = wx.Colour( 0, 170, 0 )
panel.SetNamespaceColour( 'character', colour )
self.assertEqual( panel.GetNamespaceColours(), new_namespace_colours )
#
terms = set( panel._terms )
ordered_terms = list( panel._ordered_terms )
self.assertEqual( len( terms ), len( ordered_terms ) )
#
all_clickable_indices = GetAllClickableIndices( panel )
self.assertEqual( len( all_clickable_indices.keys() ), len( terms ) )
self.assertEqual( set( all_clickable_indices.keys() ), set( range( len( all_clickable_indices.keys() ) ) ) )
#
for ( index, y ) in all_clickable_indices.items():
click = wx.MouseEvent( wx.wxEVT_LEFT_DOWN )
click.SetX( 10 )
click.SetY( y )
DoClick( click, panel )
self.assertEqual( panel.GetSelectedNamespaceColours(), dict( [ ordered_terms[ index ] ] ) )
#
current_y = 5
click = wx.MouseEvent( wx.wxEVT_LEFT_DOWN )
click.SetX( 10 )
click.SetY( current_y )
while panel._GetIndexUnderMouse( click ) is not None:
current_y += 5
click.SetY( current_y )
DoClick( click, panel )
self.assertEqual( panel.GetSelectedNamespaceColours(), {} )
#
if len( all_clickable_indices.keys() ) > 2:
indices = random.sample( all_clickable_indices.keys(), len( all_clickable_indices.keys() ) - 1 )
for index in indices:
click = wx.MouseEvent( wx.wxEVT_LEFT_DOWN )
click.SetControlDown( True )
click.SetX( 10 )
click.SetY( all_clickable_indices[ index ] )
DoClick( click, panel )
expected_selected_terms = [ ordered_terms[ index ] for index in indices ]
self.assertEqual( panel.GetSelectedNamespaceColours(), dict( expected_selected_terms ) )
#
random_index = random.choice( all_clickable_indices.keys() )
while ordered_terms[ random_index ][0] in panel.PROTECTED_TERMS:
random_index = random.choice( all_clickable_indices.keys() )
del new_namespace_colours[ ordered_terms[ random_index ][0] ]
# select nothing
current_y = 5
click = wx.MouseEvent( wx.wxEVT_LEFT_DOWN )
click.SetX( 10 )
click.SetY( current_y )
while panel._GetIndexUnderMouse( click ) is not None:
current_y += 5
click.SetY( current_y )
DoClick( click, panel )
# select the random index
click = wx.MouseEvent( wx.wxEVT_LEFT_DOWN )
click.SetX( 10 )
click.SetY( all_clickable_indices[ random_index ] )
DoClick( click, panel )
# now double-click to activate and hence remove
doubleclick = wx.MouseEvent( wx.wxEVT_LEFT_DCLICK )
doubleclick.SetX( 5 )
doubleclick.SetY( all_clickable_indices[ random_index ] )
DoClick( doubleclick, panel, do_delayed_ok_afterwards = True )
self.assertEqual( panel.GetNamespaceColours(), new_namespace_colours )
finally:
frame.DestroyLater()
|
the-stack_106_14647
|
import datetime
from anchore_engine.services.policy_engine.api.models import (
FeedMetadata,
FeedGroupMetadata,
Image,
ImageIngressResponse,
ImageIngressRequest,
ImageVulnerabilityListing,
CpeVulnerability,
CvssScore,
CvssCombined,
LegacyVulnerabilityReport,
LegacyTableReport,
LegacyMultiReport,
)
from anchore_engine.utils import datetime_to_rfc3339
def test_feeds():
f = FeedMetadata()
f.name = "feed1"
d1 = datetime.datetime.utcnow()
f.updated_at = d1
assert f.to_json() == {
"name": "feed1",
"updated_at": datetime_to_rfc3339(d1),
"groups": None,
"enabled": None,
"last_full_sync": None,
}
f.groups = []
g = FeedGroupMetadata()
g.name = "group1"
g.record_count = 10
g.enabled = True
f.groups.append(g)
assert f.to_json() == {
"name": "feed1",
"updated_at": datetime_to_rfc3339(d1),
"enabled": None,
"last_full_sync": None,
"groups": [
{
"name": "group1",
"enabled": True,
"record_count": 10,
"created_at": None,
"updated_at": None,
"last_sync": None,
}
],
}
def test_groups():
d1 = datetime.datetime.utcnow()
d2 = datetime.datetime.utcnow() - datetime.timedelta(days=1)
g = FeedGroupMetadata()
g.name = "group"
g.enabled = True
g.created_at = d2
g.updated_at = d1
g.last_sync = d1
g.record_count = 0
assert g.to_json() == {
"name": "group",
"enabled": True,
"created_at": datetime_to_rfc3339(d2),
"updated_at": datetime_to_rfc3339(d1),
"last_sync": datetime_to_rfc3339(d1),
"record_count": 0,
}
def test_image():
"""
Simple serialization test
:return:
"""
i = Image()
i.user_id = "user"
i.id = "image1"
i.state = "active"
i.digest = "digest"
i.tags = ["tag1", "tag2"]
assert i.to_json() == {
"id": "image1",
"user_id": "user",
"digest": "digest",
"tags": ["tag1", "tag2"],
"state": "active",
"created_at": None,
"last_modified": None,
"distro_namespace": None,
}
def test_ingress_request():
"""
Simple serialization test
:return:
"""
r = ImageIngressRequest()
r.user_id = "user"
r.image_id = "image1"
r.fetch_url = "catalog://something.com/user/image_analysis/image1"
assert r.to_json() == {
"user_id": "user",
"image_id": "image1",
"fetch_url": "catalog://something.com/user/image_analysis/image1",
}
r = ImageIngressRequest()
r.user_id = "user"
r.image_id = "image1"
r.fetch_url = "https://someserver.com/file"
assert r.to_json() == {
"user_id": "user",
"image_id": "image1",
"fetch_url": "https://someserver.com/file",
}
r = ImageIngressRequest()
r.user_id = "user"
r.image_id = "image1"
r.fetch_url = "file:///path/to/file"
assert r.to_json() == {
"user_id": "user",
"image_id": "image1",
"fetch_url": "file:///path/to/file",
}
def test_ingress_response():
"""
Simple serialization test
:return:
"""
r = ImageIngressResponse()
r.status = "ok"
r.vulnerability_report = {}
r.problems = []
assert r.to_json() == {"status": "ok", "vulnerability_report": {}, "problems": []}
r = ImageIngressResponse()
assert r.to_json() == {
"status": None,
"vulnerability_report": None,
"problems": None,
}
def test_vuln_report():
r = ImageVulnerabilityListing()
r.image_id = "image"
r.user_id = "user"
r.cpe_report = [CpeVulnerability()]
v = r.cpe_report[0]
v.name = "lib1"
v.cpe = "cpe:*:*"
v.cpe23 = "cpe2:*:*"
v.version = "1.1"
v.feed_name = "nvdv2"
v.feed_namespace = "nvdv2:cpes"
v.severity = "High"
v.vulnerability_id = "CVE"
v.vendor_data = [CvssCombined()]
v.vendor_data[0].id = "CVE-VENDOR"
v.vendor_data[0].cvss_v2 = CvssScore()
v.vendor_data[0].cvss_v2.base_score = 1.0
v.vendor_data[0].cvss_v2.exploitability_score = 2.0
v.vendor_data[0].cvss_v2.impact_score = 3.0
v.vendor_data[0].cvss_v3 = CvssScore()
v.vendor_data[0].cvss_v3.base_score = 1.0
v.vendor_data[0].cvss_v3.exploitability_score = 2.0
v.vendor_data[0].cvss_v3.impact_score = 3.0
v.nvd_data = [CvssCombined()]
v.nvd_data[0].id = "CVE-NVD"
v.nvd_data[0].cvss_v2 = CvssScore()
v.nvd_data[0].cvss_v2.base_score = 1.1
v.nvd_data[0].cvss_v2.exploitability_score = 2.2
v.nvd_data[0].cvss_v2.impact_score = 3.3
v.nvd_data[0].cvss_v3 = CvssScore()
v.nvd_data[0].cvss_v3.base_score = 1.1
v.nvd_data[0].cvss_v3.exploitability_score = 2.2
v.nvd_data[0].cvss_v3.impact_score = 3.3
r.legacy_report = LegacyVulnerabilityReport()
r.legacy_report.multi = LegacyMultiReport()
r.legacy_report.multi.result = LegacyTableReport()
r.legacy_report.multi.result.colcount = 4
r.legacy_report.multi.result.rowcount = 1
r.legacy_report.multi.result.header = ["id", "name", "version", "url"]
r.legacy_report.multi.result.rows = [["CVE-NVD", "lib1", "1.1", "http://someurl"]]
r.legacy_report.multi.url_column_index = 3
r.legacy_report.multi.warns = []
assert r.to_json() == {
"user_id": "user",
"image_id": "image",
"cpe_report": [
{
"cpe": "cpe:*:*",
"cpe23": "cpe2:*:*",
"pkg_path": None,
"pkg_type": None,
"feed_name": "nvdv2",
"feed_namespace": "nvdv2:cpes",
"version": "1.1",
"name": "lib1",
"link": None,
"nvd_data": [
{
"id": "CVE-NVD",
"cvss_v2": {
"base_score": 1.1,
"exploitability_score": 2.2,
"impact_score": 3.3,
},
"cvss_v3": {
"base_score": 1.1,
"exploitability_score": 2.2,
"impact_score": 3.3,
},
}
],
"vendor_data": [
{
"id": "CVE-VENDOR",
"cvss_v2": {
"base_score": 1.0,
"exploitability_score": 2.0,
"impact_score": 3.0,
},
"cvss_v3": {
"base_score": 1.0,
"exploitability_score": 2.0,
"impact_score": 3.0,
},
}
],
"severity": "High",
"vulnerability_id": "CVE",
}
],
"legacy_report": {
"multi": {
"result": {
"colcount": 4,
"header": ["id", "name", "version", "url"],
"rowcount": 1,
"rows": [["CVE-NVD", "lib1", "1.1", "http://someurl"]],
},
"url_column_index": 3,
"warns": [],
}
},
}
|
the-stack_106_14648
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
import torch.nn.functional as F
import sys
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'batch_sync':
norm_layer = BatchNorm2d
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal',
gpu_ids=[], n_downsampling=2, opt=None):
netG = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert (torch.cuda.is_available())
if which_model_netG in ['APS']:
from models.APS import stylegenerator
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
netG = stylegenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout,
n_blocks=9, gpu_ids=gpu_ids, n_downsampling=n_downsampling, opt = opt)
netG.cuda()
if len(gpu_ids) > 1:
netG = nn.DataParallel(netG, device_ids=gpu_ids)
init_weights(netG, init_type=init_type)
return netG
def define_D(input_nc, ndf, which_model_netD,
n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[], use_dropout=False,
n_downsampling=2):
netD = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert (torch.cuda.is_available())
if which_model_netD == 'resnet':
netD = ResnetDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=n_layers_D,
gpu_ids=[], padding_type='reflect', use_sigmoid=use_sigmoid,
n_downsampling=n_downsampling)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' %
which_model_netD)
netD.cuda()
if len(gpu_ids) > 1:
netD = nn.DataParallel(netD, device_ids=gpu_ids)
# if use_gpu:
# netD.cuda(gpu_ids[0])
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
self.real_label_var = self.Tensor(input.size()).fill_(self.real_label)
# self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
self.fake_label_var = self.Tensor(input.size()).fill_(self.fake_label)
# self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
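# Illustrative usage sketch (pred_real/pred_fake are hypothetical discriminator
# outputs, not names defined in this file):
#   criterion = GANLoss(use_lsgan=True, tensor=torch.cuda.FloatTensor)
#   loss_d = criterion(pred_real, True) + criterion(pred_fake, False)
#   loss_g = criterion(pred_fake, True)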
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
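        # Residual skip connection: the block input is added to the conv branch output.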
out = x + self.conv_block(x)
return out
class ResnetDiscriminator(nn.Module):
def __init__(self, input_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[],
padding_type='reflect', use_sigmoid=False, n_downsampling=2):
assert (n_blocks >= 0)
super(ResnetDiscriminator, self).__init__()
self.input_nc = input_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
# n_downsampling = 2
if n_downsampling <= 2:
for i in range(n_downsampling):
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
elif n_downsampling == 3:
mult = 2 ** 0
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** 1
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** 2
model += [nn.Conv2d(ngf * mult, ngf * mult, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult),
nn.ReLU(True)]
if n_downsampling <= 2:
mult = 2 ** n_downsampling
else:
mult = 4
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout,
use_bias=use_bias)]
if use_sigmoid:
model += [nn.Sigmoid()]
self.model = nn.Sequential(*model)
def forward(self, input, mask=None):
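        # If a mask is given it is resized to the output feature map and multiplied in,
        # zeroing the discriminator scores outside the masked region.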
y = self.model(input)
if mask is not None:
mask = F.interpolate(mask, size=(y.shape[2],y.shape[3]))
y = y * mask
return y
|
the-stack_106_14649
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Heat documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 13 11:23:35 2012.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import os
import subprocess
import sys
import tempfile
import warnings
from oslo_config import cfg
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
CONTRIB_DIR = os.path.join(ROOT, 'contrib')
PLUGIN_DIRS = glob.glob(os.path.join(CONTRIB_DIR, '*'))
ENV_DIR = os.path.join(ROOT, "etc", "heat", "environment.d")
TEMP_ENV_DIR = tempfile.mkdtemp()
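# Copy the shipped environment files into a temp dir, rewriting "file:///" URLs
# so they resolve against this source tree during the docs build.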
for f in glob.glob(os.path.join(ENV_DIR, "*.yaml")):
with open(f, "r") as fin:
name = os.path.split(f)[-1]
with open(os.path.join(TEMP_ENV_DIR, name), "w") as fout:
fout.write(fin.read().replace("file:///", "file://%s/" % ROOT))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
cfg.CONF.import_opt('plugin_dirs', 'heat.common.config')
cfg.CONF.set_override(name='plugin_dirs', override=PLUGIN_DIRS)
cfg.CONF.import_opt('environment_dir', 'heat.common.config')
cfg.CONF.set_override(name='environment_dir', override=TEMP_ENV_DIR)
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinxcontrib.apidoc',
'openstackdocstheme',
'oslo_config.sphinxconfiggen',
'oslo_config.sphinxext',
'oslo_policy.sphinxext',
'oslo_policy.sphinxpolicygen',
'ext.resources',
'ext.tablefromtext',
'stevedore.sphinxext']
# policy sample file generation
policy_generator_config_file = '../../etc/heat/heat-policy-generator.conf'
sample_policy_basename = '_static/heat'
# oslo_config.sphinxconfiggen options
config_generator_config_file = '../../config-generator.conf'
sample_config_basename = '_static/heat'
# openstackdocstheme options
repository_name = 'openstack/heat'
bug_project = '989'
bug_tag = 'docs'
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Heat'
copyright = u'(c) 2012- Heat Developers'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['heat.']
primary_domain = 'py'
nitpicky = False
# -- Options for API documentation -------------------------------------------
apidoc_module_dir = '../../heat'
apidoc_separate_modules = True
apidoc_excluded_paths = [
'cmd',
'cloudinit',
'db/sqlalchemy/migrate_repo/versions',
'engine/resources/aws',
'engine/resources/openstack',
'hacking',
'httpd',
'locale',
'tests',
'version.py',
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
html_theme = 'openstackdocs'
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"sidebar_mode": "toc"}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8')
except Exception:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Heatdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'Heat.tex', u'Heat Documentation',
u'Heat Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man/heat-api', 'heat-api',
u'REST API service to the heat project.',
[u'Heat Developers'], 1),
('man/heat-api-cfn', 'heat-api-cfn',
u'CloudFormation compatible API service to the heat project.',
[u'Heat Developers'], 1),
('man/heat-db-setup', 'heat-db-setup',
u'Command line utility to setup the Heat database',
[u'Heat Developers'], 1),
('man/heat-engine', 'heat-engine',
u'Service which performs the actions from the API calls made by the user',
[u'Heat Developers'], 1),
('man/heat-keystone-setup', 'heat-keystone-setup',
u'Script which sets up keystone for usage by Heat',
[u'Heat Developers'], 1),
('man/heat-keystone-setup-domain', 'heat-keystone-setup-domain',
u'Script which sets up a keystone domain for heat users and projects',
[u'Heat Developers'], 1),
('man/heat-manage', 'heat-manage',
u'Script which helps manage specific database operations',
[u'Heat Developers'], 1),
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Heat', u'Heat Documentation',
u'Heat Developers', 'Heat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
the-stack_106_14650
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.FamilyArchiveDetail import FamilyArchiveDetail
class AlipayUserFamilyArchiveQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayUserFamilyArchiveQueryResponse, self).__init__()
self._archive_list = None
@property
def archive_list(self):
return self._archive_list
@archive_list.setter
def archive_list(self, value):
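        # Accept FamilyArchiveDetail instances or plain dicts; dicts are converted via from_alipay_dict.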
if isinstance(value, list):
self._archive_list = list()
for i in value:
if isinstance(i, FamilyArchiveDetail):
self._archive_list.append(i)
else:
self._archive_list.append(FamilyArchiveDetail.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayUserFamilyArchiveQueryResponse, self).parse_response_content(response_content)
if 'archive_list' in response:
self.archive_list = response['archive_list']
|
the-stack_106_14652
|
"""
Links up the various cards in the BDF.
For example, with cross referencing...
.. code-block:: python
>>> model = BDF()
>>> model.read_bdf(bdf_filename, xref=True)
>>> nid1 = 1
>>> node1 = model.nodes[nid1]
>>> node.nid
1
>>> node.xyz
[1., 2., 3.]
>>> node.Cid()
3
>>> node.cid
3
>>> node.cid_ref
CORD2S, 3, 1, 0., 0., 0., 0., 0., 1.,
1., 0., 0.
# get the position in the global frame
>>> node.get_position()
[4., 5., 6.]
# get the position with respect to another frame
>>> node.get_position_wrt(model, cid=2)
[4., 5., 6.]
Without cross referencing...
.. code-block:: python
>>> model = BDF()
>>> model.read_bdf(bdf_filename, xref=True)
>>> nid1 = 1
>>> node1 = model.nodes[nid1]
>>> node.nid
1
>>> node.xyz
[1., 2., 3.]
>>> node.Cid()
3
>>> node.cid
3
>>> node.cid_ref
None
# get the position in the global frame
>>> node.get_position()
Error!
Cross-referencing allows you to easily jump across cards and also helps
with calculating things like position, area, and mass. The BDF is designed
around the idea of cross-referencing, so it's recommended that you use it.
"""
# pylint: disable=R0902,R0904,R0914
from collections import defaultdict
import traceback
from typing import List, Dict, Any
from numpy import zeros, argsort, arange, array_equal, array
from pyNastran.bdf.bdf_interface.attributes import BDFAttributes
class XrefMesh(BDFAttributes):
"""Links up the various cards in the BDF."""
def __init__(self) -> None:
"""The main BDF class defines all the parameters that are used."""
BDFAttributes.__init__(self)
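        # Cross-reference failures are accumulated (see _store_xref_error); pop_xref_errors()
        # fires early once more than _nxref_errors are stored and again at the end of cross_reference().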
self._nxref_errors = 100
self._stop_on_xref_error = True
# def geom_check(self):
# """
# Performs various geometry checks
# 1. nodal uniqueness on elements
# """
# for elem in model.elements:
# elem.check_unique_nodes()
def cross_reference(self,
xref: bool=True,
xref_nodes: bool=True,
xref_elements: bool=True,
xref_nodes_with_elements: bool=False,
xref_properties: bool=True,
xref_masses: bool=True,
xref_materials: bool=True,
xref_loads: bool=True,
xref_constraints: bool=True,
xref_aero: bool=True,
xref_sets: bool=True,
xref_optimization: bool=True,
word: str='') -> None:
"""
Links up all the cards to the cards they reference
Parameters
----------
xref : bool; default=True
cross references the model
xref_nodes : bool; default=True
set cross referencing of nodes/coords
xref_element : bool; default=True
set cross referencing of elements
xref_properties : bool; default=True
set cross referencing of properties
xref_masses : bool; default=True
set cross referencing of CMASS/PMASS
xref_materials : bool; default=True
set cross referencing of materials
xref_loads : bool; default=True
set cross referencing of loads
xref_constraints : bool; default=True
set cross referencing of constraints
xref_aero : bool; default=True
set cross referencing of CAERO/SPLINEs
xref_sets : bool; default=True
set cross referencing of SETx
word : str; default=''
model flag
To only cross-reference nodes:
.. code-block:: python
model = BDF()
model.read_bdf(bdf_filename, xref=False)
model.cross_reference(xref=True, xref_loads=False, xref_constraints=False,
xref_materials=False, xref_properties=False,
xref_aero=False, xref_masses=False,
xref_sets=False)
.. warning:: be careful if you call this method with False values
"""
if not xref:
return
self.log.debug("Cross Referencing%s..." % word)
if xref_nodes:
self._cross_reference_nodes()
self._cross_reference_coordinates()
if xref_elements:
self._cross_reference_elements()
self._cross_reference_rigid_elements()
if xref_properties:
self._cross_reference_properties()
if xref_masses:
self._cross_reference_masses()
if xref_materials:
self._cross_reference_materials()
if xref_aero:
self._cross_reference_aero()
if xref_constraints:
self._cross_reference_constraints()
if xref_loads:
self._cross_reference_loads()
if xref_sets:
self._cross_reference_sets()
if xref_optimization:
self._cross_reference_optimization()
if xref_nodes_with_elements:
self._cross_reference_nodes_with_elements()
self._cross_reference_contact()
self._cross_reference_superelements()
#self.case_control_deck.cross_reference(self)
self.pop_xref_errors()
for super_id, superelement in sorted(self.superelement_models.items()):
superelement.cross_reference(
xref=xref, xref_nodes=xref_nodes, xref_elements=xref_elements,
xref_nodes_with_elements=xref_nodes_with_elements,
xref_properties=xref_properties, xref_masses=xref_masses,
xref_materials=xref_materials, xref_loads=xref_loads,
xref_constraints=xref_constraints, xref_aero=xref_aero,
xref_sets=xref_sets, xref_optimization=xref_optimization,
word=' (Superelement %i)' % super_id)
def _cross_reference_constraints(self) -> None:
"""
Links the SPCADD, SPC, SPCAX, SPCD, MPCADD, MPC, SUPORT,
SUPORT1, SESUPORT cards.
"""
for spcadds in self.spcadds.values():
for spcadd in spcadds:
spcadd.cross_reference(self)
for spcs in self.spcs.values():
for spc in spcs:
spc.cross_reference(self)
for spcoffs in self.spcoffs.values():
for spcoff in spcoffs:
spcoff.cross_reference(self)
for mpcadds in self.mpcadds.values():
for mpcadd in mpcadds:
mpcadd.cross_reference(self)
for mpcs in self.mpcs.values():
for mpc in mpcs:
mpc.cross_reference(self)
for suport in self.suport:
suport.cross_reference(self)
for unused_suport1_id, suport1 in self.suport1.items():
suport1.cross_reference(self)
for se_suport in self.se_suport:
se_suport.cross_reference(self)
def _cross_reference_coordinates(self) -> None:
"""
Links up all the coordinate cards to other coordinate cards and nodes
- CORD1R, CORD1C, CORD1S
- CORD2R, CORD2C, CORD2S
"""
# CORD2x: links the rid to coordinate systems
# CORD1x: links g1,g2,g3 to grid points
for coord in self.coords.values():
coord.cross_reference(self)
for coord in self.coords.values():
coord.setup()
def _cross_reference_aero(self, check_caero_element_ids: bool=False) -> None:
"""
Links up all the aero cards
- CAEROx, PAEROx, SPLINEx, AECOMP, AELIST, AEPARAM, AESTAT, AESURF, AESURFS
"""
self.zona.cross_reference()
for caero in self.caeros.values():
caero.cross_reference(self)
for paero in self.paeros.values():
paero.cross_reference(self)
for trim in self.trims.values():
trim.cross_reference(self)
for csschd in self.csschds.values():
csschd.cross_reference(self)
for spline in self.splines.values():
spline.cross_reference(self)
for aecomp in self.aecomps.values():
aecomp.cross_reference(self)
for aelist in self.aelists.values():
aelist.cross_reference(self)
for aeparam in self.aeparams.values():
aeparam.cross_reference(self)
#for aestat in self.aestats.values(s):
#aestat.cross_reference(self)
for aesurf in self.aesurf.values():
aesurf.cross_reference(self)
for aesurfs in self.aesurfs.values():
aesurfs.cross_reference(self)
for flutter in self.flutters.values():
flutter.cross_reference(self)
for monitor_point in self.monitor_points:
monitor_point.cross_reference(self)
if self.aero:
self.aero.cross_reference(self)
if self.aeros:
self.aeros.cross_reference(self)
if check_caero_element_ids: # only support CAERO1
ncaeros = len(self.caeros)
if ncaeros > 1:
# we don't need to check the ncaeros=1 case
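                # collect each CAERO's (min_eid, max_eid) pair and verify the
                # ranges are ascending and non-overlapping in caero-id order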
i = 0
min_maxs = zeros((ncaeros, 2), dtype='int32')
for unused_eid, caero in sorted(self.caeros.items()):
min_maxs[i, :] = caero.min_max_eid
i += 1
isort = argsort(min_maxs.ravel())
expected = arange(ncaeros * 2, dtype='int32')
if not array_equal(isort, expected):
msg = 'CAERO element ids are inconsistent\n'
msg += 'isort = %s' % str(isort)
raise RuntimeError(msg)
#'AERO', ## aero
#'AEROS', ## aeros
#'GUST', ## gusts
#'FLUTTER', ## flutters
#'FLFACT', ## flfacts
#'MKAERO1', 'MKAERO2', ## mkaeros
#'AECOMP', ## aecomps
#'AEFACT', ## aefacts
#'AELINK', ## aelinks
#'AELIST', ## aelists
#'AEPARAM', ## aeparams
#'AESTAT', ## aestats
#'AESURF', ## aesurfs
def _cross_reference_nodes(self) -> None:
"""Links the nodes to coordinate systems"""
grdset = self.grdset
for node in self.nodes.values():
try:
node.cross_reference(self, grdset)
except Exception:
self.log.error("Couldn't cross reference GRID.\n%s" % (str(node)))
raise
for point in self.points.values():
try:
point.cross_reference(self)
except Exception:
self.log.error("Couldn't cross reference POINT.\n%s" % (str(point)))
raise
# SPOINTs, EPOINTs don't need xref
# GRDPNT for mass calculations
#if model.has_key()
#for param_key, param in self.params:
#if
def _cross_reference_elements(self) -> None:
"""
Links the elements to nodes, properties (and materials depending on
the card).
"""
for elem in self.elements.values():
try:
elem.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, elem)
for elem in self.masses.values():
try:
elem.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, elem)
def _cross_reference_rigid_elements(self) -> None:
for elem in self.rigid_elements.values():
try:
elem.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, elem)
for elem in self.plotels.values():
try:
elem.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, elem)
def _store_xref_error(self, error, card) -> None:
self._ixref_errors += 1
var = traceback.format_exception_only(type(error), error)
self._stored_xref_errors.append((card, var))
if self._ixref_errors > self._nxref_errors:
self.pop_xref_errors()
def _cross_reference_nodes_with_elements(self) -> None:
"""Links the nodes to all connected elements"""
nodes = defaultdict(list) # type: Dict[int, List[Any]]
for element in self.elements.values():
#if element.type in ['CONM2']:
# pass
#else:
if element.nodes is not None:
for nid in element.node_ids:
if nid is None:
continue
nodes[nid].append(element)
#except AttributeError:
#print(element)
#print('node = %s' % str(node))
#raise
for node in self.nodes.values():
node.elements_ref = nodes[node.nid]
def _cross_reference_masses(self) -> None:
"""
Links the mass to nodes, properties (and materials depending on
the card).
"""
for mass in self.masses.values():
try:
mass.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, mass)
for prop in self.properties_mass.values():
try:
prop.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, prop)
def _cross_reference_properties(self) -> None:
"""Links the properties to materials"""
for prop in self.properties.values():
try:
prop.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, prop)
def _cross_reference_materials(self) -> None:
"""
Links the materials to materials (e.g. MAT1, CREEP)
often this is a pass statement
"""
for mat in self.materials.values(): # MAT1
try:
mat.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, mat)
for mat in self.creep_materials.values(): # CREEP
try:
mat.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, mat)
# CREEP - depends on MAT1
data = [self.MATS1, self.MATS3, self.MATS8,
self.MATT1, self.MATT2, self.MATT3, self.MATT4, self.MATT5,
self.MATT8, self.MATT9]
for material_deps in data:
for mat in material_deps.values():
try:
mat.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, mat)
def _cross_reference_loads(self) -> None:
"""Links the loads to nodes, coordinate systems, and other loads."""
for (unused_lid, load_combinations) in self.load_combinations.items():
for load_combination in load_combinations:
try:
load_combination.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, load_combination)
for (unused_lid, loads) in self.loads.items():
for load in loads:
try:
load.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, load)
for (unused_lid, sid) in self.dloads.items():
for load in sid:
#self.log.debug(" dloadi load=%s" % (load))
try:
load.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._ixref_errors += 1
var = traceback.format_exception_only(type(error), error)
self._stored_xref_errors.append((load, var))
if self._ixref_errors > self._nxref_errors:
self.pop_xref_errors()
for unused_lid, sid in self.dload_entries.items():
for load in sid:
#self.log.debug(" dloadi load=%s" % (load))
try:
load.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
#raise
self._store_xref_error(error, load)
for unused_key, darea in self.dareas.items():
try:
darea.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, darea)
for unused_key, tic in self.tics.items():
try:
tic.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, tic)
for unused_key, dphase in self.dphases.items():
try:
dphase.cross_reference(self)
except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as error:
self._store_xref_error(error, dphase)
def _cross_reference_sets(self) -> None:
"""cross references the SET objects"""
for set_obj in self.asets:
set_obj.cross_reference(self)
for set_obj in self.omits:
set_obj.cross_reference(self)
for set_obj in self.bsets:
set_obj.cross_reference(self)
for set_obj in self.csets:
set_obj.cross_reference(self)
for set_obj in self.qsets:
set_obj.cross_reference(self)
for unused_name, set_objs in self.usets.items():
for set_obj in set_objs:
set_obj.cross_reference(self)
# superelements
for unused_key, set_obj in self.se_sets.items():
set_obj.cross_reference(self)
for set_obj in self.se_bsets:
set_obj.cross_reference(self)
for set_obj in self.se_csets:
set_obj.cross_reference(self)
for set_obj in self.se_qsets:
set_obj.cross_reference(self)
for set_obj in self.se_usets:
set_obj.cross_reference(self)
def _cross_reference_optimization(self) -> None:
"""cross references the optimization objects"""
remove_missing_optimization = True
dconstrs_to_remove = []
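        # DCONSTRs that fail to cross reference are recorded here and deleted after the loop
        # (unless remove_missing_optimization is False, in which case the error is re-raised).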
for unused_key, deqatn in self.dequations.items():
deqatn.cross_reference(self)
for unused_key, dresp in self.dresps.items():
dresp.cross_reference(self)
for key, dconstrs in self.dconstrs.items():
for i, dconstr in enumerate(dconstrs):
try:
dconstr.cross_reference(self)
                except Exception:
if not remove_missing_optimization:
raise
dconstrs_to_remove.append((key, i))
for unused_key, dvcrel in self.dvcrels.items():
dvcrel.cross_reference(self)
for unused_key, dvmrel in self.dvmrels.items():
dvmrel.cross_reference(self)
for unused_key, dvprel in self.dvprels.items():
dvprel.cross_reference(self)
for unused_key, desvar in self.desvars.items():
desvar.cross_reference(self)
for key, i in dconstrs_to_remove:
del self.dconstrs[key][i]
def _safe_cross_reference_contact(self) -> None:
"""cross references the contact objects"""
self._cross_reference_contact()
def _cross_reference_contact(self) -> None:
"""cross references the contact objects"""
for blseg in self.blseg.values():
blseg.cross_reference(self)
for bconp in self.bconp.values():
bconp.cross_reference(self)
# bgset
# bctset
#for bgadd in self.bgadds.values():
#bgadd.cross_reference(self)
#for bctadd in self.bctadds.values():
#bctadd.cross_reference(self)
def _uncross_reference_contact(self) -> None:
"""uncross references the contact objects"""
for blseg in self.blseg.values():
blseg.uncross_reference()
for bconp in self.bconp.values():
bconp.uncross_reference()
def _cross_reference_superelements(self) -> None:
"""cross references the superelement objects"""
for unused_seid, csuper in self.csuper.items():
csuper.cross_reference(self)
for unused_seid, csupext in self.csupext.items():
csupext.cross_reference(self)
for unused_seid, sebulk in self.sebulk.items():
sebulk.cross_reference(self)
for unused_seid, sebndry in self.sebndry.items():
sebndry.cross_reference(self)
for unused_seid, seconct in self.seconct.items():
seconct.cross_reference(self)
for unused_seid, seelt in self.seelt.items():
seelt.cross_reference(self)
for unused_seid, seexcld in self.seexcld.items():
seexcld.cross_reference(self)
for unused_seid, selabel in self.selabel.items():
selabel.cross_reference(self)
for unused_seid, seloc in self.seloc.items():
seloc.cross_reference(self)
for unused_seid, seload in self.seload.items():
seload.cross_reference(self)
for unused_seid, sempln in self.sempln.items():
sempln.cross_reference(self)
for unused_seid, setree in self.setree.items():
setree.cross_reference(self)
#'senqset',
#'se_sets', 'se_usets',
def _safe_cross_reference_superelements(
self, create_superelement_geometry: bool=False) -> None:
xref_errors = {}
seloc_missing = []
for seid, seloc in self.seloc.items():
if seid in self.superelement_models:
superelement = self.superelement_models[seid]
seloc.safe_cross_reference(self, xref_errors)
#seloc.transform(self)
else:
seloc_missing.append(seid)
try:
for unused_seid, sempln in sorted(self.sempln.items()):
sempln.safe_cross_reference(self, xref_errors)
for unused_seid, csuper in self.csuper.items():
csuper.safe_cross_reference(self, xref_errors)
for unused_seid, csupext in self.csupext.items():
csupext.safe_cross_reference(self, xref_errors)
if self.sebulk and create_superelement_geometry:
#print('sebulk...')
import os
# we have to create the superelement in order to transform it...
for seid, sebulk in self.sebulk.items():
super_filename = 'super_%i.bdf' % seid
if os.path.exists(super_filename):
os.remove(super_filename)
#print(sebulk)
rseid = sebulk.rseid
sebulk.safe_cross_reference(self, xref_errors)
mirror_model = self._create_superelement_from_sebulk(sebulk, seid, rseid)
if mirror_model is None:
continue
self.log.debug('made superelement %i' % seid)
self.superelement_models[seid] = mirror_model
mirror_model.write_bdf(super_filename)
for unused_seid, sebndry in self.sebndry.items():
sebndry.safe_cross_reference(self, xref_errors)
for unused_seid, seconct in self.seconct.items():
seconct.safe_cross_reference(self, xref_errors)
for unused_seid, seelt in self.seelt.items():
seelt.safe_cross_reference(self, xref_errors)
for unused_seid, seexcld in self.seexcld.items():
seexcld.safe_cross_reference(self, xref_errors)
for unused_seid, selabel in self.selabel.items():
selabel.safe_cross_reference(self, xref_errors)
for seid in seloc_missing:
seloc = self.seloc[seid]
seloc.safe_cross_reference(self, xref_errors)
for unused_seid, seload in self.seload.items():
seload.safe_cross_reference(self, xref_errors)
for unused_seid, setree in self.setree.items():
setree.safe_cross_reference(self, xref_errors)
except KeyError:
if not create_superelement_geometry:
raise
self.write_bdf('superelement_xref.bdf')
self.log.error('check superelement_xref.bdf')
raise
    def _create_superelement_from_sebulk(self, sebulk, seid: int, rseid: int):
"""helper for sebulk"""
#C:\MSC.Software\MSC.Nastran\msc20051\nast\tpl\see103q4.dat
ref_model = self.superelement_models[rseid]
if sebulk.superelement_type == 'MIRROR':
from pyNastran.bdf.mesh_utils.mirror_mesh import bdf_mirror_plane
#print('creating superelement %s from %s' % (seid, rseid))
sempln = self.sempln[seid]
plane = array([node.get_position() for node in sempln.nodes_ref])
# What about seloc on the primary and sempln+seloc on the secondary?
# - move the primary
# - then apply the mirror to make the secondary
# - then move the secondary
#
# Or what about sempln+seloc on the tertiary?
#
# this is fine for the secondary
if rseid in self.seloc:
# I think this is wrong...
seloc = self.seloc[rseid]
plane = seloc.transform(self, plane)
ref_model, mirror_model, unused_nid_offset, unused_eid_offset = bdf_mirror_plane(
ref_model, plane, mirror_model=None, log=None, debug=True, use_nid_offset=False)
mirror_model.properties = ref_model.properties
mirror_model.materials = ref_model.materials
new_model = mirror_model
elif sebulk.Type in ['MANUAL', 'PRIMARY', 'COLLCTR', 'EXTERNAL']:
self.log.info('skipping:\n%s' % sebulk)
new_model = None
else: # pragma: no cover
raise NotImplementedError(sebulk)
return new_model
def _uncross_reference_superelements(self) -> None:
"""cross references the superelement objects"""
for unused_seid, csuper in self.csuper.items():
csuper.uncross_reference()
for unused_seid, csupext in self.csupext.items():
csupext.uncross_reference()
for unused_seid, sebulk in self.sebulk.items():
sebulk.uncross_reference()
for unused_seid, sebndry in self.sebndry.items():
sebndry.uncross_reference()
for unused_seid, seconct in self.seconct.items():
seconct.uncross_reference()
for unused_seid, seelt in self.seelt.items():
seelt.uncross_reference()
for unused_seid, seexcld in self.seexcld.items():
seexcld.uncross_reference()
for unused_seid, selabel in self.selabel.items():
selabel.uncross_reference()
for unused_seid, seloc in self.seloc.items():
seloc.uncross_reference()
for unused_seid, seload in self.seload.items():
seload.uncross_reference()
for unused_seid, sempln in self.sempln.items():
sempln.uncross_reference()
for unused_seid, setree in self.setree.items():
setree.uncross_reference()
    def get_point_grids(self, nodes: List[Any], msg: str='') -> List[Any]:
"""gets GRID, POINT cards"""
nodes_ref = []
missing_nids = []
for nid in nodes:
if nid in self.nodes:
node = self.nodes[nid]
elif nid in self.points:
node = self.points[nid]
else:
missing_nids.append(nid)
continue
nodes_ref.append(node)
if missing_nids:
raise KeyError('missing GRID/POINT nids=%s%s' % (missing_nids, msg))
return nodes_ref
    def superelement_nodes(self, seid: int, nodes: List[Any], msg: str=''):
if seid == 0:
return self.Nodes(nodes, msg=msg)
try:
superelement = self.superelement_models[seid]
except KeyError:
keys = list(self.superelement_models.keys())
raise KeyError('cant find superelement=%i%s; seids=%s' % (seid, msg, keys))
return superelement.Nodes(nodes, msg=msg)
def geom_check(self, geom_check: bool, xref: bool) -> None: # pragma: no cover
"""
        Runs element sanity checks (element._verify); with xref=True the
        element cross references are checked as well.
"""
if geom_check:
if xref:
                for unused_eid, element in self.elements.items():
#element.Mass()
element._verify(xref=True)
#if 'GEOMCHECK' in self.params: # should this be an executive control parameter?
#for eid, element in model.elements:
#element._verify()
else:
                for unused_eid, element in self.elements.items():
element.verify_unique_node_ids()
element._verify(xref=False)
# aspect ratio - ratio between element edges
# warping - how planar is a face
# taper - split a quad into 2 triangles and compare the area
# skew - an angle, measures how skewed an element face is by drawing lines
# between midpoints of elements edges, finding the smallest angle
# between the intersecting lines and subtracting that from 90 degrees
# Jacobian - how much does element deviate from the ideal shape by taking the
# determinant of the Jacobian matrix
# quad skew <= 30.
# quad warp >= 0.05
# quad taper >= 0.5
# quad iamin <= 30.
# quad iamax >= 150.
# tria skew <= 10.
# tria iamax <= 160.
# tetra ar >= 100.
# tetra elpr <= 0.5
# tetra detj <= 0.
# hex ar >= 100.
# hex elpr <= 0.5
# hex detj <= 0.
# hex warp <= 0.707
# penta ar >= 100.
# penta elpr <= 0.5
# penta detj <= 0.
# penta warp <= 0.707
# pyram ar >= 100.
# pyram elpr <= 0.5
# pyram detj <= 0.
# pyram warp <= 0.707
|
the-stack_106_14653
|
class Solution:
def longestValidParentheses(self, s: str) -> int:
ans, stack = 0, [(")", -1)]
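        # The stack keeps (char, index) of unmatched characters; the (")", -1)
        # sentinel marks the boundary before the current valid run, so
        # i - stack[-1][1] is the length of the valid substring ending at i.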
for i, x in enumerate(s):
if stack[-1][0] == "(" and x == ")":
stack.pop()
ans = max(ans, i - stack[-1][1])
else:
stack.append((x, i))
return ans
|
the-stack_106_14657
|
import numpy as np
import sys
import unittest
import ray
import ray.rllib.agents.a3c as a3c
import ray.rllib.agents.ddpg as ddpg
import ray.rllib.agents.ddpg.td3 as td3
import ray.rllib.agents.dqn as dqn
import ray.rllib.agents.impala as impala
import ray.rllib.agents.pg as pg
import ray.rllib.agents.ppo as ppo
import ray.rllib.agents.sac as sac
from ray.rllib.utils import check, framework_iterator, try_import_tf
tf = try_import_tf()
def do_test_explorations(run,
env,
config,
dummy_obs,
prev_a=None,
expected_mean_action=None):
"""Calls an Agent's `compute_actions` with different `explore` options."""
core_config = config.copy()
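    # Run without rollout workers where possible so actions come from the local
    # policy; A3C keeps its default because it relies on async workers.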
if run not in [a3c.A3CTrainer]:
core_config["num_workers"] = 0
# Test all frameworks.
for fw in framework_iterator(core_config):
if fw == "tfe" and run in [
ddpg.DDPGTrainer, sac.SACTrainer, td3.TD3Trainer
]:
continue
print("Agent={}".format(run))
# Test for both the default Agent's exploration AND the `Random`
# exploration class.
for exploration in [None, "Random"]:
local_config = core_config.copy()
if exploration == "Random":
# TODO(sven): Random doesn't work for IMPALA yet.
if run is impala.ImpalaTrainer:
continue
local_config["exploration_config"] = {"type": "Random"}
print("exploration={}".format(exploration or "default"))
trainer = run(config=local_config, env=env)
# Make sure all actions drawn are the same, given same
# observations.
actions = []
for _ in range(25):
actions.append(
trainer.compute_action(
observation=dummy_obs,
explore=False,
prev_action=prev_a,
prev_reward=1.0 if prev_a is not None else None))
check(actions[-1], actions[0])
# Make sure actions drawn are different
# (around some mean value), given constant observations.
actions = []
for _ in range(100):
actions.append(
trainer.compute_action(
observation=dummy_obs,
explore=True,
prev_action=prev_a,
prev_reward=1.0 if prev_a is not None else None))
check(
np.mean(actions),
expected_mean_action
if expected_mean_action is not None else 0.5,
atol=0.3)
# Check that the stddev is not 0.0 (values differ).
check(np.std(actions), 0.0, false=True)
class TestExplorations(unittest.TestCase):
"""
Tests all Exploration components and the deterministic flag for
compute_action calls.
"""
@classmethod
def setUpClass(cls):
ray.init(ignore_reinit_error=True)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_a2c(self):
do_test_explorations(
a3c.A2CTrainer,
"CartPole-v0",
a3c.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0, 0.0]),
prev_a=np.array(1))
def test_a3c(self):
do_test_explorations(
a3c.A3CTrainer,
"CartPole-v0",
a3c.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0, 0.0]),
prev_a=np.array(1))
def test_ddpg(self):
# Switch off random timesteps at beginning. We want to test actual
# GaussianNoise right away.
config = ddpg.DEFAULT_CONFIG.copy()
config["exploration_config"]["random_timesteps"] = 0
do_test_explorations(
ddpg.DDPGTrainer,
"Pendulum-v0",
config,
np.array([0.0, 0.1, 0.0]),
expected_mean_action=0.0)
def test_simple_dqn(self):
do_test_explorations(dqn.SimpleQTrainer, "CartPole-v0",
dqn.SIMPLE_Q_DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0, 0.0]))
def test_dqn(self):
do_test_explorations(dqn.DQNTrainer, "CartPole-v0", dqn.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0, 0.0]))
def test_impala(self):
do_test_explorations(
impala.ImpalaTrainer,
"CartPole-v0",
impala.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0, 0.0]),
prev_a=np.array(0))
def test_pg(self):
do_test_explorations(
pg.PGTrainer,
"CartPole-v0",
pg.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0, 0.0]),
prev_a=np.array(1))
def test_ppo_discr(self):
do_test_explorations(
ppo.PPOTrainer,
"CartPole-v0",
ppo.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0, 0.0]),
prev_a=np.array(0))
def test_ppo_cont(self):
do_test_explorations(
ppo.PPOTrainer,
"Pendulum-v0",
ppo.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0]),
prev_a=np.array([0.0]),
expected_mean_action=0.0)
def test_sac(self):
do_test_explorations(
sac.SACTrainer,
"Pendulum-v0",
sac.DEFAULT_CONFIG,
np.array([0.0, 0.1, 0.0]),
expected_mean_action=0.0)
def test_td3(self):
config = td3.TD3_DEFAULT_CONFIG.copy()
# Switch off random timesteps at beginning. We want to test actual
# GaussianNoise right away.
config["exploration_config"]["random_timesteps"] = 0
do_test_explorations(
td3.TD3Trainer,
"Pendulum-v0",
config,
np.array([0.0, 0.1, 0.0]),
expected_mean_action=0.0)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_106_14661
|
#! /usr/bin/env python
#
# example2.py -- Simple, configurable FITS viewer.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import logging
from ginga import colors
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.misc import log
from ginga.web.pgw import Widgets, Viewers
from ginga.util.loader import load_data
class FitsViewer(object):
def __init__(self, logger, window):
self.logger = logger
self.drawcolors = colors.get_colors()
self.dc = get_canvas_types()
self.top = window
self.top.add_callback('close', self.closed)
vbox = Widgets.VBox()
vbox.set_margins(2, 2, 2, 2)
vbox.set_spacing(1)
fi = Viewers.CanvasView(logger)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.set_zoom_algorithm('rate')
fi.set_zoomrate(1.4)
fi.show_pan_mark(True)
fi.set_callback('drag-drop', self.drop_file_cb)
fi.set_callback('cursor-changed', self.cursor_cb)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_set_active(True)
self.fitsimage = fi
bd = fi.get_bindings()
bd.enable_all(True)
# so trackpad scrolling can be adjusted
settings = bd.get_settings()
settings.set(scroll_zoom_direct_scale=True,
scroll_zoom_acceleration=0.07)
# canvas that we will draw on
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='lightblue')
canvas.set_surface(fi)
self.canvas = canvas
# add canvas to view
private_canvas = fi.get_canvas()
private_canvas.add(canvas)
canvas.ui_set_active(True)
canvas.register_for_cursor_drawing(fi)
self.drawtypes = canvas.get_drawtypes()
self.drawtypes.sort()
# add a color bar
fi.show_color_bar(True)
# add little mode indicator that shows keyboard modal states
fi.show_mode_indicator(True, corner='ur')
fi.set_desired_size(512, 512)
w = Viewers.GingaViewerWidget(viewer=fi)
vbox.add_widget(w, stretch=1)
self.readout = Widgets.Label("")
vbox.add_widget(self.readout, stretch=0)
hbox = Widgets.HBox()
hbox.set_margins(2, 2, 2, 2)
hbox.set_spacing(4)
wdrawtype = Widgets.ComboBox()
for name in self.drawtypes:
wdrawtype.append_text(name)
index = self.drawtypes.index('rectangle')
wdrawtype.set_index(index)
wdrawtype.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawtype = wdrawtype
wdrawcolor = Widgets.ComboBox()
for name in self.drawcolors:
wdrawcolor.append_text(name)
index = self.drawcolors.index('lightblue')
wdrawcolor.set_index(index)
wdrawcolor.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawcolor = wdrawcolor
wfill = Widgets.CheckBox("Fill")
wfill.add_callback('activated', lambda w, tf: self.set_drawparams())
self.wfill = wfill
walpha = Widgets.SpinBox(dtype=float)
walpha.set_limits(0.0, 1.0, incr_value=0.1)
walpha.set_value(1.0)
walpha.set_decimals(2)
walpha.add_callback('value-changed', lambda w, val: self.set_drawparams())
self.walpha = walpha
wclear = Widgets.Button("Clear Canvas")
wclear.add_callback('activated', lambda w: self.clear_canvas())
## wopen = Widgets.Button("Open File")
## wopen.add_callback('activated', lambda w: self.open_file())
## wquit = Widgets.Button("Quit")
## wquit.add_callback('activated', lambda w: self.quit())
hbox.add_widget(Widgets.Label(''), stretch=1)
for w in (wdrawtype, wdrawcolor, wfill,
Widgets.Label('Alpha:'), walpha, wclear):
hbox.add_widget(w, stretch=0)
vbox.add_widget(hbox, stretch=0)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
hbox.set_spacing(4)
btn1 = Widgets.RadioButton("Draw")
btn1.set_state(mode == 'draw')
btn1.add_callback('activated', lambda w, val: self.set_mode_cb('draw', val))
btn1.set_tooltip("Choose this to draw on the canvas")
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Edit", group=btn1)
btn2.set_state(mode == 'edit')
btn2.add_callback('activated', lambda w, val: self.set_mode_cb('edit', val))
btn2.set_tooltip("Choose this to edit things on the canvas")
hbox.add_widget(btn2)
hbox.add_widget(Widgets.Label('Zoom sensitivity: '))
slider = Widgets.Slider(orientation='horizontal', dtype=float)
slider.add_callback('value-changed',
lambda w, val: self.adjust_scrolling_accel_cb(val))
slider.set_limits(0.0, 12.0, 0.005)
slider.set_value(8.0)
hbox.add_widget(slider, stretch=1)
# hbox.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(hbox, stretch=0)
# need to put this in an hbox with an expanding label or the
# browser wants to resize the canvas
hbox = Widgets.HBox()
hbox.add_widget(vbox, stretch=0)
hbox.add_widget(Widgets.Label(''), stretch=1)
self.top.set_widget(hbox)
def set_drawparams(self):
index = self.wdrawtype.get_index()
kind = self.drawtypes[index]
index = self.wdrawcolor.get_index()
fill = self.wfill.get_state()
alpha = self.walpha.get_value()
params = {'color': self.drawcolors[index],
'alpha': alpha,
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self):
self.canvas.delete_all_objects()
def load_file(self, filepath):
image = load_data(filepath, logger=self.logger)
self.fitsimage.set_image(image)
self.top.set_title(filepath)
def open_file(self):
res = Widgets.FileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0]
else:
fileName = str(res)
if len(fileName) != 0:
self.load_file(fileName)
def drop_file_cb(self, viewer, paths):
filename = paths[0]
self.load_file(filename)
def cursor_cb(self, viewer, button, data_x, data_y):
"""This gets called when the data position relative to the cursor
changes.
"""
# Get the value under the data coordinates
try:
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = viewer.get_data(int(data_x + viewer.data_off),
int(data_y + viewer.data_off))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = viewer.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.set_text(text)
def set_mode_cb(self, mode, tf):
self.logger.info("canvas mode changed (%s) %s" % (mode, tf))
if not (tf is False):
self.canvas.set_draw_mode(mode)
return True
def adjust_scrolling_accel_cb(self, val):
def f(x):
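            # map the linear slider value (0-12) to an exponential zoom-acceleration factor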
return (1.0 / 2.0**(10.0 - x))
val2 = f(val)
self.logger.debug("slider value is %f, setting will be %f" % (val, val2))
settings = self.fitsimage.get_bindings().get_settings()
settings.set(scroll_zoom_acceleration=val2)
return True
def closed(self, w):
self.logger.info("Top window closed.")
w.delete()
self.top = None
sys.exit()
def quit(self, *args):
self.readout.set_text("Quitting!")
self.logger.info("Attempting to shut down the application...")
if self.top is not None:
self.top.close()
sys.exit()
def main(options, args):
logger = log.get_logger("example2", options=options)
if options.use_opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as e:
logger.warning("Error using OpenCv: %s" % str(e))
if options.use_opencl:
from ginga import trcalc
try:
trcalc.use('opencl')
except Exception as e:
logger.warning("Error using OpenCL: %s" % str(e))
#base_url = "http://%s:%d/app" % (options.host, options.port)
# establish our widget application
app = Widgets.Application(logger=logger,
host=options.host, port=options.port)
# create top level window
window = app.make_window("Ginga web example2")
# our own viewer object, customized with methods (see above)
viewer = FitsViewer(logger, window)
#server.add_callback('shutdown', viewer.quit)
window.resize(700, 540)
if len(args) > 0:
viewer.load_file(args[0])
#window.show()
#window.raise_()
try:
app.start()
except KeyboardInterrupt:
logger.info("Terminating viewer...")
window.close()
if __name__ == "__main__":
# Parse command line options
from argparse import ArgumentParser
argprs = ArgumentParser()
argprs.add_argument("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
argprs.add_argument("--host", dest="host", metavar="HOST",
default='localhost',
help="Listen on HOST for connections")
argprs.add_argument("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
argprs.add_argument("--loglevel", dest="loglevel", metavar="LEVEL",
type=int, default=logging.INFO,
help="Set logging level to LEVEL")
argprs.add_argument("--opencv", dest="use_opencv", default=False,
action="store_true",
help="Use OpenCv acceleration")
argprs.add_argument("--opencl", dest="use_opencl", default=False,
action="store_true",
help="Use OpenCL acceleration")
argprs.add_argument("--port", dest="port", metavar="PORT",
type=int, default=9909,
help="Listen on PORT for connections")
argprs.add_argument("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
argprs.add_argument("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
argprs.add_argument("-t", "--toolkit", dest="toolkit", metavar="NAME",
default='qt',
help="Choose GUI toolkit (gtk|qt)")
(options, args) = argprs.parse_known_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
|
the-stack_106_14665
|
from .env import MatrixGameEnv
import numpy as np
class parallel_env(MatrixGameEnv):
def __init__(self, max_frames, memory_length, temptation=5., reward=3., punishment=1., suckers=0.):
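        # Classic prisoner's dilemma payoffs: utility_matrix[a0][a1] is the
        # (agent 0, agent 1) reward pair, with action 0 as the cooperative move.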
        utility_matrix = np.array([[[reward, reward], [suckers, temptation]],
                                   [[temptation, suckers], [punishment, punishment]]], dtype=float)
super(parallel_env, self).__init__(num_agents=2, num_actions=2, utility_matrix=utility_matrix,
memory_length=memory_length, max_frames=max_frames)
|
the-stack_106_14666
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Libsharp(AutotoolsPackage):
"""Libsharp is a code library for spherical harmonic transforms (SHTs) and
spin-weighted spherical harmonic transforms, which evolved from the libpsht
library."""
variant('openmp', default=True, description='Build with openmp support')
variant('mpi', default=True, description='Build with MPI support')
variant('pic', default=True, description='Generate position-independent code (PIC)')
homepage = "https://github.com/Libsharp/libsharp"
git = "https://github.com/Libsharp/libsharp.git"
version('1.0.0', commit='cc4753ff4b0ef393f0d4ada41a175c6d1dd85d71', preferred=True)
version('2018-01-17', commit='593d4eba67d61827191c32fb94bf235cb31205e1')
depends_on('autoconf', type='build')
depends_on('mpi', when='+mpi')
patch('arm.patch', when='@2018-01-17 target=aarch64:')
patch('1.0.0-arm.patch', when='@1.0.0 target=aarch64:')
def autoreconf(self, spec, prefix):
"""Generate autotools configuration"""
bash = which('bash')
bash('autoconf')
def configure_args(self):
args = []
if '+openmp' not in self.spec:
args.append("--disable-openmp")
if '+mpi' not in self.spec:
args.append("--disable-mpi")
if '+pic' in self.spec:
args.append("--enable-pic")
return args
def install(self, spec, prefix):
# Libsharp's only caller healpix include headers like 'libsharp/xxx.h'
# Install xxx.h to include/libsharp
install_tree('auto/include', prefix.include.libsharp)
install_tree('auto/lib', prefix.lib)
|
the-stack_106_14669
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from ..base import ComponentAPI
class CollectionsBkLogin(object):
"""Collections of BK_LOGIN APIS"""
def __init__(self, client):
self.client = client
self.get_all_users = ComponentAPI(
client=self.client,
method="GET",
path="/api/c/compapi{bk_api_ver}/bk_login/get_all_users/",
description=u"获取所有用户信息",
)
self.get_batch_users = ComponentAPI(
client=self.client,
method="POST",
path="/api/c/compapi{bk_api_ver}/bk_login/get_batch_users/",
description=u"批量获取用户信息",
)
self.get_batch_users_platform_role = ComponentAPI(
client=self.client,
method="POST",
path="/api/c/compapi{bk_api_ver}/bk_login/get_batch_users_platform_role/",
description=u"批量获取用户各平台角色信息",
)
self.get_user = ComponentAPI(
client=self.client,
method="GET",
path="/api/c/compapi{bk_api_ver}/bk_login/get_user/",
description=u"获取用户信息",
)
self.get_all_user = ComponentAPI(
client=self.client,
method="GET",
path="/api/c/compapi{bk_api_ver}/bk_login/get_all_user/",
description=u"获取所有用户信息",
)
self.get_batch_user = ComponentAPI(
client=self.client,
method="GET",
path="/api/c/compapi{bk_api_ver}/bk_login/get_batch_user/",
description=u"获取多个用户信息",
)
self.get_batch_user_platform_role = ComponentAPI(
client=self.client,
method="GET",
path="/api/c/compapi{bk_api_ver}/bk_login/get_batch_user_platform_role/",
description=u"获取多个用户在平台应用的角色",
)
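# Usage sketch (added; assumptions: ``client`` is an already-configured component
# client and ComponentAPI instances are invoked as callables):
#
#     bk_login = CollectionsBkLogin(client)
#     result = bk_login.get_all_users()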
|
the-stack_106_14670
|
import os
import random
from copy import deepcopy
from agents.greedy_agent_boost import GreedyAgentBoost
from agents.greedysearch_agent import GreedySearchAgent
from agents.minmax_agent import MinMaxAgent
from agents.random_agent import RandomAgent
import numpy as np
from agents.state_evaluator_heuristic import StateEvaluatorHeuristic
from arena.arena_multi_thread import ArenaMultiThread
from gym_splendor_code.envs.mechanics.game_settings import USE_TQDM
from gym_splendor_code.envs.mechanics.state_as_dict import StateAsDict
from nn_models.utils.vectorizer import Vectorizer
import pickle
from mpi4py import MPI
from nn_models.value_function_heura.value_function import ValueFunction
comm = MPI.COMM_WORLD
my_rank = MPI.COMM_WORLD.Get_rank()
main_thread = my_rank == 0
def produce_data(when_to_start, dump_p, n_games, filename, folder):
list_of_agents = [RandomAgent(), GreedyAgentBoost(), MinMaxAgent()]
arek = ArenaMultiThread()
arek.start_collecting_states()
arek.collect_only_from_middle_game(when_to_start, dump_p)
arek.all_vs_all('deterministic', list_of_agents, n_games)
arek.dump_collected_states(filename, folder)
def flip_states(list_of_states, list_of_values):
rev_states = []
rev_values = []
for i in range(len(list_of_states)):
rev_state = deepcopy(list_of_states[i])
rev_state.change_active_player()
rev_states.append(rev_state)
rev_values.append(-list_of_values[i])
return rev_states, rev_values
def evaluate_states(files_dir, dump_dir):
evaluator = ValueFunction()
list_of_files = os.listdir(files_dir)
for file_name in list_of_files:
with open(os.path.join(files_dir, file_name), 'rb') as f:
X, _ = pickle.load(f)
Y = []
for x in X:
state_to_eval = StateAsDict(x).to_state()
Y.append(evaluator.evaluate(state_to_eval))
del state_to_eval
with open(os.path.join(dump_dir, file_name), 'wb') as f:
pickle.dump((X, Y), f)
print(len(X))
del X
del Y
def pick_data_for_training(epochs_range, files_dir, dump_dir, bufor_size=10):
states = []
values = []
files_list = os.listdir(files_dir)
for epoch in epochs_range:
print(f'Epoch = {epoch}')
bufor = 0
part = 0
for file_name in files_list:
bufor += 1
print(f'Current file = {file_name}')
with open(os.path.join(files_dir, file_name), 'rb') as f:
one_file_data = pickle.load(f)
for key in one_file_data:
if one_file_data[key]['states']:
random_idx = random.randint(0, len(one_file_data[key]['states']) - 1)
states.append(one_file_data[key]['states'][random_idx])
values.append(one_file_data[key]['values'][random_idx])
del one_file_data
if bufor > bufor_size:
bufor = 0
part += 1
print('\n Flipping \n')
# states_rev, values_rev = flip_states(states, values)
# print('States flipped')
# states = states + states_rev
# values = values + values_rev
# del states_rev
# del values_rev
print('Ready to save')
with open(os.path.join(dump_dir, f'epoch_{epoch}_part_{part}.pickle'), 'wb') as f:
pickle.dump((states, values), f)
del states
del values
states = []
values = []
def flatten_data_from_games(source_file, target_file):
with open(source_file, 'rb') as f:
one_file_data = pickle.load(f)
states = []
values = []
for key in one_file_data:
states += one_file_data[key]['states']
values += one_file_data[key]['values']
with open(target_file, 'wb') as f:
pickle.dump((states, values), f)
def load_data_for_model(file):
with open(file, 'rb') as f:
data_to_return = pickle.load(f)
return data_to_return
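# Pipeline sketch (added). Argument values and paths are illustrative, and the
# on-disk format written by dump_collected_states is assumed to match what
# pick_data_for_training expects. Only the MPI root process drives generation:
#
#     if __name__ == '__main__' and main_thread:
#         produce_data(when_to_start=10, dump_p=0.2, n_games=2,
#                      filename='raw_states', folder='data/raw')
#         pick_data_for_training(range(1), 'data/raw', 'data/training')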
|
the-stack_106_14674
|
import asyncio
import logging
from baseorder import BaseOrder, log
from tokens import *
from dexible.common import as_units
async def main():
logging.basicConfig(level=logging.INFO)
inputs = [WETH_KOVAN, USDC_KOVAN, WETH_KOVAN, WETH_KOVAN]
outputs = [WBTC_KOVAN, WETH_KOVAN, DAI_KOVAN, USDC_KOVAN]
amounts = [as_units(300, 18), as_units(300000, 6),
as_units(300, 18), as_units(300, 18)]
sdk = BaseOrder.create_dexible_sdk()
calls = []
for i in range(0, len(inputs)):
token_in = await sdk.token.lookup(inputs[i])
token_out = await sdk.token.lookup(outputs[i])
calls.append(sdk.quote.get_quote(token_in=token_in,
token_out=token_out,
amount_in=amounts[i],
slippage_percent=.5))
r = await asyncio.gather(*calls)
log.info(f"Quotes: {r}")
if __name__ == '__main__':
asyncio.run(main())
|
the-stack_106_14676
|
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as T
import numpy as np
from tqdm import tqdm
from model.vae import VAE
from dataset import TanksDataset, ToTensor
def train(vae, dataloader, epochs=1, device=torch.device("cpu")):
vae = vae.to(device)
vae = vae.double()
#transform = T.ConvertImageDtype(dtype=torch.double)
optimizer = torch.optim.Adam(vae.parameters(), lr=0.001)
reported_loss = []
for epoch in range(epochs):
collective_loss = []
for _, x in tqdm(enumerate(dataloader)):
            x = x.to(device)
#x = transform(images)
#assert x.dtype == torch.double
_, mu, log_sigma, x_prime = vae.forward(x.double())
loss, recon, kld = vae.loss_fn(x, x_prime, mu, log_sigma)
optimizer.zero_grad()
loss.backward()
optimizer.step()
collective_loss.append([recon.item(), kld.item()])
np_collective_loss = np.array(collective_loss)
        average_loss = np.mean(np_collective_loss, axis=0)  # [mean recon, mean KLD] for this epoch
reported_loss.append(average_loss)
print(f"Epoch {epoch+1} finished!", f"reconstruction_loss = {average_loss[0]} || KL-Divergence = {average_loss[1]}", sep="\n")
if (epoch+1) % 10 == 0:
with torch.no_grad():
to_img = T.ToPILImage()
example = vae.sample()
img_example = to_img(example)
img_example.save(f"result_at_epoch_{epoch+1}.png")
print("Training Finished!")
    return np.array(reported_loss)  # shape (epochs, 2): per-epoch [mean recon, mean KLD]
if __name__ == "__main__":
train_loader = DataLoader(TanksDataset(transform=ToTensor()), batch_size=64, shuffle=True)
vae = VAE(
input_shape=[3, 64, 64],
conv_filters=[3, 32, 64, 128 , 256],
conv_kernels=[(5, 5), (3, 3), (3, 3), (3, 3)],
conv_strides=[(1, 1), (1, 1), (1, 1), (1, 1)],
paddings=[(1, 1), (1, 1), (1, 1), (1, 1)],
output_paddings=[(0, 0), (0, 0), (0, 0), (0, 0)],
dilations=[(1, 1), (1, 1), (1, 1), (1, 1)],
latent_space_dim=1024
)
train(vae, train_loader, epochs=100, device=torch.device("cuda"))
|
the-stack_106_14677
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Mark the boundary and faces for different dataset types.
# Here we focus on 3D types.
# Control test resolution
res = 50
# Create a 3D volume
image = vtk.vtkImageData()
image.SetDimensions(res,res,res)
image.SetOrigin(-0.5,-0.5,-0.5)
image.SetSpacing(1.0/float(res-1),1.0/float(res-1),1.0/float(res-1))
mark1 = vtk.vtkMarkBoundaryFilter()
mark1.SetInputData(image)
mark1.GenerateBoundaryFacesOn()
thresh1 = vtk.vtkThreshold()
thresh1.SetInputConnection(mark1.GetOutputPort())
thresh1.SetThresholdFunction(vtk.vtkThreshold.THRESHOLD_UPPER)
thresh1.SetUpperThreshold(1.0)
thresh1.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, "BoundaryCells")
mapper1 = vtk.vtkDataSetMapper()
mapper1.SetInputConnection(thresh1.GetOutputPort())
mapper1.ScalarVisibilityOff()
actor1 = vtk.vtkActor()
actor1.SetMapper(mapper1)
# unstructured grid
sphere = vtk.vtkSphere()
sphere.SetCenter(0,0,0)
sphere.SetRadius(1000000)
toUG = vtk.vtkExtractGeometry()
toUG.SetInputData(image)
toUG.SetImplicitFunction(sphere)
mark2 = vtk.vtkMarkBoundaryFilter()
mark2.SetInputConnection(toUG.GetOutputPort())
mark2.GenerateBoundaryFacesOn()
mark2.Update()
thresh2 = vtk.vtkThreshold()
thresh2.SetInputConnection(mark2.GetOutputPort())
thresh2.SetThresholdFunction(vtk.vtkThreshold.THRESHOLD_UPPER)
thresh2.SetUpperThreshold(1.0)
thresh2.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, "BoundaryCells")
mapper2 = vtk.vtkDataSetMapper()
mapper2.SetInputConnection(thresh2.GetOutputPort())
mapper2.ScalarVisibilityOff()
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
# Define graphics objects
ren1 = vtk.vtkRenderer()
ren1.SetViewport(0,0, 0.5, 1)
ren1.SetBackground(0,0,0)
ren1.AddActor(actor1)
ren1.GetActiveCamera().SetFocalPoint(0,0,0)
ren1.GetActiveCamera().SetPosition(0.25,0.5,1)
ren1.ResetCamera()
ren2 = vtk.vtkRenderer()
ren2.SetViewport(0.5,0, 1,1)
ren2.SetBackground(0,0,0)
ren2.AddActor(actor2)
ren2.SetActiveCamera(ren1.GetActiveCamera())
renWin = vtk.vtkRenderWindow()
renWin.SetSize(300,150)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
iren.Start()
# --- end of script --
|
the-stack_106_14678
|
#!/usr/bin/env python
from copy import deepcopy
import json
import os
import math
import traceback
import click
import numpy as np
import tensorflow
import rastervision as rv
from integration_tests.chip_classification_tests.experiment \
import ChipClassificationIntegrationTest
from integration_tests.object_detection_tests.experiment \
import ObjectDetectionIntegrationTest
from integration_tests.semantic_segmentation_tests.experiment \
import SemanticSegmentationIntegrationTest
from rastervision.rv_config import RVConfig
all_tests = [
rv.CHIP_CLASSIFICATION, rv.OBJECT_DETECTION, rv.SEMANTIC_SEGMENTATION
]
np.random.seed(1234)
tensorflow.set_random_seed(5678)
# Suppress warnings and info to avoid cluttering CI log
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
TEST_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class IntegrationTestExperimentRunner(rv.runner.LocalExperimentRunner):
def __init__(self, tmp_dir=None):
super().__init__(tmp_dir)
def _run_experiment(self, command_dag):
"""Check serialization of all commands."""
for command_config in command_dag.get_sorted_commands():
deepcopy(
rv.command.CommandConfig.from_proto(command_config.to_proto()))
super()._run_experiment(command_dag)
def console_info(msg):
click.echo(click.style(msg, fg='green'))
def console_warning(msg):
click.echo(click.style(msg, fg='yellow'))
def console_error(msg):
click.echo(click.style(msg, fg='red', err=True))
class TestError():
def __init__(self, test, message, details=None):
self.test = test
self.message = message
self.details = details
def __str__(self):
        return ('Error\n' + '------\n' + 'Test: {}\n'.format(self.test) +
                'Message: {}\n'.format(self.message) +
                ('Details: {}'.format(str(self.details)) if self.details else '') +
                '\n')
def get_test_dir(test):
return os.path.join(TEST_ROOT_DIR, test.lower().replace('-', '_'))
def get_expected_eval_path(test):
return os.path.join('{}_tests'.format(get_test_dir(test)),
'expected-output/eval.json')
def get_actual_eval_path(test, temp_dir):
return os.path.join(temp_dir, test.lower(), 'eval/default/eval.json')
def open_json(path):
with open(path, 'r') as file:
return json.load(file)
def check_eval_item(test, expected_item, actual_item):
errors = []
f1_threshold = 0.01
class_name = expected_item['class_name']
expected_f1 = expected_item['f1'] or 0.0
actual_f1 = actual_item['f1'] or 0.0
if math.fabs(expected_f1 - actual_f1) > f1_threshold:
errors.append(
TestError(
test, 'F1 scores are not close enough',
'for class_name: {} expected f1: {}, actual f1: {}'.format(
class_name, expected_item['f1'], actual_item['f1'])))
return errors
def check_eval(test, temp_dir):
errors = []
actual_eval_path = get_actual_eval_path(test, temp_dir)
expected_eval_path = get_expected_eval_path(test)
if os.path.isfile(actual_eval_path):
expected_eval = open_json(expected_eval_path)['overall']
actual_eval = open_json(actual_eval_path)['overall']
for expected_item in expected_eval:
class_name = expected_item['class_name']
actual_item = \
next(filter(
lambda x: x['class_name'] == class_name, actual_eval))
errors.extend(check_eval_item(test, expected_item, actual_item))
else:
errors.append(
TestError(test, 'actual eval file does not exist',
actual_eval_path))
return errors
def get_experiment(test, tmp_dir):
if test == rv.OBJECT_DETECTION:
return ObjectDetectionIntegrationTest().exp_main(
os.path.join(tmp_dir, test.lower()))
if test == rv.CHIP_CLASSIFICATION:
return ChipClassificationIntegrationTest().exp_main(
os.path.join(tmp_dir, test.lower()))
if test == rv.SEMANTIC_SEGMENTATION:
return SemanticSegmentationIntegrationTest().exp_main(
os.path.join(tmp_dir, test.lower()))
raise Exception('Unknown test {}'.format(test))
def run_test(test, temp_dir):
errors = []
experiment = get_experiment(test, temp_dir)
# Check serialization
pp_uri = os.path.join(experiment.bundle_uri, 'predict_package.zip')
experiment.task.predict_package_uri = pp_uri
msg = experiment.to_proto()
experiment = rv.ExperimentConfig.from_proto(msg)
# Check that running doesn't raise any exceptions.
try:
IntegrationTestExperimentRunner(os.path.join(temp_dir, test.lower())) \
.run(experiment, rerun_commands=True)
except Exception as exc:
errors.append(
TestError(test, 'raised an exception while running',
traceback.format_exc()))
return errors
# Check that the eval is similar to expected eval.
errors.extend(check_eval(test, temp_dir))
if not errors:
# Check the prediction package
# This will only work with raster_sources that
# have a single URI.
skip = False
experiment = experiment.fully_resolve()
scenes_to_uris = {}
scenes = experiment.dataset.validation_scenes
for scene in scenes:
rs = scene.raster_source
if hasattr(rs, 'uri'):
scenes_to_uris[scene.id] = rs.uri
elif hasattr(rs, 'uris'):
uris = rs.uris
if len(uris) > 1:
skip = True
else:
scenes_to_uris[scene.id] = uris[0]
else:
skip = True
if skip:
console_warning('Skipping predict package test for '
'test {}, experiment {}'.format(
test, experiment.id))
else:
console_info('Checking predict package produces same results...')
pp = experiment.task.predict_package_uri
predict = rv.Predictor(pp, temp_dir).predict
for scene_config in scenes:
# Need to write out labels and read them back,
# otherwise the floating point precision direct box
# coordinates will not match those from the PREDICT
# command, which are rounded to pixel coordinates
# via pyproj logic (in the case of rasterio crs transformer.
predictor_label_store_uri = os.path.join(
temp_dir, test.lower(),
'predictor/{}'.format(scene_config.id))
uri = scenes_to_uris[scene_config.id]
predict(uri, predictor_label_store_uri)
scene = scene_config.create_scene(experiment.task, temp_dir)
scene_labels = scene.prediction_label_store.get_labels()
extent = scene.raster_source.get_extent()
crs_transformer = scene.raster_source.get_crs_transformer()
predictor_label_store = scene_config.label_store \
.for_prediction(
predictor_label_store_uri) \
.create_store(
experiment.task,
extent,
crs_transformer,
temp_dir)
from rastervision.data import ActivateMixin
with ActivateMixin.compose(scene, predictor_label_store):
if not predictor_label_store.get_labels() == scene_labels:
e = TestError(
test, ('Predictor did not produce the same labels '
'as the Predict command'),
'for scene {} in experiment {}'.format(
scene_config.id, experiment.id))
errors.append(e)
return errors
@click.command()
@click.argument('tests', nargs=-1)
def main(tests):
"""Runs RV end-to-end and checks that evaluation metrics are correct."""
if len(tests) == 0:
tests = all_tests
tests = list(map(lambda x: x.upper(), tests))
with RVConfig.get_tmp_dir() as temp_dir:
errors = []
for test in tests:
if test not in all_tests:
print('{} is not a valid test.'.format(test))
return
errors.extend(run_test(test, temp_dir))
for error in errors:
print(error)
for test in tests:
nb_test_errors = len(
list(filter(lambda error: error.test == test, errors)))
if nb_test_errors == 0:
print('{} test passed!'.format(test))
if errors:
exit(1)
if __name__ == '__main__':
main()
|
the-stack_106_14679
|
from tkinter import *
root=Tk()
root.title("LABO")
root.geometry('400x500')
root.resizable(width=False,height=False)
#menu and submenu
main_menu=Menu(root)
file_menu=Menu(root)
#add commands to the submenu
file_menu.add_command(label='New..')
file_menu.add_command(label='Save As ..')
file_menu.add_command(label='Exit')
main_menu.add_cascade(label='File',menu=file_menu)
#add the rest of the menu options to the main menu
main_menu.add_command(label='Edit')
main_menu.add_command(label='Quit')
root.config(menu=main_menu)
#conversation window
chatWindow=Text(root, bd=1, bg="black", width="50", height="8", font=("Arial", 23), foreground="#00ffff")
chatWindow.place(x=6,y=6,height=385,width=370)
#text area window
messageWindow = Text(root, bd=0, bg="black",width="30", height="4", font=("Arial", 23), foreground="#00ffff")
messageWindow.place(x=128, y=400, height=88, width=260)
scrollbar=Scrollbar(root,command=chatWindow.yview,cursor='star')
scrollbar.place(x=375,y=5,height=385)
#submit button
button=Button(root, text="Send", width="12", height=5,
bd=0, bg="#0080ff", activebackground="#00bfff",foreground='#ffffff',font=("Arial", 12))
button.place(x=6,y=400,height=88)
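# Added sketch (assumption): the original script wires no command to the Send
# button, so clicking it does nothing. This minimal callback copies the typed
# message into the chat pane and clears the input box.
def send_message():
    msg = messageWindow.get("1.0", END).strip()
    if msg:
        chatWindow.insert(END, "You: " + msg + "\n")
        messageWindow.delete("1.0", END)
button.config(command=send_message)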
root.mainloop()
|
the-stack_106_14682
|
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited (“QuantumBlack”) name and logo
# (either separately or in combination, “QuantumBlack Trademarks”) are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains unit test for the cli command 'kedro new'
"""
import json
import os
from pathlib import Path
import pytest
import yaml
from kedro import __version__ as version
from kedro.cli.cli import TEMPLATE_PATH, _fix_user_path, _get_default_config, cli
FILES_IN_TEMPLATE_NO_EXAMPLE = 35
FILES_IN_TEMPLATE_WITH_EXAMPLE = 37
# pylint: disable=too-many-arguments
def _invoke(
cli_runner,
args,
project_name=None,
repo_name=None,
python_package=None,
include_example=None,
):
click_prompts = (project_name, repo_name, python_package, include_example)
input_string = "\n".join(x or "" for x in click_prompts)
return cli_runner.invoke(cli, args, input=input_string)
# pylint: disable=too-many-arguments
def _assert_template_ok(
result,
files_in_template,
repo_name=None,
project_name="New Kedro Project",
output_dir=".",
package_name=None,
):
print(result.output)
assert result.exit_code == 0
assert "Project generated in" in result.output
if repo_name:
full_path = (Path(output_dir) / repo_name).absolute()
generated_files = [
p for p in full_path.rglob("*") if p.is_file() and p.name != ".DS_Store"
]
assert len(generated_files) == files_in_template
assert full_path.exists()
assert (full_path / ".gitignore").is_file()
if project_name:
with (full_path / "README.md").open() as file:
assert project_name in file.read()
with (full_path / ".gitignore").open() as file:
assert "KEDRO" in file.read()
with (full_path / "src" / "requirements.txt").open() as file:
assert version in file.read()
if package_name:
assert (full_path / "src" / package_name / "__init__.py").is_file()
class TestInteractiveNew:
"""Tests for running `kedro new` interactively."""
repo_name = "project-test"
package_name = "package_test"
include_example = "y"
def test_new_no_example(self, cli_runner):
"""Test new project creation without code example."""
project_name = "Test"
result = _invoke(
cli_runner,
["-v", "new"],
project_name=project_name,
repo_name=self.repo_name,
include_example="N",
)
_assert_template_ok(
result,
FILES_IN_TEMPLATE_NO_EXAMPLE,
repo_name=self.repo_name,
project_name=project_name,
package_name="test",
)
def test_new_with_example(self, cli_runner):
"""Test new project creation with code example."""
project_name = "Test"
result = _invoke(
cli_runner,
["-v", "new"],
project_name=project_name,
repo_name=self.repo_name,
include_example="y",
)
_assert_template_ok(
result,
FILES_IN_TEMPLATE_WITH_EXAMPLE,
repo_name=self.repo_name,
project_name=project_name,
package_name="test",
)
def test_new_custom_dir(self, cli_runner):
"""Test that default package name does not change if custom
repo name was specified."""
result = _invoke(
cli_runner,
["new"],
repo_name=self.repo_name,
include_example=self.include_example,
)
_assert_template_ok(
result,
FILES_IN_TEMPLATE_WITH_EXAMPLE,
repo_name=self.repo_name,
package_name="new_kedro_project",
)
def test_new_correct_path(self, cli_runner):
"""Test new project creation with the default project name."""
result = _invoke(
cli_runner,
["new"],
repo_name=self.repo_name,
include_example=self.include_example,
)
_assert_template_ok(
result, FILES_IN_TEMPLATE_WITH_EXAMPLE, repo_name=self.repo_name
)
def test_fail_if_dir_exists(self, cli_runner):
"""Check the error if the output directory already exists."""
empty_file = Path(self.repo_name) / "empty_file"
empty_file.parent.mkdir(parents=True)
empty_file.touch()
old_contents = list(Path(self.repo_name).iterdir())
result = _invoke(
cli_runner,
["-v", "new"],
repo_name=self.repo_name,
include_example=self.include_example,
)
assert list(Path(self.repo_name).iterdir()) == old_contents
assert "directory already exists" in result.output
assert result.exit_code != 0
@pytest.mark.parametrize("repo_name", [".repo", "re!po", "-repo", "repo-"])
def test_bad_repo_name(self, cli_runner, repo_name):
"""Check the error if the repository name is invalid."""
result = _invoke(
cli_runner,
["new"],
repo_name=repo_name,
include_example=self.include_example,
)
assert result.exit_code == 0
assert "is not a valid repository name." in result.output
@pytest.mark.parametrize(
"pkg_name", ["0package", "_", "package-name", "package name"]
)
def test_bad_pkg_name(self, cli_runner, pkg_name):
"""Check the error if the package name is invalid."""
result = _invoke(
cli_runner,
["new"],
python_package=pkg_name,
include_example=self.include_example,
)
assert result.exit_code == 0
assert "is not a valid Python package name." in result.output
@pytest.mark.parametrize("include_example", ["A", "a", "_", "?"])
def test_bad_include_example(self, cli_runner, include_example):
"""Check the error include example response is invalid."""
result = _invoke(
cli_runner,
["new"],
python_package=self.package_name,
include_example=include_example,
)
assert result.exit_code == 0
assert "invalid input" in result.output
def _create_config_file(
config_path, project_name, repo_name, output_dir=None, include_example=False
):
config = {
"project_name": project_name,
"repo_name": repo_name,
"python_package": repo_name.replace("-", "_"),
"include_example": include_example,
}
if output_dir is not None:
config["output_dir"] = output_dir
with open(config_path, "w+") as config_file:
yaml.dump(config, config_file)
return config
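# For reference (added): with the values used in TestNewFromConfig below, the
# helper writes a config.yml roughly equivalent to
#
#     project_name: test1
#     repo_name: project-test1
#     python_package: project_test1
#     include_example: true
#     output_dir: test_dir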
class TestNewFromConfig:
"""Test `kedro new` with config option provided."""
project_name = "test1"
repo_name = "project-test1"
config_path = "config.yml"
include_example = True
def test_config_does_not_exist(self, cli_runner):
"""Check the error if the config file does not exist."""
result = _invoke(cli_runner, ["new", "-c", "missing.yml"])
assert result.exit_code != 0
assert "does not exist" in result.output
def test_empty_config(self, cli_runner):
"""Check the error if the config file is empty."""
open("touch", "a").close()
result = _invoke(cli_runner, ["-v", "new", "-c", "touch"])
assert result.exit_code != 0
assert "is empty" in result.output
def test_new_from_config_no_example(self, cli_runner):
"""Test project created from config without example code."""
output_dir = "test_dir"
include_example = False
Path(output_dir).mkdir(parents=True)
_create_config_file(
self.config_path,
self.project_name,
self.repo_name,
output_dir,
include_example,
)
result = _invoke(cli_runner, ["-v", "new", "--config", self.config_path])
_assert_template_ok(
result,
FILES_IN_TEMPLATE_NO_EXAMPLE,
self.repo_name,
self.project_name,
output_dir,
)
def test_new_from_config_with_example(self, cli_runner):
"""Test project created from config with example code."""
output_dir = "test_dir"
Path(output_dir).mkdir(parents=True)
_create_config_file(
self.config_path,
self.project_name,
self.repo_name,
output_dir,
self.include_example,
)
result = _invoke(cli_runner, ["-v", "new", "--config", self.config_path])
_assert_template_ok(
result,
FILES_IN_TEMPLATE_WITH_EXAMPLE,
self.repo_name,
self.project_name,
output_dir,
)
def test_wrong_config(self, cli_runner):
"""Check the error if the output directory is invalid."""
output_dir = "/usr/invalid/dir"
_create_config_file(
self.config_path, self.project_name, self.repo_name, output_dir
)
result = _invoke(cli_runner, ["new", "-c", self.config_path])
assert result.exit_code != 0
assert "is not a valid output directory." in result.output
def test_bad_yaml(self, cli_runner):
"""Check the error if config YAML is invalid."""
Path(self.config_path).write_text(
"output_dir: \nproject_name:\ttest\nrepo_name:\ttest1\n"
)
result = _invoke(cli_runner, ["new", "-c", self.config_path])
assert result.exit_code != 0
assert "that cannot start any token" in result.output
def test_output_dir_with_tilde_in_path(self, mocker):
"""Check the error if the output directory contains "~" ."""
home_dir = os.path.join("/home", "directory")
output_dir = os.path.join("~", "here")
expected = os.path.join(home_dir, "here")
mocker.patch.dict("os.environ", {"HOME": home_dir, "USERPROFILE": home_dir})
actual = _fix_user_path(output_dir)
assert actual == expected
def test_output_dir_with_relative_path(self, mocker):
"""Check the error if the output directory contains a relative path."""
home_dir = os.path.join("/home", "directory")
current_dir = os.path.join(home_dir, "current", "directory")
output_dir = os.path.join("path", "to", "here")
expected = os.path.join(current_dir, output_dir)
mocker.patch.dict("os.environ", {"HOME": home_dir, "USERPROFILE": home_dir})
mocker.patch("os.getcwd", return_value=current_dir)
actual = _fix_user_path(output_dir)
assert actual == expected
def test_missing_output_dir(self, cli_runner):
"""Check the error if config YAML does not contain the output
directory."""
_create_config_file(
self.config_path,
self.project_name,
self.repo_name,
output_dir=None,
include_example=self.include_example,
) # output dir missing
result = _invoke(cli_runner, ["-v", "new", "--config", self.config_path])
assert result.exit_code != 0
assert "[output_dir] not found in" in result.output
assert not Path(self.repo_name).exists()
def test_missing_include_example(self, cli_runner):
"""Check the error if config YAML does not contain include example."""
output_dir = "test_dir"
Path(output_dir).mkdir(parents=True)
_create_config_file(
self.config_path,
self.project_name,
self.repo_name,
output_dir,
include_example=None,
) # include_example missing
result = _invoke(cli_runner, ["-v", "new", "--config", self.config_path])
assert result.exit_code != 0
assert "It must be a boolean value" in result.output
assert not Path(self.repo_name).exists()
def test_default_config_up_to_date():
"""Validate the contents of the default config file."""
cookie_json_path = Path(TEMPLATE_PATH) / "cookiecutter.json"
cookie = json.loads(cookie_json_path.read_text("utf-8"))
cookie_keys = [
key for key in cookie if not key.startswith("_") and key != "kedro_version"
]
cookie_keys.append("output_dir")
default_config_keys = _get_default_config().keys()
assert set(cookie_keys) == set(default_config_keys)
|
the-stack_106_14683
|
from pyspark import SparkContext
import sys
import glob
import random
import json
import time
sc = SparkContext()
sc.setSystemProperty('spark.driver.memory', '4g')
sc.setSystemProperty('spark.executor.memory', '4g')
sc.setLogLevel("OFF")
input_path = sys.argv[1] # folder: test1/, 1-5
n_cluster = int(sys.argv[2]) # 10/10/5/8/15
out_file1 = sys.argv[3] # cluster_res1.json
out_file2 = sys.argv[4] # intermediate1.csv
start_time = time.time() # 300s
def getSquaredD(vector0, vector1):
return sum([(c0 - c1) ** 2 for c0, c1 in zip(vector0, vector1)])
# K-means ++ initialize
def getMinDistance(vector):
return min([getSquaredD(centroid, vector) for centroid in centroids])
def getCluster(vector):
compareList = [getSquaredD(vector, c) for c in centroids]
return compareList.index(min(compareList))
def getCenter(points, num_points):
matrix = [point[1] for point in points]
return [sum(c) / num_points for c in zip(*matrix)]
# [[index0, vector0], [i1,v1],...
def getSumandSQ(IVlist):
Sum = list()
SumSQ = list()
pointIndexes = list()
matrix = list()
for i, v in IVlist:
pointIndexes.append(i)
matrix.append(v)
for c in zip(*matrix):
Sum.append(sum(c))
SumSQ.append(sum(ci ** 2 for ci in c))
return [Sum, SumSQ, pointIndexes]
def getCentroids(N, Sum):
return [point / N for point in Sum]
def getVariance(N, Sum, SumSQ):
return [sq / N - (Sum[i] / N) ** 2 for i, sq in enumerate(SumSQ)]
def determineDSCS(vector):
indicator = True
for idx, cent in enumerate(DScentroids):
MD = sum([(pi - cent[i]) ** 2 / DSvariance[idx][i] for i, pi in enumerate(vector)])
if MD < a2d:
indicator = False
return [idx, vector]
break
if indicator:
for idx, cent in enumerate(CScentroids):
MD = sum([(pi - cent[i]) ** 2 / CSvariance[idx][i] for i, pi in enumerate(vector)])
if MD < a2d:
indicator = False
return [idx + lenDS, vector]
break
if indicator:
return [-1, vector]
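# Worked example (added, illustrative numbers) of the Mahalanobis test above:
# with d = 3 and a = 2 the threshold is a**2 * d = 12. For a point
# p = [1, 2, 3], a centroid c = [0, 0, 0] and per-dimension variance
# [1, 4, 9], the squared Mahalanobis distance is 1/1 + 4/4 + 9/9 = 3 < 12,
# so the point would be absorbed into that cluster (DS or CS).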
def getVectorSum(vector0, vector1):
return [sum(c) for c in zip(vector0, vector1)]
def getMatrixSum(matrix):
return [sum(c) for c in zip(*matrix)]
# CSCS merge
def detectMerge(vector):
mergeList = list()
for i, cs in enumerate(CScentroids):
distance = getSquaredD(vector, cs)
if distance < a2d4:
mergeList.append([i + lenDS, distance])
if len(mergeList) == 0:
return []
else:
if len(mergeList) >= 2:
minD = mergeList[0][1]
index = mergeList[0][0]
for candidate in mergeList:
if candidate[1] < minD:
minD = candidate[1]
index = candidate[0]
return index
else:
return mergeList[0][0]
# DSCS merge
def detectMerge1(vector):
mergeList = list()
for i, ds in enumerate(DScentroids):
distance = getSquaredD(vector, ds)
if distance < a2d4:
mergeList.append([i, distance])
if len(mergeList) == 0:
return []
else:
if len(mergeList) >= 2:
minD = mergeList[0][1]
index = mergeList[0][0]
for candidate in mergeList:
if candidate[1] < minD:
minD = candidate[1]
index = candidate[0]
return index
else:
return mergeList[0][0]
intermediate = list()
for round_id, filename in enumerate(glob.glob(input_path + "*.txt")):
if round_id == 0:
# initialize
# x: point(x[0]: point index, x[1:]: vector)
# x[0]: index, x[1]: vector
dataRDD = sc.textFile(filename) \
.map(lambda x: [float(point) for point in x.strip("\n").split(',')]) \
.map(lambda x: [str(int(x[0])), x[1:]]) \
.cache()
num_sample = int(dataRDD.count() / 2)
random.seed(5)
sampleList = random.sample(dataRDD.collect(), num_sample)
random.seed(5)
centroids = [random.choice(sampleList)[1]]
# d: dimension
d = len(centroids[0])
a = 2 # hyperparameter: 2,3,4
a2d = a ** 2 * d # if md(Mahalanobis D)**2 < a2d
# 4a**2*d: btw centroids
a2d4 = 4 * a2d
kn = 2 * n_cluster # 2~5
sampleRDD = sc.parallelize(sampleList).cache()
# get initial kn centroids by using K-means++
for i in range(1, kn):
maxDistance = sampleRDD \
.map(lambda x: [x[1], getMinDistance(x[1])]) \
.sortBy(lambda x: x[1], False) \
.take(1)
centroids.append(maxDistance[0][0])
cnt = 0
diff = 100
while (diff > 20 and cnt < 40):
cnt += 1
# clusterRDD x: [0, [[1, [1,2,3]], [2, [4,5,6]]]]
clusterRDD = sampleRDD \
.map(lambda x: [getCluster(x[1]), x]) \
.groupByKey() \
.map(lambda x: [x[0], list(x[1])]) \
.cache()
newCenRDD = clusterRDD \
.map(lambda x: [x[0], getCenter(x[1], len(x[1]))]) \
.sortBy(lambda x: x[0]).cache()
diff = newCenRDD \
.map(lambda x: [1, getSquaredD(x[1], centroids[x[0]])]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
# new centroids
centroids = newCenRDD.map(lambda x: x[1]).collect()
# clusterRDD x: [0, [[1, [1,2,3]], [2, [4,5,6]]]]
# remove RS from sample
sampleRDD = clusterRDD.filter(lambda x: len(x[1]) != 1) \
.flatMap(lambda x: x[1]).cache()
random.seed(5)
centroids = [random.choice(sampleRDD.collect())[1]]
# get initial k centroids by using K-means++
for i in range(1, n_cluster):
maxDistance = sampleRDD \
.map(lambda x: [x[1], getMinDistance(x[1])]) \
.sortBy(lambda x: x[1], False) \
.take(1)
centroids.append(maxDistance[0][0])
cnt = 0
diff = 100
while (diff > 4 and cnt < 40):
cnt += 1
# clusterRDD x: [0, [[1, [1,2,3]], [2, [4,5,6]]]]
clusterRDD = sampleRDD \
.map(lambda x: [getCluster(x[1]), x]) \
.groupByKey() \
.map(lambda x: [x[0], list(x[1])]) \
.cache()
newCenRDD = clusterRDD \
.map(lambda x: [x[0], getCenter(x[1], len(x[1]))]) \
.sortBy(lambda x: x[0]).cache()
diff = newCenRDD \
.map(lambda x: [1, getSquaredD(x[1], centroids[x[0]])]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
# new centroids
centroids = newCenRDD.map(lambda x: x[1]).collect()
# DS
# getSumandSQ: [Sum, SumSQ, pointIndexes]
# [clusterNum, N, [Sum, SumSQ, pointIndexes]]
DSrdd = clusterRDD \
.map(lambda x: [x[0], len(x[1]), getSumandSQ(x[1])]) \
.sortBy(lambda x: x[0]).cache()
# [cln, [N, Sum, SumSQ]]
DS = DSrdd.map(lambda x: [x[0], [x[1], x[2][0], x[2][1]]]) \
.collect()
lenDS = DSrdd.count()
# [cln, pointIndexes]
DSList = DSrdd.map(lambda x: [x[0], x[2][2]]) \
.collect()
DScentroids = DSrdd.map(lambda x: getCentroids(x[1], x[2][0])).collect()
DSvariance = DSrdd.map(lambda x: getVariance(x[1], x[2][0], x[2][1])).collect()
num_DS = DSrdd \
.map(lambda x: [1, x[1]]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
DSpoints = DSrdd.flatMap(lambda x: x[2][2]) \
.collect()
# CS and RS: kn with remain points
dataRDD = dataRDD \
.filter(lambda x: x[0] not in DSpoints) \
.cache()
centroids = [random.choice(dataRDD.collect())[1]]
for i in range(1, kn):
maxDistance = dataRDD \
.map(lambda x: [x[1], getMinDistance(x[1])]) \
.sortBy(lambda x: x[1], False) \
.take(1)
centroids.append(maxDistance[0][0])
cnt = 0
diff = 100
while (diff > 4 and cnt < 40):
cnt += 1
# clusterRDD x: [0, [[1, [1,2,3]], [2, [4,5,6]]]]]
clusterRDD = dataRDD \
.map(lambda x: [getCluster(x[1]), x]) \
.groupByKey() \
.map(lambda x: [x[0], list(x[1])]) \
.cache()
newCenRDD = clusterRDD \
.map(lambda x: [x[0], getCenter(x[1], len(x[1]))]) \
.sortBy(lambda x: x[0]).cache()
diff = newCenRDD \
.map(lambda x: [1, getSquaredD(x[1], centroids[x[0]])]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
# new centroids
centroids = newCenRDD.map(lambda x: x[1]).collect()
RSrdd = clusterRDD.filter(lambda x: len(x[1]) == 1) \
.map(lambda x: [x[1][0][0], x[1][0][1]]).cache()
lenRS = RSrdd.count()
RS = RSrdd.collect()
# getSumandSQ: [Sum, SumSQ, pointIndexes]
# [N, [Sum, SumSQ, pointIndexes]]
CSrdd = clusterRDD.filter(lambda x: len(x[1]) != 1) \
.map(lambda x: [len(x[1]), getSumandSQ(x[1])]) \
.cache()
CScentroids = CSrdd.map(lambda x: getCentroids(x[0], x[1][0])).collect()
CSvariance = CSrdd.map(lambda x: getVariance(x[0], x[1][0], x[1][1])).collect()
num_CS = CSrdd \
.map(lambda x: [1, x[0]]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
lenCS = CSrdd.count()
# [N, Sum, SumSQ]
CS = CSrdd \
.map(lambda x: [x[0], x[1][0], x[1][1]]).collect()
CSList = CSrdd \
.map(lambda x: x[1][2]).collect()
# CS re-numbering
# CS [[10, [N, Sum, SumSQ]],
# CSList [[10, pointIndexes],
CS = [[idx + lenDS, cs] for idx, cs in enumerate(CS)]
CSList = [[idx + lenDS, cs] for idx, cs in enumerate(CSList)]
DSCS = DS + CS
DSCSList = DSList + CSList
intermediate.append([round_id + 1, lenDS, num_DS, lenCS, num_CS, lenRS])
del sampleList
# if md(Mahalanobis D**2) < a2d
# sum((pi-ci)**2/variance of i)
else:
# [index, [clusterNum, vector]]
dataRDD = sc.textFile(filename) \
.map(lambda x: [float(point) for point in x.strip("\n").split(',')]) \
.map(lambda x: [str(int(x[0])), determineDSCS(x[1:])]).cache()
addRS = dataRDD.filter(lambda x: x[1][0] == -1) \
.map(lambda x: [x[0], x[1][1]]).collect()
RS += addRS
lenRS = len(RS)
# [index, [clusterNum, vector]]
# -> [clusterNum, [index, vector]]
# -> [clusterNum, N, [Sum, SumSQ, pointIndexes]]
DSCSrdd = dataRDD.filter(lambda x: x[1][0] != -1) \
.map(lambda x: [x[1][0], [x[0], x[1][1]]]) \
.groupByKey() \
.map(lambda x: [x[0], len(x[1]), getSumandSQ(x[1])]) \
.cache()
# [[clusterNum, [N, Sum, SumSQ]]
addDSCS = DSCSrdd.map(lambda x: [x[0], [x[1], x[2][0], x[2][1]]]) \
.collect()
# update DSCS
for dscs in addDSCS:
clNum = dscs[0]
dscs0 = DSCS[clNum]
N = dscs[1][0] + dscs0[1][0]
Sum = getVectorSum(dscs[1][1], dscs0[1][1])
SumSQ = getVectorSum(dscs[1][2], dscs0[1][2])
DSCS[clNum] = [clNum, [N, Sum, SumSQ]]
addDSCSList = DSCSrdd.map(lambda x: [x[0], x[2][2]]) \
.collect()
# update DSCSList
for points in addDSCSList:
clNum = points[0]
dscs0 = DSCSList[clNum]
DSCSList[clNum][1] = points[1] + dscs0[1]
DS = DSCS[:n_cluster]
CS = DSCS[n_cluster:]
lenCS = len(CS)
DSrdd = sc.parallelize(DS).cache()
lenDS = DSrdd.count()
DScentroids = DSrdd.map(lambda x: getCentroids(x[1][0], x[1][1])).collect()
DSvariance = DSrdd.map(lambda x: getVariance(x[1][0], x[1][1], x[1][2])).collect()
num_DS = DSrdd \
.map(lambda x: [1, x[1][0]]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
if lenRS <= kn: # no clustering
CSrdd = sc.parallelize(CS).cache()
CScentroids = CSrdd.map(lambda x: getCentroids(x[1][0], x[1][1])).collect()
CSvariance = CSrdd.map(lambda x: getVariance(x[1][0], x[1][1], x[1][2])).collect()
num_CS = CSrdd \
.map(lambda x: [1, x[1][0]]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
intermediate.append([round_id + 1, lenDS, num_DS, lenCS, num_CS, lenRS])
# if len(RS) > kn, kn means for RS to generate new CS
# update RS
# merge old and new CS
else:
dataRDD = sc.parallelize(RS).cache()
random.seed(5)
centroids = [random.choice(dataRDD.collect())[1]]
for i in range(1, kn):
maxDistance = dataRDD \
.map(lambda x: [x[1], getMinDistance(x[1])]) \
.sortBy(lambda x: x[1], False) \
.take(1)
centroids.append(maxDistance[0][0])
cnt = 0
diff = 100
while (diff > 4 and cnt < 40):
cnt += 1
# clusterRDD x: [0, [[1, [1,2,3]], [2, [4,5,6]]]]]
clusterRDD = dataRDD \
.map(lambda x: [getCluster(x[1]), x]) \
.groupByKey() \
.map(lambda x: [x[0], list(x[1])]) \
.cache()
newCenRDD = clusterRDD \
.map(lambda x: [x[0], getCenter(x[1], len(x[1]))]) \
.sortBy(lambda x: x[0]).cache()
diff = newCenRDD \
.map(lambda x: [1, getSquaredD(x[1], centroids[x[0]])]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
# new centroids
centroids = newCenRDD.map(lambda x: x[1]).collect()
# clusterRDD x: [0, [[1, [1,2,3]], [2, [4,5,6]]]]
RS = clusterRDD.filter(lambda x: len(x[1]) == 1) \
.map(lambda x: [x[1][0][0], x[1][0][1]]).collect()
# [N, Sum, SumSQ, pointIndexes]
addCS = clusterRDD.filter(lambda x: len(x[1]) != 1) \
.map(lambda x: [len(x[1]), getSumandSQ(x[1])]) \
.map(lambda x: [x[0], x[1][0], x[1][1], x[1][2]]) \
.collect()
# renumbering
# [clusterNum, [N, Sum, SumSQ, pointIndexes]]
addCS = [[i, cs] for i, cs in enumerate(addCS)]
addCSrdd = sc.parallelize(addCS) \
.cache()
# find pair of old and new CS
# [clusterNum, [N, Sum, SumSQ, pointIndexes]
CSCSpair = addCSrdd \
.map(lambda x: [x[0], getCentroids(x[1][0], x[1][1])]) \
.map(lambda x: [x[0], detectMerge(x[1])]) \
.filter(lambda x: x[1] != []) \
.map(lambda x: [x[1], x[0]]) \
.groupByKey() \
.map(lambda x: [x[0], list(x[1])]) \
.collect()
if CSCSpair == []: # no merge
# [N, Sum, SumSQ]
addCS0 = addCSrdd \
.map(lambda x: [x[0] + lenDS + lenCS, [x[1][0], x[1][1], x[1][2]]]) \
.collect()
# point indexes
addCSList0 = addCSrdd \
.map(lambda x: [x[0] + lenDS + lenCS, x[1][3]]).collect()
else:
# merge CS and CS
merger = list()
for pair in CSCSpair:
p1 = pair[1]
N = 0
sumList = list()
sumSQlist = list()
indexes = list()
for i in p1:
N += addCS[i][1][0]
sumList.append(addCS[i][1][1])
sumSQlist.append(addCS[i][1][2])
indexes += addCS[i][1][3]
merger.append([N, getMatrixSum(sumList), getMatrixSum(sumSQlist), indexes])
# update DSCS, DSCSList by merging newCS
for i, pair in enumerate(CSCSpair):
clNum = pair[0]
dscs = DSCS[clNum]
N = dscs[1][0] + merger[i][0]
Sum = getVectorSum(dscs[1][1], merger[i][1])
SumSQ = getVectorSum(dscs[1][2], merger[i][2])
DSCS[clNum] = [clNum, [N, Sum, SumSQ]]
DSCSList[clNum][1] += merger[i][3]
mergeNum = sc.parallelize(CSCSpair) \
.map(lambda x: x[1]) \
.flatMap(lambda x: x).collect()
addCS = addCSrdd \
.filter(lambda x: x[0] not in mergeNum) \
.map(lambda x: x[1]).collect()
# renumbering
# [clusterNum, [N, Sum, SumSQ, pointIndexes]]
addCS = [[i + lenDS + lenCS, cs] for i, cs in enumerate(addCS)]
addCSrdd = sc.parallelize(addCS) \
.cache()
# [N, Sum, SumSQ]
addCS0 = addCSrdd \
.map(lambda x: [x[0], [x[1][0], x[1][1], x[1][2]]]) \
.collect()
# point indexes
addCSList0 = addCSrdd \
.map(lambda x: [x[0], x[1][3]]).collect()
DSCS += addCS0
DSCSList += addCSList0
CS = DSCS[n_cluster:]
CSrdd = sc.parallelize(CS).cache()
CScentroids = CSrdd.map(lambda x: getCentroids(x[1][0], x[1][1])).collect()
CSvariance = CSrdd.map(lambda x: getVariance(x[1][0], x[1][1], x[1][2])).collect()
lenCS = CSrdd.count()
num_CS = CSrdd \
.map(lambda x: [1, x[1][0]]) \
.reduceByKey(lambda a, b: a + b) \
.map(lambda x: x[1]).take(1)[0]
intermediate.append([round_id + 1, lenDS, num_DS, lenCS, num_CS, len(RS)])
# merge DS CS
DSCSpair = sc.parallelize(DSCS[n_cluster:]) \
.map(lambda x: [x[0], getCentroids(x[1][0], x[1][1])]) \
.map(lambda x: [x[0], detectMerge1(x[1])]) \
.filter(lambda x: x[1] != []) \
.map(lambda x: [x[1], x[0]]) \
.groupByKey() \
.map(lambda x: [x[0], list(x[1])]) \
.collect()
for pair in DSCSpair:
ds = pair[0]
cslist = pair[1]
for cs in cslist:
DSCSList[ds][1] += DSCSList[cs][1]
DSList = DSCSList[:n_cluster]
num_DS = sc.parallelize(DSList) \
.flatMap(lambda x: x[1]) \
.count()
mergeNum = sc.parallelize(DSCSpair) \
.flatMap(lambda x: x[1]).collect()
newCSrdd = sc.parallelize(DSCSList[n_cluster:]) \
.filter(lambda x: x[0] not in mergeNum) \
.cache()
lenCS = newCSrdd.count()
csrdd = newCSrdd.flatMap(lambda x: x[1]).cache()
num_CS = csrdd.count()
CSList = csrdd.collect()
# revise the last intermediate
intermediate[-1] = [round_id + 1, lenDS, num_DS, lenCS, num_CS, len(RS)]
f1 = open(out_file1, mode="w+")
result = dict()
for clusterList in DSList:
clnum = clusterList[0]
points = clusterList[1]
for point in points:
result.update({point: clnum})
for point in CSList:
result.update({point: -1})
for point in RS:
result.update({point[0]: -1})
json.dump(result, f1)
f1.close()
f2 = open(out_file2, mode="w+")
f2.write(
"round_id,nof_cluster_discard,nof_point_discard,nof_cluster_compression,nof_point_compression,nof_point_retained\n")
for item in intermediate:
f2.write(str(item).strip("[]"))
f2.write("\n")
f2.close()
print("Duration: %d" % int(time.time() - start_time))
|
the-stack_106_14685
|
import sys
import cv2
import filters
filters_list = [
"black_white",
"invert",
"blur",
"sketch",
"sketch_with_edge_detection",
"sharpen",
"sepia",
"gaussian_blur",
"emboss",
"image_2d_convolution",
"median_filtering",
"vignette",
"warm",
"cold",
"cartoon",
"moon",
]
if __name__ == "__main__":
if len(sys.argv) not in [3, 4]:
print(
"Usage: python test.py <FILTER> <IMAGE SRC> <IMAGE DESTINATION(OPTIONAL)>"
)
sys.exit(0)
if len(sys.argv) == 3:
_, filter_name, src = sys.argv
dest = None
else:
_, filter_name, src, dest = sys.argv
filter_name = filter_name.lower()
if filter_name not in filters_list:
print("Invalid filter! Possible filters are" + "\n".join(filters_list))
sys.exit(1)
image = cv2.imread(src)
edited_image = getattr(filters, filter_name)(image)
if not dest:
cv2.imwrite("edited.jpg", edited_image)
print("Saved in the current directory as edited.jpg")
else:
cv2.imwrite(dest + "edited.jpg", edited_image)
print(f"Saved at {dest} as edited.jpg")
|
the-stack_106_14687
|
from myhdl import Signal, intbv
from rhea.system import Clock
_clkmgmt_cnt = 0
class ClockManagement(object):
def __init__(self, clockin, reset=None, output_frequencies=None,
vendor='none'):
""" An interface to various vendor clock generators.
        Most FPGAs have a DLL/PLL/MMCM or some kind of primitive to generate
clocks. This interface is used to generically create clocks in a
design. The device specific primitives will be generated based on
the attributes of this interface (object).
This interface is used with the vendor.*.device_clock_mgmt modules.
Ports:
clockin: the input clock, thre frequency attribute is used.
reset: optional
Parameters:
output_frequencies: A list of desired output frequencies.
Example usage:
clockext = Clock(0, frequency=50e6)
resetext = Reset(0, active=0, async=True)
clkmgmt = ClockManagement(clockext, resetext,
output_frequencies=(150e6, 200e6))
clkmgmt.model = brd.fpga
clkmgmt.vendor = vendor.altera
"""
global _clkmgmt_cnt
self.clkmgmt_num = _clkmgmt_cnt
_clkmgmt_cnt += 1
number_of_outputs = len(output_frequencies)
self.vendor = vendor
self.clockin = clockin
self.clockin_out = Signal(bool(0))
self.input_frequency = int(clockin.frequency)
self.reset = reset
self.enable = Signal(bool(0))
self.output_frequencies = tuple(map(int, output_frequencies))
self.clocks = [Clock(0, f) for f in output_frequencies]
self.clocksout = Signal(intbv(0)[number_of_outputs:])
for ii, clk in enumerate(self.clocks):
vars(self)['clock{:02d}'.format(ii)] = clk
self.locked = Signal(bool(0))
|
the-stack_106_14690
|
""" Models (mostly base classes) for the various kinds of renderer
types that Bokeh supports.
"""
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import Int, String, Enum, Instance, List, Dict, Tuple, Include
from ..mixins import LineProps, TextProps
from ..enums import Units, Orientation, RenderLevel
from ..validation.errors import BAD_COLUMN_NAME, MISSING_GLYPH, NO_SOURCE_FOR_GLYPH
from .. import validation
from .sources import DataSource
from .glyphs import Glyph
class Renderer(PlotObject):
""" A base class for renderer types. ``Renderer`` is not
generally useful to instantiate on its own.
"""
class GlyphRenderer(Renderer):
"""
"""
@validation.error(MISSING_GLYPH)
def _check_missing_glyph(self):
if not self.glyph: return str(self)
@validation.error(NO_SOURCE_FOR_GLYPH)
def _check_no_source_for_glyph(self):
if not self.data_source: return str(self)
@validation.error(BAD_COLUMN_NAME)
def _check_bad_column_name(self):
if not self.glyph: return
if not self.data_source: return
missing = set()
for name, item in self.glyph.vm_serialize().items():
if not isinstance(item, dict): continue
if 'field' in item and item['field'] not in self.data_source.column_names:
missing.add(item['field'])
if missing:
return "%s [renderer: %s]" % (", ".join(sorted(missing)), self)
data_source = Instance(DataSource, help="""
Local data source to use when rendering glyphs on the plot.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen
locations when rendering glyphs on the plot. If unset, use the
default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen
locations when rendering glyphs on the plot. If unset, use the
    default y-range.
""")
# TODO: (bev) is this actually used?
units = Enum(Units)
glyph = Instance(Glyph, help="""
The glyph to render, in conjunction with the supplied data source
and ranges.
""")
selection_glyph = Instance(Glyph, help="""
An optional glyph used for selected points.
""")
nonselection_glyph = Instance(Glyph, help="""
An optional glyph used for explicitly non-selected points
(i.e., non-selected when there are other points that are selected,
but not when no points at all are selected.)
""")
level = Enum(RenderLevel, default="glyph", help="""
Specifies the level in which to render the glyph.
""")
# TODO: (bev) This should really go in a separate module
class Legend(Renderer):
""" Render informational legends for a plot.
"""
plot = Instance(".models.plots.Plot", help="""
The Plot to which this Legend is attached.
""")
orientation = Enum(Orientation, help="""
The location where the legend should draw itself.
""")
border_props = Include(LineProps, help="""
The %s for the legend border outline.
""")
label_props = Include(TextProps, help="""
The %s for the legend labels.
""")
label_standoff = Int(15, help="""
The distance (in pixels) to separate the label from its associated glyph.
""")
label_height = Int(20, help="""
The height (in pixels) of the area that legend labels should occupy.
""")
label_width = Int(50, help="""
The width (in pixels) of the area that legend labels should occupy.
""")
glyph_height = Int(20, help="""
The height (in pixels) that the rendered legend glyph should occupy.
""")
glyph_width = Int(20, help="""
The width (in pixels) that the rendered legend glyph should occupy.
""")
legend_padding = Int(10, help="""
Amount of padding around the legend.
""")
legend_spacing = Int(3, help="""
Amount of spacing between legend entries.
""")
legends = List(Tuple(String, List(Instance(GlyphRenderer))), help="""
A list of tuples that maps text labels to the legend to corresponding
renderers that should draw sample representations for those labels.
.. note::
The ``legends`` attribute may also be set from a dict or OrderedDict,
but note that if a dict is used, the order of the legend entries is
unspecified.
""").accepts(
Dict(String, List(Instance(GlyphRenderer))), lambda d: list(d.items())
)
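    # Usage sketch (added; ``p`` stands for the Plot and ``r1``/``r2`` for
    # GlyphRenderer instances already attached to it). Because of the
    # ``accepts`` adapter above, both forms are equivalent:
    #
    #     Legend(plot=p, legends=[("series A", [r1]), ("series B", [r2])])
    #     Legend(plot=p, legends={"series A": [r1], "series B": [r2]})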
class GuideRenderer(Renderer):
""" A base class for all guide renderer types. ``GuideRenderer`` is
not generally useful to instantiate on its own.
"""
plot = Instance(".models.plots.Plot", help="""
The plot to which this guide renderer is attached.
""")
def __init__(self, **kwargs):
super(GuideRenderer, self).__init__(**kwargs)
if self.plot is not None:
if self not in self.plot.renderers:
self.plot.renderers.append(self)
|
the-stack_106_14691
|
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Dict,
Hashable,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import pandas as pd
from . import dtypes, pdcompat
from .alignment import deep_align
from .duck_array_ops import lazy_array_equiv
from .indexes import Index, PandasIndex
from .utils import Frozen, compat_dict_union, dict_equiv, equivalent
from .variable import Variable, as_variable, assert_unique_multiindex_level_names
if TYPE_CHECKING:
from .coordinates import Coordinates
from .dataarray import DataArray
from .dataset import Dataset
DimsLike = Union[Hashable, Sequence[Hashable]]
ArrayLike = Any
VariableLike = Union[
ArrayLike,
Tuple[DimsLike, ArrayLike],
Tuple[DimsLike, ArrayLike, Mapping],
Tuple[DimsLike, ArrayLike, Mapping, Mapping],
]
XarrayValue = Union[DataArray, Variable, VariableLike]
DatasetLike = Union[Dataset, Mapping[Hashable, XarrayValue]]
CoercibleValue = Union[XarrayValue, pd.Series, pd.DataFrame]
CoercibleMapping = Union[Dataset, Mapping[Hashable, CoercibleValue]]
PANDAS_TYPES = (pd.Series, pd.DataFrame, pdcompat.Panel)
_VALID_COMPAT = Frozen(
{
"identical": 0,
"equals": 1,
"broadcast_equals": 2,
"minimal": 3,
"no_conflicts": 4,
"override": 5,
}
)
def broadcast_dimension_size(variables: List[Variable]) -> Dict[Hashable, int]:
"""Extract dimension sizes from a dictionary of variables.
Raises ValueError if any dimensions have different sizes.
"""
dims: Dict[Hashable, int] = {}
for var in variables:
for dim, size in zip(var.dims, var.shape):
if dim in dims and size != dims[dim]:
raise ValueError(f"index {dim!r} not aligned")
dims[dim] = size
return dims
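# Example (added, illustrative; assumes ``import numpy as np``)::
#
#     >>> broadcast_dimension_size(
#     ...     [Variable(("x", "y"), np.zeros((2, 3))), Variable(("y",), np.zeros(3))]
#     ... )
#     {'x': 2, 'y': 3}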
class MergeError(ValueError):
"""Error class for merge failures due to incompatible arguments."""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def unique_variable(
name: Hashable,
variables: List[Variable],
compat: str = "broadcast_equals",
equals: bool = None,
) -> Variable:
"""Return the unique variable from a list of variables or raise MergeError.
Parameters
----------
name : hashable
Name for this variable.
variables : list of Variable
List of Variable objects, all of which go by the same name in different
inputs.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
Type of equality check to use.
equals : None or bool, optional
corresponding to result of compat test
Returns
-------
Variable to use in the result.
Raises
------
MergeError: if any of the variables are not equal.
"""
out = variables[0]
if len(variables) == 1 or compat == "override":
return out
combine_method = None
if compat == "minimal":
compat = "broadcast_equals"
if compat == "broadcast_equals":
dim_lengths = broadcast_dimension_size(variables)
out = out.set_dims(dim_lengths)
if compat == "no_conflicts":
combine_method = "fillna"
if equals is None:
# first check without comparing values i.e. no computes
for var in variables[1:]:
equals = getattr(out, compat)(var, equiv=lazy_array_equiv)
if equals is not True:
break
if equals is None:
# now compare values with minimum number of computes
out = out.compute()
for var in variables[1:]:
equals = getattr(out, compat)(var)
if not equals:
break
if not equals:
raise MergeError(
f"conflicting values for variable {name!r} on objects to be combined. "
"You can skip this check by specifying compat='override'."
)
if combine_method:
for var in variables[1:]:
out = getattr(out, combine_method)(var)
return out
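# Example (added, illustrative)::
#
#     >>> v = Variable(("x",), [1, 2, 3])
#     >>> unique_variable("a", [v, v])        # compatible -> the variable is returned
#     >>> unique_variable("a", [v, v + 1])    # conflicting values -> MergeError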
def _assert_compat_valid(compat):
if compat not in _VALID_COMPAT:
raise ValueError(
"compat={!r} invalid: must be {}".format(compat, set(_VALID_COMPAT))
)
MergeElement = Tuple[Variable, Optional[Index]]
def merge_collected(
grouped: Dict[Hashable, List[MergeElement]],
prioritized: Mapping[Hashable, MergeElement] = None,
compat: str = "minimal",
combine_attrs="override",
) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, Index]]:
"""Merge dicts of variables, while resolving conflicts appropriately.
Parameters
----------
grouped : mapping
prioritized : mapping
compat : str
Type of equality check to use when checking for conflicts.
Returns
-------
Dict with keys taken by the union of keys on list_of_mappings,
and Variable values corresponding to those that should be found on the
merged result.
"""
if prioritized is None:
prioritized = {}
_assert_compat_valid(compat)
merged_vars: Dict[Hashable, Variable] = {}
merged_indexes: Dict[Hashable, Index] = {}
for name, elements_list in grouped.items():
if name in prioritized:
variable, index = prioritized[name]
merged_vars[name] = variable
if index is not None:
merged_indexes[name] = index
else:
indexed_elements = [
(variable, index)
for variable, index in elements_list
if index is not None
]
if indexed_elements:
# TODO(shoyer): consider adjusting this logic. Are we really
# OK throwing away variable without an index in favor of
# indexed variables, without even checking if values match?
variable, index = indexed_elements[0]
for _, other_index in indexed_elements[1:]:
if not index.equals(other_index):
raise MergeError(
f"conflicting values for index {name!r} on objects to be "
f"combined:\nfirst value: {index!r}\nsecond value: {other_index!r}"
)
if compat == "identical":
for other_variable, _ in indexed_elements[1:]:
if not dict_equiv(variable.attrs, other_variable.attrs):
raise MergeError(
"conflicting attribute values on combined "
f"variable {name!r}:\nfirst value: {variable.attrs!r}\nsecond value: {other_variable.attrs!r}"
)
merged_vars[name] = variable
merged_vars[name].attrs = merge_attrs(
[var.attrs for var, _ in indexed_elements],
combine_attrs=combine_attrs,
)
merged_indexes[name] = index
else:
variables = [variable for variable, _ in elements_list]
try:
merged_vars[name] = unique_variable(name, variables, compat)
except MergeError:
if compat != "minimal":
# we need more than "minimal" compatibility (for which
# we drop conflicting coordinates)
raise
if name in merged_vars:
merged_vars[name].attrs = merge_attrs(
[var.attrs for var in variables], combine_attrs=combine_attrs
)
return merged_vars, merged_indexes
def collect_variables_and_indexes(
list_of_mappings: "List[DatasetLike]",
) -> Dict[Hashable, List[MergeElement]]:
"""Collect variables and indexes from list of mappings of xarray objects.
Mappings must either be Dataset objects, or have values of one of the
following types:
- an xarray.Variable
- a tuple `(dims, data[, attrs[, encoding]])` that can be converted in
an xarray.Variable
- or an xarray.DataArray
"""
from .dataarray import DataArray
from .dataset import Dataset
grouped: Dict[Hashable, List[Tuple[Variable, Optional[Index]]]] = {}
def append(name, variable, index):
values = grouped.setdefault(name, [])
values.append((variable, index))
def append_all(variables, indexes):
for name, variable in variables.items():
append(name, variable, indexes.get(name))
for mapping in list_of_mappings:
if isinstance(mapping, Dataset):
append_all(mapping.variables, mapping.xindexes)
continue
for name, variable in mapping.items():
if isinstance(variable, DataArray):
coords = variable._coords.copy() # use private API for speed
indexes = dict(variable.xindexes)
# explicitly overwritten variables should take precedence
coords.pop(name, None)
indexes.pop(name, None)
append_all(coords, indexes)
variable = as_variable(variable, name=name)
if variable.dims == (name,):
variable = variable.to_index_variable()
index = variable._to_xindex()
else:
index = None
append(name, variable, index)
return grouped
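# Illustrative sketch (assumption): the returned mapping groups every candidate
# variable under its name, pairing it with an index when the variable is a
# dimension coordinate, e.g. for inputs [{"a": ("x", [1, 2])}, {"x": [10, 20]}]
# the result is roughly {"a": [(Variable, None)], "x": [(IndexVariable, Index)]}.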
def collect_from_coordinates(
list_of_coords: "List[Coordinates]",
) -> Dict[Hashable, List[MergeElement]]:
"""Collect variables and indexes to be merged from Coordinate objects."""
grouped: Dict[Hashable, List[Tuple[Variable, Optional[Index]]]] = {}
for coords in list_of_coords:
variables = coords.variables
indexes = coords.xindexes
for name, variable in variables.items():
value = grouped.setdefault(name, [])
value.append((variable, indexes.get(name)))
return grouped
def merge_coordinates_without_align(
objects: "List[Coordinates]",
prioritized: Mapping[Hashable, MergeElement] = None,
exclude_dims: AbstractSet = frozenset(),
combine_attrs: str = "override",
) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, Index]]:
"""Merge variables/indexes from coordinates without automatic alignments.
This function is used for merging coordinate from pre-existing xarray
objects.
"""
collected = collect_from_coordinates(objects)
if exclude_dims:
filtered: Dict[Hashable, List[MergeElement]] = {}
for name, elements in collected.items():
new_elements = [
(variable, index)
for variable, index in elements
if exclude_dims.isdisjoint(variable.dims)
]
if new_elements:
filtered[name] = new_elements
else:
filtered = collected
return merge_collected(filtered, prioritized, combine_attrs=combine_attrs)
def determine_coords(
list_of_mappings: Iterable["DatasetLike"],
) -> Tuple[Set[Hashable], Set[Hashable]]:
"""Given a list of dicts with xarray object values, identify coordinates.
Parameters
----------
list_of_mappings : list of dict or list of Dataset
Of the same form as the arguments to expand_variable_dicts.
Returns
-------
coord_names : set of variable names
noncoord_names : set of variable names
    All variables found in the input should appear in either the set of
coordinate or non-coordinate names.
"""
from .dataarray import DataArray
from .dataset import Dataset
coord_names: Set[Hashable] = set()
noncoord_names: Set[Hashable] = set()
for mapping in list_of_mappings:
if isinstance(mapping, Dataset):
coord_names.update(mapping.coords)
noncoord_names.update(mapping.data_vars)
else:
for name, var in mapping.items():
if isinstance(var, DataArray):
coords = set(var._coords) # use private API for speed
# explicitly overwritten variables should take precedence
coords.discard(name)
coord_names.update(coords)
return coord_names, noncoord_names
def coerce_pandas_values(objects: Iterable["CoercibleMapping"]) -> List["DatasetLike"]:
"""Convert pandas values found in a list of labeled objects.
Parameters
----------
objects : list of Dataset or mapping
The mappings may contain any sort of objects coercible to
xarray.Variables as keys, including pandas objects.
Returns
-------
List of Dataset or dictionary objects. Any inputs or values in the inputs
that were pandas objects have been converted into native xarray objects.
"""
from .dataarray import DataArray
from .dataset import Dataset
out = []
for obj in objects:
if isinstance(obj, Dataset):
variables: "DatasetLike" = obj
else:
variables = {}
if isinstance(obj, PANDAS_TYPES):
obj = dict(obj.iteritems())
for k, v in obj.items():
if isinstance(v, PANDAS_TYPES):
v = DataArray(v)
variables[k] = v
out.append(variables)
return out
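# Illustrative note (assumption): a pandas input such as a DataFrame is expanded
# column by column, so something like pd.DataFrame({"a": [1, 2]}) comes back as
# a plain dict {"a": <DataArray>} with each pandas value wrapped in a DataArray,
# while Dataset inputs are passed through unchanged.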
def _get_priority_vars_and_indexes(
objects: List["DatasetLike"], priority_arg: Optional[int], compat: str = "equals"
) -> Dict[Hashable, MergeElement]:
"""Extract the priority variable from a list of mappings.
We need this method because in some cases the priority argument itself
might have conflicting values (e.g., if it is a dict with two DataArray
values with conflicting coordinate values).
Parameters
----------
objects : list of dict-like of Variable
Dictionaries in which to find the priority variables.
priority_arg : int or None
Integer object whose variable should take priority.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts"}, optional
Compatibility checks to use when merging variables.
Returns
-------
A dictionary of variables and associated indexes (if any) to prioritize.
"""
if priority_arg is None:
return {}
collected = collect_variables_and_indexes([objects[priority_arg]])
variables, indexes = merge_collected(collected, compat=compat)
grouped: Dict[Hashable, MergeElement] = {}
for name, variable in variables.items():
grouped[name] = (variable, indexes.get(name))
return grouped
def merge_coords(
objects: Iterable["CoercibleMapping"],
compat: str = "minimal",
join: str = "outer",
priority_arg: Optional[int] = None,
indexes: Optional[Mapping[Hashable, Index]] = None,
fill_value: object = dtypes.NA,
) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, Index]]:
"""Merge coordinate variables.
    See merge_core below for argument descriptions. This works similarly to
    merge_core, except we don't worry about whether variables are coordinates
    or not.
"""
_assert_compat_valid(compat)
coerced = coerce_pandas_values(objects)
aligned = deep_align(
coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value
)
collected = collect_variables_and_indexes(aligned)
prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)
variables, out_indexes = merge_collected(collected, prioritized, compat=compat)
assert_unique_multiindex_level_names(variables)
return variables, out_indexes
def merge_data_and_coords(data, coords, compat="broadcast_equals", join="outer"):
"""Used in Dataset.__init__."""
objects = [data, coords]
explicit_coords = coords.keys()
indexes = dict(_extract_indexes_from_coords(coords))
return merge_core(
objects, compat, join, explicit_coords=explicit_coords, indexes=indexes
)
def _extract_indexes_from_coords(coords):
"""Yields the name & index of valid indexes from a mapping of coords"""
for name, variable in coords.items():
variable = as_variable(variable, name=name)
if variable.dims == (name,):
yield name, variable._to_xindex()
def assert_valid_explicit_coords(variables, dims, explicit_coords):
"""Validate explicit coordinate names/dims.
Raise a MergeError if an explicit coord shares a name with a dimension
but is comprised of arbitrary dimensions.
"""
for coord_name in explicit_coords:
if coord_name in dims and variables[coord_name].dims != (coord_name,):
raise MergeError(
f"coordinate {coord_name} shares a name with a dataset dimension, but is "
"not a 1D variable along that dimension. This is disallowed "
"by the xarray data model."
)
def merge_attrs(variable_attrs, combine_attrs):
"""Combine attributes from different variables according to combine_attrs"""
if not variable_attrs:
# no attributes to merge
return None
if combine_attrs == "drop":
return {}
elif combine_attrs == "override":
return dict(variable_attrs[0])
elif combine_attrs == "no_conflicts":
result = dict(variable_attrs[0])
for attrs in variable_attrs[1:]:
try:
result = compat_dict_union(result, attrs)
except ValueError as e:
raise MergeError(
"combine_attrs='no_conflicts', but some values are not "
f"the same. Merging {str(result)} with {str(attrs)}"
) from e
return result
elif combine_attrs == "drop_conflicts":
result = {}
dropped_keys = set()
for attrs in variable_attrs:
result.update(
{
key: value
for key, value in attrs.items()
if key not in result and key not in dropped_keys
}
)
result = {
key: value
for key, value in result.items()
if key not in attrs or equivalent(attrs[key], value)
}
dropped_keys |= {key for key in attrs if key not in result}
return result
elif combine_attrs == "identical":
result = dict(variable_attrs[0])
for attrs in variable_attrs[1:]:
if not dict_equiv(result, attrs):
raise MergeError(
f"combine_attrs='identical', but attrs differ. First is {str(result)} "
f", other is {str(attrs)}."
)
return result
else:
raise ValueError(f"Unrecognised value for combine_attrs={combine_attrs}")
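# Illustrative sketch (assumption): for attrs [{"a": 1, "b": 2}, {"a": 1, "b": 3}]
# the different modes give
#   "override"       -> {"a": 1, "b": 2}   (first wins, no comparison)
#   "drop"           -> {}
#   "drop_conflicts" -> {"a": 1}           ("b" differs, so it is dropped)
#   "no_conflicts"   -> MergeError         ("b" has conflicting values)
#   "identical"      -> MergeError         (the two dicts are not identical)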
class _MergeResult(NamedTuple):
variables: Dict[Hashable, Variable]
coord_names: Set[Hashable]
dims: Dict[Hashable, int]
indexes: Dict[Hashable, pd.Index]
attrs: Dict[Hashable, Any]
def merge_core(
objects: Iterable["CoercibleMapping"],
compat: str = "broadcast_equals",
join: str = "outer",
combine_attrs: Optional[str] = "override",
priority_arg: Optional[int] = None,
explicit_coords: Optional[Sequence] = None,
indexes: Optional[Mapping[Hashable, Index]] = None,
fill_value: object = dtypes.NA,
) -> _MergeResult:
"""Core logic for merging labeled objects.
This is not public API.
Parameters
----------
objects : list of mapping
        All values must be convertible to labeled arrays.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
Compatibility checks to use when merging variables.
join : {"outer", "inner", "left", "right"}, optional
How to combine objects with different indexes.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"}, optional
How to combine attributes of objects
priority_arg : int, optional
Optional argument in `objects` that takes precedence over the others.
explicit_coords : set, optional
An explicit list of variables from `objects` that are coordinates.
indexes : dict, optional
Dictionary with values given by pandas.Index objects.
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
variables : dict
Dictionary of Variable objects.
coord_names : set
Set of coordinate names.
dims : dict
Dictionary mapping from dimension names to sizes.
attrs : dict
Dictionary of attributes
Raises
------
MergeError if the merge cannot be done successfully.
"""
from .dataarray import DataArray
from .dataset import Dataset, calculate_dimensions
_assert_compat_valid(compat)
coerced = coerce_pandas_values(objects)
aligned = deep_align(
coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value
)
collected = collect_variables_and_indexes(aligned)
prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)
variables, out_indexes = merge_collected(
collected, prioritized, compat=compat, combine_attrs=combine_attrs
)
assert_unique_multiindex_level_names(variables)
dims = calculate_dimensions(variables)
coord_names, noncoord_names = determine_coords(coerced)
if explicit_coords is not None:
assert_valid_explicit_coords(variables, dims, explicit_coords)
coord_names.update(explicit_coords)
for dim, size in dims.items():
if dim in variables:
coord_names.add(dim)
ambiguous_coords = coord_names.intersection(noncoord_names)
if ambiguous_coords:
raise MergeError(
"unable to determine if these variables should be "
f"coordinates or not in the merged result: {ambiguous_coords}"
)
attrs = merge_attrs(
[var.attrs for var in coerced if isinstance(var, (Dataset, DataArray))],
combine_attrs,
)
return _MergeResult(variables, coord_names, dims, out_indexes, attrs)
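# Illustrative sketch (assumption): merging the mappings {"a": ("x", [1, 2])}
# and {"x": [10, 20]} through merge_core yields variables {"a", "x"},
# coord_names {"x"} (because "x" is a dimension backed by a variable) and
# dims {"x": 2}; this is essentially what xr.merge and the Dataset constructor
# build on.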
def merge(
objects: Iterable[Union["DataArray", "CoercibleMapping"]],
compat: str = "no_conflicts",
join: str = "outer",
fill_value: object = dtypes.NA,
combine_attrs: str = "override",
) -> "Dataset":
"""Merge any number of xarray objects into a single Dataset as variables.
Parameters
----------
objects : iterable of Dataset or iterable of DataArray or iterable of dict-like
Merge together all variables from these objects. If any of them are
DataArray objects, they must have a name.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes in objects.
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"}, default: "override"
String indicating how to combine attrs of the objects being merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
Returns
-------
Dataset
Dataset with combined variables from each object.
Examples
--------
>>> x = xr.DataArray(
... [[1.0, 2.0], [3.0, 5.0]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]},
... name="var1",
... )
>>> y = xr.DataArray(
... [[5.0, 6.0], [7.0, 8.0]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 42.0], "lon": [100.0, 150.0]},
... name="var2",
... )
>>> z = xr.DataArray(
... [[0.0, 3.0], [4.0, 9.0]],
... dims=("time", "lon"),
... coords={"time": [30.0, 60.0], "lon": [100.0, 150.0]},
... name="var3",
... )
>>> x
<xarray.DataArray 'var1' (lat: 2, lon: 2)>
array([[1., 2.],
[3., 5.]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> y
<xarray.DataArray 'var2' (lat: 2, lon: 2)>
array([[5., 6.],
[7., 8.]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 150.0
>>> z
<xarray.DataArray 'var3' (time: 2, lon: 2)>
array([[0., 3.],
[4., 9.]])
Coordinates:
* time (time) float64 30.0 60.0
* lon (lon) float64 100.0 150.0
>>> xr.merge([x, y, z])
<xarray.Dataset>
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0 150.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="identical")
<xarray.Dataset>
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0 150.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="equals")
<xarray.Dataset>
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0 150.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="equals", fill_value=-999.0)
<xarray.Dataset>
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0 150.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0
var2 (lat, lon) float64 5.0 -999.0 6.0 -999.0 ... -999.0 7.0 -999.0 8.0
var3 (time, lon) float64 0.0 -999.0 3.0 4.0 -999.0 9.0
>>> xr.merge([x, y, z], join="override")
<xarray.Dataset>
Dimensions: (lat: 2, lon: 2, time: 2)
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0 2.0 3.0 5.0
var2 (lat, lon) float64 5.0 6.0 7.0 8.0
var3 (time, lon) float64 0.0 3.0 4.0 9.0
>>> xr.merge([x, y, z], join="inner")
<xarray.Dataset>
Dimensions: (lat: 1, lon: 1, time: 2)
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0
var2 (lat, lon) float64 5.0
var3 (time, lon) float64 0.0 4.0
>>> xr.merge([x, y, z], compat="identical", join="inner")
<xarray.Dataset>
Dimensions: (lat: 1, lon: 1, time: 2)
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0
var2 (lat, lon) float64 5.0
var3 (time, lon) float64 0.0 4.0
>>> xr.merge([x, y, z], compat="broadcast_equals", join="outer")
<xarray.Dataset>
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0 150.0
* time (time) float64 30.0 60.0
Data variables:
var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], join="exact")
Traceback (most recent call last):
...
ValueError: indexes along dimension 'lat' are not equal
Raises
------
xarray.MergeError
If any variables with the same name have conflicting values.
See also
--------
concat
"""
from .dataarray import DataArray
from .dataset import Dataset
dict_like_objects = []
for obj in objects:
if not isinstance(obj, (DataArray, Dataset, dict)):
raise TypeError(
"objects must be an iterable containing only "
"Dataset(s), DataArray(s), and dictionaries."
)
obj = obj.to_dataset(promote_attrs=True) if isinstance(obj, DataArray) else obj
dict_like_objects.append(obj)
merge_result = merge_core(
dict_like_objects,
compat,
join,
combine_attrs=combine_attrs,
fill_value=fill_value,
)
return Dataset._construct_direct(**merge_result._asdict())
def dataset_merge_method(
dataset: "Dataset",
other: "CoercibleMapping",
overwrite_vars: Union[Hashable, Iterable[Hashable]],
compat: str,
join: str,
fill_value: Any,
combine_attrs: str,
) -> _MergeResult:
"""Guts of the Dataset.merge method."""
# we are locked into supporting overwrite_vars for the Dataset.merge
    # method due to backwards compatibility
# TODO: consider deprecating it?
if isinstance(overwrite_vars, Iterable) and not isinstance(overwrite_vars, str):
overwrite_vars = set(overwrite_vars)
else:
overwrite_vars = {overwrite_vars}
if not overwrite_vars:
objs = [dataset, other]
priority_arg = None
elif overwrite_vars == set(other):
objs = [dataset, other]
priority_arg = 1
else:
other_overwrite: Dict[Hashable, CoercibleValue] = {}
other_no_overwrite: Dict[Hashable, CoercibleValue] = {}
for k, v in other.items():
if k in overwrite_vars:
other_overwrite[k] = v
else:
other_no_overwrite[k] = v
objs = [dataset, other_no_overwrite, other_overwrite]
priority_arg = 2
return merge_core(
objs,
compat,
join,
priority_arg=priority_arg,
fill_value=fill_value,
combine_attrs=combine_attrs,
)
def dataset_update_method(
dataset: "Dataset", other: "CoercibleMapping"
) -> _MergeResult:
"""Guts of the Dataset.update method.
    This drops duplicated coordinates from `other` if `other` is not an
`xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068,
GH2180).
"""
from .dataarray import DataArray
from .dataset import Dataset
if not isinstance(other, Dataset):
other = dict(other)
for key, value in other.items():
if isinstance(value, DataArray):
# drop conflicting coordinates
coord_names = [
c
for c in value.coords
if c not in value.dims and c in dataset.coords
]
if coord_names:
other[key] = value.drop_vars(coord_names)
# use ds.coords and not ds.indexes, else str coords are cast to object
# TODO: benbovy - flexible indexes: fix this (it only works with pandas indexes)
indexes = {key: PandasIndex(dataset.coords[key]) for key in dataset.xindexes.keys()}
return merge_core(
[dataset, other],
priority_arg=1,
indexes=indexes, # type: ignore
combine_attrs="override",
)
|
the-stack_106_14693
|
from datetime import timedelta
from azure.servicebus._common.utils import utc_now
from azure.servicebus import ServiceBusReceivedMessage
class MockReceiver:
def __init__(self):
self._running = True
def renew_message_lock(self, message):
if message._exception_on_renew_lock:
raise Exception("Generated exception via MockReceivedMessage exception_on_renew_lock")
if not message._prevent_renew_lock:
message.locked_until_utc = message.locked_until_utc + timedelta(seconds=message._lock_duration)
class MockReceivedMessage(ServiceBusReceivedMessage):
def __init__(self, prevent_renew_lock=False, exception_on_renew_lock=False):
self._lock_duration = 2
self._received_timestamp_utc = utc_now()
self.locked_until_utc = self._received_timestamp_utc + timedelta(seconds=self._lock_duration)
self._settled = False
self._receiver = MockReceiver()
self._prevent_renew_lock = prevent_renew_lock
self._exception_on_renew_lock = exception_on_renew_lock
@property
def _lock_expired(self):
if self.locked_until_utc and self.locked_until_utc <= utc_now():
return True
return False
@property
def locked_until_utc(self):
return self._locked_until_utc
@locked_until_utc.setter
def locked_until_utc(self, value):
self._locked_until_utc = value
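# Illustrative usage sketch (assumption, not part of the original test helpers):
#     msg = MockReceivedMessage()
#     msg._receiver.renew_message_lock(msg)   # pushes locked_until_utc forward
#     assert not msg._lock_expired
#     msg = MockReceivedMessage(exception_on_renew_lock=True)
#     msg._receiver.renew_message_lock(msg)   # raises the injected exception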
|
the-stack_106_14695
|
import pickle
import re
from tqdm import tqdm
import json
import matplotlib.patches as mpatches
import seaborn as sns
from pycocotools.coco import COCO
import numpy as np
import matplotlib.pyplot as plt
import string
import sys
import os
from scipy.spatial import distance
sys.path.append('..')
from coco_mask.datasets import *
from mals.inference_debias import id2object
from utils import bog_task_to_attribute, bog_attribute_to_task, bog_mals
val_dataset = CoCoDataset(None, version='val')
categories = val_dataset.categories
labels_to_names = val_dataset.labels_to_names
man_words = ['man', 'boy', 'gentleman', 'male', 'men']
woman_words = ['woman', 'girl', 'lady', 'female', 'women']
def caption_to_array(captions, categories):
this_categories = []
gender = None
# iterate through caption and append to this_cat
for caption in captions:
caption = caption.lower().translate(str.maketrans('', '', string.punctuation))
for i in range(len(categories)):
if labels_to_names[categories[i]].replace(' ', '') in caption.replace(' ', ''):
if labels_to_names[categories[i]] == 'car':
this_caption = caption.replace(' ', '')
is_car = False
while 'car' in this_caption:
if this_caption[this_caption.find('car'):this_caption.find('car')+len('carrot')] == 'carrot':
this_caption = this_caption[this_caption.find('car')+2:]
else:
is_car = True
break
if not is_car:
continue
if labels_to_names[categories[i]] == 'dog':
this_caption = caption.replace(' ', '')
is_dog = False
while 'dog' in this_caption:
if this_caption[max(0, this_caption.find('dog')-3):this_caption.find('dog')+3] == 'hotdog':
this_caption = this_caption[this_caption.find('dog')+2:]
else:
is_dog = True
break
if not is_dog:
continue
this_categories.append(i)
if gender == -1:
continue
for man_word in man_words:
if man_word in caption.split():
if gender == 0:
gender = -1
else:
gender = 1
for woman_word in woman_words:
if woman_word in caption.split():
if gender == 1:
gender = -1
else:
gender = 0
if gender == -1:
gender = None
return list(set(this_categories)), gender
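# Illustrative note (assumption): for captions like
# ["a man riding a skateboard", "a person on a skateboard"] the returned gender
# is 1 (a man word was seen and no woman word), and the category list contains
# the index of "skateboard" if it appears in `categories`; captions mentioning
# both man and woman words come back with gender None.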
########### generate bog_tilde_train for the captions #######
if os.path.isfile('coco_captions_bog_tilde_train.pkl'):
bog_tilde_train, num_attributes_train, num_images_train = pickle.load(open('coco_captions_bog_tilde_train.pkl', 'rb'))
else:
train_dataset = CoCoDataset(None, version='train')
categories = train_dataset.categories
labels_to_names = train_dataset.labels_to_names
version = 'train'
coco = COCO('/n/fs/visualai-scr/Data/Coco/2014data/annotations/captions_{}2014.json'.format(version))
gender_data = pickle.load(open('/n/fs/visualai-scr/Data/Coco/2014data/bias_splits/{}.data'.format(version), 'rb'))
gender_info = {int(chunk['img'][10+len(version):22+len(version)]): chunk['annotation'][0] for chunk in gender_data}
num_labels = len(categories)
bog_tilde_train = np.zeros((num_labels, 2))
num_attributes_train = [0, 0]
for image_id in train_dataset.image_ids:
if int(image_id) not in gender_info.keys():
continue
gender = gender_info[image_id]
annIds = coco.getAnnIds(imgIds=image_id)
anns = coco.loadAnns(annIds)
captions = [chunk['caption'] for chunk in anns]
gt_categories, gt_gender = caption_to_array(captions, categories)
if gt_gender is None:
continue
num_attributes_train[gt_gender] += 1
for gt_cat in gt_categories:
if gt_cat == 0:
continue
bog_tilde_train[gt_cat][gt_gender] += 1
pickle.dump([bog_tilde_train, num_attributes_train, sum(num_attributes_train)], open('coco_captions_bog_tilde_train.pkl', 'wb'))
# getting an image caption given an id
version = 'val'
all_image_ids = {}
imageid_to_captions = {}
coco = COCO('/n/fs/visualai-scr/Data/Coco/2014data/annotations/captions_{}2014.json'.format(version))
gender_data = pickle.load(open('/n/fs/visualai-scr/Data/Coco/2014data/bias_splits/{}.data'.format(version), 'rb'))
gender_info = {int(chunk['img'][10+len(version):22+len(version)]): chunk['annotation'][0] for chunk in gender_data}
women_snowboard = ['baseline_ft', 'equalizer', 'confident', 'upweight', 'balanced', 'confusion']
model_names = ['baseline_ft', 'equalizer']
for ind, model_name in enumerate(model_names):
print("Model name: {}".format(model_name))
if model_name in women_snowboard:
with open('final_captions_eccv2018/{}.json'.format(model_name)) as f:
results = json.load(f)
else:
results = pickle.load(open('ImageCaptioning.pytorch/vis/{}_results.pkl'.format(model_name), 'rb'))
num_labels = len(categories)
bog_tilde = np.zeros((num_labels, 2))
bog_gt_g = np.zeros((num_labels, 2))
bog_gt_o = np.zeros((num_labels, 2))
bog_preds = np.zeros((num_labels, 2))
# for outcome divergence between genders measure
gender_accs = [[0, 0, 0], [0, 0, 0]]
actual_nums_of_gender = [0, 0]
predict_nums_of_gender = [0, 0]
num_samples = 0
for i in tqdm(range(len(results))):
# figure out labels and stuff based on captions
if model_name in women_snowboard:
eval_id = results[i]['image_id']
else:
eval_id = results[i]['file_name'].split('/')[-1].split('_')[-1][:-4]
if int(eval_id) not in gender_info.keys():
continue
gender = gender_info[int(eval_id)]
annIds = coco.getAnnIds(imgIds=int(eval_id))
anns = coco.loadAnns(annIds)
captions = [chunk['caption'] for chunk in anns]
gt_categories, gt_gender = caption_to_array(captions, categories)
if gt_gender is None:
continue
pred_caption = [results[i]['caption']]
pred_categories, pred_gender = caption_to_array(pred_caption, categories)
if ind == 0 and gt_gender != pred_gender:
all_image_ids[eval_id] = set(pred_categories).intersection(set(gt_categories))
imageid_to_captions[eval_id] = [pred_caption, None]
if ind == 1:
if eval_id in all_image_ids.keys():
if gt_gender != pred_gender:
del all_image_ids[eval_id]
else:
imageid_to_captions[eval_id][1] = pred_caption
wrong_cats = set(pred_categories).symmetric_difference(set(gt_categories))
all_image_ids[eval_id] = all_image_ids[eval_id].intersection(wrong_cats)
if pred_gender is None: # if not predict gender, skip
gender_accs[gt_gender][2] += 1
continue
if gt_gender != pred_gender and pred_gender is not None:
gender_accs[gt_gender][1] += 1
else:
gender_accs[gt_gender][0] += 1
num_samples += 1
actual_nums_of_gender[gt_gender] += 1
predict_nums_of_gender[pred_gender] += 1
for gt_cat in gt_categories:
if gt_cat == 0:
continue
bog_tilde[gt_cat][gt_gender] += 1
bog_gt_o[gt_cat][pred_gender] += 1
for pred_cat in pred_categories:
if pred_cat == 0:
continue
bog_gt_g[pred_cat][gt_gender] += 1
bog_preds[pred_cat][pred_gender] += 1
print("Numbers of gender, ACTUAL: {0}, PRED: {1}".format(actual_nums_of_gender, predict_nums_of_gender))
num_attributes = actual_nums_of_gender
diff_ta, t_to_a_value = bog_task_to_attribute(bog_tilde, bog_gt_o, num_attributes=num_attributes, disaggregate=True, num_attributes_train=num_attributes_train, bog_tilde_train=bog_tilde_train)
diff_at, a_to_t_value = bog_attribute_to_task(bog_tilde, bog_gt_g, num_attributes=num_attributes, disaggregate=True, num_attributes_train=num_attributes_train, bog_tilde_train=bog_tilde_train)
bog_mals(bog_tilde_train, bog_preds)
if ind == 0:
base = [diff_ta, diff_at, bog_tilde, bog_gt_o, bog_gt_g]
elif ind == 1:
equalize = [diff_ta, diff_at, bog_tilde, bog_gt_o, bog_gt_g]
# this is gender neutral version from paper
#print(gender_accs)
#gender_accs[0] = 100*(gender_accs[0] / np.sum(gender_accs[0][:2]))
#gender_accs[1] = 100*(gender_accs[1] / np.sum(gender_accs[1][:2]))
#print("Outcome divergence: {}".format(distance.jensenshannon(gender_accs[0][:2], gender_accs[1][:2])))
#gender_accs[0] = 100*(gender_accs[0] / np.sum(gender_accs[0]))
#gender_accs[1] = 100*(gender_accs[1] / np.sum(gender_accs[1]))
#print("Outcome divergence: {}".format(distance.jensenshannon(gender_accs[0], gender_accs[1])))
print("---")
print("--------- Comparing between baseline and equalizer --------")
for i in range(len(diff_at)):
for j in range(len(diff_at[0])):
if base[0][i][j] > equalize[0][i][j] and base[1][i][j] < equalize[1][i][j] and equalize[1][i][j] > 0: # t->a goes down, a->t goes up
print("{0} ({9}) - {1}\nbase T->A: {2}, A->T: {3}\nequalizer T->A: {4}, A->T: {5}\nnumbers for A->T: {6} to base: {7}, equalizer: {8}\n\n".format(labels_to_names[categories[i]], 'woman' if j == 0 else 'man', base[0][i][j], base[1][i][j], equalize[0][i][j], equalize[1][i][j], base[2][i], base[4][i], equalize[4][i], i))
###### qualitative image examples ######
for image_id in all_image_ids.keys():
if len(all_image_ids[image_id]) > 0:
print("{0}:\nbaseline: {1}\nequalizer: {2}\n\n".format(image_id, imageid_to_captions[image_id][0], imageid_to_captions[image_id][1]))
|
the-stack_106_14697
|
import os
import pickle
import numpy as np
from relic.histalp_runs import multi_parameter_run
from relic.preprocessing import configure, GLCDICT
from relic.length_observations import get_length_observations
import logging
log = logging.getLogger(__name__)
if __name__ == '__main__':
# Local working directory (where OGGM will write its output)
WORKING_DIR = os.environ.get('OGGM_WORKDIR')
myglcs = list(GLCDICT.keys())
while True:
try:
gdirs = configure(WORKING_DIR, myglcs, baselineclimate='HISTALP')
break
except:
log.warning('fiona error')
pass
# read length data observations
meta, obs = get_length_observations(myglcs)
pcpsf = np.arange(0.5, 4.1, 0.25)
glenas = np.arange(1.0, 4.1, 0.5)
mbbias = np.append(np.arange(-1400, 1100, 200), np.array([-100, 100]))
pdict = {'prcp_scaling_factor': pcpsf,
'glena_factor': glenas,
'mbbias': mbbias}
jobid = int(os.environ.get('JOBID'))
rval = multi_parameter_run(pdict, gdirs, meta, obs, runid=jobid)
out = os.path.join('/home/users/mdusch/length_change/finito/all/out', 'out_%d.p' % jobid)
pickle.dump(rval, open(out, 'wb'))
log.warning('finito')
|
the-stack_106_14700
|
import torch.utils.data as data
import os
import sys
import random
import numpy as np
import cv2
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(root, source):
if not os.path.exists(source):
print("Setting file %s for UCF101 dataset doesn't exist." % (source))
sys.exit()
else:
clips = []
with open(source) as split_f:
data = split_f.readlines()
for line in data:
line_info = line.split()
clip_path = os.path.join(root, line_info[0])
duration = int(line_info[1])
target = int(line_info[2])
item = (clip_path, duration, target)
clips.append(item)
return clips
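# Illustrative note (assumption): each line of the setting file is expected to
# look like "<relative_clip_dir> <num_frames> <class_index>", e.g. a hypothetical
#     ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01 120 0
# which make_dataset turns into (os.path.join(root, clip_dir), 120, 0).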
def ReadSegmentRGB(path, offsets, new_height, new_width, new_length, is_color, name_pattern, duration):
if is_color:
cv_read_flag = cv2.IMREAD_COLOR # > 0
else:
cv_read_flag = cv2.IMREAD_GRAYSCALE # = 0
interpolation = cv2.INTER_LINEAR
fp = open("val_video_list.txt","a")
fp.write("++++++++++++++++++++++++\n"+path+"\n")
sampled_list = []
for offset_id in range(len(offsets)):
offset = offsets[offset_id]
for length_id in range(1, new_length+1):
loaded_frame_index = length_id + offset
moded_loaded_frame_index = loaded_frame_index % (duration + 1)
if moded_loaded_frame_index == 0:
moded_loaded_frame_index = (duration + 1)
frame_name = name_pattern % (moded_loaded_frame_index)
frame_path = path + "/" + frame_name
#if offset==0:
print(offset,length_id,loaded_frame_index, moded_loaded_frame_index, frame_name)
print("frame_path={}".format(frame_path))
cv_img_origin = cv2.imread(frame_path, cv_read_flag)
fp.write(frame_path)
if cv_img_origin is None:
print("Could not load file %s" % (frame_path))
input("debugging not enough frame images")
sys.exit()
# TODO: error handling here
if new_width > 0 and new_height > 0:
# use OpenCV3, use OpenCV2.4.13 may have error
cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation)
else:
cv_img = cv_img_origin
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
sampled_list.append(cv_img)
#print("sampled_list.len = {}, shape = {}".format(len(sampled_list),sampled_list[0].shape))
clip_input = np.concatenate(sampled_list, axis=2)
fp.close()
return clip_input
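# Note (assumption): with 3 color channels per frame, the returned clip_input
# stacks all sampled frames along the channel axis, so its shape is
# (H, W, 3 * len(offsets) * new_length), where H/W are new_height/new_width
# when resizing is enabled and the original frame size otherwise.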
def ReadSegmentFlow(path, offsets, new_height, new_width, new_length, is_color, name_pattern,duration):
if is_color:
cv_read_flag = cv2.IMREAD_COLOR # > 0
else:
cv_read_flag = cv2.IMREAD_GRAYSCALE # = 0
interpolation = cv2.INTER_LINEAR
sampled_list = []
for offset_id in range(len(offsets)):
offset = offsets[offset_id]
for length_id in range(1, new_length+1):
loaded_frame_index = length_id + offset
moded_loaded_frame_index = loaded_frame_index % (duration + 1)
if moded_loaded_frame_index == 0:
moded_loaded_frame_index = (duration + 1)
frame_name_x = name_pattern % ("x", moded_loaded_frame_index)
frame_path_x = path + "/" + frame_name_x
cv_img_origin_x = cv2.imread(frame_path_x, cv_read_flag)
frame_name_y = name_pattern % ("y", moded_loaded_frame_index)
frame_path_y = path + "/" + frame_name_y
cv_img_origin_y = cv2.imread(frame_path_y, cv_read_flag)
if cv_img_origin_x is None or cv_img_origin_y is None:
print("Could not load file %s or %s" % (frame_path_x, frame_path_y))
sys.exit()
# TODO: error handling here
if new_width > 0 and new_height > 0:
cv_img_x = cv2.resize(cv_img_origin_x, (new_width, new_height), interpolation)
cv_img_y = cv2.resize(cv_img_origin_y, (new_width, new_height), interpolation)
else:
cv_img_x = cv_img_origin_x
cv_img_y = cv_img_origin_y
sampled_list.append(np.expand_dims(cv_img_x, 2))
sampled_list.append(np.expand_dims(cv_img_y, 2))
clip_input = np.concatenate(sampled_list, axis=2)
return clip_input
class videoreloc(data.Dataset):
def __init__(self,
root,
source,
phase,
modality,
name_pattern=None,
is_color=True,
num_segments=1,
new_length=1,
new_width=0,
new_height=0,
transform=None,
target_transform=None,
video_transform=None,
ensemble_training = False):
classes, class_to_idx = find_classes(root)
clips = make_dataset(root, source)
if len(clips) == 0:
raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n"
"Check your data directory."))
self.root = root
self.source = source
self.phase = phase
self.modality = modality
self.classes = classes
self.class_to_idx = class_to_idx
self.clips = clips
self.ensemble_training = ensemble_training
if name_pattern:
self.name_pattern = name_pattern
else:
if self.modality == "rgb" or self.modality == "CNN" :
self.name_pattern = "%04d.jpg" #"img_%05d.jpg"
elif self.modality == "flow":
self.name_pattern = "flow_%s_%05d.jpg"
self.is_color = is_color
self.num_segments = num_segments
self.new_length = new_length
self.new_width = new_width
self.new_height = new_height
self.transform = transform
self.target_transform = target_transform
self.video_transform = video_transform
def __getitem__(self, index):
path, duration, target = self.clips[index]
print("index = {}, path = {}, duration = {}, target = {}".format(index, path, duration, target))
duration = duration - 1
average_duration = int(duration / self.num_segments)
average_part_length = int(np.floor((duration-self.new_length) / self.num_segments))
offsets = []
for seg_id in range(self.num_segments):
if self.phase == "train":
if average_duration >= self.new_length:
                    # No +1 because randint(a, b) returns a random integer N such that a <= N <= b.
                    offset = random.randint(0, average_duration - self.new_length)
offsets.append(offset + seg_id * average_duration)
elif duration >= self.new_length:
offset = random.randint(0, average_part_length)
offsets.append(seg_id*average_part_length + offset)
else:
increase = random.randint(0, duration)
offsets.append(0 + seg_id * increase)
elif self.phase == "val":
if average_duration >= self.new_length:
offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration))
elif duration >= self.new_length:
offsets.append(int((seg_id*average_part_length + (seg_id + 1) * average_part_length)/2))
else:
increase = int(duration / self.num_segments)
offsets.append(0 + seg_id * increase)
else:
print("Only phase train and val are supported.")
#print("num_segments = {},new_length = {} ,average_duration = {}, duration = {}, offsets = {}".format(self.num_segments,self.new_length, average_duration , average_part_length, offsets ))
if self.modality == "rgb" or self.modality == "CNN":
clip_input = ReadSegmentRGB(path,
offsets,
self.new_height,
self.new_width,
self.new_length,
self.is_color,
self.name_pattern,
duration
)
elif self.modality == "flow":
clip_input = ReadSegmentFlow(path,
offsets,
self.new_height,
self.new_width,
self.new_length,
self.is_color,
self.name_pattern,
duration
)
else:
print("No such modality %s" % (self.modality))
if self.transform is not None:
clip_input = self.transform(clip_input)
if self.target_transform is not None:
target = self.target_transform(target)
if self.video_transform is not None:
clip_input = self.video_transform(clip_input)
print("clip_input.shape = {}, target={}".format(clip_input.shape,target))
#input('dbg dataloaded')
return clip_input, target
def __len__(self):
return len(self.clips)
|
the-stack_106_14701
|
from datetime import datetime
dados = dict()  # worker record
dados['Nome'] = str(input('Nome : '))  # name
nasc = int(input('Ano de nascimento : '))  # year of birth
dados['idade'] = datetime.now().year - nasc  # age in years
dados['CTPS'] = int(input('Carteira de trabalho : (0) não tem! '))  # work permit number (0 = none)
if dados['CTPS'] != 0:
    dados['Contratação'] = int(input('Ano de contratação : '))  # year of hiring
    dados['Salário'] = float(input('Salário : R$ '))  # salary
    # retirement age = current age + years still needed to complete 35 years of service
    dados['Aposentadoria'] = dados['idade'] + ((dados['Contratação'] + 35) - datetime.now().year)
print('-=' * 30)
for k, v in dados.items():
    print(f' - {k} tem o valor {v} ')  # i.e. "- <key> has the value <value>"
|
the-stack_106_14702
|
import requests
import rdflib
import click
from .utils import *
# Examples
# curl -H 'Accept: application/ld+json' 'https://scigraph.springernature.com/pub.10.1007/978-1-62703-715-0_2'
class SciGraphClient(object):
"""
Simple class for accessing SciGraph entities
"""
url = 'https://scigraph.springernature.com/'
_default_headers = {'Accept': 'application/ld+json'}
def __init__(self, *args, **kwargs):
allowed_keys = ['verbose']
self.__dict__.update((k, False) for k in allowed_keys)
self.__dict__.update(
(k, v) for k, v in kwargs.items() if k in allowed_keys)
self.uri = None
self.data = None # the raw data coming back from SciGraph
self.rdfgraph = rdflib.ConjunctiveGraph()
@property
def triples_count(self):
"""
        Number of triples currently held in the local RDF graph
"""
return len(self.rdfgraph)
def get_entity_from_uri(self, uri, rdf_format):
"""
Simply dereference a scigraph URI
"""
data = self._do_request(uri, rdf_format)
if data:
self.uri = uri
self.data = data.text
self._build_rdf_object(rdf_format)
return self.data
else:
return None
def get_entity_from_doi(self, doi, rdf_format):
"""
Simply dereference a scigraph URI based on a DOI
"""
uri = self.url + "pub." + doi
data = self._do_request(uri, rdf_format)
if data:
self.uri = uri
self.data = data.text
return self.data
else:
return None
def _do_request(self, uri, rdf_format):
"""
Request data from back end service
"""
if not rdf_format or rdf_format == "json-ld" or rdf_format == "jsonld":
headers = {'Accept': 'application/ld+json'}
elif rdf_format == "nt":
headers = {'Accept': 'application/n-triples'}
elif rdf_format == "turtle":
headers = {'Accept': 'text/turtle'}
elif rdf_format == "xml":
            headers = {'Accept': 'application/rdf+xml'}
        else:
            # default to JSON-LD so that `headers` is always defined, even for
            # unrecognized format strings
            headers = self._default_headers
if self.verbose: printDebug(f">> Requesting format '{rdf_format}' from URI: {uri}", dim=True)
response = requests.get(uri, headers=headers)
if response.status_code == 404:
return False
else:
if response.url.startswith("https://"):
# https ok for retrieval, but rdf payload always uses http uris
response.url = response.url.replace("https://", "http://")
if self.verbose: printDebug(f">> Found: {response.url}\n----------------", dim=True)
return response
def _build_rdf_object(self, rdf_format):
"""Use rdflib to create a graph using the scigraph data returned
"""
if rdf_format == "jsonld":
rdf_format = "json-ld" # fix for rdflib
if self.data:
self.rdfgraph.parse(data=self.data, format=rdf_format)
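# Illustrative usage sketch (assumption, not part of the original module):
#     client = SciGraphClient(verbose=True)
#     data = client.get_entity_from_uri(
#         'https://scigraph.springernature.com/pub.10.1007/978-1-62703-715-0_2',
#         'jsonld')
#     if data:
#         print(client.triples_count)   # triples parsed into the local rdflib graph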
|
the-stack_106_14703
|
import base64
import gzip
import importlib
import io
import logging
import secrets
import urllib
import zlib
from . import exceptions
from . import packet
from . import payload
from . import socket
default_logger = logging.getLogger('engineio.server')
class Server(object):
"""An Engine.IO server.
This class implements a fully compliant Engine.IO web server with support
for websocket and long-polling transports.
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are "threading",
"eventlet", "gevent" and "gevent_uwsgi". If this
argument is not given, "eventlet" is tried first, then
"gevent_uwsgi", then "gevent", and finally "threading".
The first async mode that has all its dependencies
installed is the one that is chosen.
:param ping_interval: The interval in seconds at which the server pings
the client. The default is 25 seconds. For advanced
control, a two element tuple can be given, where
the first number is the ping interval and the second
is a grace period added by the server.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default
is 20 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 1,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
    :param http_compression: Whether to compress packets when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: If set to a string, it is the name of the HTTP cookie the
                   server sends back to the client containing the client
session id. If set to a dictionary, the ``'name'`` key
contains the cookie name and other keys define cookie
attributes, where the value of each attribute can be a
string, a callable with no arguments, or a boolean. If set
to ``None`` (the default), a cookie is not sent to the
client.
:param cors_allowed_origins: Origin or list of origins that are allowed to
connect to this server. Only the same origin
is allowed by default. Set this argument to
``'*'`` to allow all origins, or to ``[]`` to
disable CORS handling.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default
is ``True``.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param async_handlers: If set to ``True``, run message event handlers in
non-blocking threads. To run handlers synchronously,
set to ``False``. The default is ``True``.
:param monitor_clients: If set to ``True``, a background task will ensure
inactive clients are closed. Set to ``False`` to
disable the monitoring task (not recommended). The
default is ``True``.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. Defaults to
``['polling', 'websocket']``.
:param kwargs: Reserved for future extensions, any additional parameters
given as keyword arguments will be silently ignored.
"""
compression_methods = ['gzip', 'deflate']
event_names = ['connect', 'disconnect', 'message']
valid_transports = ['polling', 'websocket']
_default_monitor_clients = True
sequence_number = 0
def __init__(self, async_mode=None, ping_interval=25, ping_timeout=20,
max_http_buffer_size=1000000, allow_upgrades=True,
http_compression=True, compression_threshold=1024,
cookie=None, cors_allowed_origins=None,
cors_credentials=True, logger=False, json=None,
async_handlers=True, monitor_clients=None, transports=None,
**kwargs):
self.ping_timeout = ping_timeout
if isinstance(ping_interval, tuple):
self.ping_interval = ping_interval[0]
self.ping_interval_grace_period = ping_interval[1]
else:
self.ping_interval = ping_interval
self.ping_interval_grace_period = 0
self.max_http_buffer_size = max_http_buffer_size
self.allow_upgrades = allow_upgrades
self.http_compression = http_compression
self.compression_threshold = compression_threshold
self.cookie = cookie
self.cors_allowed_origins = cors_allowed_origins
self.cors_credentials = cors_credentials
self.async_handlers = async_handlers
self.sockets = {}
self.handlers = {}
self.log_message_keys = set()
self.start_service_task = monitor_clients \
if monitor_clients is not None else self._default_monitor_clients
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
modes = self.async_modes()
if async_mode is not None:
modes = [async_mode] if async_mode in modes else []
self._async = None
self.async_mode = None
for mode in modes:
try:
self._async = importlib.import_module(
'engineio.async_drivers.' + mode)._async
asyncio_based = self._async['asyncio'] \
if 'asyncio' in self._async else False
if asyncio_based != self.is_asyncio_based():
continue # pragma: no cover
self.async_mode = mode
break
except ImportError:
pass
if self.async_mode is None:
raise ValueError('Invalid async_mode specified')
if self.is_asyncio_based() and \
('asyncio' not in self._async or not
self._async['asyncio']): # pragma: no cover
raise ValueError('The selected async_mode is not asyncio '
'compatible')
if not self.is_asyncio_based() and 'asyncio' in self._async and \
self._async['asyncio']: # pragma: no cover
raise ValueError('The selected async_mode requires asyncio and '
'must use the AsyncServer class')
if transports is not None:
if isinstance(transports, str):
transports = [transports]
transports = [transport for transport in transports
if transport in self.valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or self.valid_transports
self.logger.info('Server initialized for %s.', self.async_mode)
def is_asyncio_based(self):
return False
def async_modes(self):
return ['eventlet', 'gevent_uwsgi', 'gevent', 'threading']
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler(sid, environ):
print('Connection request')
if environ['REMOTE_ADDR'] in blacklisted:
return False # reject
# as a method:
def message_handler(sid, msg):
print('Received message: ', msg)
eio.send(sid, 'response')
eio.on('message', message_handler)
The handler function receives the ``sid`` (session ID) for the
client as first argument. The ``'connect'`` event handler receives the
WSGI environment as a second argument, and can return ``False`` to
reject the connection. The ``'message'`` handler receives the message
payload as a second argument. The ``'disconnect'`` handler does not
take a second argument.
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def send(self, sid, data):
"""Send a message to a client.
:param sid: The session id of the recipient client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
"""
try:
socket = self._get_socket(sid)
except KeyError:
# the socket is not available
self.logger.warning('Cannot send to sid %s', sid)
return
socket.send(packet.Packet(packet.MESSAGE, data=data))
def get_session(self, sid):
"""Return the user session for a client.
:param sid: The session id of the client.
The return value is a dictionary. Modifications made to this
dictionary are not guaranteed to be preserved unless
``save_session()`` is called, or when the ``session`` context manager
is used.
"""
socket = self._get_socket(sid)
return socket.session
def save_session(self, sid, session):
"""Store the user session for a client.
:param sid: The session id of the client.
:param session: The session dictionary.
"""
socket = self._get_socket(sid)
socket.session = session
def session(self, sid):
"""Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@eio.on('connect')
def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
with eio.session(sid) as session:
session['username'] = username
@eio.on('message')
def on_message(sid, msg):
with eio.session(sid) as session:
print('received message from ', session['username'])
"""
class _session_context_manager(object):
def __init__(self, server, sid):
self.server = server
self.sid = sid
self.session = None
def __enter__(self):
self.session = self.server.get_session(sid)
return self.session
def __exit__(self, *args):
self.server.save_session(sid, self.session)
return _session_context_manager(self, sid)
def disconnect(self, sid=None):
"""Disconnect a client.
:param sid: The session id of the client to close. If this parameter
is not given, then all clients are closed.
"""
if sid is not None:
try:
socket = self._get_socket(sid)
except KeyError: # pragma: no cover
# the socket was already closed or gone
pass
else:
socket.close()
if sid in self.sockets: # pragma: no cover
del self.sockets[sid]
else:
for client in self.sockets.values():
client.close()
self.sockets = {}
def transport(self, sid):
"""Return the name of the transport used by the client.
The two possible values returned by this function are ``'polling'``
and ``'websocket'``.
:param sid: The session of the client.
"""
return 'websocket' if self._get_socket(sid).upgraded else 'polling'
def handle_request(self, environ, start_response):
"""Handle an HTTP request from the client.
This is the entry point of the Engine.IO application, using the same
interface as a WSGI application. For the typical usage, this function
is invoked by the :class:`Middleware` instance, but it can be invoked
directly when the middleware is not used.
:param environ: The WSGI environment.
:param start_response: The WSGI ``start_response`` function.
This function returns the HTTP response body to deliver to the client
as a byte sequence.
"""
if self.cors_allowed_origins != []:
# Validate the origin header if present
# This is important for WebSocket more than for HTTP, since
# browsers only apply CORS controls to HTTP.
origin = environ.get('HTTP_ORIGIN')
if origin:
allowed_origins = self._cors_allowed_origins(environ)
if allowed_origins is not None and origin not in \
allowed_origins:
self._log_error_once(
origin + ' is not an accepted origin.', 'bad-origin')
r = self._bad_request('Not an accepted origin.')
start_response(r['status'], r['headers'])
return [r['response']]
method = environ['REQUEST_METHOD']
query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
jsonp = False
jsonp_index = None
# make sure the client uses an allowed transport
transport = query.get('transport', ['polling'])[0]
if transport not in self.transports:
self._log_error_once('Invalid transport', 'bad-transport')
r = self._bad_request('Invalid transport')
start_response(r['status'], r['headers'])
return [r['response']]
# make sure the client speaks a compatible Engine.IO version
sid = query['sid'][0] if 'sid' in query else None
if sid is None and query.get('EIO') != ['4']:
self._log_error_once(
'The client is using an unsupported version of the Socket.IO '
'or Engine.IO protocols', 'bad-version')
r = self._bad_request(
'The client is using an unsupported version of the Socket.IO '
'or Engine.IO protocols')
start_response(r['status'], r['headers'])
return [r['response']]
if 'j' in query:
jsonp = True
try:
jsonp_index = int(query['j'][0])
except (ValueError, KeyError, IndexError):
# Invalid JSONP index number
pass
if jsonp and jsonp_index is None:
self._log_error_once('Invalid JSONP index number',
'bad-jsonp-index')
r = self._bad_request('Invalid JSONP index number')
elif method == 'GET':
if sid is None:
# transport must be one of 'polling' or 'websocket'.
# if 'websocket', the HTTP_UPGRADE header must match.
upgrade_header = environ.get('HTTP_UPGRADE').lower() \
if 'HTTP_UPGRADE' in environ else None
if transport == 'polling' \
or transport == upgrade_header == 'websocket':
r = self._handle_connect(environ, start_response,
transport, jsonp_index)
else:
self._log_error_once('Invalid websocket upgrade',
'bad-upgrade')
r = self._bad_request('Invalid websocket upgrade')
else:
if sid not in self.sockets:
self._log_error_once('Invalid session ' + sid, 'bad-sid')
r = self._bad_request('Invalid session')
else:
socket = self._get_socket(sid)
try:
packets = socket.handle_get_request(
environ, start_response)
if isinstance(packets, list):
r = self._ok(packets, jsonp_index=jsonp_index)
else:
r = packets
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
if sid in self.sockets and self.sockets[sid].closed:
del self.sockets[sid]
elif method == 'POST':
if sid is None or sid not in self.sockets:
self._log_error_once(
'Invalid session ' + (sid or 'None'), 'bad-sid')
r = self._bad_request('Invalid session')
else:
socket = self._get_socket(sid)
try:
socket.handle_post_request(environ)
r = self._ok(jsonp_index=jsonp_index)
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
except: # pragma: no cover
# for any other unexpected errors, we log the error
# and keep going
self.logger.exception('post request handler error')
r = self._ok(jsonp_index=jsonp_index)
elif method == 'OPTIONS':
r = self._ok()
else:
self.logger.warning('Method %s not supported', method)
r = self._method_not_found()
if not isinstance(r, dict):
return r or []
if self.http_compression and \
len(r['response']) >= self.compression_threshold:
encodings = [e.split(';')[0].strip() for e in
environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
for encoding in encodings:
if encoding in self.compression_methods:
r['response'] = \
getattr(self, '_' + encoding)(r['response'])
r['headers'] += [('Content-Encoding', encoding)]
break
cors_headers = self._cors_headers(environ)
start_response(r['status'], r['headers'] + cors_headers)
return [r['response']]
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object that represents the background task,
on which the ``join()`` method can be invoked to wait for the task to
complete.
"""
th = self._async['thread'](target=target, args=args, kwargs=kwargs)
th.start()
return th # pragma: no cover
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self._async['sleep'](seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object using the appropriate async model.
This is a utility function that applications can use to create a queue
without having to worry about using the correct call for the selected
async mode.
"""
return self._async['queue'](*args, **kwargs)
def get_queue_empty_exception(self):
"""Return the queue empty exception for the appropriate async model.
This is a utility function that applications can use to work with a
queue without having to worry about using the correct call for the
selected async mode.
"""
return self._async['queue_empty']
def create_event(self, *args, **kwargs):
"""Create an event object using the appropriate async model.
This is a utility function that applications can use to create an
event without having to worry about using the correct call for the
selected async mode.
"""
return self._async['event'](*args, **kwargs)
def generate_id(self):
"""Generate a unique session id."""
id = base64.b64encode(
secrets.token_bytes(12) + self.sequence_number.to_bytes(3, 'big'))
self.sequence_number = (self.sequence_number + 1) & 0xffffff
return id.decode('utf-8').replace('/', '_').replace('+', '-')
def _generate_sid_cookie(self, sid, attributes):
"""Generate the sid cookie."""
cookie = attributes.get('name', 'io') + '=' + sid
for attribute, value in attributes.items():
if attribute == 'name':
continue
if callable(value):
value = value()
if value is True:
cookie += '; ' + attribute
else:
cookie += '; ' + attribute + '=' + value
return cookie
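# Illustrative note (not part of the library): the ``attributes`` mapping is
# what a dict-valued ``cookie`` option supplies, and callable values are
# evaluated when the cookie string is built. With hypothetical values:
#
#     self._generate_sid_cookie('abc123', {
#         'name': 'io', 'path': '/', 'SameSite': 'Lax',
#         'expires': lambda: 'Fri, 01 Jan 2100 00:00:00 GMT',
#     })
#     # -> "io=abc123; path=/; SameSite=Lax; "
#     #    "expires=Fri, 01 Jan 2100 00:00:00 GMT"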
def _handle_connect(self, environ, start_response, transport,
jsonp_index=None):
"""Handle a client connection request."""
if self.start_service_task:
# start the service task to monitor connected clients
self.start_service_task = False
self.start_background_task(self._service_task)
sid = self.generate_id()
s = socket.Socket(self, sid)
self.sockets[sid] = s
pkt = packet.Packet(packet.OPEN, {
'sid': sid,
'upgrades': self._upgrades(sid, transport),
'pingTimeout': int(self.ping_timeout * 1000),
'pingInterval': int(
self.ping_interval + self.ping_interval_grace_period) * 1000})
s.send(pkt)
s.schedule_ping()
# NOTE: some sections below are marked as "no cover" to work around
# what seems to be a bug in the coverage package. All the lines below
# are covered by tests, but some are not reported as such for some
# reason
ret = self._trigger_event('connect', sid, environ, run_async=False)
if ret is not None and ret is not True: # pragma: no cover
del self.sockets[sid]
self.logger.warning('Application rejected connection')
return self._unauthorized(ret or None)
if transport == 'websocket': # pragma: no cover
ret = s.handle_get_request(environ, start_response)
if s.closed and sid in self.sockets:
# websocket connection ended, so we are done
del self.sockets[sid]
return ret
else: # pragma: no cover
s.connected = True
headers = None
if self.cookie:
if isinstance(self.cookie, dict):
headers = [(
'Set-Cookie',
self._generate_sid_cookie(sid, self.cookie)
)]
else:
headers = [(
'Set-Cookie',
self._generate_sid_cookie(sid, {
'name': self.cookie, 'path': '/', 'SameSite': 'Lax'
})
)]
try:
return self._ok(s.poll(), headers=headers,
jsonp_index=jsonp_index)
except exceptions.QueueEmpty:
return self._bad_request()
def _upgrades(self, sid, transport):
"""Return the list of possible upgrades for a client connection."""
if not self.allow_upgrades or self._get_socket(sid).upgraded or \
transport == 'websocket':
return []
if self._async['websocket'] is None: # pragma: no cover
self._log_error_once(
'The WebSocket transport is not available, you must install a '
'WebSocket server that is compatible with your async mode to '
'enable it. See the documentation for details.',
'no-websocket')
return []
return ['websocket']
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
def _get_socket(self, sid):
"""Return the socket object for a given session."""
try:
s = self.sockets[sid]
except KeyError:
raise KeyError('Session not found')
if s.closed:
del self.sockets[sid]
raise KeyError('Session is disconnected')
return s
def _ok(self, packets=None, headers=None, jsonp_index=None):
"""Generate a successful HTTP response."""
if packets is not None:
if headers is None:
headers = []
headers += [('Content-Type', 'text/plain; charset=UTF-8')]
return {'status': '200 OK',
'headers': headers,
'response': payload.Payload(packets=packets).encode(
jsonp_index=jsonp_index).encode('utf-8')}
else:
return {'status': '200 OK',
'headers': [('Content-Type', 'text/plain')],
'response': b'OK'}
def _bad_request(self, message=None):
"""Generate a bad request HTTP error response."""
if message is None:
message = 'Bad Request'
message = packet.Packet.json.dumps(message)
return {'status': '400 BAD REQUEST',
'headers': [('Content-Type', 'text/plain')],
'response': message.encode('utf-8')}
def _method_not_found(self):
"""Generate a method not found HTTP error response."""
return {'status': '405 METHOD NOT FOUND',
'headers': [('Content-Type', 'text/plain')],
'response': b'Method Not Found'}
def _unauthorized(self, message=None):
"""Generate a unauthorized HTTP error response."""
if message is None:
message = 'Unauthorized'
message = packet.Packet.json.dumps(message)
return {'status': '401 UNAUTHORIZED',
'headers': [('Content-Type', 'application/json')],
'response': message.encode('utf-8')}
def _cors_allowed_origins(self, environ):
default_origins = []
if 'wsgi.url_scheme' in environ and 'HTTP_HOST' in environ:
default_origins.append('{scheme}://{host}'.format(
scheme=environ['wsgi.url_scheme'], host=environ['HTTP_HOST']))
if 'HTTP_X_FORWARDED_PROTO' in environ or \
'HTTP_X_FORWARDED_HOST' in environ:
scheme = environ.get(
'HTTP_X_FORWARDED_PROTO',
environ['wsgi.url_scheme']).split(',')[0].strip()
default_origins.append('{scheme}://{host}'.format(
scheme=scheme, host=environ.get(
'HTTP_X_FORWARDED_HOST', environ['HTTP_HOST']).split(
',')[0].strip()))
if self.cors_allowed_origins is None:
allowed_origins = default_origins
elif self.cors_allowed_origins == '*':
allowed_origins = None
elif isinstance(self.cors_allowed_origins, str):
allowed_origins = [self.cors_allowed_origins]
else:
allowed_origins = self.cors_allowed_origins
return allowed_origins
def _cors_headers(self, environ):
"""Return the cross-origin-resource-sharing headers."""
if self.cors_allowed_origins == []:
# special case, CORS handling is completely disabled
return []
headers = []
allowed_origins = self._cors_allowed_origins(environ)
if 'HTTP_ORIGIN' in environ and \
(allowed_origins is None or environ['HTTP_ORIGIN'] in
allowed_origins):
headers = [('Access-Control-Allow-Origin', environ['HTTP_ORIGIN'])]
if environ['REQUEST_METHOD'] == 'OPTIONS':
headers += [('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')]
if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in environ:
headers += [('Access-Control-Allow-Headers',
environ['HTTP_ACCESS_CONTROL_REQUEST_HEADERS'])]
if self.cors_credentials:
headers += [('Access-Control-Allow-Credentials', 'true')]
return headers
def _gzip(self, response):
"""Apply gzip compression to a response."""
bytesio = io.BytesIO()
with gzip.GzipFile(fileobj=bytesio, mode='w') as gz:
gz.write(response)
return bytesio.getvalue()
def _deflate(self, response):
"""Apply deflate compression to a response."""
return zlib.compress(response)
def _log_error_once(self, message, message_key):
"""Log message with logging.ERROR level the first time, then log
with given level."""
if message_key not in self.log_message_keys:
self.logger.error(message + ' (further occurrences of this error '
'will be logged with level INFO)')
self.log_message_keys.add(message_key)
else:
self.logger.info(message)
def _service_task(self): # pragma: no cover
"""Monitor connected clients and clean up those that time out."""
while True:
if len(self.sockets) == 0:
# nothing to do
self.sleep(self.ping_timeout)
continue
# go through the entire client list in a ping interval cycle
sleep_interval = float(self.ping_timeout) / len(self.sockets)
try:
# iterate over the current clients
for s in self.sockets.copy().values():
if not s.closing and not s.closed:
s.check_ping_timeout()
self.sleep(sleep_interval)
except (SystemExit, KeyboardInterrupt):
self.logger.info('service task canceled')
break
except:
# an unexpected exception has occurred, log it and continue
self.logger.exception('service task exception')
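# Minimal wiring sketch (illustrative comments only, not part of this module).
# It assumes the package's public ``engineio.Server`` and ``engineio.WSGIApp``
# wrappers and the stdlib ``wsgiref`` server; handler names and the port are
# arbitrary examples.
#
#     import engineio
#     from wsgiref.simple_server import make_server
#
#     eio = engineio.Server(async_mode='threading')
#
#     @eio.on('connect')
#     def on_connect(sid, environ):
#         print('client connected:', sid)
#
#     @eio.on('message')
#     def on_message(sid, data):
#         eio.send(sid, data)          # echo the payload back to the client
#
#     app = engineio.WSGIApp(eio)      # delegates to handle_request() above
#     make_server('', 8000, app).serve_forever()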
|
the-stack_106_14705
|
import urllib.parse
import uuid
from flask import Flask, render_template, request, redirect, make_response
from bs4 import BeautifulSoup as bs
from peewee import *
app = Flask(__name__)
db = SqliteDatabase("core.db")
class Post(Model):
id = AutoField()
token = CharField()
content = TextField()
class Meta:
database = db
@db.connection_context()
def initialize():
db.create_tables([Post])
initialize()
@app.route('/')
def index():
return render_template("index.html")
@app.route('/write', methods=["POST"])
def write():
content = request.form["content"]
token = str(uuid.uuid4())
Post.create(token=token, content=content)
return redirect("/display/" + token)
def filter_url(urls):
domain_list = []
for url in urls:
domain = urllib.parse.urlparse(url).scheme + "://" + urllib.parse.urlparse(url).netloc
if domain:
domain_list.append(domain)
return " ".join(domain_list)
@app.route('/display/<token>')
def display(token):
user_obj = Post.select().where(Post.token == token)
content = user_obj[-1].content if len(user_obj) > 0 else "Not Found"
img_urls = [x['src'] for x in bs(content, "html.parser").find_all("img", src=True)]
tmpl = render_template("display.html", content=content)
resp = make_response(tmpl)
resp.headers["Content-Security-Policy"] = "default-src 'none'; connect-src 'self'; img-src " \
f"'self' {filter_url(img_urls)}; script-src 'none'; " \
"style-src 'self'; base-uri 'self'; form-action 'self' "
return resp
if __name__ == '__main__':
app.run()
|
the-stack_106_14706
|
"""Support for SimpliSafe alarm systems."""
import asyncio
import logging
from simplipy import API
from simplipy.errors import InvalidCredentialsError, SimplipyError
from simplipy.websocket import (
EVENT_CAMERA_MOTION_DETECTED,
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
EVENT_DOORBELL_DETECTED,
EVENT_ENTRY_DETECTED,
EVENT_LOCK_LOCKED,
EVENT_LOCK_UNLOCKED,
EVENT_MOTION_DETECTED,
)
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_CODE,
CONF_CODE,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from .const import (
ATTR_ALARM_DURATION,
ATTR_ALARM_VOLUME,
ATTR_CHIME_VOLUME,
ATTR_ENTRY_DELAY_AWAY,
ATTR_ENTRY_DELAY_HOME,
ATTR_EXIT_DELAY_AWAY,
ATTR_EXIT_DELAY_HOME,
ATTR_LIGHT,
ATTR_VOICE_PROMPT_VOLUME,
DATA_CLIENT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
VOLUMES,
)
_LOGGER = logging.getLogger(__name__)
CONF_ACCOUNTS = "accounts"
DATA_LISTENER = "listener"
TOPIC_UPDATE = "simplisafe_update_data_{0}"
EVENT_SIMPLISAFE_EVENT = "SIMPLISAFE_EVENT"
EVENT_SIMPLISAFE_NOTIFICATION = "SIMPLISAFE_NOTIFICATION"
DEFAULT_SOCKET_MIN_RETRY = 15
WEBSOCKET_EVENTS_REQUIRING_SERIAL = [EVENT_LOCK_LOCKED, EVENT_LOCK_UNLOCKED]
WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT = [
EVENT_CAMERA_MOTION_DETECTED,
EVENT_DOORBELL_DETECTED,
EVENT_ENTRY_DETECTED,
EVENT_MOTION_DETECTED,
]
ATTR_CATEGORY = "category"
ATTR_LAST_EVENT_CHANGED_BY = "last_event_changed_by"
ATTR_LAST_EVENT_INFO = "last_event_info"
ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name"
ATTR_LAST_EVENT_SENSOR_SERIAL = "last_event_sensor_serial"
ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type"
ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_MESSAGE = "message"
ATTR_PIN_LABEL = "label"
ATTR_PIN_LABEL_OR_VALUE = "label_or_pin"
ATTR_PIN_VALUE = "pin"
ATTR_SYSTEM_ID = "system_id"
ATTR_TIMESTAMP = "timestamp"
SERVICE_BASE_SCHEMA = vol.Schema({vol.Required(ATTR_SYSTEM_ID): cv.positive_int})
SERVICE_REMOVE_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL_OR_VALUE): cv.string}
)
SERVICE_SET_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL): cv.string, vol.Required(ATTR_PIN_VALUE): cv.string}
)
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{
vol.Optional(ATTR_ALARM_DURATION): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=480)
),
vol.Optional(ATTR_ALARM_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_CHIME_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_ENTRY_DELAY_AWAY): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=255)
),
vol.Optional(ATTR_ENTRY_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(max=255)
),
vol.Optional(ATTR_EXIT_DELAY_AWAY): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=45, max=255)
),
vol.Optional(ATTR_EXIT_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(max=255)
),
vol.Optional(ATTR_LIGHT): cv.boolean,
vol.Optional(ATTR_VOICE_PROMPT_VOLUME): vol.All(
vol.Coerce(int), vol.In(VOLUMES)
),
}
)
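# Illustrative service-call data accepted by the schema above (field names
# follow the ATTR_* constants imported from const and the values are made up):
# a simplisafe.set_system_properties call could carry
#     {"system_id": 123456, "alarm_duration": "00:04:00", "alarm_volume": 2,
#      "light": True}
# where the time period is coerced to seconds (240) and range-checked, and the
# volume values must come from VOLUMES.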
ACCOUNT_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_CODE): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_ACCOUNTS): vol.All(
cv.ensure_list, [ACCOUNT_CONFIG_SCHEMA]
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
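# The schema above corresponds to a configuration.yaml block along these lines
# (account values are placeholders):
#
#     simplisafe:
#       accounts:
#         - username: user@example.com
#           password: !secret simplisafe_password
#           code: "1234"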
@callback
def _async_save_refresh_token(hass, config_entry, token):
"""Save a refresh token to the config entry."""
hass.config_entries.async_update_entry(
config_entry, data={**config_entry.data, CONF_TOKEN: token}
)
async def async_register_base_station(hass, system, config_entry_id):
"""Register a new bridge."""
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry_id,
identifiers={(DOMAIN, system.serial)},
manufacturer="SimpliSafe",
model=system.version,
name=system.address,
)
async def async_setup(hass, config):
"""Set up the SimpliSafe component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
hass.data[DOMAIN][DATA_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
for account in conf[CONF_ACCOUNTS]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: account[CONF_USERNAME],
CONF_PASSWORD: account[CONF_PASSWORD],
CONF_CODE: account.get(CONF_CODE),
},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up SimpliSafe as config entry."""
entry_updates = {}
if not config_entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = config_entry.data[CONF_USERNAME]
if CONF_CODE in config_entry.data:
# If an alarm code was provided as part of configuration.yaml, pop it out of
# the config entry's data and move it to options:
data = {**config_entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**config_entry.options,
CONF_CODE: data.pop(CONF_CODE),
}
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
_verify_domain_control = verify_domain_control(hass, DOMAIN)
websession = aiohttp_client.async_get_clientsession(hass)
try:
api = await API.login_via_token(config_entry.data[CONF_TOKEN], websession)
except InvalidCredentialsError:
_LOGGER.error("Invalid credentials provided")
return False
except SimplipyError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady
_async_save_refresh_token(hass, config_entry, api.refresh_token)
simplisafe = SimpliSafe(hass, api, config_entry)
await simplisafe.async_init()
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = simplisafe
for component in ("alarm_control_panel", "lock"):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
@callback
def verify_system_exists(coro):
"""Log an error if a service call uses an invalid system ID."""
async def decorator(call):
"""Decorate."""
system_id = int(call.data[ATTR_SYSTEM_ID])
if system_id not in simplisafe.systems:
_LOGGER.error("Unknown system ID in service call: %s", system_id)
return
await coro(call)
return decorator
@callback
def v3_only(coro):
"""Log an error if the decorated coroutine is called with a v2 system."""
async def decorator(call):
"""Decorate."""
system = simplisafe.systems[int(call.data[ATTR_SYSTEM_ID])]
if system.version != 3:
_LOGGER.error("Service only available on V3 systems")
return
await coro(call)
return decorator
@verify_system_exists
@_verify_domain_control
async def remove_pin(call):
"""Remove a PIN."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.remove_pin(call.data[ATTR_PIN_LABEL_OR_VALUE])
except SimplipyError as err:
_LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@_verify_domain_control
async def set_pin(call):
"""Set a PIN."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_pin(call.data[ATTR_PIN_LABEL], call.data[ATTR_PIN_VALUE])
except SimplipyError as err:
_LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@v3_only
@_verify_domain_control
async def set_system_properties(call):
"""Set one or more system parameters."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_properties(
{
prop: value
for prop, value in call.data.items()
if prop != ATTR_SYSTEM_ID
}
)
except SimplipyError as err:
_LOGGER.error("Error during service call: %s", err)
return
for service, method, schema in [
("remove_pin", remove_pin, SERVICE_REMOVE_PIN_SCHEMA),
("set_pin", set_pin, SERVICE_SET_PIN_SCHEMA),
(
"set_system_properties",
set_system_properties,
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA,
),
]:
async_register_admin_service(hass, DOMAIN, service, method, schema=schema)
config_entry.add_update_listener(async_update_options)
return True
async def async_unload_entry(hass, entry):
"""Unload a SimpliSafe config entry."""
tasks = [
hass.config_entries.async_forward_entry_unload(entry, component)
for component in ("alarm_control_panel", "lock")
]
await asyncio.gather(*tasks)
hass.data[DOMAIN][DATA_CLIENT].pop(entry.entry_id)
remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(entry.entry_id)
remove_listener()
return True
async def async_update_options(hass, config_entry):
"""Handle an options update."""
simplisafe = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id]
simplisafe.options = config_entry.options
class SimpliSafeWebsocket:
"""Define a SimpliSafe websocket "manager" object."""
def __init__(self, hass, websocket):
"""Initialize."""
self._hass = hass
self._websocket = websocket
self.last_events = {}
@staticmethod
def _on_connect():
"""Define a handler to fire when the websocket is connected."""
_LOGGER.info("Connected to websocket")
@staticmethod
def _on_disconnect():
"""Define a handler to fire when the websocket is disconnected."""
_LOGGER.info("Disconnected from websocket")
def _on_event(self, event):
"""Define a handler to fire when a new SimpliSafe event arrives."""
_LOGGER.debug("New websocket event: %s", event)
self.last_events[event.system_id] = event
async_dispatcher_send(self._hass, TOPIC_UPDATE.format(event.system_id))
if event.event_type not in WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT:
return
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_EVENT,
event_data={
ATTR_LAST_EVENT_CHANGED_BY: event.changed_by,
ATTR_LAST_EVENT_TYPE: event.event_type,
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_SERIAL: event.sensor_serial,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_SYSTEM_ID: event.system_id,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
},
)
async def async_websocket_connect(self):
"""Register handlers and connect to the websocket."""
self._websocket.on_connect(self._on_connect)
self._websocket.on_disconnect(self._on_disconnect)
self._websocket.on_event(self._on_event)
await self._websocket.async_connect()
class SimpliSafe:
"""Define a SimpliSafe data object."""
def __init__(self, hass, api, config_entry):
"""Initialize."""
self._api = api
self._config_entry = config_entry
self._emergency_refresh_token_used = False
self._hass = hass
self._system_notifications = {}
self.options = config_entry.options or {}
self.initial_event_to_use = {}
self.systems = {}
self.websocket = SimpliSafeWebsocket(hass, api.websocket)
@callback
def _async_process_new_notifications(self, system):
"""Act on any new system notifications."""
old_notifications = self._system_notifications.get(system.system_id, [])
latest_notifications = system.notifications
# Save the latest notifications:
self._system_notifications[system.system_id] = latest_notifications
# Process any notifications that are new:
to_add = set(latest_notifications) - set(old_notifications)
if not to_add:
return
_LOGGER.debug("New system notifications: %s", to_add)
for notification in to_add:
text = notification.text
if notification.link:
text = f"{text} For more information: {notification.link}"
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_NOTIFICATION,
event_data={
ATTR_CATEGORY: notification.category,
ATTR_CODE: notification.code,
ATTR_MESSAGE: text,
ATTR_TIMESTAMP: notification.timestamp,
},
)
async def async_init(self):
"""Initialize the data class."""
asyncio.create_task(self.websocket.async_websocket_connect())
self.systems = await self._api.get_systems()
for system in self.systems.values():
self._hass.async_create_task(
async_register_base_station(
self._hass, system, self._config_entry.entry_id
)
)
# Future events will come from the websocket, but since subscription to the
# websocket doesn't provide the most recent event, we grab it from the REST
# API to ensure event-related attributes aren't empty on startup:
try:
self.initial_event_to_use[
system.system_id
] = await system.get_latest_event()
except SimplipyError as err:
_LOGGER.error("Error while fetching initial event: %s", err)
self.initial_event_to_use[system.system_id] = {}
async def refresh(event_time):
"""Refresh data from the SimpliSafe account."""
await self.async_update()
self._hass.data[DOMAIN][DATA_LISTENER][
self._config_entry.entry_id
] = async_track_time_interval(self._hass, refresh, DEFAULT_SCAN_INTERVAL)
await self.async_update()
async def async_update(self):
"""Get updated data from SimpliSafe."""
async def update_system(system):
"""Update a system."""
await system.update()
self._async_process_new_notifications(system)
_LOGGER.debug('Updated REST API data for "%s"', system.address)
async_dispatcher_send(self._hass, TOPIC_UPDATE.format(system.system_id))
tasks = [update_system(system) for system in self.systems.values()]
results = await asyncio.gather(*tasks, return_exceptions=True)
for result in results:
if isinstance(result, InvalidCredentialsError):
if self._emergency_refresh_token_used:
_LOGGER.error(
"SimpliSafe authentication disconnected. Please restart HASS."
)
remove_listener = self._hass.data[DOMAIN][DATA_LISTENER].pop(
self._config_entry.entry_id
)
remove_listener()
return
_LOGGER.warning("SimpliSafe cloud error; trying stored refresh token")
self._emergency_refresh_token_used = True
return await self._api.refresh_access_token(
self._config_entry.data[CONF_TOKEN]
)
if isinstance(result, SimplipyError):
_LOGGER.error("SimpliSafe error while updating: %s", result)
return
if isinstance(result, Exception):
_LOGGER.error("Unknown error while updating: %s", result)
return
if self._api.refresh_token != self._config_entry.data[CONF_TOKEN]:
_async_save_refresh_token(
self._hass, self._config_entry, self._api.refresh_token
)
# If we've reached this point using an emergency refresh token, we're in the
# clear and we can discard it:
if self._emergency_refresh_token_used:
self._emergency_refresh_token_used = False
class SimpliSafeEntity(Entity):
"""Define a base SimpliSafe entity."""
def __init__(self, simplisafe, system, name, *, serial=None):
"""Initialize."""
self._async_unsub_dispatcher_connect = None
self._last_processed_websocket_event = None
self._name = name
self._online = True
self._simplisafe = simplisafe
self._system = system
self.websocket_events_to_listen_for = [
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
]
if serial:
self._serial = serial
else:
self._serial = system.serial
self._attrs = {
ATTR_LAST_EVENT_INFO: simplisafe.initial_event_to_use[system.system_id].get(
"info"
),
ATTR_LAST_EVENT_SENSOR_NAME: simplisafe.initial_event_to_use[
system.system_id
].get("sensorName"),
ATTR_LAST_EVENT_SENSOR_TYPE: simplisafe.initial_event_to_use[
system.system_id
].get("sensorType"),
ATTR_LAST_EVENT_TIMESTAMP: simplisafe.initial_event_to_use[
system.system_id
].get("eventTimestamp"),
ATTR_SYSTEM_ID: system.system_id,
}
@property
def available(self):
"""Return whether the entity is available."""
# We can easily detect if the V3 system is offline, but no simple check exists
# for the V2 system. Therefore, we mark the entity as available if:
# 1. We can verify that the system is online (assuming True if we can't)
# 2. We can verify that the entity is online
system_offline = self._system.version == 3 and self._system.offline
return not system_offline and self._online
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._system.system_id)},
"manufacturer": "SimpliSafe",
"model": self._system.version,
"name": self._name,
"via_device": (DOMAIN, self._system.serial),
}
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attrs
@property
def name(self):
"""Return the name of the entity."""
return f"{self._system.address} {self._name}"
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._serial
@callback
def _async_should_ignore_websocket_event(self, event):
"""Return whether this entity should ignore a particular websocket event.
Note that we can't check for a final condition – whether the event belongs to
a particular entity, like a lock – because some events (like arming the system
from a keypad _or_ from the website) should impact the same entity.
"""
# We've already processed this event:
if self._last_processed_websocket_event == event:
return True
# This is an event for a system other than the one this entity belongs to:
if event.system_id != self._system.system_id:
return True
# This isn't an event that this entity cares about:
if event.event_type not in self.websocket_events_to_listen_for:
return True
# This event is targeted at a specific entity whose serial number is different
# from this one's:
if (
event.event_type in WEBSOCKET_EVENTS_REQUIRING_SERIAL
and event.sensor_serial != self._serial
):
return True
return False
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.update_from_latest_data()
self.async_write_ha_state()
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_UPDATE.format(self._system.system_id), update
)
self.update_from_latest_data()
@callback
def update_from_latest_data(self):
"""Update the entity."""
self.async_update_from_rest_api()
last_websocket_event = self._simplisafe.websocket.last_events.get(
self._system.system_id
)
if self._async_should_ignore_websocket_event(last_websocket_event):
return
self._last_processed_websocket_event = last_websocket_event
if last_websocket_event.sensor_type:
sensor_type = last_websocket_event.sensor_type.name
else:
sensor_type = None
self._attrs.update(
{
ATTR_LAST_EVENT_INFO: last_websocket_event.info,
ATTR_LAST_EVENT_SENSOR_NAME: last_websocket_event.sensor_name,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_LAST_EVENT_TIMESTAMP: last_websocket_event.timestamp,
}
)
self._async_internal_update_from_websocket_event(last_websocket_event)
@callback
def async_update_from_rest_api(self):
"""Update the entity with the provided REST API data."""
@callback
def _async_internal_update_from_websocket_event(self, event):
"""Check for connection events and set offline appropriately.
Should not be called directly.
"""
if event.event_type == EVENT_CONNECTION_LOST:
self._online = False
elif event.event_type == EVENT_CONNECTION_RESTORED:
self._online = True
# It's uncertain whether SimpliSafe events will still propagate down the
# websocket when the base station is offline. Just in case, we guard against
# further action until connection is restored:
if not self._online:
return
self.async_update_from_websocket_event(event)
@callback
def async_update_from_websocket_event(self, event):
"""Update the entity with the provided websocket API data."""
async def async_will_remove_from_hass(self) -> None:
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
|
the-stack_106_14707
|
from typing import TYPE_CHECKING
from grouper.usecases.interfaces import AuditLogInterface
if TYPE_CHECKING:
from datetime import datetime
from grouper.entities.audit_log_entry import AuditLogEntry
from grouper.entities.group_request import GroupRequestStatus, UserGroupRequest
from grouper.repositories.audit_log import AuditLogRepository
from grouper.usecases.authorization import Authorization
from typing import List, Optional
class AuditLogService(AuditLogInterface):
"""Updates the audit log when changes are made.
The date parameter to the log methods is primarily for use in tests, where to get a consistent
sort order the audit log entries may need to be spaced out over time. If not set, the default
is the current time.
"""
def __init__(self, audit_log_repository):
# type: (AuditLogRepository) -> None
self.audit_log_repository = audit_log_repository
def entries_affecting_group(self, group, limit):
# type: (str, int) -> List[AuditLogEntry]
return self.audit_log_repository.entries_affecting_group(group, limit)
def entries_affecting_permission(self, permission, limit):
# type: (str, int) -> List[AuditLogEntry]
return self.audit_log_repository.entries_affecting_permission(permission, limit)
def entries_affecting_user(self, user, limit):
# type: (str, int) -> List[AuditLogEntry]
return self.audit_log_repository.entries_affecting_user(user, limit)
def log_create_service_account(self, service, owner, authorization, date=None):
# type: (str, str, Authorization, Optional[datetime]) -> None
self.audit_log_repository.log(
authorization=authorization,
action="create_service_account",
description="Created new service account",
on_group=owner,
on_user=service,
date=date,
)
def log_create_service_account_from_disabled_user(self, user, authorization, date=None):
# type: (str, Authorization, Optional[datetime]) -> None
self.audit_log_repository.log(
authorization=authorization,
action="create_service_account_from_disabled_user",
description="Convert a disabled user into a disabled service account",
on_user=user,
date=date,
)
def log_create_permission(self, permission, authorization, date=None):
# type: (str, Authorization, Optional[datetime]) -> None
self.audit_log_repository.log(
authorization=authorization,
action="create_permission",
description="Created permission",
on_permission=permission,
date=date,
)
def log_disable_permission(self, permission, authorization, date=None):
# type: (str, Authorization, Optional[datetime]) -> None
self.audit_log_repository.log(
authorization=authorization,
action="disable_permission",
description="Disabled permission",
on_permission=permission,
date=date,
)
def log_disable_user(self, username, authorization, date=None):
# type: (str, Authorization, Optional[datetime]) -> None
self.audit_log_repository.log(
authorization=authorization,
action="disable_user",
description="Disabled user",
on_user=username,
date=date,
)
def log_enable_service_account(self, user, owner, authorization, date=None):
# type: (str, str, Authorization, Optional[datetime]) -> None
self.audit_log_repository.log(
authorization=authorization,
action="enable_service_account",
description="Enabled service account",
on_user=user,
on_group=owner,
date=date,
)
def log_revoke_group_permission_grant(
self,
group, # type: str
permission, # type: str
argument, # type: str
authorization, # type: Authorization
date=None, # type: Optional[datetime]
):
# type: (...) -> None
self.audit_log_repository.log(
authorization=authorization,
action="revoke_permission",
description="Revoked permission with argument: {}".format(argument),
on_group=group,
on_permission=permission,
date=date,
)
def log_revoke_service_account_permission_grant(
self,
service_account, # type: str
permission, # type: str
argument, # type: str
authorization, # type: Authorization
date=None, # type: Optional[datetime]
):
# type: (...) -> None
self.audit_log_repository.log(
authorization=authorization,
action="revoke_permission",
description="Revoked permission with argument: {}".format(argument),
on_permission=permission,
on_user=service_account,
date=date,
)
def log_user_group_request_status_change(self, request, status, authorization, date=None):
# type: (UserGroupRequest, GroupRequestStatus, Authorization, Optional[datetime]) -> None
self.audit_log_repository.log(
authorization=authorization,
action="update_request",
description="Updated request to status: {}".format(status.value),
on_group=request.group,
on_user=request.requester,
date=date,
)
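# Minimal usage sketch (commented out; the real repository and Authorization
# objects are constructed elsewhere in grouper, so mocks stand in for them
# here, and the permission name is arbitrary). The explicit ``date`` shows the
# test-only knob described in the class docstring:
#
#     from datetime import datetime
#     from unittest.mock import Mock
#
#     service = AuditLogService(Mock())
#     service.log_create_permission(
#         "audit.manage", authorization=Mock(), date=datetime(2020, 1, 1)
#     )
#     service.audit_log_repository.log.assert_called_once()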
|
the-stack_106_14710
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
openapi_spec_methods_override = {
"get": {"get": {"description": "Get query detail information."}},
"get_list": {
"get": {
"description": "Get a list of queries, use Rison or JSON query "
"parameters for filtering, sorting, pagination and "
" for selecting specific columns and metadata.",
}
},
}
""" Overrides GET methods OpenApi descriptions """
|
the-stack_106_14713
|
"""LInked List sparse matrix class
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['lil_matrix','isspmatrix_lil']
import numpy as np
from scipy._lib.six import xrange
from .base import spmatrix, isspmatrix
from .sputils import (getdtype, isshape, isscalarlike, IndexMixin,
upcast_scalar, get_index_dtype, isintlike)
from . import _csparsetools
class lil_matrix(spmatrix, IndexMixin):
"""Row-based linked list sparse matrix
This is a structure for constructing sparse matrices incrementally.
Note that inserting a single item can take linear time in the worst case;
to construct a matrix efficiently, make sure the items are pre-sorted by
index, per row.
This can be instantiated in several ways:
lil_matrix(D)
with a dense matrix or rank-2 ndarray D
lil_matrix(S)
with another sparse matrix S (equivalent to S.tolil())
lil_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
LIL format data array of the matrix
rows
LIL format row index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the LIL format
- supports flexible slicing
- changes to the matrix sparsity structure are efficient
Disadvantages of the LIL format
- arithmetic operations LIL + LIL are slow (consider CSR or CSC)
- slow column slicing (consider CSC)
- slow matrix vector products (consider CSR or CSC)
Intended Usage
- LIL is a convenient format for constructing sparse matrices
- once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- consider using the COO format when constructing large matrices
Data Structure
- An array (``self.rows``) of rows, each of which is a sorted
list of column indices of non-zero elements.
- The corresponding nonzero values are stored in similar
fashion in ``self.data``.
"""
format = 'lil'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
spmatrix.__init__(self)
self.dtype = getdtype(dtype, arg1, default=float)
# First get the shape
if isspmatrix(arg1):
if isspmatrix_lil(arg1) and copy:
A = arg1.copy()
else:
A = arg1.tolil()
if dtype is not None:
A = A.astype(dtype)
self.shape = A.shape
self.dtype = A.dtype
self.rows = A.rows
self.data = A.data
elif isinstance(arg1,tuple):
if isshape(arg1):
if shape is not None:
raise ValueError('invalid use of shape parameter')
M, N = arg1
self.shape = (M,N)
self.rows = np.empty((M,), dtype=object)
self.data = np.empty((M,), dtype=object)
for i in range(M):
self.rows[i] = []
self.data[i] = []
else:
raise TypeError('unrecognized lil_matrix constructor usage')
else:
# assume A is dense
try:
A = np.asmatrix(arg1)
except TypeError:
raise TypeError('unsupported matrix type')
else:
from .csr import csr_matrix
A = csr_matrix(A, dtype=dtype).tolil()
self.shape = A.shape
self.dtype = A.dtype
self.rows = A.rows
self.data = A.data
def set_shape(self,shape):
shape = tuple(shape)
if len(shape) != 2:
raise ValueError("Only two-dimensional sparse arrays "
"are supported.")
try:
shape = int(shape[0]),int(shape[1]) # floats, other weirdness
except:
raise TypeError('invalid shape')
if not (shape[0] >= 0 and shape[1] >= 0):
raise ValueError('invalid shape')
if (self._shape != shape) and (self._shape is not None):
try:
self = self.reshape(shape)
except NotImplementedError:
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__)
self._shape = shape
shape = property(fget=spmatrix.get_shape, fset=set_shape)
def __iadd__(self,other):
self[:,:] = self + other
return self
def __isub__(self,other):
self[:,:] = self - other
return self
def __imul__(self,other):
if isscalarlike(other):
self[:,:] = self * other
return self
else:
return NotImplemented
def __itruediv__(self,other):
if isscalarlike(other):
self[:,:] = self / other
return self
else:
return NotImplemented
# Whenever the dimensions change, empty lists should be created for each
# row
def getnnz(self, axis=None):
if axis is None:
return sum([len(rowvals) for rowvals in self.data])
if axis < 0:
axis += 2
if axis == 0:
out = np.zeros(self.shape[1], dtype=np.intp)
for row in self.rows:
out[row] += 1
return out
elif axis == 1:
return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp)
else:
raise ValueError('axis out of bounds')
def count_nonzero(self):
return sum(np.count_nonzero(rowvals) for rowvals in self.data)
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def __str__(self):
val = ''
for i, row in enumerate(self.rows):
for pos, j in enumerate(row):
val += " %s\t%s\n" % (str((i, j)), str(self.data[i][pos]))
return val[:-1]
def getrowview(self, i):
"""Returns a view of the 'i'th row (without copying).
"""
new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
new.rows[0] = self.rows[i]
new.data[0] = self.data[i]
return new
def getrow(self, i):
"""Returns a copy of the 'i'th row.
"""
i = self._check_row_bounds(i)
new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
new.rows[0] = self.rows[i][:]
new.data[0] = self.data[i][:]
return new
def _check_row_bounds(self, i):
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('row index out of bounds')
return i
def _check_col_bounds(self, j):
if j < 0:
j += self.shape[1]
if j < 0 or j >= self.shape[1]:
raise IndexError('column index out of bounds')
return j
def __getitem__(self, index):
"""Return the element(s) index=(i, j), where j may be a slice.
This always returns a copy for consistency, since slices into
Python lists return copies.
"""
# Scalar fast path first
if isinstance(index, tuple) and len(index) == 2:
i, j = index
# Use isinstance checks for common index types; this is
# ~25-50% faster than isscalarlike. Other types are
# handled below.
if ((isinstance(i, int) or isinstance(i, np.integer)) and
(isinstance(j, int) or isinstance(j, np.integer))):
v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
self.rows, self.data,
i, j)
return self.dtype.type(v)
# Utilities found in IndexMixin
i, j = self._unpack_index(index)
# Proper check for other scalar index types
i_intlike = isintlike(i)
j_intlike = isintlike(j)
if i_intlike and j_intlike:
v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
self.rows, self.data,
i, j)
return self.dtype.type(v)
elif j_intlike or isinstance(j, slice):
# column slicing fast path
if j_intlike:
j = self._check_col_bounds(j)
j = slice(j, j+1)
if i_intlike:
i = self._check_row_bounds(i)
i = xrange(i, i+1)
i_shape = None
elif isinstance(i, slice):
i = xrange(*i.indices(self.shape[0]))
i_shape = None
else:
i = np.atleast_1d(i)
i_shape = i.shape
if i_shape is None or len(i_shape) == 1:
return self._get_row_ranges(i, j)
i, j = self._index_to_arrays(i, j)
if i.size == 0:
return lil_matrix(i.shape, dtype=self.dtype)
new = lil_matrix(i.shape, dtype=self.dtype)
i, j = _prepare_index_for_memoryview(i, j)
_csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
self.rows, self.data,
new.rows, new.data,
i, j)
return new
def _get_row_ranges(self, rows, col_slice):
"""
Fast path for indexing in the case where column index is slice.
This gains performance improvement over brute force by more
efficient skipping of zeros, by accessing the elements
column-wise in order.
Parameters
----------
rows : sequence or xrange
Rows indexed. If xrange, must be within valid bounds.
col_slice : slice
Columns indexed
"""
j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
col_range = xrange(j_start, j_stop, j_stride)
nj = len(col_range)
new = lil_matrix((len(rows), nj), dtype=self.dtype)
_csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
self.rows, self.data,
new.rows, new.data,
rows,
j_start, j_stop, j_stride, nj)
return new
def __setitem__(self, index, x):
# Scalar fast path first
if isinstance(index, tuple) and len(index) == 2:
i, j = index
# Use isinstance checks for common index types; this is
# ~25-50% faster than isscalarlike. Scalar index
# assignment for other types is handled below together
# with fancy indexing.
if ((isinstance(i, int) or isinstance(i, np.integer)) and
(isinstance(j, int) or isinstance(j, np.integer))):
x = self.dtype.type(x)
if x.size > 1:
# Triggered if input was an ndarray
raise ValueError("Trying to assign a sequence to an item")
_csparsetools.lil_insert(self.shape[0], self.shape[1],
self.rows, self.data, i, j, x)
return
# General indexing
i, j = self._unpack_index(index)
# shortcut for common case of full matrix assign:
if (isspmatrix(x) and isinstance(i, slice) and i == slice(None) and
isinstance(j, slice) and j == slice(None)
and x.shape == self.shape):
x = lil_matrix(x, dtype=self.dtype)
self.rows = x.rows
self.data = x.data
return
i, j = self._index_to_arrays(i, j)
if isspmatrix(x):
x = x.toarray()
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
x, _ = np.broadcast_arrays(x, i)
if x.shape != i.shape:
raise ValueError("shape mismatch in assignment")
# Set values
i, j, x = _prepare_index_for_memoryview(i, j, x)
_csparsetools.lil_fancy_set(self.shape[0], self.shape[1],
self.rows, self.data,
i, j, x)
def _mul_scalar(self, other):
if other == 0:
# Multiply by zero: return the zero matrix
new = lil_matrix(self.shape, dtype=self.dtype)
else:
res_dtype = upcast_scalar(self.dtype, other)
new = self.copy()
new = new.astype(res_dtype)
# Multiply this scalar by every element.
for j, rowvals in enumerate(new.data):
new.data[j] = [val*other for val in rowvals]
return new
def __truediv__(self, other): # self / other
if isscalarlike(other):
new = self.copy()
# Divide every element by this scalar
for j, rowvals in enumerate(new.data):
new.data[j] = [val/other for val in rowvals]
return new
else:
return self.tocsr() / other
def copy(self):
from copy import deepcopy
new = lil_matrix(self.shape, dtype=self.dtype)
new.data = deepcopy(self.data)
new.rows = deepcopy(self.rows)
return new
copy.__doc__ = spmatrix.copy.__doc__
def reshape(self,shape):
new = lil_matrix(shape, dtype=self.dtype)
j_max = self.shape[1]
for i,row in enumerate(self.rows):
for col,j in enumerate(row):
new_r,new_c = np.unravel_index(i*j_max + j,shape)
new[new_r,new_c] = self[i,j]
return new
def toarray(self, order=None, out=None):
"""See the docstring for `spmatrix.toarray`."""
d = self._process_toarray_args(order, out)
for i, row in enumerate(self.rows):
for pos, j in enumerate(row):
d[i, j] = self.data[i][pos]
return d
def transpose(self):
return self.tocsr().transpose().tolil()
def tolil(self, copy=False):
if copy:
return self.copy()
else:
return self
tolil.__doc__ = spmatrix.tolil.__doc__
def tocsr(self, copy=False):
lst = [len(x) for x in self.rows]
idx_dtype = get_index_dtype(maxval=max(self.shape[1], sum(lst)))
indptr = np.asarray(lst, dtype=idx_dtype)
indptr = np.concatenate((np.array([0], dtype=idx_dtype),
np.cumsum(indptr, dtype=idx_dtype)))
indices = []
for x in self.rows:
indices.extend(x)
indices = np.asarray(indices, dtype=idx_dtype)
data = []
for x in self.data:
data.extend(x)
data = np.asarray(data, dtype=self.dtype)
from .csr import csr_matrix
return csr_matrix((data, indices, indptr), shape=self.shape)
tocsr.__doc__ = spmatrix.tocsr.__doc__
def _prepare_index_for_memoryview(i, j, x=None):
"""
Convert index and data arrays to form suitable for passing to the
Cython fancy getset routines.
The conversions are necessary to (i) ensure the integer
index arrays are in one of the accepted types, and (ii) to ensure
the arrays are writable so that Cython memoryview support doesn't
choke on them.
Parameters
----------
i, j
Index arrays
x : optional
Data arrays
Returns
-------
i, j, x
Re-formatted arrays (x is omitted, if input was None)
"""
if i.dtype > j.dtype:
j = j.astype(i.dtype)
elif i.dtype < j.dtype:
i = i.astype(j.dtype)
if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
i = i.astype(np.intp)
if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
j = j.astype(np.intp)
if x is not None:
if not x.flags.writeable:
x = x.copy()
return i, j, x
else:
return i, j
def isspmatrix_lil(x):
return isinstance(x, lil_matrix)
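# Usage sketch (illustrative doctest-style comments, not part of the scipy
# test suite): build the matrix incrementally in LIL form, then convert to
# CSR before doing arithmetic, as the class docstring recommends.
#
#     >>> import numpy as np
#     >>> m = lil_matrix((3, 4), dtype=np.float64)
#     >>> m[0, 1] = 1.0            # cheap single-item inserts
#     >>> m[2, 3] = 5.0
#     >>> m[1, :] = np.arange(4)   # row assignment is also supported
#     >>> csr = m.tocsr()          # switch formats for fast arithmetic
#     >>> (csr * 2).toarray()[2, 3]
#     10.0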
|
the-stack_106_14715
|
#-----------------------------------
# GLOBAL FEATURE EXTRACTION
#-----------------------------------
# organize imports
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import mahotas
import cv2
import os
import h5py
# fixed-sizes for image
fixed_size = tuple((500, 500))
# path to training data
train_path = "dataset/train"
# no.of.trees for Random Forests
num_trees = 100
# bins for histogram
bins = 8
# train_test_split size
test_size = 0.10
# seed for reproducing same results
seed = 9
# feature-descriptor-1: Hu Moments
def fd_hu_moments(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
feature = cv2.HuMoments(cv2.moments(image)).flatten()
return feature
# feature-descriptor-2: Haralick Texture
def fd_haralick(image):
# convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# compute the haralick texture feature vector
haralick = mahotas.features.haralick(gray).mean(axis=0)
# return the result
return haralick
# feature-descriptor-3: Color Histogram
def fd_histogram(image, mask=None):
# convert the image to HSV color-space
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# compute the color histogram
hist = cv2.calcHist([image], [0, 1, 2], None, [bins, bins, bins], [0, 256, 0, 256, 0, 256])
# normalize the histogram
cv2.normalize(hist, hist)
# return the histogram
return hist.flatten()
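# Optional sanity-check helper (illustrative only; nothing below calls it).
# It assumes a synthetic BGR uint8 image and the three descriptors above, and
# prints the expected sizes: 7 Hu moments + 13 Haralick features +
# bins**3 = 512 histogram bins, i.e. a 532-dimensional global feature.
def _demo_feature_vector_size():
    rng = np.random.RandomState(seed)
    demo_image = rng.randint(0, 256, size=(64, 64, 3)).astype("uint8")
    parts = [fd_histogram(demo_image), fd_haralick(demo_image), fd_hu_moments(demo_image)]
    print("[DEMO] per-descriptor sizes:", [p.shape[0] for p in parts])
    print("[DEMO] global feature size :", np.hstack(parts).shape[0])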
# get the training labels
train_labels = os.listdir(train_path)
# sort the training labels
train_labels.sort()
print(train_labels)
# empty lists to hold feature vectors and labels
global_features = []
labels = []
i, j = 0, 0
k = 0
# num of images per class
images_per_class = 778
# loop over the training data sub-folders
for training_name in train_labels:
# join the training data path and each species training folder
dir = os.path.join(train_path, training_name)
# get the current training label
current_label = training_name
k = 1
# loop over the images in each sub-folder
for x in range(1,images_per_class+1):
# get the image file name
file = dir + "/" + str(x) + ".png"
# read the image and resize it to a fixed-size
image = cv2.imread(file)
image = cv2.resize(image, fixed_size)
####################################
# Global Feature extraction
####################################
fv_hu_moments = fd_hu_moments(image)
fv_haralick = fd_haralick(image)
fv_histogram = fd_histogram(image)
###################################
# Concatenate global features
###################################
global_feature = np.hstack([fv_histogram, fv_haralick, fv_hu_moments])
# update the list of labels and feature vectors
labels.append(current_label)
global_features.append(global_feature)
i += 1
k += 1
print( "[STATUS] processed folder: {}".format(current_label))
j += 1
print ("[STATUS] completed Global Feature Extraction...")
# get the overall feature vector size
print ("[STATUS] feature vector size {}".format(np.array(global_features).shape))
# get the overall training label size
print ("[STATUS] training Labels {}".format(np.array(labels).shape))
# encode the target labels
targetNames = np.unique(labels)
le = LabelEncoder()
target = le.fit_transform(labels)
print ("[STATUS] training labels encoded...")
# normalize the feature vector in the range (0-1)
scaler = MinMaxScaler(feature_range=(0, 1))
rescaled_features = scaler.fit_transform(global_features)
print ("[STATUS] feature vector normalized...")
print ("[STATUS] target labels: {}".format(target))
print ("[STATUS] target labels shape: {}".format(target.shape))
# save the feature vector using HDF5
h5f_data = h5py.File('output/data.h5', 'w')
h5f_data.create_dataset('dataset_1', data=np.array(rescaled_features))
h5f_label = h5py.File('output/labels.h5', 'w')
h5f_label.create_dataset('dataset_1', data=np.array(target))
h5f_data.close()
h5f_label.close()
print ("[STATUS] end of training..")
|
the-stack_106_14717
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.utils.display import Display
try:
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
except ImportError:
from ansible.module_utils.network.common.utils import load_provider
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import f5_provider_spec
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
socket_path = None
transport = 'rest'
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning("'provider' is unnecessary when using 'network_cli' and will be ignored")
elif self._play_context.connection == 'local':
provider = load_provider(f5_provider_spec, self._task.args)
transport = provider['transport'] or transport
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'bigiq'
pc.remote_addr = provider.get('server', self._play_context.remote_addr)
pc.port = int(provider['server_port'] or self._play_context.port or 22)
pc.remote_user = provider.get('user', self._play_context.connection_user)
pc.password = provider.get('password', self._play_context.password)
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'Unable to open shell. Please see: '
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context, which should be
            # enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while '(config' in to_text(out, errors='surrogate_then_replace').strip():
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(tmp, task_vars)
return result
|
the-stack_106_14718
|
#!/usr/bin/env python
"""The setup script."""
from os.path import exists
from setuptools import find_packages, setup
import versioneer
readme = open("README.rst").read() if exists("README.rst") else ""
requirements = ["click", "docker"]
setup(
name="pbs-ci",
description="Continuous integration utility for PBS",
long_description=readme,
maintainer="Anderson Banihirwe",
maintainer_email="[email protected]",
url="https://github.com/NCAR/pbs-ci",
packages=find_packages(),
package_dir={"pbs-ci": "pbs-ci"},
include_package_data=True,
install_requires=requirements,
license="Apache 2.0",
zip_safe=False,
keywords="pbs-ci",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
entry_points="""
[console_scripts]
pbs-ci-compose=pbs_ci.compose:docker_compose
pbs-ci-execute=pbs_ci.execute:main
""",
)
|
the-stack_106_14719
|
#!/usr/bin/env python3
import sys
import os
import argparse
import yaml
import math
from math import sqrt
import warnings
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
from KicadModTree import * # NOQA
from KicadModTree.nodes.base.Pad import Pad # NOQA
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
from ipc_pad_size_calculators import *
from quad_dual_pad_border import add_dual_or_quad_pad_border
sys.path.append(os.path.join(sys.path[0], "..", "utils"))
from ep_handling_utils import getEpRoundRadiusParams
ipc_density = 'nominal'
ipc_doc_file = '../ipc_definitions.yaml'
category = 'NoLead'
default_library = 'Package_DFN_QFN'
DEFAULT_PASTE_COVERAGE = 0.65
DEFAULT_VIA_PASTE_CLEARANCE = 0.15
DEFAULT_MIN_ANNULAR_RING = 0.15
SILK_MIN_LEN = 0.1
DEBUG_LEVEL = 0
def roundToBase(value, base):
return round(value / base) * base
class NoLead():
def __init__(self, configuration):
self.configuration = configuration
with open(ipc_doc_file, 'r') as ipc_stream:
try:
self.ipc_defintions = yaml.safe_load(ipc_stream)
self.configuration['min_ep_to_pad_clearance'] = 0.2
# ToDo: find a settings file that can contain these.
self.configuration['paste_radius_ratio'] = 0.25
self.configuration['paste_maximum_radius'] = 0.25
if 'ipc_generic_rules' in self.ipc_defintions:
self.configuration['min_ep_to_pad_clearance'] = self.ipc_defintions['ipc_generic_rules'].get(
'min_ep_to_pad_clearance', 0.2)
except yaml.YAMLError as exc:
print(exc)
def calcPadDetails(self, device_dimensions, EP_size, ipc_data, ipc_round_base):
# Zmax = Lmin + 2JT + √(CL^2 + F^2 + P^2)
# Gmin = Smax − 2JH − √(CS^2 + F^2 + P^2)
# Xmax = Wmin + 2JS + √(CW^2 + F^2 + P^2)
# Some manufacturers do not list the terminal spacing (S) in their datasheet but list the terminal length (T)
# Then one can calculate
        # Stol(RMS) = √(Ltol^2 + 2*Ttol^2)
# Smin = Lmin - 2*Tmax
# Smax(RMS) = Smin + Stol(RMS)
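        #
        # Worked example (illustrative numbers only, not from any datasheet):
        # with Lmin = 3.9, JT = 0.3, CL = CS = F = 0.1 and P = 0.05 the
        # tolerance term is sqrt(0.1^2 + 0.1^2 + 0.05^2) = 0.15, so
        #   Zmax = 3.9 + 2*0.3 + 0.15 = 4.65
        # and with Smax = 2.6, JH = 0.05:
        #   Gmin = 2.6 - 2*0.05 - 0.15 = 2.35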
manf_tol = {
'F': self.configuration.get('manufacturing_tolerance', 0.1),
'P': self.configuration.get('placement_tolerance', 0.05)
}
pull_back_0 = TolerancedSize(nominal=0)
pull_back = device_dimensions.get('lead_to_edge', pull_back_0)
if 'lead_center_pos_x' in device_dimensions or 'lead_center_pos_y' in device_dimensions:
Gmin_x, Zmax_x, Xmax = ipc_pad_center_plus_size(ipc_data, ipc_round_base, manf_tol,
center_position=device_dimensions.get(
'lead_center_pos_x', TolerancedSize(nominal=0)),
lead_length=device_dimensions.get('lead_len_H'),
lead_width=device_dimensions['lead_width'])
Gmin_y, Zmax_y, Xmax_y_ignored = ipc_pad_center_plus_size(ipc_data, ipc_round_base, manf_tol,
center_position=device_dimensions.get(
'lead_center_pos_y', TolerancedSize(nominal=0)),
lead_length=device_dimensions.get('lead_len_H'),
lead_width=device_dimensions['lead_width'])
else:
Gmin_x, Zmax_x, Xmax = ipc_body_edge_inside_pull_back(
ipc_data, ipc_round_base, manf_tol,
body_size=device_dimensions['body_size_x'],
lead_width=device_dimensions['lead_width'],
lead_len=device_dimensions.get('lead_len_H'),
body_to_inside_lead_edge=device_dimensions.get('body_to_inside_lead_edge'),
heel_reduction=device_dimensions.get('heel_reduction', 0),
pull_back=pull_back
)
Gmin_y, Zmax_y, Xmax_y_ignored = ipc_body_edge_inside_pull_back(
ipc_data, ipc_round_base, manf_tol,
body_size=device_dimensions['body_size_y'],
lead_width=device_dimensions['lead_width'],
lead_len=device_dimensions.get('lead_len_V'),
body_to_inside_lead_edge=device_dimensions.get('body_to_inside_lead_edge'),
heel_reduction=device_dimensions.get('heel_reduction', 0),
pull_back=pull_back
)
min_ep_to_pad_clearance = self.configuration['min_ep_to_pad_clearance']
heel_reduction_max = 0
if Gmin_x - 2 * min_ep_to_pad_clearance < EP_size['x']:
heel_reduction_max = ((EP_size['x'] + 2 * min_ep_to_pad_clearance - Gmin_x) / 2)
#print('{}, {}, {}'.format(Gmin_x, EP_size['x'], min_ep_to_pad_clearance))
Gmin_x = EP_size['x'] + 2 * min_ep_to_pad_clearance
if Gmin_y - 2 * min_ep_to_pad_clearance < EP_size['y']:
heel_reduction = ((EP_size['y'] + 2 * min_ep_to_pad_clearance - Gmin_y) / 2)
if heel_reduction > heel_reduction_max:
heel_reduction_max = heel_reduction
Gmin_y = EP_size['y'] + 2 * min_ep_to_pad_clearance
heel_reduction_max += device_dimensions.get('heel_reduction', 0) # include legacy stuff
if heel_reduction_max > 0 and DEBUG_LEVEL >= 1:
print('Heel reduced by {:.4f} to reach minimum EP to pad clearances'.format(heel_reduction_max))
Pad = {}
Pad['left'] = {'center': [-(Zmax_x + Gmin_x) / 4, 0], 'size': [(Zmax_x - Gmin_x) / 2, Xmax]}
Pad['right'] = {'center': [(Zmax_x + Gmin_x) / 4, 0], 'size': [(Zmax_x - Gmin_x) / 2, Xmax]}
Pad['top'] = {'center': [0, -(Zmax_y + Gmin_y) / 4], 'size': [Xmax, (Zmax_y - Gmin_y) / 2]}
Pad['bottom'] = {'center': [0, (Zmax_y + Gmin_y) / 4], 'size': [Xmax, (Zmax_y - Gmin_y) / 2]}
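        #
        # Continuing the worked example above (Zmax = 4.65, Gmin = 2.35 and an
        # assumed Xmax = 0.30), the left pad would be centred at
        # (-(4.65 + 2.35) / 4, 0) = (-1.75, 0) with size
        # ((4.65 - 2.35) / 2, 0.30) = (1.15, 0.30).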
return Pad
@staticmethod
def deviceDimensions(device_size_data, fp_id):
unit = device_size_data.get('unit')
dimensions = {
'body_size_x': TolerancedSize.fromYaml(device_size_data, base_name='body_size_x', unit=unit),
'body_size_y': TolerancedSize.fromYaml(device_size_data, base_name='body_size_y', unit=unit),
'lead_width': TolerancedSize.fromYaml(device_size_data, base_name='lead_width', unit=unit),
'pitch': TolerancedSize.fromYaml(device_size_data, base_name='pitch', unit=unit).nominal
}
dimensions['has_EP'] = False
if 'EP_size_x_min' in device_size_data and 'EP_size_x_max' in device_size_data or 'EP_size_x' in device_size_data:
dimensions['EP_size_x'] = TolerancedSize.fromYaml(device_size_data, base_name='EP_size_x', unit=unit)
dimensions['EP_size_y'] = TolerancedSize.fromYaml(device_size_data, base_name='EP_size_y', unit=unit)
dimensions['has_EP'] = True
dimensions['EP_center_x'] = TolerancedSize(nominal=0)
dimensions['EP_center_y'] = TolerancedSize(nominal=0)
if 'EP_center_x' in device_size_data and 'EP_center_y' in device_size_data:
dimensions['EP_center_x'] = TolerancedSize.fromYaml(
device_size_data, base_name='EP_center_x', unit=unit)
dimensions['EP_center_y'] = TolerancedSize.fromYaml(
device_size_data, base_name='EP_center_y', unit=unit)
if 'heel_reduction' in device_size_data:
print(
"\033[1;35mThe use of manual heel reduction is deprecated. It is automatically calculated from the minimum EP to pad clearance (ipc config file)\033[0m"
)
dimensions['heel_reduction'] = device_size_data.get('heel_reduction', 0)
if 'lead_to_edge' in device_size_data:
dimensions['lead_to_edge'] = TolerancedSize.fromYaml(device_size_data, base_name='lead_to_edge', unit=unit)
if 'lead_center_pos_x' in device_size_data:
dimensions['lead_center_pos_x'] = TolerancedSize.fromYaml(
device_size_data, base_name='lead_center_pos_x', unit=unit)
if 'lead_center_to_center_x' in device_size_data:
dimensions['lead_center_pos_x'] = TolerancedSize.fromYaml(
device_size_data, base_name='lead_center_to_center_x', unit=unit) / 2
if 'lead_center_pos_y' in device_size_data:
dimensions['lead_center_pos_y'] = TolerancedSize.fromYaml(
device_size_data, base_name='lead_center_pos_y', unit=unit)
if 'lead_center_to_center_y' in device_size_data:
dimensions['lead_center_pos_y'] = TolerancedSize.fromYaml(
device_size_data, base_name='lead_center_to_center_y', unit=unit) / 2
dimensions['lead_len_H'] = None
dimensions['lead_len_V'] = None
if 'lead_len_H' in device_size_data and 'lead_len_V' in device_size_data:
dimensions['lead_len_H'] = TolerancedSize.fromYaml(device_size_data, base_name='lead_len_H', unit=unit)
dimensions['lead_len_V'] = TolerancedSize.fromYaml(device_size_data, base_name='lead_len_V', unit=unit)
elif 'lead_len' in device_size_data or (
'lead_len_min' in device_size_data and 'lead_len_max' in device_size_data):
dimensions['lead_len_H'] = TolerancedSize.fromYaml(device_size_data, base_name='lead_len', unit=unit)
dimensions['lead_len_V'] = dimensions['lead_len_H']
if 'body_to_inside_lead_edge' in device_size_data:
dimensions['body_to_inside_lead_edge'] = TolerancedSize.fromYaml(
device_size_data, base_name='body_to_inside_lead_edge', unit=unit)
elif dimensions['lead_len_H'] is None:
raise KeyError('{}: Either lead length or inside lead to edge dimension must be given.'.format(fp_id))
return dimensions
def generateFootprint(self, device_params, fp_id):
print('Building footprint for parameter set: {}'.format(fp_id))
device_dimensions = NoLead.deviceDimensions(device_params, fp_id)
if device_dimensions['has_EP'] and 'thermal_vias' in device_params:
self.__createFootprintVariant(device_params, device_dimensions, True)
self.__createFootprintVariant(device_params, device_dimensions, False)
def __createFootprintVariant(self, device_params, device_dimensions, with_thermal_vias):
fab_line_width = self.configuration.get('fab_line_width', 0.1)
silk_line_width = self.configuration.get('silk_line_width', 0.12)
lib_name = device_params.get('library', default_library)
pincount = device_params['num_pins_x'] * 2 + device_params['num_pins_y'] * 2
default_ipc_config = 'qfn_pull_back' if 'lead_to_edge' in device_params else 'qfn'
if device_params.get('ipc_class', default_ipc_config) == 'qfn_pull_back':
ipc_reference = 'ipc_spec_flat_no_lead_pull_back'
else:
ipc_reference = 'ipc_spec_flat_no_lead'
used_density = device_params.get('ipc_density', ipc_density)
ipc_data_set = self.ipc_defintions[ipc_reference][used_density]
ipc_round_base = self.ipc_defintions[ipc_reference]['round_base']
layout = ''
if device_dimensions['has_EP']:
name_format = self.configuration['fp_name_EP_format_string_no_trailing_zero']
if 'EP_size_x_overwrite' in device_params:
EP_size = {
'x': device_params['EP_size_x_overwrite'],
'y': device_params['EP_size_y_overwrite']
}
else:
EP_size = {
'x': device_dimensions['EP_size_x'].nominal,
'y': device_dimensions['EP_size_y'].nominal
}
EP_center = {
'x': device_dimensions['EP_center_x'].nominal,
'y': device_dimensions['EP_center_y'].nominal
}
else:
name_format = self.configuration['fp_name_format_string_no_trailing_zero']
if device_params.get('use_name_format', 'QFN') == 'LGA':
name_format = self.configuration['fp_name_lga_format_string_no_trailing_zero']
if device_params['num_pins_x'] > 0 and device_params['num_pins_y'] > 0:
layout = self.configuration['lga_layout_border'].format(
nx=device_params['num_pins_x'], ny=device_params['num_pins_y'])
EP_size = {'x': 0, 'y': 0}
if 'custom_name_format' in device_params:
name_format = device_params['custom_name_format']
pad_details = self.calcPadDetails(device_dimensions, EP_size, ipc_data_set, ipc_round_base)
pad_suffix = '_Pad{pad_x:.2f}x{pad_y:.2f}mm'.format(pad_x=pad_details['left']['size'][0],
pad_y=pad_details['left']['size'][1])
pad_suffix = '' if device_params.get('include_pad_size', 'none') not in ('fp_name_only', 'both') else pad_suffix
        pad_suffix_3d = '' if device_params.get('include_pad_size', 'none') not in ('both',) else pad_suffix
suffix = device_params.get('suffix', '')
suffix_3d = suffix if device_params.get('include_suffix_in_3dpath', 'True') == 'True' else ""
model3d_path_prefix = self.configuration.get('3d_model_prefix', '${KICAD6_3DMODEL_DIR}')
size_x = device_dimensions['body_size_x'].nominal
size_y = device_dimensions['body_size_y'].nominal
fp_name = name_format.format(
man=device_params.get('manufacturer', ''),
mpn=device_params.get('part_number', ''),
pkg=device_params['device_type'],
pincount=pincount,
size_y=size_y,
size_x=size_x,
pitch=device_dimensions['pitch'],
layout=layout,
ep_size_x=EP_size['x'],
ep_size_y=EP_size['y'],
suffix=pad_suffix,
suffix2=suffix,
vias=self.configuration.get('thermal_via_suffix', '_ThermalVias') if with_thermal_vias else ''
).replace('__', '_').lstrip('_')
fp_name_2 = name_format.format(
man=device_params.get('manufacturer', ''),
mpn=device_params.get('part_number', ''),
pkg=device_params['device_type'],
pincount=pincount,
size_y=size_y,
size_x=size_x,
pitch=device_dimensions['pitch'],
layout=layout,
ep_size_x=EP_size['x'],
ep_size_y=EP_size['y'],
suffix=pad_suffix_3d,
suffix2=suffix_3d,
vias=''
).replace('__', '_').lstrip('_')
if 'fp_name_prefix' in device_params:
prefix = device_params['fp_name_prefix']
if not prefix.endswith('_'):
prefix += '_'
fp_name = prefix + fp_name
fp_name_2 = prefix + fp_name_2
model_name = '{model3d_path_prefix:s}{lib_name:s}.3dshapes/{fp_name:s}.wrl'\
.format(
model3d_path_prefix=model3d_path_prefix, lib_name=lib_name,
fp_name=fp_name_2)
# print(fp_name)
# print(pad_details)
kicad_mod = Footprint(fp_name)
# init kicad footprint
kicad_mod.setDescription(
"{manufacturer} {mpn} {package}, {pincount} Pin ({datasheet}), generated with kicad-footprint-generator {scriptname}"
.format(
manufacturer=device_params.get('manufacturer', ''),
package=device_params['device_type'],
mpn=device_params.get('part_number', ''),
pincount=pincount,
datasheet=device_params['size_source'],
scriptname=os.path.basename(__file__).replace(" ", " ")
).lstrip())
kicad_mod.setTags(self.configuration['keyword_fp_string']
.format(
man=device_params.get('manufacturer', ''),
package=device_params['device_type'],
category=category
).lstrip())
kicad_mod.setAttribute('smd')
pad_radius = add_dual_or_quad_pad_border(kicad_mod, self.configuration, pad_details, device_params)
if device_dimensions['has_EP']:
pad_shape_details = getEpRoundRadiusParams(device_params, self.configuration, pad_radius)
ep_pad_number = device_params.get('EP_pin_number', pincount + 1)
if with_thermal_vias:
thermals = device_params['thermal_vias']
paste_coverage = thermals.get('EP_paste_coverage',
device_params.get('EP_paste_coverage', DEFAULT_PASTE_COVERAGE))
kicad_mod.append(ExposedPad(
number=ep_pad_number, size=EP_size,
at=EP_center,
paste_layout=thermals.get('EP_num_paste_pads', device_params.get('EP_num_paste_pads', 1)),
paste_coverage=paste_coverage,
via_layout=thermals.get('count', 0),
paste_between_vias=thermals.get('paste_between_vias'),
paste_rings_outside=thermals.get('paste_rings_outside'),
via_drill=thermals.get('drill', 0.3),
via_grid=thermals.get('grid'),
paste_avoid_via=thermals.get('paste_avoid_via', True),
via_paste_clarance=thermals.get('paste_via_clearance', DEFAULT_VIA_PASTE_CLEARANCE),
min_annular_ring=thermals.get('min_annular_ring', DEFAULT_MIN_ANNULAR_RING),
bottom_pad_min_size=thermals.get('bottom_min_size', 0),
kicad4_compatible=args.kicad4_compatible,
**pad_shape_details
))
else:
kicad_mod.append(ExposedPad(
number=ep_pad_number, size=EP_size,
at=EP_center,
paste_layout=device_params.get('EP_num_paste_pads', 1),
paste_coverage=device_params.get('EP_paste_coverage', DEFAULT_PASTE_COVERAGE),
kicad4_compatible=args.kicad4_compatible,
**pad_shape_details
))
body_edge = {
'left': -size_x / 2,
'right': size_x / 2,
'top': -size_y / 2,
'bottom': size_y / 2
}
bounding_box = body_edge.copy()
if device_params['num_pins_x'] == 0 and EP_size['y'] > size_y:
bounding_box['top'] = -EP_size['y'] / 2
bounding_box['bottom'] = EP_size['y'] / 2
if device_params['num_pins_y'] == 0 and EP_size['x'] > size_x:
bounding_box['left'] = -EP_size['x'] / 2
bounding_box['right'] = EP_size['x'] / 2
if device_params['num_pins_y'] > 0:
bounding_box['left'] = pad_details['left']['center'][0] - pad_details['left']['size'][0] / 2
bounding_box['right'] = pad_details['right']['center'][0] + pad_details['right']['size'][0] / 2
if device_params['num_pins_x'] > 0:
bounding_box['top'] = pad_details['top']['center'][1] - pad_details['top']['size'][1] / 2
bounding_box['bottom'] = pad_details['bottom']['center'][1] + pad_details['bottom']['size'][1] / 2
pad_width = pad_details['top']['size'][0]
for key in body_edge:
if bounding_box[key] < 0:
bounding_box[key] = min(bounding_box[key], body_edge[key])
else:
bounding_box[key] = max(bounding_box[key], body_edge[key])
# ############################ SilkS ##################################
silk_pad_offset = configuration['silk_pad_clearance'] + configuration['silk_line_width'] / 2
silk_offset = configuration['silk_fab_offset']
if device_params['num_pins_x'] == 0:
kicad_mod.append(Line(
start={'x': 0,
'y': body_edge['top'] - silk_offset},
end={'x': body_edge['right'],
'y': body_edge['top'] - silk_offset},
width=configuration['silk_line_width'],
layer="F.SilkS"))
kicad_mod.append(Line(
start={'x': body_edge['left'],
'y': body_edge['bottom'] + silk_offset},
end={'x': body_edge['right'],
'y': body_edge['bottom'] + silk_offset},
width=configuration['silk_line_width'],
layer="F.SilkS", y_mirror=0))
elif device_params['num_pins_y'] == 0:
kicad_mod.append(Line(
start={'y': 0,
'x': body_edge['left'] - silk_offset},
end={'y': body_edge['bottom'],
'x': body_edge['left'] - silk_offset},
width=configuration['silk_line_width'],
layer="F.SilkS"))
kicad_mod.append(Line(
start={'y': body_edge['top'],
'x': body_edge['right'] + silk_offset},
end={'y': body_edge['bottom'],
'x': body_edge['right'] + silk_offset},
width=configuration['silk_line_width'],
layer="F.SilkS", x_mirror=0))
else:
sx1 = -(device_dimensions['pitch'] * (device_params['num_pins_x'] - 1) / 2.0 +
pad_width / 2.0 + silk_pad_offset)
sy1 = -(device_dimensions['pitch'] * (device_params['num_pins_y'] - 1) / 2.0 +
pad_width / 2.0 + silk_pad_offset)
poly_silk = [
{'x': sx1, 'y': body_edge['top'] - silk_offset},
{'x': body_edge['left'] - silk_offset, 'y': body_edge['top'] - silk_offset},
{'x': body_edge['left'] - silk_offset, 'y': sy1}
]
if sx1 - SILK_MIN_LEN < body_edge['left'] - silk_offset:
poly_silk = poly_silk[1:]
if sy1 - SILK_MIN_LEN < body_edge['top'] - silk_offset:
poly_silk = poly_silk[:-1]
if len(poly_silk) > 1:
kicad_mod.append(PolygoneLine(
polygone=poly_silk,
width=configuration['silk_line_width'],
layer="F.SilkS", x_mirror=0))
kicad_mod.append(PolygoneLine(
polygone=poly_silk,
width=configuration['silk_line_width'],
layer="F.SilkS", y_mirror=0))
kicad_mod.append(PolygoneLine(
polygone=poly_silk,
width=configuration['silk_line_width'],
layer="F.SilkS", x_mirror=0, y_mirror=0))
if len(poly_silk) > 2:
kicad_mod.append(Line(
start={'x': sx1, 'y': body_edge['top'] - silk_offset},
end={'x': body_edge['left'] - silk_offset, 'y': body_edge['top'] - silk_offset},
width=configuration['silk_line_width'],
layer="F.SilkS"))
# # ######################## Fabrication Layer ###########################
fab_bevel_size = min(configuration['fab_bevel_size_absolute'],
configuration['fab_bevel_size_relative'] * min(size_x, size_y))
poly_fab = [
{'x': body_edge['left'] + fab_bevel_size, 'y': body_edge['top']},
{'x': body_edge['right'], 'y': body_edge['top']},
{'x': body_edge['right'], 'y': body_edge['bottom']},
{'x': body_edge['left'], 'y': body_edge['bottom']},
{'x': body_edge['left'], 'y': body_edge['top'] + fab_bevel_size},
{'x': body_edge['left'] + fab_bevel_size, 'y': body_edge['top']},
]
kicad_mod.append(PolygoneLine(
polygone=poly_fab,
width=configuration['fab_line_width'],
layer="F.Fab"))
# # ############################ CrtYd ##################################
off = ipc_data_set['courtyard']
grid = configuration['courtyard_grid']
cy1 = roundToBase(bounding_box['top'] - off, grid)
kicad_mod.append(RectLine(
start={
'x': roundToBase(bounding_box['left'] - off, grid),
'y': cy1
},
end={
'x': roundToBase(bounding_box['right'] + off, grid),
'y': roundToBase(bounding_box['bottom'] + off, grid)
},
width=configuration['courtyard_line_width'],
layer='F.CrtYd'))
# ######################### Text Fields ###############################
addTextFields(kicad_mod=kicad_mod, configuration=configuration, body_edges=body_edge,
courtyard={'top': cy1, 'bottom': -cy1}, fp_name=fp_name, text_y_inside_position='center')
##################### Output and 3d model ############################
kicad_mod.append(Model(filename=model_name))
output_dir = '{lib_name:s}.pretty/'.format(lib_name=lib_name)
if not os.path.isdir(output_dir): # returns false if path does not yet exist!! (Does not check path validity)
os.makedirs(output_dir)
filename = '{outdir:s}{fp_name:s}.kicad_mod'.format(outdir=output_dir, fp_name=fp_name)
file_handler = KicadFileHandler(kicad_mod)
file_handler.writeFile(filename)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='use config .yaml files to create footprints.')
parser.add_argument('files', metavar='file', type=str, nargs='+',
help='list of files holding information about what devices should be created.')
parser.add_argument('--global_config', type=str, nargs='?', help='the config file defining how the footprint will look like. (KLC)',
default='../../tools/global_config_files/config_KLCv3.0.yaml')
parser.add_argument('--series_config', type=str, nargs='?',
help='the config file defining series parameters.', default='../package_config_KLCv3.yaml')
parser.add_argument('--density', type=str, nargs='?', help='IPC density level (L,N,M)', default='N')
parser.add_argument('--ipc_doc', type=str, nargs='?', help='IPC definition document',
default='../ipc_definitions.yaml')
parser.add_argument('--force_rectangle_pads', action='store_true',
help='Force the generation of rectangle pads instead of rounded rectangle')
parser.add_argument('--kicad4_compatible', action='store_true', help='Create footprints kicad 4 compatible')
parser.add_argument('-v', '--verbose', action='count', help='set debug level')
args = parser.parse_args()
if args.density == 'L':
ipc_density = 'least'
elif args.density == 'M':
ipc_density = 'most'
if args.verbose:
DEBUG_LEVEL = args.verbose
ipc_doc_file = args.ipc_doc
with open(args.global_config, 'r') as config_stream:
try:
configuration = yaml.safe_load(config_stream)
except yaml.YAMLError as exc:
print(exc)
with open(args.series_config, 'r') as config_stream:
try:
configuration.update(yaml.safe_load(config_stream))
except yaml.YAMLError as exc:
print(exc)
if args.force_rectangle_pads or args.kicad4_compatible:
configuration['round_rect_max_radius'] = None
configuration['round_rect_radius_ratio'] = 0
configuration['kicad4_compatible'] = args.kicad4_compatible
for filepath in args.files:
no_lead = NoLead(configuration)
with open(filepath, 'r') as command_stream:
try:
cmd_file = yaml.safe_load(command_stream)
except yaml.YAMLError as exc:
print(exc)
for pkg in cmd_file:
no_lead.generateFootprint(cmd_file[pkg], pkg)
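# ---------------------------------------------------------------------------
# Illustrative device definition (hypothetical values, listed only to document
# the keys this generator reads; real definition files ship with the
# kicad-footprint-generator repository). A YAML entry would look roughly like:
#
#   QFN-16_Example_3x3mm_P0.5mm:
#     device_type: 'QFN'
#     size_source: 'https://example.com/datasheet.pdf'
#     body_size_x: 3.0
#     body_size_y: 3.0
#     lead_width: 0.25
#     lead_len: 0.4
#     pitch: 0.5
#     num_pins_x: 4
#     num_pins_y: 4
#     EP_size_x: 1.7
#     EP_size_y: 1.7
# ---------------------------------------------------------------------------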
|
the-stack_106_14721
|
import numpy as np
from tensorflow.keras.datasets import mnist
from ournn.tools.preprocess import sparse_one_hot_encode
from ournn.tools.matrix_tools import *
from ournn.frame import skeleton
from ournn.Layer.layers import *
from ournn.optimizers import *
from ournn.losses import *
# Reluctantly grab the MNIST data from tensorflow.keras (used only as a data source)
(x,y),(t,d)=mnist.load_data()
x=np.expand_dims(x,axis=-1)
y=y.reshape(-1,1)
x,y=x[0:400],y[0:400]
x = (x - x.min()) / (x.max() - x.min())  # min-max scale pixel values to [0, 1]
# One-hot encode the labels
y=sparse_one_hot_encode(y)
# Initialize the framework
sk=skeleton(name="Model1",Regularization=None)
# Add the different layers to the framework
sk.add(
[
Conv2d(kernal_size=(5,5),padding=True,stride=2,channel_in=1,channel_o=3),
Flatten(),
Fully_connected( output_dim=500,act="relu"),
Fully_connected( output_dim=100,act="relu"),
Fully_connected(output_dim=10,act="relu")
]
)
# Optimizer
optimizer=SGD(loss=sparse_softmax_cross_entropy(),sample_size=0.7,lr=1e-5)
# Train
history=sk.train(x,y,epoches=20,train_test_split=0.7,optimizer=optimizer)
# Show the dimension info
sk.show_info()
# Plot the loss and accuracy
sk.visualization()
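# ---------------------------------------------------------------------------
# For reference, a minimal numpy sketch of what one-hot encoding of integer
# labels does (illustrative only; the actual ournn.tools.preprocess
# implementation of sparse_one_hot_encode may differ in detail).
# ---------------------------------------------------------------------------
def _one_hot_reference(labels, num_classes):
    # Place a single 1.0 in each row at the column given by the integer label.
    labels = np.asarray(labels).reshape(-1).astype(int)
    encoded = np.zeros((labels.shape[0], num_classes))
    encoded[np.arange(labels.shape[0]), labels] = 1.0
    return encoded

# Example: _one_hot_reference([3, 1], 10) -> rows with a 1.0 at columns 3 and 1.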
|
the-stack_106_14723
|
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class EventStreamWidgetDefinitionType(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'EVENT_STREAM': "event_stream",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""EventStreamWidgetDefinitionType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Type of the event stream widget.. if omitted defaults to "event_stream", must be one of ["event_stream", ] # noqa: E501
Keyword Args:
value (str): Type of the event stream widget.. if omitted defaults to "event_stream", must be one of ["event_stream", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
value = "event_stream"
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
|
the-stack_106_14726
|
# Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# https://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import warnings
from time import time
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix, issparse
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils._openmp_helpers import _openmp_effective_n_threads
from ..utils.validation import check_non_negative
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
# mypy error: Module 'sklearn.manifold' has no attribute '_utils'
from . import _utils # type: ignore
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from . import _barnes_hut_tsne # type: ignore
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : ndarray of shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose
)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : sparse matrix of shape (n_samples, n_samples)
Distances of samples to its n_neighbors nearest neighbors. All other
distances are left to zero (and are not materialized in memory).
Matrix should be of CSR format.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : sparse matrix of shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors. Matrix
will be of CSR format.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances.sort_indices()
n_samples = distances.shape[0]
distances_data = distances.data.reshape(n_samples, -1)
distances_data = distances_data.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances_data, desired_perplexity, verbose
)
assert np.all(np.isfinite(conditional_P)), "All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix(
(conditional_P.ravel(), distances.indices, distances.indptr),
shape=(n_samples, n_samples),
)
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s".format(duration))
return P
def _kl_divergence(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
skip_num_points=0,
compute_error=True,
):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist /= degrees_of_freedom
dist += 1.0
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if compute_error:
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
else:
kl_divergence = np.nan
# Gradient: dC/dY
    # pdist always returns double precision distances. Thus we need to take
    # care to allocate the gradient with the same dtype as `params`.
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order="K"), X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_bh(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
angle=0.5,
skip_num_points=0,
verbose=False,
compute_error=True,
num_threads=1,
):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2).
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : sparse matrix of shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized. Matrix should be of CSR format.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float, default=0.5
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int, default=False
Verbosity level.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
num_threads : int, default=1
Number of threads used to compute the gradient. This is set here to
avoid calling _openmp_effective_n_threads for each gradient step.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(
val_P,
X_embedded,
neighbors,
indptr,
grad,
angle,
n_components,
verbose,
dof=degrees_of_freedom,
compute_error=compute_error,
num_threads=num_threads,
)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(
objective,
p0,
it,
n_iter,
n_iter_check=1,
n_iter_without_progress=300,
momentum=0.8,
learning_rate=200.0,
min_gain=0.01,
min_grad_norm=1e-7,
verbose=0,
args=None,
kwargs=None,
):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
check_convergence = (i + 1) % n_iter_check == 0
# only compute the error when needed
kwargs["compute_error"] = check_convergence or i == n_iter - 1
error, grad = objective(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print(
"[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration)
)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress)
)
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm)
)
break
return p, error, i
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
r"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, default=5
The number of neighbors that will be considered. Should be fewer than
        `n_samples / 2` to ensure the trustworthiness lies within [0, 1], as
mentioned in [1]_. An error will be raised otherwise.
metric : str or callable, default='euclidean'
Which metric to use for computing pairwise distances between samples
from the original input space. If metric is 'precomputed', X must be a
matrix of pairwise distances or squared distances. Otherwise, for a list
of available metrics, see the documentation of argument metric in
`sklearn.pairwise.pairwise_distances` and metrics listed in
`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the
"cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.
.. versionadded:: 0.20
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
References
----------
.. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood
Preservation in Nonlinear Projection Methods: An Experimental Study.
In Proceedings of the International Conference on Artificial Neural Networks
(ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.
.. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
       Local Structure. Proceedings of the Twelfth International Conference on
Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
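    Examples
    --------
    >>> # A minimal illustrative check on random data (not taken from the
    >>> # references above); the score lies in [0, 1] for n_neighbors < n_samples / 2.
    >>> import numpy as np
    >>> from sklearn.decomposition import PCA
    >>> from sklearn.manifold import trustworthiness
    >>> X = np.random.RandomState(0).uniform(size=(100, 10))
    >>> X_embedded = PCA(n_components=2).fit_transform(X)
    >>> bool(0.0 <= trustworthiness(X, X_embedded, n_neighbors=5) <= 1.0)
    True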
"""
n_samples = X.shape[0]
if n_neighbors >= n_samples / 2:
raise ValueError(
f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
f" ({n_samples / 2})"
)
dist_X = pairwise_distances(X, metric=metric)
if metric == "precomputed":
dist_X = dist_X.copy()
# we set the diagonal to np.inf to exclude the points themselves from
# their own neighborhood
np.fill_diagonal(dist_X, np.inf)
ind_X = np.argsort(dist_X, axis=1)
# `ind_X[i]` is the index of sorted distances between i and other samples
ind_X_embedded = (
NearestNeighbors(n_neighbors=n_neighbors)
.fit(X_embedded)
.kneighbors(return_distance=False)
)
# We build an inverted index of neighbors in the input space: For sample i,
# we define `inverted_index[i]` as the inverted index of sorted distances:
# inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
inverted_index = np.zeros((n_samples, n_samples), dtype=int)
ordered_indices = np.arange(n_samples + 1)
inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
ranks = (
inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
)
t = np.sum(ranks[ranks > 0])
t = 1.0 - t * (
2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
)
return t
class TSNE(BaseEstimator):
"""T-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, default=2
Dimension of the embedded space.
perplexity : float, default=30.0
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. Different values can result in significantly
        different results. The perplexity must be less than the number
of samples.
early_exaggeration : float, default=12.0
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float or 'auto', default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
Note that many other t-SNE implementations (bhtsne, FIt-SNE, openTSNE,
etc.) use a definition of learning_rate that is 4 times smaller than
ours. So our learning_rate=200 corresponds to learning_rate=800 in
those other implementations. The 'auto' option sets the learning_rate
to `max(N / early_exaggeration / 4, 50)` where N is the sample size,
following [4] and [5]. This will become default in 1.2.
n_iter : int, default=1000
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 1.1
init : {'random', 'pca'} or ndarray of shape (n_samples, n_components), \
default='random'
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization. `init='pca'`
will become default in 1.2.
verbose : int, default=0
Verbosity level.
random_state : int, RandomState instance or None, default=None
Determines the random number generator. Pass an int for reproducible
results across multiple function calls. Note that different
initializations might result in different local minima of the cost
function. See :term:`Glossary <random_state>`.
method : str, default='barnes_hut'
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float, default=0.5
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater than 0.8 has quickly increasing error.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. This parameter
has no impact when ``metric="precomputed"`` or
(``metric="euclidean"`` and ``method="exact"``).
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.22
square_distances : True, default='deprecated'
This parameter has no effect since distance values are always squared
since 1.1.
.. deprecated:: 1.1
`square_distances` has no effect from 1.1 and will be removed in
1.3.
Attributes
----------
embedding_ : array-like of shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run.
See Also
--------
sklearn.decomposition.PCA : Principal component analysis that is a linear
dimensionality reduction method.
sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
kernels and PCA.
MDS : Manifold learning using multidimensional scaling.
Isomap : Manifold learning based on Isometric Mapping.
LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
SpectralEmbedding : Spectral embedding for non-linear dimensionality.
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
https://lvdmaaten.github.io/tsne/
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
[4] Belkina, A. C., Ciccolella, C. O., Anno, R., Halpert, R., Spidlen, J.,
& Snyder-Cappione, J. E. (2019). Automated optimized parameters for
T-distributed stochastic neighbor embedding improve visualization
and analysis of large datasets. Nature Communications, 10(1), 1-12.
[5] Kobak, D., & Berens, P. (2019). The art of using t-SNE for single-cell
transcriptomics. Nature Communications, 10(1), 1-14.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2, learning_rate='auto',
... init='random', perplexity=3).fit_transform(X)
>>> X_embedded.shape
(4, 2)
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(
self,
n_components=2,
*,
perplexity=30.0,
early_exaggeration=12.0,
learning_rate="warn",
n_iter=1000,
n_iter_without_progress=300,
min_grad_norm=1e-7,
metric="euclidean",
metric_params=None,
init="warn",
verbose=0,
random_state=None,
method="barnes_hut",
angle=0.5,
n_jobs=None,
square_distances="deprecated",
):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.metric_params = metric_params
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.n_jobs = n_jobs
self.square_distances = square_distances
def _check_params_vs_input(self, X):
if self.perplexity >= X.shape[0]:
raise ValueError("perplexity must be less than n_samples")
def _fit(self, X, skip_num_points=0):
"""Private function to fit the model using X as training data."""
if isinstance(self.init, str) and self.init == "warn":
# See issue #18018
warnings.warn(
"The default initialization in TSNE will change "
"from 'random' to 'pca' in 1.2.",
FutureWarning,
)
self._init = "random"
else:
self._init = self.init
if self.learning_rate == "warn":
# See issue #18018
warnings.warn(
"The default learning rate in TSNE will change "
"from 200.0 to 'auto' in 1.2.",
FutureWarning,
)
self._learning_rate = 200.0
else:
self._learning_rate = self.learning_rate
if isinstance(self._init, str) and self._init == "pca" and issparse(X):
raise TypeError(
"PCA initialization is currently not supported "
"with the sparse input matrix. Use "
'init="random" instead.'
)
if self.method not in ["barnes_hut", "exact"]:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.square_distances != "deprecated":
warnings.warn(
"The parameter `square_distances` has not effect and will be "
"removed in version 1.3.",
FutureWarning,
)
if self._learning_rate == "auto":
# See issue #18018
self._learning_rate = X.shape[0] / self.early_exaggeration / 4
self._learning_rate = np.maximum(self._learning_rate, 50)
else:
if not (self._learning_rate > 0):
raise ValueError("'learning_rate' must be a positive number or 'auto'.")
if self.method == "barnes_hut":
X = self._validate_data(
X,
accept_sparse=["csr"],
ensure_min_samples=2,
dtype=[np.float32, np.float64],
)
else:
X = self._validate_data(
X, accept_sparse=["csr", "csc", "coo"], dtype=[np.float32, np.float64]
)
if self.metric == "precomputed":
if isinstance(self._init, str) and self._init == "pca":
raise ValueError(
'The parameter init="pca" cannot be used with metric="precomputed".'
)
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
check_non_negative(
X,
"TSNE.fit(). With metric='precomputed', X "
"should contain positive distances.",
)
if self.method == "exact" and issparse(X):
raise TypeError(
'TSNE with method="exact" does not accept sparse '
'precomputed distance matrix. Use method="barnes_hut" '
"or provide the dense distance matrix."
)
if self.method == "barnes_hut" and self.n_components > 3:
raise ValueError(
"'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree."
)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError(
"early_exaggeration must be at least 1, but is {}".format(
self.early_exaggeration
)
)
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
# Euclidean is squared here, rather than using **= 2,
# because euclidean_distances already calculates
# squared distances, and returns np.sqrt(dist) for
# squared=False.
# Also, Euclidean is slower for n_jobs>1, so don't set here
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
metric_params_ = self.metric_params or {}
distances = pairwise_distances(
X, metric=self.metric, n_jobs=self.n_jobs, **metric_params_
)
if np.any(distances < 0):
raise ValueError(
"All distances should be positive, the metric given is not correct"
)
if self.metric != "euclidean":
distances **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
assert np.all(
P <= 1
), "All probabilities should be less than or equal to one"
else:
# Compute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
n_neighbors = min(n_samples - 1, int(3.0 * self.perplexity + 1))
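# Worked example (illustrative, not from the original source): with the
# default perplexity of 30, int(3.0 * 30 + 1) = 91 neighbors are requested,
# so datasets with 92 or more samples use exactly 91 neighbors and smaller
# ones fall back to n_samples - 1.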
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(n_neighbors))
# Find the nearest neighbors for every point
knn = NearestNeighbors(
algorithm="auto",
n_jobs=self.n_jobs,
n_neighbors=n_neighbors,
metric=self.metric,
metric_params=self.metric_params,
)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print(
"[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration
)
)
t0 = time()
distances_nn = knn.kneighbors_graph(mode="distance")
duration = time() - t0
if self.verbose:
print(
"[t-SNE] Computed neighbors for {} samples in {:.3f}s...".format(
n_samples, duration
)
)
# Free the memory used by the ball_tree
del knn
# knn returns the euclidean distance but we need it squared
# to be consistent with the 'exact' method. Note that the
# method was derived using the euclidean metric in the
# input space. Not sure of the implication of using a different
# metric.
distances_nn.data **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, self.perplexity, self.verbose)
if isinstance(self._init, np.ndarray):
X_embedded = self._init
elif self._init == "pca":
pca = PCA(
n_components=self.n_components,
svd_solver="randomized",
random_state=random_state,
)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
# TODO: Update in 1.2
# PCA is rescaled so that PC1 has standard deviation 1e-4 which is
# the default value for random initialization. See issue #18018.
warnings.warn(
"The PCA initialization in TSNE will change to "
"have the standard deviation of PC1 equal to 1e-4 "
"in 1.2. This will ensure better convergence.",
FutureWarning,
)
# X_embedded = X_embedded / np.std(X_embedded[:, 0]) * 1e-4
elif self._init == "random":
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.standard_normal(
size=(n_samples, self.n_components)
).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1, 1)
return self._tsne(
P,
degrees_of_freedom,
n_samples,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points,
)
def _tsne(
self,
P,
degrees_of_freedom,
n_samples,
X_embedded,
neighbors=None,
skip_num_points=0,
):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self._learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == "barnes_hut":
obj_func = _kl_divergence_bh
opt_args["kwargs"]["angle"] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args["kwargs"]["verbose"] = self.verbose
# Get the number of threads for gradient computation here to
# avoid recomputing it at each iteration.
opt_args["kwargs"]["num_threads"] = _openmp_effective_n_threads()
else:
obj_func = _kl_divergence
# Learning schedule (part 1): do 250 iterations with lower momentum but
# higher learning rate controlled via the early exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print(
"[t-SNE] KL divergence after %d iterations with early exaggeration: %f"
% (it + 1, kl_divergence)
)
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
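# Illustrative schedule (assumes the default settings, which are not stated
# here): with n_iter=1000 and _EXPLORATION_N_ITER=250, stage 1 runs 250
# exaggerated iterations and stage 2 runs up to the remaining 750.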
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args["n_iter"] = self.n_iter
opt_args["it"] = it + 1
opt_args["momentum"] = 0.8
opt_args["n_iter_without_progress"] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print(
"[t-SNE] KL divergence after %d iterations: %f"
% (it + 1, kl_divergence)
)
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self._check_params_vs_input(X)
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
X_new : array of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit_transform(X)
return self
def _more_tags(self):
return {"pairwise": self.metric == "precomputed"}
|
the-stack_106_14732
|
import unittest
import pandas as pd
from chemcharts.core.container.chemdata import ChemData
from chemcharts.core.container.fingerprint import *
from chemcharts.core.functions.binning import Binning
class TestBinning(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
smiles = Smiles(["COc1ccc(-c2c(-c3ccc(S(N)(=O)=O)cc3)[nH]c3ccccc23)cc1",
"COc1ccc(-c2c(-c3ccc(S(N)(=O)=O)cc3)oc3ccccc23)cc1F",
"Cc1cc(C)c(S(=O)(=O)N2CCN(C(C)c3nc(C(C)(C)C)no3)CC2)c(C)c1",
"C1ccc2c(c1)-c1ccc3ccccc3c1C2Cc1nn[nH]n1",
"Cc1ccccc1-c1c(C(=O)N=c2cccc[nH]2)cnc2ccccc12",
"N=c1[nH]c(=O)c2ncn(Cc3cccc4ccccc34)c2[nH]1",
"O=C1c2cccc3c(F)ccc(c23)CN1c1cccnc1"])
values = pd.DataFrame([1, 3, 4, 5, 2, 1, 6], columns=["test_value"])
test_data_set = ChemData(smiles)
test_data_set.set_values(values)
cls.test_chemdata = test_data_set
def test_preparation(self):
binning = Binning()
test_sorted_bin_idx, test_bin_idx = binning._preparation(values=[1, 3, 4, 5, 2, 1, 6],
num_bins=4)
self.assertListEqual([0, 1, 2], test_sorted_bin_idx)
self.assertListEqual([0, 1, 1, 2, 0, 0, 2], test_bin_idx)
def test_group_value_bins(self):
binning = Binning()
test_group_value_bins = binning._group_values_bins(values=[1, 3, 4, 5, 2, 1, 6],
sorted_bin_idx=[0, 1, 2],
bin_idx=[0, 1, 1, 2, 0, 0, 2])
self.assertListEqual([1, 2, 1], test_group_value_bins[0])
def test_calculate_medians(self):
binning = Binning()
test_median_values = binning._calculate_medians(grouped_value_bins=[[1, 2, 1], [3, 4], [5, 6]])
self.assertListEqual([1, 3.5, 5.5], test_median_values)
def test_overwrite_values_medians(self):
binning = Binning()
test_new_values = binning._overwrite_value_medians(bin_idx=[0, 1, 1, 2, 0, 0, 2],
median_values=[1, 3.5, 5.5],
sorted_bin_idx=[0, 1, 2])
self.assertListEqual([1, 3.5, 3.5, 5.5, 1, 1, 5.5], test_new_values)
def test_binning(self):
binning = Binning()
test_binning = binning.binning(self.test_chemdata, 4)
value_df = test_binning.get_values()
self.assertListEqual(list([1, 3.5, 3.5, 5.5, 1, 1, 5.5]),
value_df["test_value"].tolist())
|
the-stack_106_14733
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import logging
import os
import re
import sys
import shlex
import six
from importlib import import_module
from six.moves import configparser
from behave.model import ScenarioOutline
from behave.model_core import FileLocation
from behave.reporter.junit import JUnitReporter
from behave.reporter.summary import SummaryReporter
from behave.tag_expression import make_tag_expression
from behave.formatter.base import StreamOpener
from behave.formatter import _registry as _format_registry
from behave.userdata import UserData, parse_user_define
from behave._types import Unknown
from behave.textutil import select_best_encoding, to_texts
# -- PYTHON 2/3 COMPATIBILITY:
# SINCE Python 3.2: ConfigParser = SafeConfigParser
ConfigParser = configparser.ConfigParser
if six.PY2:
ConfigParser = configparser.SafeConfigParser
# -----------------------------------------------------------------------------
# CONFIGURATION DATA TYPES:
# -----------------------------------------------------------------------------
class LogLevel(object):
names = [
"NOTSET", "CRITICAL", "FATAL", "ERROR",
"WARNING", "WARN", "INFO", "DEBUG",
]
@staticmethod
def parse(levelname, unknown_level=None):
"""
Convert levelname into a numeric log level.
:param levelname: Logging levelname (as string)
:param unknown_level: Used if levelname is unknown (optional).
:return: Numeric log-level or unknown_level, if levelname is unknown.
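Illustrative examples (the numeric values come from the standard
logging module, not from this class):
>>> LogLevel.parse("info")
20
>>> LogLevel.parse("no-such-level", unknown_level=-1)
-1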
"""
return getattr(logging, levelname.upper(), unknown_level)
@classmethod
def parse_type(cls, levelname):
level = cls.parse(levelname, Unknown)
if level is Unknown:
message = "%s is unknown, use: %s" % \
(levelname, ", ".join(cls.names[1:]))
raise argparse.ArgumentTypeError(message)
return level
@staticmethod
def to_string(level):
return logging.getLevelName(level)
# -----------------------------------------------------------------------------
# CONFIGURATION SCHEMA:
# -----------------------------------------------------------------------------
def valid_python_module(path):
try:
module_path, class_name = path.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
except (ValueError, AttributeError, ImportError):
raise argparse.ArgumentTypeError("No module named '%s' was found." % path)
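# Illustrative behaviour (sketch, not from the original source):
# valid_python_module("behave.runner.Runner") returns the Runner class,
# while a dotted path that cannot be imported or resolved, such as
# "no.such.Module", raises argparse.ArgumentTypeError.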
options = [
(("-c", "--no-color"),
dict(action="store_false", dest="color",
help="Disable the use of ANSI color escapes.")),
(("--color",),
dict(action="store_true", dest="color",
help="""Use ANSI color escapes. This is the default
behaviour. This switch is used to override a
configuration file setting.""")),
(("-d", "--dry-run"),
dict(action="store_true",
help="Invokes formatters without executing the steps.")),
(("-D", "--define"),
dict(dest="userdata_defines", type=parse_user_define, action="append",
metavar="NAME=VALUE",
help="""Define user-specific data for the config.userdata dictionary.
Example: -D foo=bar to store it in config.userdata["foo"].""")),
(("-e", "--exclude"),
dict(metavar="PATTERN", dest="exclude_re",
help="""Don't run feature files matching regular expression
PATTERN.""")),
(("-i", "--include"),
dict(metavar="PATTERN", dest="include_re",
help="Only run feature files matching regular expression PATTERN.")),
(("--no-junit",),
dict(action="store_false", dest="junit",
help="Don't output JUnit-compatible reports.")),
(("--junit",),
dict(action="store_true",
help="""Output JUnit-compatible reports.
When junit is enabled, all stdout and stderr
will be redirected and dumped to the junit report,
regardless of the "--capture" and "--no-capture" options.
""")),
(("--junit-directory",),
dict(metavar="PATH", dest="junit_directory",
default="reports",
help="""Directory in which to store JUnit reports.""")),
(("--runner-class",),
dict(action="store",
default="behave.runner.Runner", type=valid_python_module,
help="Tells Behave to use a specific runner. (default: %(default)s)")),
((), # -- CONFIGFILE only
dict(dest="default_format",
help="Specify default formatter (default: pretty).")),
(("-f", "--format"),
dict(action="append",
help="""Specify a formatter. If none is specified the default
formatter is used. Pass "--format help" to get a
list of available formatters.""")),
(("--steps-catalog",),
dict(action="store_true", dest="steps_catalog",
help="""Show a catalog of all available step definitions.
SAME AS: --format=steps.catalog --dry-run --no-summary -q""")),
((), # -- CONFIGFILE only
dict(dest="scenario_outline_annotation_schema",
help="""Specify name annotation schema for scenario outline
(default="{name} -- @{row.id} {examples.name}").""")),
(("-k", "--no-skipped"),
dict(action="store_false", dest="show_skipped",
help="Don't print skipped steps (due to tags).")),
(("--show-skipped",),
dict(action="store_true",
help="""Print skipped steps.
This is the default behaviour. This switch is used to
override a configuration file setting.""")),
(("--no-snippets",),
dict(action="store_false", dest="show_snippets",
help="Don't print snippets for unimplemented steps.")),
(("--snippets",),
dict(action="store_true", dest="show_snippets",
help="""Print snippets for unimplemented steps.
This is the default behaviour. This switch is used to
override a configuration file setting.""")),
(("-m", "--no-multiline"),
dict(action="store_false", dest="show_multiline",
help="""Don't print multiline strings and tables under
steps.""")),
(("--multiline", ),
dict(action="store_true", dest="show_multiline",
help="""Print multiline strings and tables under steps.
This is the default behaviour. This switch is used to
override a configuration file setting.""")),
(("-n", "--name"),
dict(action="append", metavar="NAME_PATTERN",
help="""Select feature elements (scenarios, ...) to run
which match part of the given name (regex pattern).
If this option is given more than once,
it will match against all the given names.""")),
(("--no-capture",),
dict(action="store_false", dest="stdout_capture",
help="""Don't capture stdout (any stdout output will be
printed immediately.)""")),
(("--capture",),
dict(action="store_true", dest="stdout_capture",
help="""Capture stdout (any stdout output will be
printed if there is a failure.)
This is the default behaviour. This switch is used to
override a configuration file setting.""")),
(("--no-capture-stderr",),
dict(action="store_false", dest="stderr_capture",
help="""Don't capture stderr (any stderr output will be
printed immediately.)""")),
(("--capture-stderr",),
dict(action="store_true", dest="stderr_capture",
help="""Capture stderr (any stderr output will be
printed if there is a failure.)
This is the default behaviour. This switch is used to
override a configuration file setting.""")),
(("--no-logcapture",),
dict(action="store_false", dest="log_capture",
help="""Don't capture logging. Logging configuration will
be left intact.""")),
(("--logcapture",),
dict(action="store_true", dest="log_capture",
help="""Capture logging. All logging during a step will be captured
and displayed in the event of a failure.
This is the default behaviour. This switch is used to
override a configuration file setting.""")),
(("--logging-level",),
dict(type=LogLevel.parse_type,
help="""Specify a level to capture logging at. The default
is INFO - capturing everything.""")),
(("--logging-format",),
dict(help="""Specify custom format to print statements. Uses the
same format as used by standard logging handlers. The
default is "%%(levelname)s:%%(name)s:%%(message)s".""")),
(("--logging-datefmt",),
dict(help="""Specify custom date/time format to print
statements.
Uses the same format as used by standard logging
handlers.""")),
(("--logging-filter",),
dict(help="""Specify which statements to filter in/out. By default,
everything is captured. If the output is too verbose, use
this option to filter out needless output.
Example: --logging-filter=foo will capture statements issued
ONLY to foo or foo.what.ever.sub but not foobar or other
logger. Specify multiple loggers with comma:
filter=foo,bar,baz.
If any logger name is prefixed with a minus, eg filter=-foo,
it will be excluded rather than included.""",
config_help="""Specify which statements to filter in/out. By default,
everything is captured. If the output is too verbose,
use this option to filter out needless output.
Example: ``logging_filter = foo`` will capture
statements issued ONLY to "foo" or "foo.what.ever.sub"
but not "foobar" or other logger. Specify multiple
loggers with comma: ``logging_filter = foo,bar,baz``.
If any logger name is prefixed with a minus, eg
``logging_filter = -foo``, it will be excluded rather
than included.""")),
(("--logging-clear-handlers",),
dict(action="store_true",
help="Clear all other logging handlers.")),
(("--no-summary",),
dict(action="store_false", dest="summary",
help="""Don't display the summary at the end of the run.""")),
(("--summary",),
dict(action="store_true", dest="summary",
help="""Display the summary at the end of the run.""")),
(("-o", "--outfile"),
dict(action="append", dest="outfiles", metavar="FILE",
help="Write to specified file instead of stdout.")),
((), # -- CONFIGFILE only
dict(action="append", dest="paths",
help="Specify default feature paths, used when none are provided.")),
(("-q", "--quiet"),
dict(action="store_true",
help="Alias for --no-snippets --no-source.")),
(("-s", "--no-source"),
dict(action="store_false", dest="show_source",
help="""Don't print the file and line of the step definition with the
steps.""")),
(("--show-source",),
dict(action="store_true", dest="show_source",
help="""Print the file and line of the step
definition with the steps. This is the default
behaviour. This switch is used to override a
configuration file setting.""")),
(("--stage",),
dict(help="""Defines the current test stage.
The test stage name is used as name prefix for the environment
file and the steps directory (instead of default path names).
""")),
(("--stop",),
dict(action="store_true",
help="Stop running tests at the first failure.")),
# -- DISABLE-UNUSED-OPTION: Not used anywhere.
# (("-S", "--strict"),
# dict(action="store_true",
# help="Fail if there are any undefined or pending steps.")),
((), # -- CONFIGFILE only
dict(dest="default_tags", metavar="TAG_EXPRESSION", action="append",
help="""Define default tags when none are provided.
See --tags for more information.""")),
(("-t", "--tags"),
dict(action="append", metavar="TAG_EXPRESSION",
help="""Only execute features or scenarios with tags
matching TAG_EXPRESSION. Pass "--tags-help" for
more information.""",
config_help="""Only execute certain features or scenarios based
on the tag expression given. See below for how to code
tag expressions in configuration files.""")),
(("-T", "--no-timings"),
dict(action="store_false", dest="show_timings",
help="""Don't print the time taken for each step.""")),
(("--show-timings",),
dict(action="store_true", dest="show_timings",
help="""Print the time taken, in seconds, of each step after the
step has completed. This is the default behaviour. This
switch is used to override a configuration file
setting.""")),
(("-v", "--verbose"),
dict(action="store_true",
help="Show the files and features loaded.")),
(("-w", "--wip"),
dict(action="store_true",
help="""Only run scenarios tagged with "wip". Additionally: use the
"plain" formatter, do not capture stdout or logging output
and stop at the first failure.""")),
(("-x", "--expand"),
dict(action="store_true",
help="Expand scenario outline tables in output.")),
(("--lang",),
dict(metavar="LANG",
help="Use keywords for a language other than English.")),
(("--lang-list",),
dict(action="store_true",
help="List the languages available for --lang.")),
(("--lang-help",),
dict(metavar="LANG",
help="List the translations accepted for one language.")),
(("--tags-help",),
dict(action="store_true",
help="Show help for tag expressions.")),
(("--version",),
dict(action="store_true", help="Show version.")),
]
# -- OPTIONS: With raw value access semantics in configuration file.
raw_value_options = frozenset([
"logging_format",
"logging_datefmt",
# -- MAYBE: "scenario_outline_annotation_schema",
])
def read_configuration(path):
# pylint: disable=too-many-locals, too-many-branches
config = ConfigParser()
config.optionxform = str # -- SUPPORT: case-sensitive keys
config.read(path)
config_dir = os.path.dirname(path)
result = {}
for fixed, keywords in options:
if "dest" in keywords:
dest = keywords["dest"]
else:
for opt in fixed:
if opt.startswith("--"):
dest = opt[2:].replace("-", "_")
else:
assert len(opt) == 2
dest = opt[1:]
if dest in "tags_help lang_list lang_help version".split():
continue
if not config.has_option("behave", dest):
continue
action = keywords.get("action", "store")
if action == "store":
use_raw_value = dest in raw_value_options
result[dest] = config.get("behave", dest, raw=use_raw_value)
elif action in ("store_true", "store_false"):
result[dest] = config.getboolean("behave", dest)
elif action == "append":
if dest == "userdata_defines":
continue # -- SKIP-CONFIGFILE: Command-line only option.
result[dest] = \
[s.strip() for s in config.get("behave", dest).splitlines()]
else:
raise ValueError('action "%s" not implemented' % action)
# -- STEP: format/outfiles coupling
if "format" in result:
# -- OPTIONS: format/outfiles are coupled in configuration file.
formatters = result["format"]
formatter_size = len(formatters)
outfiles = result.get("outfiles", [])
outfiles_size = len(outfiles)
if outfiles_size < formatter_size:
for formatter_name in formatters[outfiles_size:]:
outfile = "%s.output" % formatter_name
outfiles.append(outfile)
result["outfiles"] = outfiles
elif len(outfiles) > formatter_size:
print("CONFIG-ERROR: Too many outfiles (%d) provided." %
outfiles_size)
result["outfiles"] = outfiles[:formatter_size]
for paths_name in ("paths", "outfiles"):
if paths_name in result:
# -- Evaluate relative paths relative to location.
# NOTE: Absolute paths are preserved by os.path.join().
paths = result[paths_name]
result[paths_name] = \
[os.path.normpath(os.path.join(config_dir, p)) for p in paths]
# -- STEP: Special additional configuration sections.
# SCHEMA: config_section: data_name
special_config_section_map = {
"behave.formatters": "more_formatters",
"behave.userdata": "userdata",
}
for section_name, data_name in special_config_section_map.items():
result[data_name] = {}
if config.has_section(section_name):
result[data_name].update(config.items(section_name))
return result
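# Illustrative mapping (hypothetical file contents, not part of the original
# source): a config file containing
#
#   [behave]
#   color = no
#   format = plain
#
# would be read by read_configuration() as
# {"color": False, "format": ["plain"], "outfiles": ["plain.output"]},
# because a "format" entry without a matching "outfiles" entry is paired
# with a generated "<formatter>.output" file name (resolved relative to the
# config file's directory).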
def config_filenames():
paths = ["./", os.path.expanduser("~")]
if sys.platform in ("cygwin", "win32") and "APPDATA" in os.environ:
paths.append(os.path.join(os.environ["APPDATA"]))
for path in reversed(paths):
for filename in reversed(
("behave.ini", ".behaverc", "setup.cfg", "tox.ini")):
filename = os.path.join(path, filename)
if os.path.isfile(filename):
yield filename
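# Illustrative precedence note (assumes a POSIX setup, not stated in the
# original source): with both "~/.behaverc" and "./behave.ini" present,
# config_filenames() yields the home-directory file first, so values from
# "./behave.ini" are applied last by load_configuration() and win.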
def load_configuration(defaults, verbose=False):
for filename in config_filenames():
if verbose:
print('Loading config defaults from "%s"' % filename)
defaults.update(read_configuration(filename))
if verbose:
print("Using defaults:")
for k, v in six.iteritems(defaults):
print("%15s %s" % (k, v))
def setup_parser():
# construct the parser
# usage = "%(prog)s [options] [ [FILE|DIR|URL][:LINE[:LINE]*] ]+"
usage = "%(prog)s [options] [ [DIR|FILE|FILE:LINE] ]+"
description = """\
Run a number of feature tests with behave."""
more = """
EXAMPLES:
behave features/
behave features/one.feature features/two.feature
behave features/one.feature:10
behave @features.txt
"""
parser = argparse.ArgumentParser(usage=usage, description=description)
for fixed, keywords in options:
if not fixed:
continue # -- CONFIGFILE only.
if "config_help" in keywords:
keywords = dict(keywords)
del keywords["config_help"]
parser.add_argument(*fixed, **keywords)
parser.add_argument("paths", nargs="*",
help="Feature directory, file or file location (FILE:LINE).")
return parser
class Configuration(object):
"""Configuration object for behave and behave runners."""
# pylint: disable=too-many-instance-attributes
defaults = dict(
color=sys.platform != "win32",
show_snippets=True,
show_skipped=True,
dry_run=False,
show_source=True,
show_timings=True,
stdout_capture=True,
stderr_capture=True,
log_capture=True,
logging_format="%(levelname)s:%(name)s:%(message)s",
logging_level=logging.INFO,
steps_catalog=False,
summary=True,
junit=False,
stage=None,
userdata={},
# -- SPECIAL:
default_format="pretty", # -- Used when no formatters are configured.
default_tags="", # -- Used when no tags are defined.
scenario_outline_annotation_schema=u"{name} -- @{row.id} {examples.name}"
)
cmdline_only_options = {"userdata_defines"}
def __init__(self, command_args=None, load_config=True, verbose=None,
**kwargs):
"""
Constructs a behave configuration object.
* loads the configuration defaults (if needed).
* processes the command-line args
* stores the configuration results
:param command_args: Provide command args (as sys.argv).
If command_args is None, sys.argv[1:] is used.
:type command_args: list<str>, str
:param load_config: Indicate if configfile should be loaded (=true)
:param verbose: Indicate if diagnostic output is enabled
:param kwargs: Used to hand-over/overwrite default values.
"""
# pylint: disable=too-many-branches, too-many-statements
if command_args is None:
command_args = sys.argv[1:]
elif isinstance(command_args, six.string_types):
encoding = select_best_encoding() or "utf-8"
if six.PY2 and isinstance(command_args, six.text_type):
command_args = command_args.encode(encoding)
elif six.PY3 and isinstance(command_args, six.binary_type):
command_args = command_args.decode(encoding)
command_args = shlex.split(command_args)
elif isinstance(command_args, (list, tuple)):
command_args = to_texts(command_args)
if verbose is None:
# -- AUTO-DISCOVER: Verbose mode from command-line args.
verbose = ("-v" in command_args) or ("--verbose" in command_args)
self.version = None
self.tags_help = None
self.lang_list = None
self.lang_help = None
self.default_tags = None
self.junit = None
self.logging_format = None
self.logging_datefmt = None
self.name = None
self.scope = None
self.steps_catalog = None
self.userdata = None
self.wip = None
defaults = self.defaults.copy()
for name, value in six.iteritems(kwargs):
defaults[name] = value
self.defaults = defaults
self.formatters = []
self.reporters = []
self.name_re = None
self.outputs = []
self.include_re = None
self.exclude_re = None
self.scenario_outline_annotation_schema = None # pylint: disable=invalid-name
self.steps_dir = "steps"
self.environment_file = "environment.py"
self.userdata_defines = None
self.more_formatters = None
if load_config:
load_configuration(self.defaults, verbose=verbose)
parser = setup_parser()
parser.set_defaults(**self.defaults)
args = parser.parse_args(command_args)
for key, value in six.iteritems(args.__dict__):
if key.startswith("_") and key not in self.cmdline_only_options:
continue
setattr(self, key, value)
# -- ATTRIBUTE-NAME-CLEANUP:
self.tag_expression = None
self._tags = self.tags
self.tags = None
if isinstance(self.default_tags, six.string_types):
self.default_tags = self.default_tags.split()
self.paths = [os.path.normpath(path) for path in self.paths]
self.setup_outputs(args.outfiles)
if self.steps_catalog:
# -- SHOW STEP-CATALOG: As step summary.
self.default_format = "steps.catalog"
if self.format:
self.format.append("steps.catalog")
else:
self.format = ["steps.catalog"]
self.dry_run = True
self.summary = False
self.show_skipped = False
self.quiet = True
if self.wip:
# Only run scenarios tagged with "wip".
# Additionally:
# * use the "plain" formatter (per default)
# * do not capture stdout or logging output and
# * stop at the first failure.
self.default_format = "plain"
self._tags = ["wip"] + self.default_tags
self.color = False
self.stop = True
self.log_capture = False
self.stdout_capture = False
self.tag_expression = make_tag_expression(self._tags or self.default_tags)
# -- BACKWARD-COMPATIBLE (BAD-NAMING STYLE; deprecating):
self.tags = self.tag_expression
if self.quiet:
self.show_source = False
self.show_snippets = False
if self.exclude_re:
self.exclude_re = re.compile(self.exclude_re)
if self.include_re:
self.include_re = re.compile(self.include_re)
if self.name:
# -- SELECT: Scenario-by-name, build regular expression.
self.name_re = self.build_name_re(self.name)
if self.stage is None: # pylint: disable=access-member-before-definition
# -- USE ENVIRONMENT-VARIABLE, if stage is undefined.
self.stage = os.environ.get("BEHAVE_STAGE", None)
self.setup_stage(self.stage)
self.setup_model()
self.setup_userdata()
# -- FINALLY: Setup Reporters and Formatters
# NOTE: Reporters and Formatters can now use userdata information.
if self.junit:
# Buffer the output (it will be put into Junit report)
self.stdout_capture = True
self.stderr_capture = True
self.log_capture = True
self.reporters.append(JUnitReporter(self))
if self.summary:
self.reporters.append(SummaryReporter(self))
self.setup_formats()
unknown_formats = self.collect_unknown_formats()
if unknown_formats:
parser.error("format=%s is unknown" % ", ".join(unknown_formats))
def setup_outputs(self, args_outfiles=None):
if self.outputs:
assert not args_outfiles, "ONLY-ONCE"
return
# -- NORMAL CASE: Setup only initially (once).
if not args_outfiles:
self.outputs.append(StreamOpener(stream=sys.stdout))
else:
for outfile in args_outfiles:
if outfile and outfile != "-":
self.outputs.append(StreamOpener(outfile))
else:
self.outputs.append(StreamOpener(stream=sys.stdout))
def setup_formats(self):
"""Register more, user-defined formatters by name."""
if self.more_formatters:
for name, scoped_class_name in self.more_formatters.items():
_format_registry.register_as(name, scoped_class_name)
def collect_unknown_formats(self):
unknown_formats = []
if self.format:
for format_name in self.format:
if (format_name == "help" or
_format_registry.is_formatter_valid(format_name)):
continue
unknown_formats.append(format_name)
return unknown_formats
@staticmethod
def build_name_re(names):
"""
Build regular expression for scenario selection by name
by using a list of name parts or name regular expressions.
:param names: List of name parts or regular expressions (as text).
:return: Compiled regular expression to use.
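Illustrative example (hypothetical name parts, not taken from the
behave test suite):
>>> Configuration.build_name_re(["login", "logout"]).pattern == u"login|logout"
True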
"""
# -- NOTE: re.LOCALE is removed in Python 3.6 (deprecated in Python 3.5)
# flags = (re.UNICODE | re.LOCALE)
# -- ENSURE: Names are all unicode/text values (for issue #606).
names = to_texts(names)
pattern = u"|".join(names)
return re.compile(pattern, flags=re.UNICODE)
def exclude(self, filename):
if isinstance(filename, FileLocation):
filename = six.text_type(filename)
if self.include_re and self.include_re.search(filename) is None:
return True
if self.exclude_re and self.exclude_re.search(filename) is not None:
return True
return False
def setup_logging(self, level=None, configfile=None, **kwargs):
"""
Support simple setup of logging subsystem.
Ensures that the logging level is set.
But note that the logging setup can only occur once.
SETUP MODES:
* :func:`logging.config.fileConfig()`, if ``configfile`` is provided.
* :func:`logging.basicConfig()`, otherwise.
.. code-block: python
# -- FILE: features/environment.py
def before_all(context):
context.config.setup_logging()
:param level: Logging level of root logger.
If None, use :attr:`logging_level` value.
:param configfile: Configuration filename for fileConfig() setup.
:param kwargs: Passed to :func:`logging.basicConfig()`
"""
if level is None:
level = self.logging_level # pylint: disable=no-member
if configfile:
from logging.config import fileConfig
fileConfig(configfile)
else:
# pylint: disable=no-member
format_ = kwargs.pop("format", self.logging_format)
datefmt = kwargs.pop("datefmt", self.logging_datefmt)
logging.basicConfig(format=format_, datefmt=datefmt, **kwargs)
# -- ENSURE: Default log level is set
# (even if logging subsystem is already configured).
logging.getLogger().setLevel(level)
def setup_model(self):
if self.scenario_outline_annotation_schema:
name_schema = six.text_type(self.scenario_outline_annotation_schema)
ScenarioOutline.annotation_schema = name_schema.strip()
def setup_stage(self, stage=None):
"""Setup the test stage that selects a different set of
steps and environment implementations.
:param stage: Name of current test stage (as string or None).
EXAMPLE::
# -- SETUP DEFAULT TEST STAGE (unnamed):
config = Configuration()
config.setup_stage()
assert config.steps_dir == "steps"
assert config.environment_file == "environment.py"
# -- SETUP PRODUCT TEST STAGE:
config.setup_stage("product")
assert config.steps_dir == "product_steps"
assert config.environment_file == "product_environment.py"
"""
steps_dir = "steps"
environment_file = "environment.py"
if stage:
# -- USE A TEST STAGE: Select different set of implementations.
prefix = stage + "_"
steps_dir = prefix + steps_dir
environment_file = prefix + environment_file
self.steps_dir = steps_dir
self.environment_file = environment_file
def setup_userdata(self):
if not isinstance(self.userdata, UserData):
self.userdata = UserData(self.userdata)
if self.userdata_defines:
# -- ENSURE: Cmd-line overrides configuration file parameters.
self.userdata.update(self.userdata_defines)
def update_userdata(self, data):
"""Update userdata with data and reapply userdata defines (cmdline).
:param data: Provides (partial) userdata (as dict)
"""
self.userdata.update(data)
if self.userdata_defines:
# -- REAPPLY: Cmd-line defines (override configuration file data).
self.userdata.update(self.userdata_defines)
|
the-stack_106_14735
|
import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from fedml.model.cv.batchnorm_utils import SynchronizedBatchNorm2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation, bias=False
)
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, BatchNorm, model_name, pretrained=True):
self.inplanes = 64
super(ResNet, self).__init__()
self.model_name = model_name
blocks = [1, 2, 4]
if self.model_name == "deeplabV3_plus":
if output_stride == 16:
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 2]
elif output_stride == 8:
strides = [1, 2, 1, 1]
dilations = [1, 1, 2, 4]
else:
raise NotImplementedError
elif self.model_name == "unet":
strides = [1, 2, 2, 2]
dilations = [1, 1, 1, 2]
# Modules
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = BatchNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(
block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm
)
self.layer2 = self._make_layer(
block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm
)
self.layer4 = self._make_MG_unit(
block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm
)
# self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
self._init_weight()
if pretrained:
self._load_pretrained_model()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, dilation=blocks[0] * dilation, downsample=downsample, BatchNorm=BatchNorm
)
)
self.inplanes = planes * block.expansion
for i in range(1, len(blocks)):
layers.append(block(self.inplanes, planes, stride=1, dilation=blocks[i] * dilation, BatchNorm=BatchNorm))
return nn.Sequential(*layers)
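# Illustrative note (example values, not from the original source): for
# "deeplabV3_plus" with output_stride=16, layer4 is built with blocks=[1, 2, 4]
# and dilation=2, so its three Bottleneck blocks use dilations 2, 4 and 8.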
def forward(self, input):
if self.model_name == "deeplabV3_plus":
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
low_level_feat = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x, low_level_feat
elif self.model_name == "unet":
x = input.detach().clone()
stages = [
nn.Identity(),
nn.Sequential(self.conv1, self.bn1, self.relu),
nn.Sequential(self.maxpool, self.layer1),
self.layer2,
self.layer3,
self.layer4,
]
features = []
for i in range(len(stages)):
x = stages[i](x)
# print("In resnet ",x.shape)
features.append(x)
return features
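# Illustrative shapes for the "unet" branch above (assuming a 1x3x512x512
# input, which is not stated in the original source): the returned feature
# maps have spatial sizes 512, 256, 128, 64, 32 and 16 with 3, 64, 256, 512,
# 1024 and 2048 channels respectively.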
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url("https://download.pytorch.org/models/resnet101-5d3b4d8f.pth")
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
def ResNet101(output_stride, BatchNorm, model_name, pretrained=True):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm, model_name, pretrained=pretrained)
return model
if __name__ == "__main__":
import torch
model = ResNet101(BatchNorm=nn.BatchNorm2d, model_name="deeplabV3_plus", pretrained=True, output_stride=8)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
|
the-stack_106_14736
|
#!/usr/bin/python3.6
import pyipopt
import numpy as np
import dynamics as dy
import cost
import constraint
import adolc_appro as aa
import pylab as plt
import util
import time
import mujoco_py
import click
# import pickle
import sys  # needed for the full-array print threshold below

np.set_printoptions(threshold=sys.maxsize)  # np.nan is rejected as a threshold by newer NumPy releases
@click.command()
@click.option('--traj_filename', type=str, default='/tmp/trajectory.pkl', help='filename of the solution trajectory')
def main(traj_filename):
prob = {}
prob["n"] = 10
prob["qdim"] = 2
prob["udim"] = 1
# prob["dt"] = 0.1/( prob["n"]-1)
prob["dt"] = 0.1
p_L = [-2, -1*np.pi, -2, -np.pi, -3]
p_U = [2, 1*np.pi, 2, np.pi, 3]
x_L = np.tile(p_L, prob["n"])
x_U = np.tile(p_U, prob["n"])
qdim = prob['qdim']
start = np.array([0]*qdim+[0]*qdim)
start[1] = -np.pi
start[1] = 0
end = np.array([0]*qdim+[0]*qdim)
n = prob["n"]
q_v_arr_lst = [np.linspace(start[i], end[i], n) for i in range(2*prob['qdim'])]
u_arr = np.ones((prob["udim"], n))*0.001
X_2d = np.vstack([q_v_arr_lst, u_arr])
X_init = X_2d.T.flatten()
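# Illustrative layout (a reading of the code above, not stated in the
# original source): each of the n=10 knot points contributes
# [q1, q2, v1, v2, u], so X_init is a flat vector of
# n * (2*qdim + udim) = 10 * 5 = 50 decision variables, matching the tiled
# bounds x_L and x_U.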
# set the control cost
X_sample = np.random.uniform(x_L, x_U)
# set the cost and the gradient of the cost
ctrl_cost = cost.Control_Cost(prob)
eval_f_adolc = aa.Func_Adolc(ctrl_cost, X_sample, scaler=True)
eval_grad_f_adolc = aa.Eval_Grad_F_Adolc(eval_f_adolc.id)
# eval_grad_f_adolc = aa.Func_Adolc(ctrl_cost.eval_grad_f, X_sample)
# set the constraint function for points at specific times
g1 = np.array([-np.pi, 0])
g2 = np.array([0, 0])
points = [(0, g1), (n-1, g2)]
# points = [(n-1, end)]
# points = [(0, start)]
# p_index, p_g_func = [constraint.get_point_constriant(prob, t, g)
# for (t, g) in points]
# q and v of the pole
dims = np.array([1,1+prob["qdim"]])
p_index_g_piar = [constraint.get_point_constriant(prob, t, g, dims)
for (t, g) in points]
p_index_iter, p_g_func_iter = zip(*p_index_g_piar)
p_index_lst = list(p_index_iter)
p_g_lst = list(p_g_func_iter)
# p_g_adolc_lst = [aa.Func_Adolc(g, X_sample[i])
# for (i, g) in p_index_g_piar]
D_factory= constraint.Dynamics_constriant
model_path = "/home/tao/src/gym/gym/envs/mujoco/assets/inverted_pendulum.xml"
model = dy.make_model(model_path)
sim = mujoco_py.MjSim(model)
qdim = model.nq
udim = model.nu
cart = dy.Mujoco_Dynamics(model, sim, qdim, udim)
dynamics = cart.dynamics
d_index, d_g_func = constraint.get_dynamic_constriants(prob,
dynamics,
range(0, n-1))
# d_g_adolc = aa.Func_Adolc(d_g_func, X_sample[d_index[0]])
# all dynamics steps share the same approximation function
# d_g_adolc_lst = [d_g_adolc for i in d_index]
d_g_lst = [d_g_func for i in d_index]
index_lst = p_index_lst + d_index
eval_g_lst = p_g_lst + d_g_lst
# X_sample_lst = [X_sample[i] for i in index_lst]
#
# g_adolc_x_pair = zip(eval_g_adolc_lst, X_sample_lst)
#
# eval_jac_adolc_lst = [aa.Eval_Jac_G_Adolc(g.id, x)
# for (g, x) in g_adolc_x_pair]
eval_g = constraint.Stacked_Constriants(eval_g_lst, index_lst)
eval_g_adolc = aa.Func_Adolc(eval_g, X_sample)
eval_jac_g_adolc = aa.Eval_Jac_G_Adolc(eval_g_adolc.id, X_sample)
# eval_g_adolc = aa.Func_Adolc(eval_g, X_sample)
# eval_jac_g = constraint.Stacked_Constriants_Jacobian(eval_g_lst ,
# eval_jac_lst,
# index_lst)
nvar = X_init.size
ncon = eval_g(X_init).size
eval_lagrangian = constraint.Eval_Lagrangian(ctrl_cost, eval_g)
#x, lagrangian, obj_factor
x_lag_lst = [X_sample, np.ones(ncon), 1]
x_lag_arr = np.hstack(x_lag_lst)
eval_lagrangian_adolc = aa.Func_Adolc(eval_lagrangian, x_lag_lst)
eval_h_adolc = aa.Eval_h_adolc(eval_lagrangian_adolc.id, x_lag_arr)
maks = eval_h_adolc(X_init, np.ones(ncon), 1, True)
H = eval_h_adolc(X_init, np.ones(ncon), 1, False)
g_L = np.zeros(ncon)
g_U = np.zeros(ncon)
nnzj = eval_jac_g_adolc.nnzj
nnzh = eval_h_adolc.nnzh
nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, 0, eval_f_adolc ,
eval_grad_f_adolc, eval_g_adolc, eval_jac_g_adolc)
# nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f_adolc ,
# eval_grad_f_adolc, eval_g_adolc, eval_jac_g_adolc, eval_h_adolc)
output, zl, zu, constraint_multipliers, obj, status = nlp.solve(X_init)
output_2d = output.reshape(n, -1)
output.dump(traj_filename)
return output_2d, prob
if __name__ == "__main__":
start_time = time.time()
output_2d, prob = main()
print("--- %s seconds ---" % (time.time() - start_time))
print(output_2d)
Q = output_2d[:, 1]
V = output_2d[:, prob["qdim"]+1]
# U = output_2d[:, 4:]
plt.plot(Q, V)
plt.show()
|