filename | text
---|---|
the-stack_106_14906
|
from django.conf.urls.defaults import *
from apps.reader import views
urlpatterns = patterns('',
url(r'^$', views.index),
url(r'^login_as', views.login_as, name='login_as'),
url(r'^logout', views.logout, name='logout'),
url(r'^login', views.login, name='login'),
url(r'^autologin/(?P<username>\w+)/(?P<secret>\w+)/?', views.autologin, name='autologin'),
url(r'^signup', views.signup, name='signup'),
url(r'^feeds/?$', views.load_feeds, name='load-feeds'),
url(r'^feed/(?P<feed_id>\d+)', views.load_single_feed, name='load-single-feed'),
url(r'^page/(?P<feed_id>\d+)', views.load_feed_page, name='load-feed-page'),
url(r'^refresh_feed/(?P<feed_id>\d+)', views.refresh_feed, name='refresh-feed'),
url(r'^favicons', views.load_feed_favicons, name='load-feed-favicons'),
url(r'^river_stories', views.load_river_stories__redis, name='load-river-stories'),
url(r'^refresh_feeds', views.refresh_feeds, name='refresh-feeds'),
url(r'^feed_unread_count', views.feed_unread_count, name='feed-unread-count'),
url(r'^starred_stories', views.load_starred_stories, name='load-starred-stories'),
url(r'^mark_all_as_read', views.mark_all_as_read, name='mark-all-as-read'),
url(r'^mark_story_as_read', views.mark_story_as_read, name='mark-story-as-read'),
url(r'^mark_feed_stories_as_read', views.mark_feed_stories_as_read, name='mark-feed-stories-as-read'),
url(r'^mark_social_stories_as_read', views.mark_social_stories_as_read, name='mark-social-stories-as-read'),
url(r'^mark_story_as_unread', views.mark_story_as_unread),
url(r'^mark_story_as_starred', views.mark_story_as_starred),
url(r'^mark_story_as_unstarred', views.mark_story_as_unstarred),
url(r'^mark_feed_as_read', views.mark_feed_as_read),
url(r'^delete_feed_by_url', views.delete_feed_by_url, name='delete-feed-by-url'),
url(r'^delete_feed', views.delete_feed, name='delete-feed'),
url(r'^delete_folder', views.delete_folder, name='delete-folder'),
url(r'^rename_feed', views.rename_feed, name='rename-feed'),
url(r'^rename_folder', views.rename_folder, name='rename-folder'),
url(r'^move_feed_to_folder', views.move_feed_to_folder, name='move-feed-to-folder'),
url(r'^move_folder_to_folder', views.move_folder_to_folder, name='move-folder-to-folder'),
url(r'^add_url', views.add_url),
url(r'^add_folder', views.add_folder),
url(r'^add_feature', views.add_feature, name='add-feature'),
url(r'^features', views.load_features, name='load-features'),
url(r'^save_feed_order', views.save_feed_order, name='save-feed-order'),
url(r'^feeds_trainer', views.feeds_trainer, name='feeds-trainer'),
url(r'^save_feed_chooser', views.save_feed_chooser, name='save-feed-chooser'),
url(r'^send_story_email', views.send_story_email, name='send-story-email'),
url(r'^retrain_all_sites', views.retrain_all_sites, name='retrain-all-sites'),
url(r'^load_tutorial', views.load_tutorial, name='load-tutorial'),
url(r'^buster', views.iframe_buster, name='iframe-buster'),
)
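# Illustrative sketch (not part of the original urlconf): because the patterns
# above are named, they can be resolved with reverse(). Assuming this module is
# included at the site root, a hypothetical lookup might be:
#
#     from django.core.urlresolvers import reverse
#     reverse('load-single-feed', kwargs={'feed_id': 42})   # -> '/feed/42'
#     reverse('refresh-feeds')                              # -> '/refresh_feeds'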
|
the-stack_106_14907
|
"""
Support for Adafruit DHT temperature and humidity sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.dht/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
TEMP_FAHRENHEIT, CONF_NAME, CONF_MONITORED_CONDITIONS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.temperature import celsius_to_fahrenheit
REQUIREMENTS = ['Adafruit-DHT==1.4.0']
_LOGGER = logging.getLogger(__name__)
CONF_PIN = 'pin'
CONF_SENSOR = 'sensor'
CONF_HUMIDITY_OFFSET = 'humidity_offset'
CONF_TEMPERATURE_OFFSET = 'temperature_offset'
DEFAULT_NAME = 'DHT Sensor'
# DHT11 is able to deliver data once per second, DHT22 once every two seconds
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
SENSOR_TEMPERATURE = 'temperature'
SENSOR_HUMIDITY = 'humidity'
SENSOR_TYPES = {
SENSOR_TEMPERATURE: ['Temperature', None],
SENSOR_HUMIDITY: ['Humidity', '%']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SENSOR): cv.string,
vol.Required(CONF_PIN): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TEMPERATURE_OFFSET, default=0):
vol.All(vol.Coerce(float), vol.Range(min=-100, max=100)),
vol.Optional(CONF_HUMIDITY_OFFSET, default=0):
vol.All(vol.Coerce(float), vol.Range(min=-100, max=100))
})
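# Illustrative sketch (an assumption, not from the original component): the
# schema above can be exercised directly to see how validation, coercion and
# defaults behave. The dict below is hypothetical example data.
#
#     conf = PLATFORM_SCHEMA({
#         'platform': 'dht',
#         'sensor': 'DHT22',
#         'pin': '4',
#         'monitored_conditions': ['temperature', 'humidity'],
#     })
#     conf['temperature_offset']  # -> 0.0 (default, coerced to float)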
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the DHT sensor."""
import Adafruit_DHT # pylint: disable=import-error
SENSOR_TYPES[SENSOR_TEMPERATURE][1] = hass.config.units.temperature_unit
available_sensors = {
"AM2302": Adafruit_DHT.AM2302,
"DHT11": Adafruit_DHT.DHT11,
"DHT22": Adafruit_DHT.DHT22,
}
sensor = available_sensors.get(config.get(CONF_SENSOR))
pin = config.get(CONF_PIN)
temperature_offset = config.get(CONF_TEMPERATURE_OFFSET)
humidity_offset = config.get(CONF_HUMIDITY_OFFSET)
if not sensor:
_LOGGER.error("DHT sensor type is not supported")
return False
data = DHTClient(Adafruit_DHT, sensor, pin)
dev = []
name = config.get(CONF_NAME)
try:
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(DHTSensor(
data, variable, SENSOR_TYPES[variable][1], name,
temperature_offset, humidity_offset))
except KeyError:
pass
add_entities(dev, True)
class DHTSensor(Entity):
"""Implementation of the DHT sensor."""
def __init__(self, dht_client, sensor_type, temp_unit, name,
temperature_offset, humidity_offset):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.dht_client = dht_client
self.temp_unit = temp_unit
self.type = sensor_type
self.temperature_offset = temperature_offset
self.humidity_offset = humidity_offset
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from the DHT and updates the states."""
self.dht_client.update()
temperature_offset = self.temperature_offset
humidity_offset = self.humidity_offset
data = self.dht_client.data
if self.type == SENSOR_TEMPERATURE and SENSOR_TEMPERATURE in data:
temperature = data[SENSOR_TEMPERATURE]
_LOGGER.debug("Temperature %.1f \u00b0C + offset %.1f",
temperature, temperature_offset)
            if -20 <= temperature < 80:
                # Apply the configured offset before any unit conversion
                temperature += temperature_offset
                if self.temp_unit == TEMP_FAHRENHEIT:
                    temperature = celsius_to_fahrenheit(temperature)
                self._state = round(temperature, 1)
elif self.type == SENSOR_HUMIDITY and SENSOR_HUMIDITY in data:
humidity = data[SENSOR_HUMIDITY]
_LOGGER.debug("Humidity %.1f%% + offset %.1f",
humidity, humidity_offset)
if 0 <= humidity <= 100:
self._state = round(humidity + humidity_offset, 1)
class DHTClient:
"""Get the latest data from the DHT sensor."""
def __init__(self, adafruit_dht, sensor, pin):
"""Initialize the sensor."""
self.adafruit_dht = adafruit_dht
self.sensor = sensor
self.pin = pin
self.data = dict()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data the DHT sensor."""
humidity, temperature = self.adafruit_dht.read_retry(
self.sensor, self.pin)
        if temperature is not None:
            self.data[SENSOR_TEMPERATURE] = temperature
        if humidity is not None:
            self.data[SENSOR_HUMIDITY] = humidity
|
the-stack_106_14908
|
'''
Created on May 19, 2015
@author: Daniil Sorokin<[email protected]>
'''
from nltk.tokenize import regexp_tokenize
from os import listdir
from bs4 import BeautifulSoup
import codecs, sys, time
from mysouputils import get_content_from_soup
pattern_words = "[\w\-0-9']+"
my_encoding = "utf-8"
def are_duplicates(doc1, doc2):
if len(doc1) > 50 and len(doc2) > 50 and not are_duplicates(doc1[:50], doc2[:50]):
return False
txt_tokens_1 = regexp_tokenize(doc1, pattern_words)
txt_tokens_2 = regexp_tokenize(doc2, pattern_words)
ngrams_1 = txt_tokens_1 + generate_ngrams(txt_tokens_1, 2)
ngrams_2 = txt_tokens_2 + generate_ngrams(txt_tokens_2, 2)
overlap = len([w for w in ngrams_1 if w in ngrams_2])
    score = (2*overlap)/(len(ngrams_1) + len(ngrams_2) + 1)
if score > 0.8:
return True
else:
return False
def generate_ngrams(tokens, n):
ngrams = []
for i,token in enumerate(tokens):
if i <= len(tokens) - n:
ngram = ""
for j in range(n):
ngram += tokens[i+j] + " "
ngrams.append(ngram)
return ngrams
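# Usage sketch (illustrative): generate_ngrams keeps a trailing space after
# each n-gram, e.g.
#     generate_ngrams(["a", "b", "c"], 2)  # -> ["a b ", "b c "]
# are_duplicates() mixes these bigrams with the unigram tokens before
# computing the Dice-style overlap score.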
if __name__ == '__main__':
if len(sys.argv) > 1:
start = time.perf_counter()
mdir = sys.argv[1]
file_names = listdir(mdir)
files = []
for filename in file_names:
with codecs.open(mdir + filename, "r", encoding=my_encoding) as f:
files.append( (filename, get_content_from_soup(BeautifulSoup(f.read()))) )
print( "No of files:" + str(len(files)) )
out = codecs.open("duplicate_pairs.csv", "w", encoding=my_encoding)
while len(files):
file = files.pop()
if len(files) % 100 == 0: print(len(files))
content = file[1]
for another_file in files:
another_content = another_file[1]
if are_duplicates(content,another_content):
out.write("{},{}\n".format(file[0], another_file[0]))
out.close()
end = time.perf_counter()
print("Elapsed time: " + str(end - start))
|
the-stack_106_14909
|
"""
716 Max Stack
# https://cheonhyangzhang.gitbooks.io/leetcode-solutions/content/716-max-stack.html
Problem
Design a max stack that supports push, pop, top, peekMax and popMax.
push(x) -- Push element x onto stack.
pop() -- Remove the element on top of the stack and return it.
top() -- Get the element on the top.
peekMax() -- Retrieve the maximum element in the stack.
popMax() -- Retrieve the maximum element in the stack, and remove it. If you find more than one maximum elements, only remove the top-most one.
Example 1:
MaxStack stack = new MaxStack();
stack.push(5);
stack.push(1);
stack.push(5);
stack.top(); -> 5
stack.popMax(); -> 5
stack.top(); -> 1
stack.peekMax(); -> 5
stack.pop(); -> 1
stack.top(); -> 5
Note: -1e7 <= x <= 1e7 Number of operations won't exceed 10000. The last four operations won't be called when stack is empty.
"""
# V0
# V1
# IDEA : array
# https://github.com/qiyuangong/leetcode/blob/master/python/716_Max_Stack.py
# https://poopcode.com/max-stack-leetcode-challenge-python-solution/
class MaxStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.max_stack = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack.append(x)
if len(self.max_stack) == 0:
self.max_stack.append(x)
return
if self.max_stack[-1] > x:
self.max_stack.append(self.max_stack[-1])
else:
self.max_stack.append(x)
def pop(self):
"""
:rtype: int
"""
if len(self.stack) != 0:
self.max_stack.pop(-1)
return self.stack.pop(-1)
def top(self):
"""
:rtype: int
"""
return self.stack[-1]
def peekMax(self):
"""
:rtype: int
"""
if len(self.max_stack) != 0:
return self.max_stack[-1]
def popMax(self):
"""
:rtype: int
"""
val = self.peekMax()
buff = []
while self.top() != val:
buff.append(self.pop())
self.pop()
while len(buff) != 0:
self.push(buff.pop(-1))
return val
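# Usage sketch for the array-based implementation above, replaying the trace
# from the problem statement (illustrative, not part of the original solution):
#     stack = MaxStack()
#     stack.push(5); stack.push(1); stack.push(5)
#     stack.top()      # -> 5
#     stack.popMax()   # -> 5
#     stack.top()      # -> 1
#     stack.peekMax()  # -> 5
#     stack.pop()      # -> 1
#     stack.top()      # -> 5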
# V1'
# IDEA : linked list
# https://codereview.stackexchange.com/questions/210914/leetcode-maxstack-in-python
class Node:
def __init__(self, x):
self.val = x
self.next = None
class MaxStack:
def __init__(self):
"""
Initialize your data structure here.
"""
self.head = None
self.max_val = None
def push(self, x):
"""
Push element x onto stack.
:type x: int
:rtype: void
"""
if self.head:
n = Node(x)
n.next = self.head
self.head = n
else:
self.head = Node(x)
        self.max_val = max(x, self.max_val) if self.max_val is not None else x
def pop(self):
"""
Removes the element on top of the stack and returns that element.
:rtype: int
"""
rtn = None
if self.head:
rtn = self.head.val
self.head = self.head.next
head = self.head
v = head.val if head else None
while head:
v = max(v, head.val)
head = head.next
self.max_val = v
return rtn
def top(self):
"""
Get the top element.
:rtype: int
"""
if self.head:
return self.head.val
def peekMax(self):
"""
Retrieve the maximum element in the stack.
:rtype: int
"""
return self.max_val
def popMax(self):
"""
Retrieve the maximum element in the stack, and remove it. If you find more than one maximum elements, only remove the top-most one.
:rtype: void
"""
prev, cur = None, self.head
while cur:
if cur.val == self.max_val and cur == self.head:
self.head = cur.next
break
elif cur.val == self.max_val:
prev.next = cur.next
break
prev, cur = cur, cur.next
cur = self.head
tmp = self.max_val
v = cur.val if cur else None
while cur:
if cur:
v = max(v, cur.val)
cur = cur.next
self.max_val = v
return tmp
# V1''
# IDEA : array
# https://blog.csdn.net/danspace1/article/details/88734584
class MaxStack(object):
def __init__(self):
self.data = []
def push(self, x):
self.data.insert(0, x)
    def pop(self):
        return self.data.pop(0)
    def top(self):
        return self.data[0]
    def peekMax(self):
        return max(self.data)
    def popMax(self):
        res = max(self.data)
        self.data.remove(res)
        return res
# V2
|
the-stack_106_14913
|
from cell_cycle_gating.findpeaks import get_kde, findpeaks
import os
import numpy as np
import pandas as pd
from scipy.stats.mstats import mquantiles as quantile
import matplotlib.pyplot as plt
# from itertools import compress
from cell_cycle_gating import smooth
from scipy.stats import gaussian_kde
import matplotlib.gridspec as gridspec
from scipy.signal import find_peaks
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import seaborn as sns
def get_ldrgates(ldrtxt, x_ldr=None):
"""Gating based on ldr intensities
Parameters
----------
ldrtxt : 1d array
ldr txt feature across all cells in a well
x_ldr : 1d array
uniformly distributed 1d grid based on expected
range of ldr txt
Returns
-------
ldr_gates : list of floats
gating based on minima of kernel density estimate of ldr txt
"""
if x_ldr is None:
mx = np.max(ldrtxt.tolist())+0.01
x_ldr = np.arange(-0.01, mx, 0.0002)
f_ldr = get_kde(ldrtxt, x_ldr) # ldrtxt should be an array
peak_amp, peak_loc, peak_width = findpeaks(f_ldr.tolist(), npeaks=1)
    # Find location of the minimum on the right
f_neg = [-x for x in f_ldr[peak_loc[0]:]]
_, trough_loc, _ = findpeaks(f_neg, npeaks=1)
# If peakfinder cannot find peak minima, use ldrwidth_5x as default
if np.any(trough_loc):
trough_loc = trough_loc[0] + peak_loc[0] - 1
else:
trough_loc = peak_loc + (5 * peak_width[0])
# choose LDR cutoff based on half-proximal width and right trough of peak
ldrwidth_5x = peak_loc + (5 * peak_width[0])
ldrwidth_2p5 = peak_loc + (2.5 * peak_width[0])
cutoff_index_1 = len(x_ldr) - 2
cutoff_index_2 = np.max([3,
np.min([trough_loc, ldrwidth_5x]),
ldrwidth_2p5])
ldr_cutoff = x_ldr[np.min([cutoff_index_1, int(cutoff_index_2)])]
ldr_gates = [-np.inf, ldr_cutoff]
return np.array(ldr_gates)
def get_ldrlims(ldrtxt, x_ldr=None):
"""Limits of ldr txt feature that define x_lims for plots
Parameters
----------
ldrtxt : 1d array
ldr txt feature across all cells in a well
x_ldr : 1d array
uniformly distributed 1d grid based on expected
range of ldr txt
Returns
-------
ldr_lims : list of floats
limits of ldr txt feature that define x_lims for plots
"""
if x_ldr is None:
mx = np.max(ldrtxt.tolist())+0.01
x_ldr = np.arange(-0.01, mx, 0.0002)
ldr_lims = (quantile(ldrtxt, [5e-3, 0.995]) +
[(2.5 * (x_ldr[1] - x_ldr[0])) * x for x in [-1, 1]])
return ldr_lims
def plot_ldr_gating(ldrtxt, x_ldr=None, ldr_gates=None,
ldr_lims=None, ax=None):
"""Summary plot of gating based on gating based on LDR intensities
Parameters
----------
ldrtxt : 1d array
ldr txt feature across all cells in a well
x_ldr : 1d array
uniformly distributed 1d grid based on expected
range of ldr txt
ldr_gates : list of floats
min and max gating based on ldr txt
ldr_lims : list of floats
        outer bounds of ldr txt to set as x_lim for plots
ax : plot obj
provides positional reference for master plot
Returns
-------
"""
if x_ldr is None:
mx = np.max(ldrtxt.tolist())+0.01
x_ldr = np.arange(-0.01, mx, 0.0002)
f_ldr = get_kde(ldrtxt, x_ldr)
    if ldr_gates is None:
        ldr_gates = get_ldrgates(ldrtxt, x_ldr)
    if ldr_lims is None:
        ldr_lims = get_ldrlims(ldrtxt, x_ldr)
log_frac = np.log10(f_ldr+np.max(f_ldr)/100) - np.log10(np.max(f_ldr)/100)
if ax is None:
ax = plt.figure()
ax.plot(x_ldr, log_frac)
x_vals = [ldr_gates[1],
np.max([ldr_gates[0], np.min(x_ldr)]),
np.max([ldr_gates[0], np.min(x_ldr)]),
ldr_gates[1], ldr_gates[1]]
#y_vals = [np.log10(np.max(f_ldr)) * y for y in [0, 0, 0.5, 0.5, 0]]
y_vals = [0, 0, max(log_frac), max(log_frac), 0]
ax.plot(x_vals, y_vals, 'r', alpha=0.5)
ax.set_xlim(ldr_lims)
f_ldr_max = np.log10(np.max(f_ldr)) - np.log10(np.max(f_ldr)/100) + 0.1
ax.set_ylim([0, f_ldr_max])
ax.set_xlabel('LDRtxt intensity')
ax.set_ylabel('kernel density estimate')
return ldr_gates, ldr_lims
def compute_log_dna(dna, x_dna=None):
"""Computes log of DNA content bounded by x_dna[2], x_dna[-3]
Parameters
----------
dna : 1D array
DNA content of cells in a given well
x_dna : 1D array
Expected distribution of DNA content (used as x-axis grid)
Return
------
log_dna : 1D array
log transformed DNA content
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
dna_upper_bound = 10 ** x_dna[-3]
dna_lower_bound = 10 ** x_dna[2]
dna_upper_bounded = [d if d < dna_upper_bound else dna_upper_bound
for d in dna]
dna_bounded = [d if d > dna_lower_bound else dna_lower_bound
for d in dna_upper_bounded]
log_dna = np.array([np.log10(d) for d in dna_bounded])
return log_dna
def get_g1_location(log_dna, x_dna, ldrtxt, ldr_gates):
"""Computes ocation of G1 based on DNA content
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ldrtxt : 1d array
ldr txt feature across all cells in a well
ldr_gates : list of floats
Returns
-------
g1_loc : float
G1 location on log DNA axis
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
    # Only consider subset of cells with LDR within ldr_gates
log_dna_low_ldr = log_dna[(ldr_gates[1] >= ldrtxt) &
(ldrtxt >= ldr_gates[0])]
f_dna_low_ldr = get_kde(log_dna_low_ldr, x_dna)
dna_peaks_amp, dna_peaks_loc, _ = findpeaks(f_dna_low_ldr.tolist())
# Remove lesser peaks
dna_peaks_loc = dna_peaks_loc[dna_peaks_amp > np.max(dna_peaks_amp/10)]
dna_peaks_amp = dna_peaks_amp[dna_peaks_amp > np.max(dna_peaks_amp/10)]
xdna_loc = x_dna[dna_peaks_loc[:4]] # take the 4 highest peaks
# compute dna density surrounding peaks
dna_density = [np.mean(np.array(log_dna > (x - 0.2 * np.log10(2))) &
np.array(log_dna < (x + 1.2 * np.log10(2))))
for x in xdna_loc] + dna_peaks_amp
# Find G1 peak
if len(xdna_loc) == 2:
g1_loc = np.min(xdna_loc)
else:
g1_loc = xdna_loc[np.argmax(dna_density)]
return g1_loc
def get_g2_location(log_dna, x_dna, ldrtxt, ldr_gates, g1_loc):
"""Computes location of G2 based on DNA content
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ldrtxt : 1d array
ldr txt feature across all cells in a well
ldr_gates : list of floats
g1_loc : numpy float
G1 location on log DNA scale
Returns
-------
g2_loc : numpy float
G2 location on log DNA scale
"""
# Get G2 peak and location
    # Only consider subset of cells with LDR intensity within ldr_gates and
# DNA content > (g1_loc + 0.4 * log10(2))
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
log_dna_g2_range = log_dna[(log_dna > (g1_loc + 0.4 * np.log10(2))) &
(ldr_gates[1] >= ldrtxt) &
(ldrtxt >= ldr_gates[0])]
f_dna_g2_range = get_kde(log_dna_g2_range, x_dna)
f_smooth = smooth.smooth(f_dna_g2_range, 5, 'flat')
peak_amp, peak_loc, _ = findpeaks(f_smooth.tolist())
peak_loc = peak_loc[peak_amp > np.max(peak_amp/10)]
xdna_loc = x_dna[peak_loc]
xdna_loc = xdna_loc[xdna_loc > (g1_loc + 0.5 * np.log10(2))]
if len(xdna_loc) > 1:
g2_loc = xdna_loc[np.argmin(
np.abs((xdna_loc - (g1_loc + np.log10(2))))
)]
elif len(xdna_loc) == 1:
g2_loc = xdna_loc[0]
else:
g2_loc = g1_loc + np.log10(2)
return g2_loc
def get_g1_g2_position(log_dna, x_dna, ldrtxt, ldr_gates):
"""Wrapper function that returns G1 and G2 location
based on log DNA content
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ldrtxt : 1d array
ldr txt feature across all cells in a well
ldr_gates : list of floats
Returns
-------
g1_g2_pos : list of floats
G1 and G2 location on log DNA scale
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
g1_loc = get_g1_location(log_dna, x_dna, ldrtxt, ldr_gates)
g2_loc = get_g2_location(log_dna, x_dna, ldrtxt, ldr_gates, g1_loc)
g1_g2_pos = [g1_loc, g2_loc]
return g1_g2_pos
def get_dnalims(log_dna, x_dna=None):
""" Outer bounds on DNA content to use as x_lim for plots
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
Returns
-------
dna_lims : list of floats
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
dna_lims = (quantile(log_dna, [5e-3, 0.995]) +
[(2.5 * (x_dna[1] - x_dna[0])) * x for x in [-1, 1]])
return dna_lims
def get_dna_gating(dna, ldrtxt, ldr_gates, x_dna=None, ax=None):
"""Computes gating to claissfy live/dead cells based on DNA content
Parameters
----------
dna : 1d array
DNA content of cells in a given well
ldrtxt : 1d array
ldr txt feature across all cells in a well
ldr_gates : list of floats
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ax : subplot object
provides positional reference for master plot
Returns
-------
dna_gates : list of floats
inner and outer gates to classify live/dead cells
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
log_dna = compute_log_dna(dna, x_dna)
f_dna = get_kde(np.array(log_dna), x_dna)
log_dna_low_ldr = log_dna[(ldr_gates[1] >= ldrtxt) &
(ldrtxt >= ldr_gates[0])]
f_dna_low_ldr = get_kde(log_dna_low_ldr, x_dna)
g1_loc = get_g1_location(log_dna, x_dna, ldrtxt, ldr_gates)
log_dna_g2_range = log_dna[(log_dna > (g1_loc + 0.4 * np.log10(2))) &
(ldr_gates[1] >= ldrtxt) &
(ldrtxt >= ldr_gates[0])]
f_dna_g2_range = get_kde(log_dna_g2_range, x_dna)
g1_g2_pos = get_g1_g2_position(log_dna, x_dna, ldrtxt, ldr_gates)
g1_loc = g1_g2_pos[0]
g2_loc = g1_g2_pos[1]
dna_gates = [a + b for a, b in zip(
[g1_g2_pos[i] for i in [0, 0, 1, 1]],
[(g2_loc-g1_loc) * s for s in [-1.5, -.9, 1.3, 2.2]]
)]
y_vals = [np.max(f_dna) * y for y in [0, 1.02, 1.02, 0]]
inner_x_vals = [dna_gates[i] for i in [1, 1, 2, 2]]
outer_x_vals = [dna_gates[i] for i in [0, 0, 3, 3]]
dna_lims = get_dnalims(log_dna, x_dna)
dna_lims = [np.min((dna_lims[0], dna_gates[0]-0.1)),
np.max((dna_lims[1], dna_gates[3]+0.1))]
if ax is not None:
ax.plot(x_dna, f_dna_low_ldr, '--r')
ax.plot(x_dna, f_dna, '-k')
ax.plot(x_dna, f_dna_g2_range, ':')
ax.plot(inner_x_vals, y_vals, '-r', linewidth=2)
ax.plot(outer_x_vals, y_vals, '-r')
ax.set_xlabel('log10 (DNA content)')
ax.set_ylabel('kernel density estimate')
ax.set_xlim(dna_lims)
return np.array(dna_gates)
def plot_ldr_dna_scatter(dna, ldrtxt, x_dna=None, x_ldr=None, ax=None):
"""Plot of LDR and DNA scatter with associated gates
Parameters
----------
dna : 1d array
DNA content of cells in a given well
ldrtxt : 1d array
ldr txt feature across all cells in a well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
x_ldr : 1d array
uniformly distributed 1d grid based on expected
range of ldr txt
ax : subplot object
provides positional reference for master plot
Returns
-------
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
if x_ldr is None:
mx = np.max(ldrtxt.tolist())+0.01
x_ldr = np.arange(-0.01, mx, 0.0002)
log_dna = compute_log_dna(dna, x_dna)
xy = np.vstack([log_dna, ldrtxt])
z = gaussian_kde(xy)(xy)
ldr_gates = get_ldrgates(ldrtxt, x_ldr)
g1_g2_pos = get_g1_g2_position(log_dna, x_dna, ldrtxt, ldr_gates)
g1_loc = g1_g2_pos[0]
g2_loc = g1_g2_pos[1]
dna_gates = [a + b for a, b in zip(
[g1_g2_pos[i] for i in [0, 0, 1, 1]],
[(g2_loc-g1_loc) * s for s in [-1.5, -.9, 1.3, 2.2]])]
ldr_gates = [0 if lg < 0 else lg for lg in ldr_gates]
# Plotting
# --------
if ax is None:
ax = plt.figure()
ax.scatter(log_dna, ldrtxt, c=z, s=10)
ax.plot([dna_gates[i] for i in [0, 0, 3, 3, 0]],
[ldr_gates[i] for i in [0, 1, 1, 0, 0]], '-r')
ax.plot([dna_gates[i] for i in [1, 1, 2, 2, 1]],
[ldr_gates[i] for i in [0, 1, 1, 0, 0]],
'-r', linewidth=2)
ax.plot(g1_g2_pos, [0, 0], 'xk', )
ax.plot(g1_g2_pos, [0, 0], 'ok', markersize=14, markerfacecolor='None')
ax.set_xlabel('log10 (DNA content)')
ax.set_ylabel('LDRtxt intensity')
dna_lims = get_dnalims(log_dna, x_dna)
dna_lims = [np.min((dna_lims[0], dna_gates[0]-0.1)),
np.max((dna_lims[1], dna_gates[3]+0.1))]
ldr_lims = get_ldrlims(ldrtxt, x_ldr)
ax.set_xlim(dna_lims)
ax.set_ylim(ldr_lims)
def live_dead(ldrtxt, ldr_gates=None,
dna=None, dna_gates=None,
x_ldr=None, x_dna=None, ax=None):
"""Assign classification to individual cells as live/dead based on
ldrtxt and DNA content.
If ax is not None, plots pie chart of fraction live/dead
1. alive = selected+others, where selected is within
inner DNA gate and within LDR
2. dead = anything outside of DNA outer gating and LDR gating
3. total = alive + dead; selected + others + dead
Parameters
----------
ldrtxt : 1d array
ldr txt feature across all cells in a well
ldr_gates : list of floats
dna : 1d array
DNA content of cells in a given well
dna_gates : list of floats
x_ldr : 1d array
uniformly distributed 1d grid based on expected
range of ldr txt
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ax : subplot object
Returns
-------
alive : int
        number of cells classified as alive
dead : int
        number of cells classified as dead
outcome : 1d array
classification of each cell as live(>=0) or dead (-1).
should have same length as ldrtxt
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
if x_ldr is None:
mx = np.max(ldrtxt.tolist())+0.01
x_ldr = np.arange(-0.01, mx, 0.0002)
outcome = [0] * len(ldrtxt)
if ldr_gates is None:
ldr_gates = get_ldrgates(ldrtxt, x_ldr)
ldr_outer = (ldrtxt < ldr_gates[0]) | (ldrtxt > ldr_gates[1])
outcome = [-1 if b else 0 for b in ldr_outer]
#dead = np.sum([1 for ot in outcome if ot == -1])
alive = np.sum([1 for ot in outcome if ot >= 0])
selected = 'DNA information unavailable'
others = 'DNA information unavailable'
dead_ldrpos = np.sum(ldr_outer)
cell_fate_dict = {'alive': alive, 'dead_ldrpos': dead_ldrpos}
if dna is not None:
log_dna = compute_log_dna(dna, x_dna)
dna_outermost = (log_dna < dna_gates[0]) | (log_dna > dna_gates[3])
dead_ldrpos = np.sum(ldr_outer)
dead_subg1 = np.sum((ldr_outer==False) & (log_dna < dna_gates[0]))
alive_beyondg2 = np.sum((ldr_outer==False) & (log_dna > dna_gates[2]))
alive_subg1 = np.sum((ldr_outer==False) & (log_dna > dna_gates[0]) & (log_dna < dna_gates[1]))
dna_inner = ((log_dna > dna_gates[1]) &
(log_dna < dna_gates[2]) &
(ldr_outer==False))
alive = np.sum(dna_inner)
#outcome = [-1 if d else 1 if s else 0
# for d, s in zip((ldr_outer | dna_outermost), dna_inner)]
outcome = ((1 * dna_inner) # normal live cells
+ (1.5 * ((ldr_outer==False) & (log_dna > dna_gates[2]))) # live but higher than G2
+ (-1 * ((ldr_outer==False) & (log_dna < dna_gates[0]))) # dead very low G1
+ (1.25 * ((ldr_outer==False) & (log_dna > dna_gates[0]) & (log_dna < dna_gates[1]))) # alive lower than G1
+ (-2 * ldr_outer))
cell_fate_dict = {'alive': alive, 'alive_subg1': alive_subg1, 'alive_beyondg2': alive_beyondg2,
'dead_ldrpos': dead_ldrpos, 'dead_subg1': dead_subg1}
#alive = np.sum([1 for ot in outcome if ot >= 0])
#dead = np.sum([1 for s in outcome if s == -1])
#selected = np.sum([1 for s in outcome if s == 1])
#others = np.sum([1 for s in outcome if s == 0])
if ax is not None:
ax.pie([alive, alive_subg1, alive_beyondg2, dead_ldrpos, dead_subg1],
labels=['alive', 'alive_subg1', 'alive_beyondg2', 'dead_ldrpos', 'dead_subg1'],
explode=(0.1, 0.1, 0.1, 0.1, 0.1), autopct='%1.1f%%')
ax.axis('equal')
else:
if ax is not None:
ax.pie([alive, dead_ldrpos], labels=['alive', 'dead_ldrpos'],
explode=(0.1, 0.1), autopct='%1.1f%%')
ax.axis('equal')
return cell_fate_dict, outcome
def plot_summary(ldr, dna, x_ldr=None, well=None):
"""Master summary plot which incorporates above plots as subplots
Parameters
----------
ldr : 1d array
ldr txt feature across all cells in a well
dna : 1d array
DNA content of cells in a given well
well : str
Returns
-------
fig : matplotlib figure object
"""
fig = plt.figure()
gridspec.GridSpec(2, 2)
ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=1, rowspan=1)
ax2 = plt.subplot2grid((2, 2), (0, 1), colspan=1, rowspan=1)
ax3 = plt.subplot2grid((2, 2), (1, 0), colspan=1, rowspan=1)
ax4 = plt.subplot2grid((2, 2), (1, 1), colspan=1, rowspan=1)
ldr_gates, ldr_lims = plot_ldr_gating(ldr, x_ldr=x_ldr, ax=ax1)
dna_gates = get_dna_gating(dna, ldr, ldr_gates, ax=ax2)
plot_ldr_dna_scatter(dna, ldr, x_ldr=x_ldr, ax=ax3)
cell_fate_dict, outcome = live_dead(ldr, ldr_gates, dna, dna_gates, x_ldr=x_ldr, ax=ax4)
fig.tight_layout()
fig.set_size_inches(w=8, h=7)
if well:
fig.savefig('dead_cell_filter_%s.png' % well, dpi=300)
return fig
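# Hypothetical usage sketch (not part of the original module): given per-cell
# LDR and DNA arrays for one well, the full gating figure could be produced
# with something like the following; the column names are assumptions.
#
#     # ldr = df['Nuclei Selected - LDRtxt'].values       # hypothetical column
#     # dna = df['Nuclei Selected - DNAcontent'].values   # hypothetical column
#     # fig = plot_summary(ldr, dna, well='B03')
#
# The figure is also written to 'dead_cell_filter_B03.png' when `well` is given.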
def get_ldrgates2(ldrtxt, x_ldr=None):
if x_ldr is None:
mx = np.max(ldrtxt.tolist())+0.01
x_ldr = np.arange(-0.01, mx, 0.0002)
f_ldr = get_kde(ldrtxt, x_ldr)
peak_locs, _ = find_peaks(-f_ldr)
ldr_cutoff = x_ldr[peak_locs[0]]
ldr_gates = [-np.inf, ldr_cutoff]
return np.array(ldr_gates)
def get_ldrintgates2(batch, well, cutoff=None):
fl = [s for s in os.listdir(batch)
if s.endswith('Nuclei Selected[0].txt')]
wl = "result.%s[" % well
fls = [s for s in fl if wl in s][0]
df = pd.read_table("%s/%s" % (batch, fls))
ldrint = df['Nuclei Selected - LDRINT']
fig, ax = plt.subplots()
total_cells = len(ldrint)
logint = np.log10(ldrint)
logint = logint[~np.isnan(logint)]
x, y = sns.kdeplot(logint, ax=ax).get_lines()[0].get_data()
#plt.close('all')
peak_locs, _ = find_peaks(-y)
cc = x[peak_locs]
if cutoff is None:
if len(cc) > 0:
try:
cutoff = cc[cc>1][0]
except IndexError:
cutoff = cc[-1]
dead_cells = len(logint[logint > cutoff])
else:
cutoff = np.nan
dead_cells = np.nan
ax.vlines(cutoff, 0, 0.2, linestyles='dashed', alpha=0.5)
ax.set_title("%s_%s" % (batch, well))
return ax, dead_cells, total_cells
def get_ldrintgates(ldrint, cutoff=None):
#ldrint = df['Nuclei Selected - LDRINT']
total_cells = len(ldrint)
logint = np.log10(ldrint)
logint = logint[~np.isnan(logint)]
fig, ax = plt.subplots()
x, y = sns.kdeplot(logint, ax=ax).get_lines()[0].get_data()
plt.close('all')
peak_locs, _ = find_peaks(-y)
cc = x[peak_locs]
    if cutoff is None:
        if len(cc) > 0:
            try:
                cutoff = cc[cc > 1][0]
                dead_cells = len(logint[logint > cutoff])
            except IndexError:
                cutoff = np.nan
                dead_cells = np.nan
        else:
            cutoff = np.nan
            dead_cells = np.nan
    else:
        # A cutoff supplied by the caller is used as-is to count dead cells.
        dead_cells = len(logint[logint > cutoff])
    return cutoff, dead_cells, total_cells
|
the-stack_106_14915
|
import math
# correct method
def is_prime(number):
if number == 1:
return False
mult = False
for count in range(2, number):
if (number % count == 0):
mult = True
break
if mult:
return False
else:
return True
# candidate method
def is_prime_candidate(n):
if (n == 2 or n == 3 or n == 5 or n == 7):
return True
if ((n % 2 == 0 or n % 3 == 0 or n % 5 == 0 or n % 7 == 0)):
return False
qrd = math.sqrt(n)
if (qrd % 1 == 0):
return False
return True
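# An aside for comparison (not part of the original exercise): a correct
# square-root bounded trial-division test. The candidate above happens to agree
# with the reference on 1..100 (every composite below 121 has a prime factor
# <= 7), but it misclassifies e.g. 143 = 11 * 13 as prime.
def is_prime_sqrt(n):
    # Any factor pair of n has one member <= sqrt(n), so checking that far is enough.
    if n < 2:
        return False
    for d in range(2, int(math.sqrt(n)) + 1):
        if n % d == 0:
            return False
    return True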
# check if a candidate method is correct
is_correct = True
for number in range(1, 101):
candidate_result = is_prime_candidate(number)
system_result = is_prime(number)
if (candidate_result != system_result):
print('Number {} is prime: {}, expect {}'.format(number, candidate_result, system_result))
is_correct = False
if (is_correct):
print('All sounds good!')
else:
print('Problems found!!!!')
|
the-stack_106_14916
|
from flask_restful import fields, marshal_with, reqparse, Resource
import os
import config
import logging
import magic
import cv2
from flask.json import jsonify
from repositories import FoodRecognition
import keras
from keras.applications.inception_v3 import preprocess_input
def check_image_file_id(id):
if os.path.exists(os.path.join(config.FILE_STORAGE_PATH, id)) and os.path.isfile(os.path.join(config.FILE_STORAGE_PATH, id)):
f = magic.Magic(mime=True, uncompress=True)
fileType = f.from_file(os.path.join(config.FILE_STORAGE_PATH, id))
if fileType == 'image/jpeg' or fileType == 'image/jpg' or fileType == 'image/png':
logging.debug("file id %s is a valid %s image file" % (id, fileType))
return id
else:
logging.debug("file id %s is not a valid image file" % (id))
raise ValueError("file id {} doesn't exists".format(id))
else:
logging.debug("file id %s doesn't exists" % (id))
raise ValueError("file id {} doesn't exists".format(id))
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('Content-Type', location='headers', type=str, help='Please set Content-Type as application/json')
#parser.add_argument('image_file_id', location='json', type=check_image_file_id, help='Please provide valid image_file_id in JPEG/PNG format', required=True)
class RectResource(Resource):
model = None
process_image = None
def __init__(self):
if self.process_image is None:
self.process_image = preprocess_input
def get(self):
args = parser.parse_args()
food_recog = FoodRecognition(config.FILE_STORAGE_PATH, self.process_image)
recipe_name = food_recog.main()
keras.backend.tensorflow_backend.clear_session()
return {
'status': {
'code' : 200,
'message' : 'api successful'
},
'recipe_name': str(recipe_name)
}
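# Hypothetical wiring sketch (names below are illustrative and not defined in
# this module): RectResource would typically be registered on a Flask-RESTful
# Api, e.g.
#     from flask import Flask
#     from flask_restful import Api
#     app = Flask(__name__)
#     api = Api(app)
#     api.add_resource(RectResource, '/recognize')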
|
the-stack_106_14917
|
#
# This file is part of MicroPython MPU9250 driver
# Copyright (c) 2018 Mika Tuupola
# Copyright (c) 2018 0x1abin (added the yaw,picth,roll api and complementary filtering)
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# Project home:
# https://github.com/tuupola/micropython-mpu9250
#
"""
MicroPython I2C driver for MPU6500 6-axis motion tracking device
"""
__version__ = "0.2.0-dev"
# pylint: disable=import-error
import ustruct
from machine import I2C, Pin
from micropython import const
import math
import utime as time
# pylint: enable=import-error
_SMPLRT_DIV = const(0x19)
_CONFIG = const(0x1a)
_PWR_MGMT_1 = const(0x6b)
_GYRO_CONFIG = const(0x1b)
_ACCEL_CONFIG = const(0x1c)
_ACCEL_CONFIG2 = const(0x1d)
_INT_PIN_CFG = const(0x37)
_ACCEL_XOUT_H = const(0x3b)
_ACCEL_XOUT_L = const(0x3c)
_ACCEL_YOUT_H = const(0x3d)
_ACCEL_YOUT_L = const(0x3e)
_ACCEL_ZOUT_H = const(0x3f)
_ACCEL_ZOUT_L= const(0x40)
_TEMP_OUT_H = const(0x41)
_TEMP_OUT_L = const(0x42)
_GYRO_XOUT_H = const(0x43)
_GYRO_XOUT_L = const(0x44)
_GYRO_YOUT_H = const(0x45)
_GYRO_YOUT_L = const(0x46)
_GYRO_ZOUT_H = const(0x47)
_GYRO_ZOUT_L = const(0x48)
_WHO_AM_I = const(0x75)
#_ACCEL_FS_MASK = const(0b00011000)
ACCEL_FS_SEL_2G = const(0b00000000)
ACCEL_FS_SEL_4G = const(0b00001000)
ACCEL_FS_SEL_8G = const(0b00010000)
ACCEL_FS_SEL_16G = const(0b00011000)
_ACCEL_SO_2G = 16384 # 1 / 16384 ie. 0.061 mg / digit
_ACCEL_SO_4G = 8192 # 1 / 8192 ie. 0.122 mg / digit
_ACCEL_SO_8G = 4096 # 1 / 4096 ie. 0.244 mg / digit
_ACCEL_SO_16G = 2048 # 1 / 2048 ie. 0.488 mg / digit
#_GYRO_FS_MASK = const(0b00011000)
GYRO_FS_SEL_250DPS = const(0b00000000)
GYRO_FS_SEL_500DPS = const(0b00001000)
GYRO_FS_SEL_1000DPS = const(0b00010000)
GYRO_FS_SEL_2000DPS = const(0b00011000)
_GYRO_SO_250DPS = 131
_GYRO_SO_500DPS = 65.5
_GYRO_SO_1000DPS = 32.8
_GYRO_SO_2000DPS = 16.4
# Used for enabling and disabling the i2c bypass access
_I2C_BYPASS_MASK = const(0b00000010)
_I2C_BYPASS_EN = const(0b00000010)
_I2C_BYPASS_DIS = const(0b00000000)
SF_G = 1
SF_M_S2 = 9.80665 # 1 g = 9.80665 m/s2 ie. standard gravity
SF_DEG_S = 1
SF_RAD_S = 57.295779513082 # 1 rad/s is 57.295779513082 deg/s
class MPU6050:
"""Class which provides interface to MPU6500 6-axis motion tracking device."""
def __init__(
self, i2c=None, address=0x68,
accel_fs=ACCEL_FS_SEL_2G, gyro_fs=GYRO_FS_SEL_500DPS,
accel_sf=SF_G, gyro_sf=SF_DEG_S
):
if i2c:
self.i2c = i2c
else:
import i2c_bus
self.i2c = i2c_bus.get(i2c_bus.M_BUS)
# from machine import I2C
# self.i2c = I2C(sda=21, scl=22, speed=400000)
self.address = address
# if 0x71 != self.whoami:
# raise RuntimeError("MPU6500 not found in I2C bus.")
# Init
self._register_char(_SMPLRT_DIV, 0x00)
self._register_char(_CONFIG, 0x00)
self._accel_so = self._accel_fs(accel_fs)
self._gyro_so = self._gyro_fs(gyro_fs)
self._accel_sf = accel_sf
self._gyro_sf = gyro_sf
self._register_char(_PWR_MGMT_1, 0x01)
# Enable I2C bypass to access for MPU9250 magnetometer access.
# char = self._register_char(_INT_PIN_CFG)
# char &= ~_I2C_BYPASS_MASK # clear I2C bits
# char |= _I2C_BYPASS_EN
# self._register_char(_INT_PIN_CFG, char)
        self.preInterval = time.ticks_us()
self.accCoef = 0.02
self.gyroCoef = 0.98
self.angleGyroX = 0
self.angleGyroY = 0
self.angleGyroZ = 0
self.angleX = 0
self.angleZ = 0
self.angleY = 0
self.gyroXoffset = 0
self.gyroYoffset = 0
self.gyroZoffset = 0
def setGyroOffsets(self, x, y, z):
self.gyroXoffset = x
self.gyroYoffset = y
self.gyroZoffset = z
@property
def acceleration(self):
"""
Acceleration measured by the sensor. By default will return a
3-tuple of X, Y, Z axis acceleration values in m/s^2 as floats. Will
return values in g if constructor was provided `accel_sf=SF_M_S2`
parameter.
"""
so = self._accel_so
sf = self._accel_sf
xyz = self._register_three_shorts(_ACCEL_XOUT_H)
return tuple([value / so * sf for value in xyz])
@property
def gyro(self):
"""
X, Y, Z radians per second as floats.
"""
so = self._gyro_so
sf = self._gyro_sf
xyz = self._register_three_shorts(_GYRO_XOUT_H)
return tuple([value / so * sf for value in xyz])
@property
def ypr(self):
"""
yaw, pitch, roll as floats.
"""
accX, accY, accZ = self.acceleration
        angleAccX = math.atan2(accY, accZ + abs(accX)) * SF_RAD_S
        angleAccY = math.atan2(accX, accZ + abs(accY)) * (-SF_RAD_S)
gyroX, gyroY, gyroZ = self.gyro
gyroX -= self.gyroXoffset
gyroY -= self.gyroYoffset
gyroZ -= self.gyroZoffset
interval = (time.ticks_us() - self.preInterval) / 1000000
self.preInterval = time.ticks_us()
self.angleGyroX += gyroX * interval
self.angleGyroY += gyroY * interval
self.angleGyroZ += gyroZ * interval
        self.angleX = (self.gyroCoef * (self.angleX + gyroX * interval)) + (self.accCoef * angleAccX)
        self.angleY = (self.gyroCoef * (self.angleY + gyroY * interval)) + (self.accCoef * angleAccY)
self.angleZ = self.angleGyroZ
return tuple([round(self.angleZ, 3), round(self.angleX, 3), round(self.angleY, 3)])
@property
def whoami(self):
""" Value of the whoami register. """
return self._register_char(_WHO_AM_I)
def _register_short(self, register, value=None, buf=bytearray(2)):
if value is None:
self.i2c.readfrom_mem_into(self.address, register, buf)
return ustruct.unpack(">h", buf)[0]
ustruct.pack_into(">h", buf, 0, value)
return self.i2c.writeto_mem(self.address, register, buf)
def _register_three_shorts(self, register, buf=bytearray(6)):
self.i2c.readfrom_mem_into(self.address, register, buf)
return ustruct.unpack(">hhh", buf)
def _register_char(self, register, value=None, buf=bytearray(1)):
if value is None:
self.i2c.readfrom_mem_into(self.address, register, buf)
return buf[0]
ustruct.pack_into("<b", buf, 0, value)
return self.i2c.writeto_mem(self.address, register, buf)
def _accel_fs(self, value):
self._register_char(_ACCEL_CONFIG, value)
# Return the sensitivity divider
if ACCEL_FS_SEL_2G == value:
return _ACCEL_SO_2G
elif ACCEL_FS_SEL_4G == value:
return _ACCEL_SO_4G
elif ACCEL_FS_SEL_8G == value:
return _ACCEL_SO_8G
elif ACCEL_FS_SEL_16G == value:
return _ACCEL_SO_16G
def _gyro_fs(self, value):
self._register_char(_GYRO_CONFIG, value)
# Return the sensitivity divider
if GYRO_FS_SEL_250DPS == value:
return _GYRO_SO_250DPS
elif GYRO_FS_SEL_500DPS == value:
return _GYRO_SO_500DPS
elif GYRO_FS_SEL_1000DPS == value:
return _GYRO_SO_1000DPS
elif GYRO_FS_SEL_2000DPS == value:
return _GYRO_SO_2000DPS
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
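# Usage sketch (hypothetical wiring; the pin numbers and the I2C constructor
# vary by board/port, so treat this as an assumption rather than a reference):
#     from machine import I2C, Pin
#     i2c = I2C(0, scl=Pin(22), sda=Pin(21), freq=400000)
#     imu = MPU6050(i2c)
#     while True:
#         yaw, pitch, roll = imu.ypr
#         print(yaw, pitch, roll)
#         time.sleep_ms(10)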
|
the-stack_106_14919
|
# coding: utf8
"""
cairocffi
~~~~~~~~~
CFFI-based cairo bindings for Python. See README for details.
:copyright: Copyright 2013 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
import sys
from cffi import FFI
from . import constants
from .compat import FileNotFoundError
VERSION = '0.5.4'
# pycairo compat:
version = '1.10.0'
version_info = (1, 10, 0)
def dlopen(ffi, *names):
"""Try various names for the same library, for different platforms."""
for name in names:
try:
return ffi.dlopen(name)
except OSError:
pass
# Re-raise the exception.
return ffi.dlopen(names[0]) # pragma: no cover
ffi = FFI()
ffi.cdef(constants._CAIRO_HEADERS)
cairo = dlopen(ffi, 'libcairo.so.2', 'libcairo.2.dylib', 'libcairo-2.dll',
'cairo', 'libcairo-2')
class CairoError(Exception):
"""Raised when cairo returns an error status."""
def __init__(self, message, status):
super(CairoError, self).__init__(message)
self.status = status
Error = CairoError # pycairo compat
STATUS_TO_EXCEPTION = {
constants.STATUS_NO_MEMORY: MemoryError,
constants.STATUS_READ_ERROR: IOError,
constants.STATUS_WRITE_ERROR: IOError,
constants.STATUS_TEMP_FILE_ERROR: IOError,
constants.STATUS_FILE_NOT_FOUND: FileNotFoundError,
}
def _check_status(status):
"""Take a cairo status code and raise an exception if/as appropriate."""
if status != constants.STATUS_SUCCESS:
exception = STATUS_TO_EXCEPTION.get(status, CairoError)
status_name = ffi.string(ffi.cast("cairo_status_t", status))
message = 'cairo returned %s: %s' % (
status_name, ffi.string(cairo.cairo_status_to_string(status)))
raise exception(message, status)
def cairo_version():
"""Return the cairo version number as a single integer,
such as 11208 for ``1.12.8``.
Major, minor and micro versions are "worth" 10000, 100 and 1 respectively.
    Can be useful as a guard for methods not available in older cairo versions::
if cairo_version() >= 11000:
surface.set_mime_data('image/jpeg', jpeg_bytes)
"""
return cairo.cairo_version()
def cairo_version_string():
"""Return the cairo version number as a string, such as ``1.12.8``."""
return ffi.string(cairo.cairo_version_string()).decode('ascii')
def install_as_pycairo():
"""Install cairocffi so that ``import cairo`` imports it.
    cairocffi’s API is compatible with pycairo as much as possible.
"""
sys.modules['cairo'] = sys.modules[__name__]
# Implementation is in submodules, but public API is all here.
from .surfaces import (Surface, ImageSurface, PDFSurface, PSSurface,
SVGSurface, RecordingSurface)
from .patterns import (Pattern, SolidPattern, SurfacePattern,
Gradient, LinearGradient, RadialGradient)
from .fonts import FontFace, ToyFontFace, ScaledFont, FontOptions
from .context import Context
from .matrix import Matrix
from .constants import *
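# Minimal usage sketch (illustrative; assumes a system cairo library is installed):
#     surface = ImageSurface(constants.FORMAT_ARGB32, 100, 100)
#     context = Context(surface)
#     context.set_source_rgb(1, 0, 0)
#     context.paint()
#     surface.write_to_png('example.png')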
|
the-stack_106_14920
|
from arjuna.tpi.engine.asserter import Asserter
class Info:
def __init__(self, pytest_request, attrs=None):
self.__attrs = attrs
self.__request = pytest_request
rnode = self.__request.node
self.__node_name = rnode.name
if self.__request.scope == "module":
self.__orig_name = rnode.name
elif self.__request.scope == "function":
self.__orig_name = rnode.originalname and rnode.originalname or rnode.name
def get_qual_name(self, with_params=False):
# if pytest name has params only then originalname is set else it is None
if self.__request.scope in {"module", "session"}:
return self.__node_name
else:
name = with_params and self.__node_name or self.__orig_name
return self.__request.module.__name__ + "." + name
def get_qual_name_with_data(self):
qname = self.get_qual_name(with_params=True)
if self.__request.fixturename:
return qname + ":" + self.__request.fixturename
else:
return qname
def __getattr__(self, name):
if name in self.__attrs:
return self.__attrs[name]
else:
try:
return getattr(self.__request, name)
except:
raise Exception("{name} is not a valid test information attribute. Built-in Arjuna attributes: {attrs}".format(name=name, attrs=str(self.__attrs)))
_LOOKUP_ORDER = {
"session" : ("session", ),
"module" : ("session", "module"),
"class" : ("function", "cls", "module"),
"function" : ("function", "cls", "module", "session")
}
_SCOPE_MAP = {
"function" : "function",
"class" : "cls",
"module" : "module",
"session" : "session"
}
class Space:
def __init__(self, pytest_request):
vars(self)['_request'] = pytest_request
try:
config = self.arj_config
except:
from arjuna import Arjuna
self.arj_config = Arjuna.get_config()
def __getitem__(self, name):
scopes = _LOOKUP_ORDER[self._request.scope]
from arjuna import log_trace
for scope in scopes:
log_trace("Space: Getting value for {} from {} scope".format(name, scope))
try:
container = getattr(self._request, _SCOPE_MAP[scope])
return getattr(container, name)
except Exception as e:
log_trace("Space: No value for {} in {} scope".format(name, scope))
continue
raise Exception("Attribute with name >>{}<< does not exist in request scope for {}".format(name, scopes))
def _get_container_for_scope(self):
return getattr(self._request, _SCOPE_MAP[self._request.scope])
def __setitem__(self, name, value):
container = self._get_container_for_scope()
setattr(container, name, value)
def __getattr__(self, name):
from arjuna import log_trace
if type(name) is str and not name.startswith("__"):
try:
val = self[name]
log_trace("Space: Got value {} for {}.".format(val, name))
return val
except Exception as e:
log_trace("Space: No value for {} in {} in any scope.".format(name))
raise AttributeError(str(e))
def __setattr__(self, name, value):
container = self._get_container_for_scope()
from arjuna import log_trace
log_trace("Space: Setting {}={} in {} scope".format(name, value, self._request.scope)) #, contexts="request")
setattr(container, name, value)
@property
def raw_request(self):
return self._request
class GroupSpace(Space):
def __init__(self, pytest_request):
super().__init__(pytest_request)
def _get_container_for_scope(self):
return getattr(self._request, "session") # Each Arjuna group represents a Pytest session in a thread.
class ModuleSpace(Space):
def __init__(self, pytest_request):
super().__init__(pytest_request)
def _get_container_for_scope(self):
return getattr(self._request, "module")
class Module:
def __init__(self, py_request):
self._space = ModuleSpace(py_request)
@property
def space(self):
return self._space
class Group:
def __init__(self, py_request):
self._space = GroupSpace(py_request)
self._info = py_request.session.group_info
@property
def space(self):
return self._space
@property
def thread_name(self):
return self._info.thread_name
@property
def name(self):
return self._info.name
@property
def config(self):
return self._info.config
class My:
def __init__(self, test_meta_data=None):
self._data = None
self._info = None
self._handler = None
self._qual_name = None
self._request = None
self._shared_objects = None
self._asserter = Asserter() #unittest.TestCase('__init__')
self._space = None
self._module = None
self._attrs = None
self._tags = None
self._bugs = None
self._envs = None
if test_meta_data:
self._attrs = test_meta_data['info']
if self._attrs['id'] is None:
self._attrs['id'] = self._attrs['qual_name']
self._tags = test_meta_data['tags']
self._bugs = test_meta_data['bugs']
self._envs = test_meta_data['envs']
self._group = None
@property
def config(self):
return self.space.arj_config
@property
def tags(self):
return self._tags
@property
def bugs(self):
return self._bugs
@property
def envs(self):
return self._envs
def get_config(self, name=None):
if name is None:
return self.config
else:
from arjuna import Arjuna
return Arjuna.get_config(name)
@property
def contextual_data_refs(self):
from arjuna import Arjuna
return Arjuna.get_data_references()
@property
def group(self):
'''
This info is available only within the body of a fixture or test function.
'''
# By this stage the Arjuna's built-in default group fixture has executed and group_info is available as pytest_request.session.group_info
if not self._group:
self._group = Group(self._request)
return self._group
@property
def module(self):
return self._module
@property
def data(self):
return self._data
@property
def asserter(self):
return self._asserter
@data.setter
def data(self, record):
self._data = record
@property
def space(self):
return self._space
def set_req_obj(self, pytest_request):
self._request = pytest_request
self._info = Info(pytest_request, self._attrs)
self._space = Space(pytest_request)
if pytest_request.scope in {"function"}:
if not self._module:
self._module = Module(pytest_request)
if not self._group:
self._group = Group(self._request)
if pytest_request.scope in {"module"}:
if not self._group:
self._group = Group(self._request)
@property
def info(self):
return self._info
@property
def resources(self):
return self._resources
@property
def raw_request(self):
return self._request
|
the-stack_106_14922
|
import torch
from pytest import approx
from torch.nn import functional as F
from homura.modules import functional as HF
def test_gumbel_sigmoid():
input = torch.tensor([10.0, -10.0])
samples = sum([HF.gumbel_sigmoid(input, 0.01) for _ in range(400)]) / 400
assert samples.tolist() == approx([1, 0], abs=1e-2)
def test_ste():
input = torch.randn(3, requires_grad=True)
dummy = input.clone().detach().requires_grad_(True)
HF.straight_through_estimator(input).sum().backward()
dummy.sum().backward()
assert all(input.grad == dummy.grad)
def test_custom_ste():
fwd = torch.randn(3)
fwd2 = fwd.clone()
bwd = torch.randn(3, requires_grad=True)
bwd2 = bwd.detach().clone().requires_grad_(True)
x = HF.custom_straight_through_estimator(fwd, bwd)
assert torch.equal(x, fwd)
(x ** 2).sum().backward()
x2 = fwd2 + (bwd2 - bwd2.detach())
assert torch.equal(x2, fwd2)
(x2 ** 2).sum().backward()
print(x, x2)
assert torch.equal(bwd.grad.data, bwd2.grad.data)
def test_semantic_hashing():
from homura.modules.functional.discretizations import _saturated_sigmoid
for _ in range(10):
input = torch.randn(3, requires_grad=True)
dummy = input.clone().detach().requires_grad_(True)
HF.semantic_hashing(input, is_training=True).sum().backward()
_saturated_sigmoid(dummy).sum().backward()
assert all(input.grad == dummy.grad)
def test_cross_entropy():
input = torch.randn(1, 10)
target = torch.tensor([4]).long()
onehot_target = torch.zeros(1, 10)
onehot_target[0, 4] = 1
output = HF.cross_entropy_with_softlabels(input, onehot_target)
expected = F.cross_entropy(input, target)
assert output.item() == approx(expected.item())
def test_knn():
k = 5
keys = torch.randn(10, 6)
qu = torch.randn(20, 6)
s, i = HF.k_nearest_neighbor(keys, qu, k, "l2")
assert s.size() == torch.Size([20, k])
|
the-stack_106_14923
|
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The facts class for exos
this file validates each subset of facts and selectively
calls the appropriate facts gathering function
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.exos.argspec.facts.facts import FactsArgs
from ansible.module_utils.network.common.facts.facts import FactsBase
from ansible.module_utils.network.exos.facts.lldp_global.lldp_global import Lldp_globalFacts
from ansible.module_utils.network.exos.facts.legacy.base import Default, Hardware, Interfaces, Config
FACT_LEGACY_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config)
FACT_RESOURCE_SUBSETS = dict(
lldp_global=Lldp_globalFacts,
)
class Facts(FactsBase):
""" The fact class for exos
"""
VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys())
VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys())
def __init__(self, module):
super(Facts, self).__init__(module)
def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None):
""" Collect the facts for exos
:param legacy_facts_type: List of legacy facts types
:param resource_facts_type: List of resource fact types
:param data: previously collected conf
:rtype: dict
:return: the facts gathered
"""
if self.VALID_RESOURCE_SUBSETS:
self.get_network_resources_facts(FACT_RESOURCE_SUBSETS, resource_facts_type, data)
if self.VALID_LEGACY_GATHER_SUBSETS:
self.get_network_legacy_facts(FACT_LEGACY_SUBSETS, legacy_facts_type)
return self.ansible_facts, self._warnings
|
the-stack_106_14925
|
from MpuRm3100 import IMU
import time
DRDY = 27 #GPIO 27
SSN = 17 #GPIO 17
imu = IMU(SSN,DRDY)
imu.start()
while True:
time.sleep(0.01)
    if imu.Readings is not None:
print(imu.Readings['Yaw'])
|
the-stack_106_14926
|
from argparse import ArgumentParser
import codecs
import json
import logging
import os
import pickle
import sys
import tempfile
from typing import Union
import numpy as np
from rusenttokenize import ru_sent_tokenize
try:
from deep_ner.bert_ner import BERT_NER, bert_ner_logger
from deep_ner.utils import factrueval2016_to_json, load_dataset_from_json, load_dataset_from_brat, set_total_seed
from deep_ner.utils import divide_dataset_by_sentences
from deep_ner.quality import calculate_prediction_quality
from deep_ner.dataset_splitting import sample_from_dataset, split_dataset
except:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from deep_ner.bert_ner import BERT_NER, bert_ner_logger
    from deep_ner.utils import factrueval2016_to_json, load_dataset_from_json, load_dataset_from_brat, set_total_seed
    from deep_ner.utils import divide_dataset_by_sentences
from deep_ner.quality import calculate_prediction_quality
from deep_ner.dataset_splitting import sample_from_dataset, split_dataset
def train(factrueval2016_devset_dir: str, split_by_paragraphs: bool, bert_will_be_tuned: bool,
use_lang_features: bool, use_shapes: bool, lstm_layer_size: Union[int, None], l2: float,
max_epochs: int, patience: int, batch_size: int, gpu_memory_frac: float,
model_name: str, collection3_dir: Union[str, None]=None, n_max_samples: int=0) -> BERT_NER:
if os.path.isfile(model_name):
with open(model_name, 'rb') as fp:
recognizer = pickle.load(fp)
assert isinstance(recognizer, BERT_NER)
print('The NER has been successfully loaded from the file `{0}`...'.format(model_name))
print('')
else:
temp_json_name = tempfile.NamedTemporaryFile(mode='w').name
try:
factrueval2016_to_json(factrueval2016_devset_dir, temp_json_name, split_by_paragraphs)
X, y = load_dataset_from_json(temp_json_name)
finally:
if os.path.isfile(temp_json_name):
os.remove(temp_json_name)
print('The FactRuEval-2016 data for training have been loaded...')
print('Number of samples is {0}.'.format(len(y)))
print('')
if BERT_NER.PATH_TO_BERT is None:
bert_hub_module_handle = 'https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1'
else:
bert_hub_module_handle = None
recognizer = BERT_NER(
finetune_bert=bert_will_be_tuned, batch_size=batch_size, l2_reg=l2,
bert_hub_module_handle=bert_hub_module_handle, lstm_units=lstm_layer_size, validation_fraction=0.25,
max_epochs=max_epochs, patience=patience, gpu_memory_frac=gpu_memory_frac, verbose=True, random_seed=42,
lr=3e-6 if bert_will_be_tuned else 1e-4,
udpipe_lang='ru', use_nlp_features=use_lang_features, use_shapes=use_shapes
)
if collection3_dir is None:
if n_max_samples > 0:
train_index, test_index = split_dataset(y=y, test_part=recognizer.validation_fraction)
X_train = np.array(X, dtype=object)[train_index]
y_train = np.array(y, dtype=object)[train_index]
X_val = np.array(X, dtype=object)[test_index]
y_val = np.array(y, dtype=object)[test_index]
del train_index, test_index
index = sample_from_dataset(y=y_train, n=n_max_samples)
recognizer.fit(X_train[index], y_train[index], validation_data=(X_val, y_val))
else:
recognizer.fit(X, y)
else:
X_train, y_train = load_dataset_from_brat(collection3_dir, split_by_paragraphs=True)
if not split_by_paragraphs:
X_train, y_train = divide_dataset_by_sentences(X_train, y_train, sent_tokenize_func=ru_sent_tokenize)
for sample_idx in range(len(y_train)):
new_y_sample = dict()
for ne_type in sorted(list(y_train[sample_idx].keys())):
if ne_type == 'PER':
new_y_sample['PERSON'] = y_train[sample_idx][ne_type]
elif ne_type == 'LOC':
new_y_sample['LOCATION'] = y_train[sample_idx][ne_type]
else:
new_y_sample[ne_type] = y_train[sample_idx][ne_type]
y_train[sample_idx] = new_y_sample
del new_y_sample
print('The Collection3 data for training have been loaded...')
print('Number of samples is {0}.'.format(len(y_train)))
print('')
if n_max_samples > 0:
index = sample_from_dataset(y=y_train, n=n_max_samples)
X_train = np.array(X_train, dtype=object)[index]
y_train = np.array(y_train, dtype=object)[index]
del index
recognizer.fit(X_train, y_train, validation_data=(X, y))
with open(model_name, 'wb') as fp:
pickle.dump(recognizer, fp)
print('')
print('The NER has been successfully fitted and saved into the file `{0}`...'.format(model_name))
print('')
return recognizer
def recognize(factrueval2016_testset_dir: str, split_by_paragraphs: bool, recognizer: BERT_NER, results_dir: str):
temp_json_name = tempfile.NamedTemporaryFile(mode='w').name
try:
factrueval2016_to_json(factrueval2016_testset_dir, temp_json_name, split_by_paragraphs)
with codecs.open(temp_json_name, mode='r', encoding='utf-8', errors='ignore') as fp:
data_for_testing = json.load(fp)
_, true_entities = load_dataset_from_json(temp_json_name)
finally:
if os.path.isfile(temp_json_name):
os.remove(temp_json_name)
texts = []
additional_info = []
for cur_document in data_for_testing:
base_name = os.path.join(results_dir, cur_document['base_name'] + '.task1')
for cur_paragraph in cur_document['paragraph_bounds']:
texts.append(cur_document['text'][cur_paragraph[0]:cur_paragraph[1]])
additional_info.append((base_name, cur_paragraph))
print('Data for final testing have been loaded...')
print('Number of samples is {0}.'.format(len(true_entities)))
print('')
predicted_entities = recognizer.predict(texts)
assert len(predicted_entities) == len(true_entities)
f1, precision, recall, quality_by_entities = calculate_prediction_quality(
true_entities, predicted_entities, recognizer.classes_list_)
print('All entities:')
print(' F1-score is {0:.2%}.'.format(f1))
print(' Precision is {0:.2%}.'.format(precision))
print(' Recall is {0:.2%}.'.format(recall))
for ne_type in sorted(list(quality_by_entities.keys())):
print(' {0}'.format(ne_type))
print(' F1-score is {0:.2%}.'.format(quality_by_entities[ne_type][0]))
print(' Precision is {0:.2%}.'.format(quality_by_entities[ne_type][1]))
print(' Recall is {0:.2%}.'.format(quality_by_entities[ne_type][2]))
results_for_factrueval_2016 = dict()
for sample_idx, cur_result in enumerate(predicted_entities):
base_name, paragraph_bounds = additional_info[sample_idx]
for entity_type in cur_result:
if entity_type == 'ORG':
prepared_entity_type = 'org'
elif entity_type == 'PERSON':
prepared_entity_type = 'per'
elif entity_type == 'LOCATION':
prepared_entity_type = 'loc'
else:
prepared_entity_type = None
if prepared_entity_type is None:
raise ValueError('`{0}` is unknown entity type!'.format(entity_type))
for entity_bounds in cur_result[entity_type]:
postprocessed_entity = (
prepared_entity_type,
entity_bounds[0] + paragraph_bounds[0],
entity_bounds[1] - entity_bounds[0]
)
if base_name in results_for_factrueval_2016:
results_for_factrueval_2016[base_name].append(postprocessed_entity)
else:
results_for_factrueval_2016[base_name] = [postprocessed_entity]
for base_name in results_for_factrueval_2016:
with codecs.open(base_name, mode='w', encoding='utf-8', errors='ignore') as fp:
for cur_entity in sorted(results_for_factrueval_2016[base_name], key=lambda it: (it[1], it[2], it[0])):
fp.write('{0} {1} {2}\n'.format(cur_entity[0], cur_entity[1], cur_entity[2]))
def main():
parser = ArgumentParser()
parser.add_argument('-m', '--model', dest='model_name', type=str, required=True,
help='The binary file with the NER model.')
parser.add_argument('-d', '--data', dest='data_name', type=str, required=True,
help='Path to the FactRuEval-2016 repository.')
parser.add_argument('-n', '--number', dest='samples_number', type=int, required=False, default=None,
help='Number of samples of the training sub-set.')
parser.add_argument('-r', '--result', dest='result_name', type=str, required=True,
help='The directory into which all recognized named entity labels will be saved.')
parser.add_argument('-c', '--collection', dest='collection_data_name', type=str, required=False, default=None,
help='Path to the Collection-3 data set.')
parser.add_argument('--batch', dest='batch_size', type=int, required=False, default=16,
help='Size of mini-batch.')
parser.add_argument('--max_epochs', dest='max_epochs', type=int, required=False, default=100,
help='Maximal number of training epochs.')
parser.add_argument('--patience', dest='patience', type=int, required=False, default=10,
help='Number of iterations with no improvement to wait before stopping the training.')
parser.add_argument('--lstm', dest='lstm_units', type=int, required=False, default=None,
                        help='The LSTM layer size (if it is not specified, then the LSTM layer is not used).')
parser.add_argument('--l2', dest='l2_coeff', type=float, required=False, default=1e-2,
help='L2 regularization factor.')
parser.add_argument('--gpu_frac', dest='gpu_memory_frac', type=float, required=False, default=0.9,
                        help='Fraction of the GPU memory to allocate for the NER model.')
parser.add_argument('--finetune_bert', dest='finetune_bert', required=False, action='store_true',
                        default=False, help='Will the BERT and CRF be fine-tuned together, or will the BERT be frozen?')
parser.add_argument('--path_to_bert', dest='path_to_bert', required=False, type=str,
                        default=None, help='Path to the BERT model (if it is not specified, then the standard '
'multilingual BERT model from the TF-Hub will be used).')
parser.add_argument('--text', dest='text_unit', type=str, choices=['sentence', 'paragraph'], required=False,
default='sentence', help='Text unit: sentence or paragraph.')
parser.add_argument('--lang_features', dest='lang_features', required=False, action='store_true',
                        default=False, help='Will morphology and syntax be used as additional features?')
parser.add_argument('--shapes', dest='shapes', required=False, action='store_true',
                        default=False, help='Will word shapes be used as additional features?')
parser.add_argument('--seed', dest='random_seed', type=int, required=False, default=None,
help='The random seed.')
args = parser.parse_args()
if args.text_unit not in {'sentence', 'paragraph'}:
raise ValueError('`{0}` is wrong value for the `text_unit` parameter!'.format(args.text_unit))
if args.path_to_bert is None:
path_to_bert = None
else:
path_to_bert = os.path.normpath(args.path_to_bert)
if len(path_to_bert) == 0:
            raise ValueError('The path to the BERT model cannot be empty!')
if not os.path.isdir(path_to_bert):
raise ValueError('The directory `{0}` does not exist!'.format(path_to_bert))
BERT_NER.PATH_TO_BERT = path_to_bert
collection3_dir_name = None if args.collection_data_name is None else os.path.normpath(args.collection_data_name)
devset_dir_name = os.path.join(os.path.normpath(args.data_name), 'devset')
testset_dir_name = os.path.join(os.path.normpath(args.data_name), 'testset')
if args.random_seed is not None:
set_total_seed(args.random_seed)
if args.samples_number is None:
samples_number = 0
else:
samples_number = args.samples_number
if samples_number < 1:
raise ValueError('The samples number in training sub-set is wrong! It must be a positive integer value.')
recognizer = train(factrueval2016_devset_dir=devset_dir_name, bert_will_be_tuned=args.finetune_bert,
use_lang_features=args.lang_features, use_shapes=args.shapes, max_epochs=args.max_epochs,
patience=args.patience, batch_size=args.batch_size, gpu_memory_frac=args.gpu_memory_frac,
model_name=os.path.normpath(args.model_name), lstm_layer_size=args.lstm_units, l2=args.l2_coeff,
split_by_paragraphs=(args.text_unit == 'paragraph'), collection3_dir=collection3_dir_name,
n_max_samples=samples_number)
recognize(factrueval2016_testset_dir=testset_dir_name, recognizer=recognizer,
results_dir=os.path.normpath(args.result_name), split_by_paragraphs=(args.text_unit == 'paragraph'))
if __name__ == '__main__':
bert_ner_logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
bert_ner_logger.addHandler(handler)
main()
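# A minimal example invocation of this script (the script file name and all paths below are
# hypothetical; the flags are the ones defined by the argument parser above):
#
#   python demo_factrueval2016.py -m models/bert_ner.pkl -d /data/factRuEval-2016 -r results/ \
#       --batch 16 --max_epochs 100 --patience 10 --text sentence --finetune_bert
#
# The recognizer is trained on the devset (or simply loaded from `-m` if that file already exists)
# and then evaluated on the testset, writing one `<base_name>.task1` file per document into `-r`.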
|
the-stack_106_14928
|
import boto3
import subprocess
# By default, since this code is run from the Organizations (management) account, that account is trusted.
# Use this variable to add another account as trusted, such as an Automation or Security account from which
# CI/CD pipelines will be run. If you do not need or have a dedicated account, just use the Organization
# account ID.
trusted_account = '<ENTER THE ACCOUNT ID YOU WILL RUN CI/CD PIPELINES FROM>'
def get_org_accounts(session):
org_client = session.client('organizations')
results = []
messages = []
paginator = org_client.get_paginator('list_accounts')
response_iterator = paginator.paginate()
for response in response_iterator:
results = results + response['Accounts']
for index in results:
messages = messages + (index['Id']).split()
return messages
def assume_role(session, aws_account_number, role_name):
resp = session.client('sts').assume_role(
RoleArn='arn:aws:iam::{}:role/{}'.format(aws_account_number,role_name),
        RoleSessionName='CDKBootstrapping')
# Storing STS credentials
creds = boto3.Session(
aws_access_key_id = resp['Credentials']['AccessKeyId'],
aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
aws_session_token = resp['Credentials']['SessionToken']
)
print("Assumed session for {}.".format(
aws_account_number
))
return creds, resp
if __name__ == '__main__':
    # Session using the current user's credentials; it is used below to assume roles in the member accounts.
org_session = boto3.Session()
accounts = get_org_accounts(org_session)
# Get the current AWS Organization Account ID
aws_org_account = boto3.client('sts').get_caller_identity()['Account']
# Used to obtain the list of AWS Regions using the EC2 service.
#ec2 = org_session.client('ec2', region_name='us-east-1')
#regions = ec2.describe_regions()['Regions']
for account in accounts:
try:
account = account.strip()
            # Session for the assumed IAM role (OrganizationAccountAccessRole or ControlTowerExecution) in the corresponding member account.
# If you have Control Tower enabled and necessary accounts enrolled, use `ControlTowerExecution`
# Under normal conditions this should be 'OrganizationAccountAccessRole'
session, resp = assume_role(org_session, account, 'OrganizationAccountAccessRole')
# Credentials of the assumed role which will be used to set environment variables.
aws_access_key_id = str(resp['Credentials']['AccessKeyId'])
aws_secret_access_key = str(resp['Credentials']['SecretAccessKey'])
aws_session_token = str(resp['Credentials']['SessionToken'])
            # Iterate CDK bootstrapping over all regions (disabled by default).
            # To bootstrap every region, un-comment the region-listing code above and the `for` loop below,
            # then indent the block under it accordingly.
region_name = 'us-east-1' # Comment this out and un-comment lines 50-51 to enable bootstrapping for all regions.
#for region in regions:
#region_name = region['RegionName']
'''
Export environment variables
* AWS_ACCESS_KEY_ID
* AWS_SECRET_ACCESS_KEY
* AWS_SESSION_TOKEN
Execute `cdk bootstrap aws://<account>/<region> --cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess --trust <CI/CD AWS Account>`
'''
command = "export AWS_ACCESS_KEY_ID=" + aws_access_key_id + ";export AWS_SECRET_ACCESS_KEY=" + aws_secret_access_key + ";export AWS_SESSION_TOKEN=" + aws_session_token + "; cdk bootstrap aws://" + account + "/" + region_name + " --cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess --trust " + trusted_account
# Execute the command in a single process shell.
aws_cli = subprocess.run(command, shell=True)
except Exception as e:
print(e)
pass
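# Note: an alternative to exporting the credentials through the shell string above is to pass them
# via the `env` argument of subprocess.run, which avoids quoting issues. A minimal sketch, shown
# here only as a commented-out illustration (not wired into the flow above):
#
#   env = dict(os.environ,
#              AWS_ACCESS_KEY_ID=aws_access_key_id,
#              AWS_SECRET_ACCESS_KEY=aws_secret_access_key,
#              AWS_SESSION_TOKEN=aws_session_token)
#   subprocess.run(
#       ["cdk", "bootstrap", "aws://{}/{}".format(account, region_name),
#        "--cloudformation-execution-policies", "arn:aws:iam::aws:policy/AdministratorAccess",
#        "--trust", trusted_account],
#       env=env)
#
# (this would additionally require `import os` at the top of the file)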
|
the-stack_106_14929
|
import logging
import os
from glob import glob
from psg_utils.downloads.utils import download_dataset
logger = logging.getLogger(__name__)
# Get path to current module file
_FILE_PATH = os.path.split(__file__)[0]
# Server base URL
_SERVER_URL = "https://physionet.org/files/challenge-2018/1.0.0"
_CHECKSUM_FILE = "{}/phys_checksums.txt".format(_FILE_PATH)
def phys_paths_func(file_name, server_url, out_dataset_folder):
"""
See psg_utils/downloads/utils.py [download_dataset]
A callable of signature func(file_name, server_url, out_dataset_folder) which returns:
1) download_url (path to fetch file from on remote system)
2) out_file_path (path to store file on local system)
"""
download_url = server_url + "/{}".format(file_name)
out_subject_folder, file_name = file_name.replace("training/", "").split("/")
out_file_path = os.path.join(out_dataset_folder, out_subject_folder, file_name)
return download_url, out_file_path
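# For illustration, a hypothetical checksum entry such as "training/tr03-0005/tr03-0005.mat"
# would be resolved by phys_paths_func to:
#   download_url  = _SERVER_URL + "/training/tr03-0005/tr03-0005.mat"
#   out_file_path = os.path.join(out_dataset_folder, "tr03-0005", "tr03-0005.mat")
# (the subject folder name above is made up; the real names come from the checksum file)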
def download_phys(out_dataset_folder, N_first=None):
""" Download the DCSM (255 records) dataset """
return download_dataset(
out_dataset_folder=out_dataset_folder,
server_url=_SERVER_URL,
checksums_path=_CHECKSUM_FILE,
paths_func=phys_paths_func,
N_first=N_first*3 if N_first else None # Three items per subject
)
def preprocess_phys_hypnograms(dataset_folder_path):
"""
Preprocesses files from the PHYS dataset.
OBS: Only processes the hypnogram (.arousal) files
Creates 1 new file in each PHYS subject dir (.ids format)
:param dataset_folder_path: path to PHYS file on local disk
:return: None
"""
import numpy as np
from wfdb.io import rdann
from psg_utils.io.file_writers import to_ids
from psg_utils.io.high_level_file_loaders import load_psg
from psg_utils.hypnogram import SparseHypnogram
from psg_utils import Defaults
# Get list of subject folders
subject_folders = glob(os.path.join(dataset_folder_path, "tr*"))
LABEL_MAP = {
'N1': "N1",
'N2': "N2",
'N3': "N3",
'R': "REM",
'W': "W",
}
for i, folder in enumerate(subject_folders):
name = os.path.split(os.path.abspath(folder))[-1]
print(f"{i+1}/{len(subject_folders)}", name)
# Get sleep-stages
edf_file = folder + f"/{name}.mat"
org_hyp_file = folder + f"/{name}.arousal"
new_hyp_file = folder + f"/{name}.arousal.st"
out_path = new_hyp_file.replace(".arousal.st", "-HYP.ids")
if os.path.exists(out_path):
print("Exists, skipping...")
continue
if os.path.exists(org_hyp_file):
os.rename(org_hyp_file, new_hyp_file)
psg, header = load_psg(edf_file, load_channels=['C3-M2'])
hyp = rdann(new_hyp_file[:-3], "st")
sample_rate = header["sample_rate"]
psg_length_sec = len(psg)/sample_rate
pairs = zip(hyp.aux_note, hyp.sample)
stages = [s for s in pairs if not ("(" in s[0] or ")" in s[0])]
stages = [(s[0], int(s[1]/sample_rate)) for s in stages]
stages, starts = map(list, zip(*stages))
if starts[0] != 0:
i = [0] + starts
s = ["UNKNOWN"] + [LABEL_MAP[s] for s in stages]
else:
i, s = starts, stages
diff = psg_length_sec - i[-1]
assert diff >= 0
d = list(np.diff(i)) + [(diff//30) * 30]
SparseHypnogram(i, d, [Defaults.get_stage_string_to_class_int()[s_] for s_ in s], 30)
to_ids(i, d, s, out_path)
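# A small worked example of the hypnogram conversion above (values are made up): if the annotation
# yields stages ['W', 'N1', 'N2'] at samples [0, 6000, 12000] with a sample rate of 200 Hz, the
# starts become [0, 30, 60] seconds, the durations are np.diff([0, 30, 60]) -> [30, 30] plus the
# 30-second-rounded remainder of the recording for the last stage, and those (inits, durations,
# stages) triplets are what get validated by SparseHypnogram and written out via to_ids.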
|
the-stack_106_14930
|
# # -*- coding: utf-8 -*-
# """
# Created on Wed May 19 12:48:10 2021
# @author: fcosta
# """
# '''
# This works for a CW monostatic coherent lidar
# '''
from Utils.Qlunc_ImportModules import *
from Utils import Qlunc_Help_standAlone as SA
from Utils import Scanning_patterns as SP
from Utils import Qlunc_Plotting as QPlot
def UQ_Probe_volume (Lidar, Atmospheric_Scenario,cts,Qlunc_yaml_inputs,param1):
# Liqin jin model
if Qlunc_yaml_inputs['Components']['Lidar general inputs']['Type']=="CW":
        # The focus distance varies with the focal length and with the distance between the fiber end and the telescope lens, so the probe length also varies with that distance.
        # Calculating the focus distance as a function of the distance between the fiber end and the telescope lens:
r = Qlunc_yaml_inputs['Components']['Telescope']['Focal length']
a = Qlunc_yaml_inputs['Components']['Telescope']['Fiber-lens distance']
a0 = Qlunc_yaml_inputs['Components']['Telescope']['Fiber-lens offset']
wavelength = Qlunc_yaml_inputs['Components']['Laser']['Wavelength']
rad_eff = Qlunc_yaml_inputs['Components']['Telescope']['Effective radius telescope']
Unc_r = Qlunc_yaml_inputs['Components']['Telescope']['stdv Focal length']
Unc_a = Qlunc_yaml_inputs['Components']['Telescope']['stdv Fiber-lens distance']
Unc_a0 = Qlunc_yaml_inputs['Components']['Telescope']['stdv Fiber-lens offset']
Unc_wavelength = Qlunc_yaml_inputs['Components']['Laser']['stdv Wavelength']
Unc_eff_radius_telescope = Qlunc_yaml_inputs['Components']['Telescope']['stdv Effective radius telescope']
# Focus distance
        # focus_distance = 1/((1/r)-(1/(a+a0))) # These parameters are hard for users to know. Maybe it is easier to put just a simple parameter representing the focus distance (*)
focus_distance = param1 #(*)
# Uncertainty in focus distance
        # Unc_focus_distance = np.sqrt((((1/r**2)/(((1/r)-(1/(a+a0)))**2))*Unc_r)**2 + (((1/(a+a0)**2)/(((1/r)-(1/(a+a0)))**2))*Unc_a)**2 + (((1/(a+a0)**2)/(((1/r)-(1/(a+a0)))**2))*Unc_a0)**2) # These parameters are hard for users to know. Maybe it is easier to put just a simple parameter representing the focus distance (**)
Unc_focus_distance = Qlunc_yaml_inputs['Components']['Scanner']['stdv focus distance'] #(**)
# Rayleigh length variation due to focus_distance variations (due to the distance between fiber-end and telescope lens)
# zr= (wavelength*(ind_focusdist**2))/(np.pi*(rad_eff)**2)# Rayleigh length (considered as the probe length) # half-width of the weighting function --> FWHM = 2*zr
# Unc_zr= np.sqrt(((ind_focusdist**2)*Unc_wavelength/(np.pi*rad_eff))**2 + ((2*wavelength*ind_focusdist*Unc_focus_distance)/(np.pi*rad_eff**2))**2 + ((2*wavelength*(ind_focusdist**2)*Unc_eff_radius_telescope)/(np.pi*rad_eff**3))**2)
Rayleigh_length=[]
Unc_Rayleigh_length=[]
for ind_focusdist in focus_distance:
Rayleigh_length.append( (wavelength*(ind_focusdist**2))/(np.pi*(rad_eff)**2))# Rayleigh length (considered as the probe length) # half-width of the weighting function --> FWHM = 2*zr
# Uncertainty rayleigh length
Unc_Rayleigh_length.append( np.sqrt(((ind_focusdist**2)*Unc_wavelength/(np.pi*rad_eff))**2 + ((2*wavelength*ind_focusdist*Unc_focus_distance)/(np.pi*rad_eff**2))**2 + ((2*wavelength*(ind_focusdist**2)*Unc_eff_radius_telescope)/(np.pi*rad_eff**3))**2))
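        # For reference, the uncertainty computed in the loop above is first-order error propagation
        # of the CW Rayleigh length
        #     z_R = wavelength * d**2 / (pi * rad_eff**2)
        # with respect to the wavelength, the focus distance d and the effective telescope radius:
        #     u(z_R)**2 = (d**2 / (pi * rad_eff) * u_wavelength)**2
        #               + (2 * wavelength * d / (pi * rad_eff**2) * u_d)**2
        #               + (2 * wavelength * d**2 / (pi * rad_eff**3) * u_rad_eff)**2
        # assuming the three input uncertainties are uncorrelated.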
# Probe volume:
#Probe_volume = np.pi*(Qlunc_yaml_inputs['Probe Volume']['Output beam radius']**2)*((4*(focus_distance**2)*Qlunc_yaml_inputs['Components']['Laser']['Wavelength'])/(Telescope_aperture)) # based on Marijn notes
#VolCil = np.pi*(Qlunc_yaml_inputs['Probe Volume']['Output beam radius']**2)*fwhm # calculated based on the fwhm
# vol_zr = np.pi*(Qlunc_yaml_inputs['Components']['Telescope']['Output beam radius']**2)*(2*zr) # based on the definition of Rayleigh length in Liqin Jin notes (Focus calibration formula)
# Lorentzian weighting function:
# phi = (Qlunc_yaml_inputs['Probe Volume']['Extinction coeficient']/np.pi)*(1/((1**2)+(36.55-focus_distance)**2))
# phi = (Qlunc_yaml_inputs['Probe Volume']['Extinction coeficient']/np.pi)*(1/((1**2)+(focus_distance)**2))
# F = (lamb/np.pi)/(a1**2+lamb**2) # Lorentzian Weighting function
elif Qlunc_yaml_inputs['Components']['Lidar general inputs']['Type']=="Pulsed":
# Variables
        # pdb.set_trace()  # debugging breakpoint disabled
tau_meas = Qlunc_yaml_inputs['Components']['Telescope']['Gate length']
tau = Qlunc_yaml_inputs['Components']['Telescope']['Pulse shape']
stdv_tau_meas = Lidar.optics.telescope.stdv_tau_meas
stdv_tau = Lidar.optics.telescope.stdv_tau
# Definition from "LEOSPHERE pulsed lidar principles" --> Theory from Banakh and Smalikho 1994: “Estimation of the turbulence energy dissipation rate from the pulsed Doppler lidar data”.
Rayleigh_length = (cts.c*tau_meas)/(2*math.erf(np.sqrt(np.log(2))*(tau_meas)/(tau)))/2
dR_dtauMeas = (cts.c*2*math.erf(np.sqrt(np.log(2))*tau_meas/tau)-cts.c*tau_meas*2*(2/np.sqrt(np.pi))*np.exp(-np.sqrt(np.log(2))*tau_meas/tau)*np.sqrt(np.log(2))/tau)/((2*math.erf(np.sqrt(np.log(2))*(tau_meas)/(tau)))**2)
dR_dtau = (-cts.c*tau_meas*2*(2/np.sqrt(np.pi))*np.exp(-np.sqrt(np.log(2))*(tau_meas/tau))*(-np.sqrt(np.log(2))*tau_meas/(tau**2)))/((2*math.erf(np.sqrt(np.log(2))*tau_meas/tau))**2)
Unc_Rayleigh_length = np.sqrt((dR_dtauMeas*stdv_tau_meas)**2+(dR_dtau*stdv_tau)**2)
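        # For reference, the expression above corresponds to
        #     R = c * tau_meas / (4 * erf(sqrt(ln 2) * tau_meas / tau))
        # and Unc_Rayleigh_length is first-order error propagation of R with respect to the gate
        # length tau_meas and the pulse duration tau (assumed uncorrelated), using the partial
        # derivatives dR_dtauMeas and dR_dtau computed above.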
# focus_distance = random.randrange(1,500,1)
# # Weighting function calculation
# WeightingFunction=[]
# offset = 500
# focus_distance = 0
# z=np.linspace(focus_distance-offset,focus_distance+offset,1001)
# for ind_z in z:
# WeightingFunction.append((1/(tau_meas*cts.c))*(math.erf((4*np.sqrt(np.log(2))*(ind_z-focus_distance)/((cts.c*tau)))+(np.sqrt(np.log(2)))*tau_meas/tau)-math.erf((4*np.sqrt(np.log(2))*(ind_z-focus_distance)/((cts.c*tau)))-(np.sqrt(np.log(2)))*tau_meas/tau)))
# # find the two crossing points
# hmx = SA.half_max_x(z,WeightingFunction)
# # print the answer
# Rayleigh_length = (hmx[1] - hmx[0])/2
# print("Rayleigh distance1:{:.3f}".format(zr))
# print("Rayleigh distance2:{:.3f}".format(Rayleigh_length))
# pdb.set_trace()
# print("Zr uncertainty:{:.3f}".format(Unc_zr))
    # Saving the Rayleigh length to a file in ./metadata to be read by MATLAB
if os.path.isfile('./metadata/rayleigh_distance.txt'):
os.remove('./metadata/rayleigh_distance.txt')
file=open('./metadata/rayleigh_distance.txt','w')
file.write(repr(Rayleigh_length))
file.close()
else:
file=open('./metadata/rayleigh_distance.txt','w')
file.write(repr(Rayleigh_length))
file.close()
Final_Output_UQ_ProbeVolume = {'Rayleigh Length':Rayleigh_length,'Rayleigh Length uncertainty':Unc_Rayleigh_length}
# pdb.set_trace()
# Plotting:
QPlot.plotting(Lidar,Qlunc_yaml_inputs,Final_Output_UQ_ProbeVolume,False,False,Qlunc_yaml_inputs['Flags']['Probe Volume parameters'],False)
return Final_Output_UQ_ProbeVolume
|
the-stack_106_14931
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 4 10:03:35 2019
@author: hamil
"""
import numpy as np
import matplotlib.pyplot as plt
def up_harmonic(value_n):
H_up = 0.0
summ_array1 = []
new_x = value_n + 1
x_array1 = np.arange(1,new_x)
for each_value in x_array1:
numm1 = 1/each_value
H_up = H_up + numm1
summ_array1.append(H_up)
return H_up, summ_array1
#test = up_harmonic(20)
#up_sum = test[0]
#up_values = test[1]
#print("Sum up value is:", test[0])
#print("The Up values are:", up_values)
def down_harmonic(value_n):
H_down = 0.0
summ_array2 = []
new_x = value_n + 1
x_array = np.arange(1,new_x)
x_array2 = x_array[::-1]
for each_value in x_array2:
numm2 = 1/each_value
H_down = H_down + numm2
summ_array2.append(H_down)
return H_down, summ_array2
#test1 = down_harmonic(20)
#down_sum = test1[0]
#down_values = test1[1]
#print("Sum down value is:", test1[0])
#print("The down values are:", down_values)
fraction_array = []
x_values = np.arange(1,50)
for new_value in x_values:
test1 = up_harmonic(new_value)
test2 = down_harmonic(new_value)
up_sum = test1[0]
down_sum = test2[0]
up_array = test1[1]
down_array = test2[1]
print("The up sum is:", up_sum)
print("The down sum is:", down_sum)
sub = up_sum - down_sum
abs_add = np.abs(up_sum) + np.abs(down_sum)
fraction = sub / abs_add
fraction_array.append(fraction)
plt.plot(x_values, fraction_array)
plt.show()  # display the figure when the script is run directly
# When comparing the sum-up and sum-down values, the sum-down result is more precise. A floating-point
# number can only hold a limited number of significant digits, so once the running sum becomes large,
# adding a much smaller term no longer changes the stored value: the small contribution is simply dropped.
# With the sum-down approach the small terms are accumulated first, so they can still contribute before
# the larger terms are added.
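# A quick way to see this absorption effect directly (an illustrative snippet, not part of the
# computation above): in double precision, adding a term that is too small relative to the running
# sum changes nothing, e.g.
#
#   >>> big = 1.0e16
#   >>> big + 1.0 - big
#   0.0
#
# whereas accumulating the small terms first (the sum-down order) lets them contribute before the
# large terms dominate.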
|
the-stack_106_14934
|
"""
Support for MySensors lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mysensors/
"""
import logging
from homeassistant.components import mysensors
from homeassistant.components.light import (ATTR_BRIGHTNESS, ATTR_RGB_COLOR,
Light)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.util.color import rgb_hex_to_rgb_list
_LOGGER = logging.getLogger(__name__)
ATTR_RGB_WHITE = 'rgb_white'
ATTR_VALUE = 'value'
ATTR_VALUE_TYPE = 'value_type'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the mysensors platform for sensors."""
# Only act if loaded via mysensors by discovery event.
# Otherwise gateway is not setup.
if discovery_info is None:
return
for gateway in mysensors.GATEWAYS.values():
# Define the S_TYPES and V_TYPES that the platform should handle as
# states. Map them in a dict of lists.
pres = gateway.const.Presentation
set_req = gateway.const.SetReq
map_sv_types = {
pres.S_DIMMER: [set_req.V_DIMMER],
}
device_class_map = {
pres.S_DIMMER: MySensorsLightDimmer,
}
if float(gateway.version) >= 1.5:
# Add V_RGBW when rgb_white is implemented in the frontend
map_sv_types.update({
pres.S_RGB_LIGHT: [set_req.V_RGB],
})
map_sv_types[pres.S_DIMMER].append(set_req.V_PERCENTAGE)
device_class_map.update({
pres.S_RGB_LIGHT: MySensorsLightRGB,
})
devices = {}
gateway.platform_callbacks.append(mysensors.pf_callback_factory(
map_sv_types, devices, add_devices, device_class_map))
class MySensorsLight(mysensors.MySensorsDeviceEntity, Light):
"""Represent the value of a MySensors Light child node."""
def __init__(self, *args):
"""Setup instance attributes."""
mysensors.MySensorsDeviceEntity.__init__(self, *args)
self._state = None
self._brightness = None
self._rgb = None
self._white = None
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def rgb_color(self):
"""Return the RGB color value [int, int, int]."""
return self._rgb
@property
def rgb_white(self): # not implemented in the frontend yet
"""Return the white value in RGBW, value between 0..255."""
return self._white
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return self.gateway.optimistic
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def _turn_on_light(self):
"""Turn on light child device."""
set_req = self.gateway.const.SetReq
if not self._state and set_req.V_LIGHT in self._values:
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_LIGHT, 1)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._state = True
self.update_ha_state()
def _turn_on_dimmer(self, **kwargs):
"""Turn on dimmer child device."""
set_req = self.gateway.const.SetReq
brightness = self._brightness
if ATTR_BRIGHTNESS in kwargs and \
kwargs[ATTR_BRIGHTNESS] != self._brightness:
brightness = kwargs[ATTR_BRIGHTNESS]
percent = round(100 * brightness / 255)
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_DIMMER, percent)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._brightness = brightness
self.update_ha_state()
def _turn_on_rgb_and_w(self, hex_template, **kwargs):
"""Turn on RGB or RGBW child device."""
rgb = self._rgb
white = self._white
if ATTR_RGB_WHITE in kwargs and \
kwargs[ATTR_RGB_WHITE] != self._white:
white = kwargs[ATTR_RGB_WHITE]
if ATTR_RGB_COLOR in kwargs and \
kwargs[ATTR_RGB_COLOR] != self._rgb:
rgb = kwargs[ATTR_RGB_COLOR]
if white is not None and hex_template == '%02x%02x%02x%02x':
rgb.append(white)
hex_color = hex_template % tuple(rgb)
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, hex_color)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._rgb = rgb
self._white = white
self.update_ha_state()
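        # For example (hypothetical values): with rgb == [255, 0, 128] the RGB template
        # '%02x%02x%02x' yields 'ff0080', and with an extra white value of 16 the RGBW template
        # '%02x%02x%02x%02x' yields 'ff008010', which is the hex string sent to the node above.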
def _turn_off_light(self, value_type=None, value=None):
"""Turn off light child device."""
set_req = self.gateway.const.SetReq
value_type = (
set_req.V_LIGHT
if set_req.V_LIGHT in self._values else value_type)
value = 0 if set_req.V_LIGHT in self._values else value
return {ATTR_VALUE_TYPE: value_type, ATTR_VALUE: value}
def _turn_off_dimmer(self, value_type=None, value=None):
"""Turn off dimmer child device."""
set_req = self.gateway.const.SetReq
value_type = (
set_req.V_DIMMER
if set_req.V_DIMMER in self._values else value_type)
value = 0 if set_req.V_DIMMER in self._values else value
return {ATTR_VALUE_TYPE: value_type, ATTR_VALUE: value}
def _turn_off_rgb_or_w(self, value_type=None, value=None):
"""Turn off RGB or RGBW child device."""
if float(self.gateway.version) >= 1.5:
set_req = self.gateway.const.SetReq
if self.value_type == set_req.V_RGB:
value = '000000'
elif self.value_type == set_req.V_RGBW:
value = '00000000'
return {ATTR_VALUE_TYPE: self.value_type, ATTR_VALUE: value}
def _turn_off_main(self, value_type=None, value=None):
"""Turn the device off."""
if value_type is None or value is None:
_LOGGER.warning(
'%s: value_type %s, value = %s, '
'None is not valid argument when setting child value'
'', self._name, value_type, value)
return
self.gateway.set_child_value(
self.node_id, self.child_id, value_type, value)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._state = False
self.update_ha_state()
def _update_light(self):
"""Update the controller with values from light child."""
value_type = self.gateway.const.SetReq.V_LIGHT
if value_type in self._values:
self._values[value_type] = (
STATE_ON if int(self._values[value_type]) == 1 else STATE_OFF)
self._state = self._values[value_type] == STATE_ON
def _update_dimmer(self):
"""Update the controller with values from dimmer child."""
set_req = self.gateway.const.SetReq
value_type = set_req.V_DIMMER
if value_type in self._values:
self._brightness = round(255 * int(self._values[value_type]) / 100)
if self._brightness == 0:
self._state = False
if set_req.V_LIGHT not in self._values:
self._state = self._brightness > 0
def _update_rgb_or_w(self):
"""Update the controller with values from RGB or RGBW child."""
set_req = self.gateway.const.SetReq
value = self._values[self.value_type]
color_list = rgb_hex_to_rgb_list(value)
if set_req.V_LIGHT not in self._values and \
set_req.V_DIMMER not in self._values:
self._state = max(color_list) > 0
if len(color_list) > 3:
self._white = color_list.pop()
self._rgb = color_list
def _update_main(self):
"""Update the controller with the latest value from a sensor."""
node = self.gateway.sensors[self.node_id]
child = node.children[self.child_id]
self.battery_level = node.battery_level
for value_type, value in child.values.items():
_LOGGER.debug(
'%s: value_type %s, value = %s', self._name, value_type, value)
self._values[value_type] = value
class MySensorsLightDimmer(MySensorsLight):
"""Dimmer child class to MySensorsLight."""
def turn_on(self, **kwargs):
"""Turn the device on."""
self._turn_on_light()
self._turn_on_dimmer(**kwargs)
def turn_off(self, **kwargs):
"""Turn the device off."""
ret = self._turn_off_dimmer()
ret = self._turn_off_light(
value_type=ret[ATTR_VALUE_TYPE], value=ret[ATTR_VALUE])
self._turn_off_main(
value_type=ret[ATTR_VALUE_TYPE], value=ret[ATTR_VALUE])
def update(self):
"""Update the controller with the latest value from a sensor."""
self._update_main()
self._update_light()
self._update_dimmer()
class MySensorsLightRGB(MySensorsLight):
"""RGB child class to MySensorsLight."""
def turn_on(self, **kwargs):
"""Turn the device on."""
self._turn_on_light()
self._turn_on_dimmer(**kwargs)
self._turn_on_rgb_and_w('%02x%02x%02x', **kwargs)
def turn_off(self, **kwargs):
"""Turn the device off."""
ret = self._turn_off_rgb_or_w()
ret = self._turn_off_dimmer(
value_type=ret[ATTR_VALUE_TYPE], value=ret[ATTR_VALUE])
ret = self._turn_off_light(
value_type=ret[ATTR_VALUE_TYPE], value=ret[ATTR_VALUE])
self._turn_off_main(
value_type=ret[ATTR_VALUE_TYPE], value=ret[ATTR_VALUE])
def update(self):
"""Update the controller with the latest value from a sensor."""
self._update_main()
self._update_light()
self._update_dimmer()
self._update_rgb_or_w()
class MySensorsLightRGBW(MySensorsLightRGB):
"""RGBW child class to MySensorsLightRGB."""
def turn_on(self, **kwargs):
"""Turn the device on."""
self._turn_on_light()
self._turn_on_dimmer(**kwargs)
self._turn_on_rgb_and_w('%02x%02x%02x%02x', **kwargs)
|
the-stack_106_14935
|
from conans import ConanFile, CMake, tools
required_conan_version = ">=1.33.0"
class GainputConan(ConanFile):
name = "gainput"
description = "Cross-platform C++ input library supporting gamepads, keyboard, mouse, touch."
license = "MIT"
topics = ("conan", "gainput", "input", "keyboard", "gamepad", "mouse", "multi-touch")
homepage = "https://gainput.johanneskuhlmann.de"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
if self.settings.os == "Linux":
self.requires("xorg/system")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["GAINPUT_SAMPLES"] = False
self._cmake.definitions["GAINPUT_TESTS"] = False
self._cmake.definitions["GAINPUT_BUILD_SHARED"] = self.options.shared
self._cmake.definitions["GAINPUT_BUILD_STATIC"] = not self.options.shared
self._cmake.configure()
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
suffix = "{}{}".format("" if self.options.shared else "static",
"-d" if self.settings.build_type == "Debug" else "")
self.cpp_info.libs = ["gainput" + suffix]
if self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["xinput", "ws2_32"])
elif self.settings.os == "Android":
self.cpp_info.system_libs.extend(["native_app_glue", "log", "android"])
elif tools.is_apple_os(self.settings.os):
self.cpp_info.frameworks.extend(["Foundation", "IOKit", "GameController"])
if self.settings.os == "iOS":
self.cpp_info.frameworks.extend(["UIKit", "CoreMotion"])
else:
self.cpp_info.frameworks.append("AppKit")
|
the-stack_106_14938
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create an API definition by interpreting a discovery document.
This module interprets a discovery document to create a tree of classes which
represent the API structure in a way that is useful for generating a library.
For each discovery element (e.g. schemas, resources, methods, ...) there is
a class to represent it which is directly usable in the templates. The
instances of those classes are annotated with extra variables for use
in the template which are language specific.
The current way to make use of this class is to create a programming language
specific subclass of Api, which adds annotations and template variables
appropriate for that language.
TODO(user): Refactor this so that the API can be loaded first, then annotated.
"""
__author__ = '[email protected] (Tony Aiuto)'
import collections
import json
import logging
import operator
import urlparse
from googleapis.codegen import data_types
from googleapis.codegen import template_objects
from googleapis.codegen import utilities
from googleapis.codegen.utilities import convert_size
_ADDITIONAL_PROPERTIES = 'additionalProperties'
_DEFAULT_SERVICE_HOST = 'www.googleapis.com'
_DEFAULT_OWNER_DOMAIN = 'google.com'
_DEFAULT_OWNER_NAME = 'Google'
_RECOGNIZED_GOOGLE_DOMAINS = (
'google.com',
'googleapis.com',
'googleplex.com'
)
_LOGGER = logging.getLogger('codegen')
class ApiException(Exception):
"""The base class for all API parsing exceptions."""
def __init__(self, reason, def_dict=None):
"""Create an exception.
Args:
reason: (str) The human readable explanation of this exception.
def_dict: (dict) The discovery dictionary we failed on.
"""
super(ApiException, self).__init__()
self._reason = reason
self._def_dict = def_dict
def __str__(self):
if self._def_dict:
return '%s: %s' % (self._reason, self._def_dict)
return self._reason
class Api(template_objects.CodeObject):
"""An API definition.
This class holds a discovery centric definition of an API. It contains
members such as "resources" and "schemas" which relate directly to discovery
concepts. It defines several properties that can be used in code generation
templates:
name: The API name.
version: The API version.
versionNoDots: The API version with all '.' characters replaced with '_'.
This is typically used in class names.
versionNoDash: The API version with all '-' characters replaced with '_'.
This is typically used in file names where '-' has meaning.
authScopes: The list of the OAuth scopes used by this API.
dataWrapper: True if the API definition contains the 'dataWrapper' feature.
methods: The list of top level API methods.
models: The list of API data models, both from the schema section of
discovery and from anonymous objects defined in method definitions.
parameters: The list of global method parameters (applicable to all methods)
resources: The list of API resources
"""
def __init__(self, discovery_doc, language=None):
super(Api, self).__init__(discovery_doc, self,
wire_name=discovery_doc['name'])
name = self.values['name']
self._validator.ValidateApiName(name)
if name != 'freebase':
self._validator.ValidateApiVersion(self.values['version'])
canonical_name = self.values.get('canonicalName', name)
self._class_name = self.ToClassName(canonical_name, self)
# Guard against language implementor not taking care of spaces
self._class_name = self._class_name.replace(' ', '')
self._NormalizeOwnerInformation()
self._language = language
self._template_dir = None
self._surface_features = {}
self._schemas = {}
self._methods_by_name = {}
self._all_methods = []
self.SetTemplateValue('className', self._class_name)
self.SetTemplateValue('versionNoDots',
self.values['version'].replace('.', '_'))
self.SetTemplateValue('versionNoDash',
self.values['version'].replace('-', '_'))
self.SetTemplateValue('dataWrapper',
'dataWrapper' in discovery_doc.get('features', []))
self.values.setdefault('title', name)
if not self.values.get('revision'):
self.values['revision'] = 'snapshot'
self._NormalizeUrlComponents()
# Information for variant subtypes, a dictionary of the format:
#
# { 'wireName': {'discriminant': discriminant, 'value': value,
# 'schema': schema},
# ... }
#
# ... where wireName is the name of variant subtypes, discriminant
# the field name of the discriminant, value the discriminant value
# for this variant, and schema the base schema.
#
# This information cannot be stored in the referred schema at
# reading time because at the time we read it from the base
# schema, the referenced variant schemas may not yet be loaded. So
# we first store it here, and after all schemas have been loaded,
# update the schema template properties.
self._variant_info = {}
# Build data types and methods
self._SetupModules()
self.void_type = data_types.Void(self)
self._BuildSchemaDefinitions()
self._BuildResourceDefinitions()
self.SetTemplateValue('resources', self._resources)
# Make data models part of the api dictionary
self.SetTemplateValue('models', self.ModelClasses())
# Replace methods dict with Methods
self._top_level_methods = []
method_dict = self.values.get('methods') or {}
for name in sorted(method_dict):
self._top_level_methods.append(Method(self, name, method_dict[name]))
self.SetTemplateValue('methods', self._top_level_methods)
# Global parameters
self._parameters = []
param_dict = self.values.get('parameters') or {}
for name in sorted(param_dict):
parameter = Parameter(self, name, param_dict[name], self)
self._parameters.append(parameter)
if name == 'alt':
self.SetTemplateValue('alt', parameter)
self.SetTemplateValue('parameters', self._parameters)
# Auth scopes
self._authscopes = []
if (self.values.get('auth') and
self.values['auth'].get('oauth2') and
self.values['auth']['oauth2'].get('scopes')):
for value, auth_dict in sorted(
self.values['auth']['oauth2']['scopes'].iteritems()):
self._authscopes.append(AuthScope(self, value, auth_dict))
self.SetTemplateValue('authscopes', self._authscopes)
@property
def all_schemas(self):
"""The dictionary of all the schema objects found in the API."""
return self._schemas
def _SetupModules(self):
"""Compute and set the module(s) which this API belongs under."""
# The containing module is based on the owner information.
path = self.values.get('modulePath') or self.values.get('packagePath')
self._containing_module = template_objects.Module(
package_path=path,
owner_name=self.values.get('owner'),
owner_domain=self.values.get('ownerDomain'))
self.SetTemplateValue('containingModule', self._containing_module)
# The API is a child of the containing_module
base = self.values['name']
# TODO(user): Introduce a breaking change where we always prefer
# canonicalName.
if self.values.get('packagePath'):
base = self.values.get('canonicalName') or base
if self.values.get('version_module'):
base = '%s/%s' % (base, self.values['versionNoDots'])
self._module = template_objects.Module(package_path=base,
parent=self._containing_module)
self.SetTemplateValue('module', self._module)
# The default module for data models defined by this API.
self._model_module = template_objects.Module(package_path=None,
parent=self._module)
def _BuildResourceDefinitions(self):
"""Loop over the resources in the discovery doc and build definitions."""
self._resources = []
def_dict = self.values.get('resources') or {}
for name in sorted(def_dict):
resource = Resource(self, name, def_dict[name], parent=self)
self._resources.append(resource)
def _BuildSchemaDefinitions(self):
"""Loop over the schemas in the discovery doc and build definitions."""
schemas = self.values.get('schemas')
if schemas:
for name, def_dict in schemas.iteritems():
# Upgrade the string format schema to a dict.
if isinstance(def_dict, unicode):
def_dict = json.loads(def_dict)
self._schemas[name] = self.DataTypeFromJson(def_dict, name)
# Late bind info for variant types, and mark the discriminant
# field and value.
for name, info in self._variant_info.iteritems():
if name not in self._schemas:
# The error will be reported elsewhere
continue
schema = self._schemas[name]
for prop in schema.values.get('properties'):
if prop.values['wireName'] == info['discriminant']:
# Filter out the discriminant property as it is already
# contained in the base type.
schema.SetTemplateValue(
'properties',
[p for p in schema.values.get('properties') if p != prop])
break
else:
logging.warn("Variant schema '%s' for base schema '%s' "
"has not the expected discriminant property '%s'.",
name, info['schema'].values['wireName'],
info['discriminant'])
schema.SetTemplateValue('superClass', info['schema'].class_name)
        # TODO(user): baseType is for backwards compatibility only. It should
# have always been a different name. When the old Java generators roll
# off, remove it.
schema.SetTemplateValue('baseType', info['schema'].class_name)
schema.SetTemplateValue('discriminantValue', info['value'])
def _NormalizeOwnerInformation(self):
"""Ensure that owner and ownerDomain are set to sane values."""
owner_domain = self.get('ownerDomain', '')
if not owner_domain:
root_url = self.get('rootUrl')
if root_url:
owner_domain = urlparse.urlparse(root_url).hostname
# Normalize google domains.
if any(owner_domain.endswith(d) for d in _RECOGNIZED_GOOGLE_DOMAINS):
owner_domain = 'google.com'
if owner_domain:
owner_domain = utilities.SanitizeDomain(owner_domain)
else:
owner_domain = _DEFAULT_OWNER_DOMAIN
self.SetTemplateValue('ownerDomain', owner_domain)
if not self.get('ownerName'):
if owner_domain == _DEFAULT_OWNER_DOMAIN:
owner_name = _DEFAULT_OWNER_NAME
else:
owner_name = owner_domain.replace('.', '_')
self.SetTemplateValue('ownerName', owner_name)
if not self.get('owner'):
self.SetTemplateValue('owner', self['ownerName'].lower())
def _NormalizeUrlComponents(self):
"""Sets template values concerning the path to the service.
Sets baseUrl, basePath and serviceHost from the values given or defaults
based on what is available. Verifies them for safeness. The hierarchy of
the possible inputs is:
use rootUrl + servicePath as the best choice if it exists (v1new)
or use baseUrl (v1)
or use basePath (v1)
or restBasePath (v0.3)
or default to 'api/version'
Raises:
ValueError: if the values available are inconsistent or disallowed.
"""
# If both rootUrl and servicePath exist, they equal what is in baseUrl.
root_url = self.values.get('rootUrl')
service_path = self.values.get('servicePath')
if root_url:
# TODO(user): Revert to 'if not service_path' once oauth2 is fixed.
if service_path is None:
raise ValueError('servicePath is not defined')
base_url = root_url + service_path
else:
base_url = self.values.get('baseUrl')
# If we have a full path ('https://superman.appspot.com/kryptonite/hurts'),
# then go with that, otherwise just use the various things which might
# hint at the servicePath.
best_path = (base_url
or self.values.get('basePath')
or self.values.get('restBasePath')
or '/%s/%s/' % (self.values['name'], self.values['version']))
if best_path.find('..') >= 0:
raise ValueError('api path must not contain ".." (%s)' % best_path)
    # And let urlparse do the grunt work of normalizing and parsing.
url_parts = urlparse.urlparse(best_path)
scheme = url_parts.scheme or 'https'
service_host = url_parts.netloc or _DEFAULT_SERVICE_HOST
base_path = url_parts.path
# TODO(user): Replace use of basePath & serviceHost in templates with
# rootUrl and servicePath
self._api.SetTemplateValue('basePath', base_path)
self._api.SetTemplateValue('serviceHost',
'%s://%s' % (scheme, service_host))
if not root_url:
self._api.SetTemplateValue('rootUrl', '%s://%s/' % (scheme, service_host))
# TODO(user): Revert to 'if not service_path' once oauth2 is fixed.
if service_path is None:
self._api.SetTemplateValue('servicePath', base_path[1:])
# Make sure template writers do not revert
self._api.DeleteTemplateValue('baseUrl')
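  # For illustration (hypothetical discovery values): with rootUrl 'https://www.example.com/' and
  # servicePath 'drive/v2/', the method above sets basePath to '/drive/v2/' and serviceHost to
  # 'https://www.example.com', while rootUrl and servicePath themselves are left untouched.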
def ModelClasses(self):
"""Return all the model classes."""
ret = set(
s for s in self._schemas.itervalues()
if isinstance(s, Schema) or isinstance(s, data_types.MapDataType))
return sorted(ret, key=operator.attrgetter('class_name'))
def TopLevelModelClasses(self):
"""Return the models which are not children of another model."""
return [m for m in self.ModelClasses() if not m.parent]
def DataTypeFromJson(self, type_dict, default_name, parent=None,
wire_name=None):
"""Returns a schema object represented by a JSON Schema dictionary.
Evaluate a JSON schema dictionary and return an appropriate schema object.
If a data type is defined in-line, then create the schema dynamically. If
the schema is a $ref to another, return the previously created schema or
a lazy reference.
If the type_dict is None, a blank schema will be created.
Args:
type_dict: A dict of the form expected of a request or response member
of a method description. See the Discovery specification for more.
default_name: The unique name to give the schema if we have to create it.
parent: The schema where I was referenced. If we cannot determine that
this is a top level schema, set the parent to this.
wire_name: The name which will identify objects of this type in data on
the wire.
Returns:
A Schema object.
"""
# new or not initialized, create a fresh one
schema = Schema.Create(self, default_name, type_dict or {}, wire_name,
parent)
# Only put it in our by-name list if it is a real object
if isinstance(schema, Schema) or isinstance(schema, data_types.MapDataType):
# Use the path to the schema as a key. This means that an anonymous class
# for the 'person' property under the schema 'Activity' will have the
# unique name 'Activity.person', rather than 'ActivityPerson'.
path = '.'.join(
[a.values.get('wireName', '<anon>') for a in schema.full_path])
_LOGGER.debug('DataTypeFromJson: add %s to cache', path)
self._schemas[path] = schema
return schema
def AddMethod(self, method):
"""Add a new method to the set of all methods."""
self._all_methods.append(method)
self._methods_by_name[method.values['rpcMethod']] = method
def MethodByName(self, method_name):
"""Find a method by name.
Args:
method_name: (str) the full RPC name of a method defined by this API.
Returns:
Method object or None if not found.
"""
return self._methods_by_name.get(method_name)
def SchemaByName(self, schema_name):
"""Find a schema by name.
Args:
schema_name: (str) name of a schema defined by this API.
Returns:
Schema object or None if not found.
"""
return self._schemas.get(schema_name, None)
def SetVariantInfo(self, ref, discriminant, value, schema):
"""Sets variant info for the given reference."""
if ref in self._variant_info:
logging.warning("Base type of '%s' changed from '%s' to '%s'. "
"This is an indication that a variant schema is used "
"from multiple base schemas and may result in an "
"inconsistent model.",
                      ref, self._variant_info[ref]['schema'].wireName, schema.wireName)
self._variant_info[ref] = {'discriminant': discriminant, 'value': value,
'schema': schema}
def VisitAll(self, func):
"""Visit all nodes of an API tree and apply a function to each.
Walks a tree and calls a function on each element of it. This should be
called after the API is fully loaded.
Args:
func: (function) Method to call on each object.
"""
_LOGGER.debug('Applying function to all nodes')
func(self._containing_module)
func(self._module)
func(self._model_module)
for resource in self.values['resources']:
self._VisitResource(resource, func)
# Top level methods
for method in self.values['methods']:
self._VisitMethod(method, func)
for parameter in self.values['parameters']:
func(parameter)
func(parameter.data_type)
for schema in self._schemas.values():
self._VisitSchema(schema, func)
for scope in self.GetTemplateValue('authscopes') or []:
func(scope)
def _VisitMethod(self, method, func):
"""Visit a method, calling a function on every child.
Args:
method: (Method) The Method to visit.
func: (function) Method to call on each object.
"""
func(method)
for parameter in method.parameters:
func(parameter)
def _VisitResource(self, resource, func):
"""Visit a resource tree, calling a function on every child.
Calls down recursively to sub resources.
Args:
resource: (Resource) The Resource to visit.
func: (function) Method to call on each object.
"""
func(resource)
for method in resource.values['methods']:
self._VisitMethod(method, func)
for r in resource.values['resources']:
self._VisitResource(r, func)
def _VisitSchema(self, schema, func):
"""Visit a schema tree, calling a function on every child.
Args:
schema: (Schema) The Schema to visit.
func: (function) Method to call on each object.
"""
func(schema)
func(schema.module)
for prop in schema.values.get('properties', []):
func(prop)
for child in self.children:
func(child)
# Do not warn about unused arguments, pylint: disable=unused-argument
def ToClassName(self, s, element, element_type=None):
"""Convert a name to a suitable class name in the target language.
This default implementation camel cases the string, which is appropriate
for some languages. Subclasses are encouraged to override this.
Args:
s: (str) A rosy name of data element.
element: (object) The object we are making a class name for.
element_type: (str) Deprecated. The kind of object we are making a class
name for. E.g. resource, method, schema.
TODO(user): replace type in favor of class of element, but that will
require changing the place where we call ToClassName with no element.
Returns:
A name suitable for use as a class in the generator's target language.
"""
return utilities.CamelCase(s).replace(' ', '')
def NestedClassNameForProperty(self, name, schema):
"""Returns the class name of an object nested in a property."""
# TODO(user): This functionality belongs in the language model, but
# because of the way the api is bootstrapped, that isn't available when we
# need it. When language model is available from the start, this should be
# moved.
return '%s%s' % (schema.class_name, utilities.CamelCase(name))
@property
def class_name(self):
return self.values['className']
@property
def model_module(self):
return self._model_module
@property
def containing_module(self):
return self._containing_module
@property
def all_methods(self):
"""All the methods in the entire API."""
return self._all_methods
@property
def top_level_methods(self):
"""All the methods at the API top level (not in a resource)."""
return self._top_level_methods
class Schema(data_types.ComplexDataType):
"""The definition of a schema."""
def __init__(self, api, default_name, def_dict, parent=None):
"""Construct a Schema object from a discovery dictionary.
Schemas represent data models in the API.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
parent: (Schema) The containing schema. To be used to establish unique
names for anonymous sub-schemas.
"""
super(Schema, self).__init__(default_name, def_dict, api, parent=parent)
name = def_dict.get('id', default_name)
_LOGGER.debug('Schema(%s)', name)
# Protect against malicious discovery
template_objects.CodeObject.ValidateName(name)
self.SetTemplateValue('wireName', name)
class_name = api.ToClassName(name, self, element_type='schema')
self.SetTemplateValue('className', class_name)
self.SetTemplateValue('isSchema', True)
self.SetTemplateValue('properties', [])
self._module = (template_objects.Module.ModuleFromDictionary(self.values)
or api.model_module)
@classmethod
def Create(cls, api, default_name, def_dict, wire_name, parent=None):
"""Construct a Schema or DataType from a discovery dictionary.
Schemas contain either object declarations, simple type declarations, or
references to other Schemas. Object declarations conceptually map to real
classes. Simple types will map to a target language built-in type.
References should effectively be replaced by the referenced Schema.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
wire_name: The name which will identify objects of this type in data on
the wire. The path of wire_names can trace an item back through
discovery.
parent: (Schema) The containing schema. To be used to establish nesting
for anonymous sub-schemas.
Returns:
A Schema or DataType.
Raises:
ApiException: If the definition dict is not correct.
"""
schema_id = def_dict.get('id')
if schema_id:
name = schema_id
else:
name = default_name
class_name = api.ToClassName(name, None, element_type='schema')
_LOGGER.debug('Create: %s, parent=%s', name,
parent.values.get('wireName', '<anon>') if parent else 'None')
# Schema objects come in several patterns.
#
# 1. Simple objects
# { type: object, properties: { "foo": {schema} ... }}
#
# 2. Maps of objects
# { type: object, additionalProperties: { "foo": {inner_schema} ... }}
#
# What we want is a data type which is Map<string, {inner_schema}>
# The schema we create here is essentially a built in type which we
# don't want to generate a class for.
#
# 3. Arrays of objects
# { type: array, items: { inner_schema }}
#
# Same kind of issue as the map, but with List<{inner_schema}>
#
# 4. Primitive data types, described by type and format.
# { type: string, format: int32 }
#
# 5. Refs to another schema.
# { $ref: name }
#
# 6. Variant schemas
# { type: object, variant: { discriminant: "prop", map:
# [ { 'type_value': value, '$ref': wireName }, ... ] } }
#
# What we do is map the variant schema to a schema with a single
# property for the discriminant. To that property, we attach
# the variant map which specifies which discriminator values map
# to which schema references. We also collect variant information
# in the api so we can later associate discriminator value and
# base type with the generated variant subtypes.
if 'type' in def_dict:
# The 'type' field of the schema can either be 'array', 'object', or a
# base json type.
json_type = def_dict['type']
if json_type == 'object':
# Look for variants
variant = def_dict.get('variant')
if variant:
return cls._CreateVariantType(variant, api, name,
def_dict, wire_name, parent)
# Look for full object definition. You can have properties or
# additionalProperties, but it does not do anything useful to have
# both.
# Replace properties dict with Property's
props = def_dict.get('properties')
if props:
# This case 1 from above
return cls._CreateObjectWithProperties(props, api, name,
def_dict, wire_name, parent)
# Look for case 2
additional_props = def_dict.get(_ADDITIONAL_PROPERTIES)
if additional_props:
return cls._CreateMapType(additional_props, api, name, wire_name,
class_name, parent)
# no properties
return cls._CreateSchemaWithoutProperties(api, name, def_dict,
wire_name, parent)
elif json_type == 'array':
# Case 3: Look for array definition
return cls._CreateArrayType(api, def_dict, wire_name, class_name,
schema_id, parent)
else:
# Case 4: This must be a basic type. Create a DataType for it.
return data_types.PrimitiveDataType(def_dict, api, parent=parent)
referenced_schema = def_dict.get('$ref')
if referenced_schema:
# Case 5: Reference to another Schema.
#
      # There are 3 ways you can see '$ref' in discovery.
# 1. In a property of a schema or a method request/response, pointing
# back to a previously defined schema
# 2. As above, pointing to something not defined yet.
# 3. In a method request or response or property of a schema pointing to
# something undefined.
#
# For case 1, the schema will be in the API name to schema map.
#
# For case 2, just creating this placeholder here is fine. When the
# actual schema is hit in the loop in _BuildSchemaDefinitions, we will
      # replace the entry and DataTypeFromJson will resolve to the new def.
#
# For case 3, we will end up with a dangling reference and fail later.
schema = api.SchemaByName(referenced_schema)
# The stored "schema" may not be an instance of Schema, but rather a
# data_types.PrimitiveDataType, which has no 'wireName' value.
if schema:
_LOGGER.debug('Schema.Create: %s => %s',
default_name, schema.values.get('wireName', '<unknown>'))
return schema
return data_types.SchemaReference(referenced_schema, api)
raise ApiException('Cannot decode JSON Schema for: %s' % def_dict)
@classmethod
def _CreateObjectWithProperties(cls, props, api, name, def_dict,
wire_name, parent):
properties = []
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
for prop_name in sorted(props):
prop_dict = props[prop_name]
_LOGGER.debug(' adding prop: %s to %s', prop_name, name)
properties.append(Property(api, schema, prop_name, prop_dict))
# Some APIs express etag directly in the response, others don't.
# Knowing that we have it explicitly makes special case code generation
# easier
if prop_name == 'etag':
schema.SetTemplateValue('hasEtagProperty', True)
schema.SetTemplateValue('properties', properties)
# check for @ clashing. E.g. No 'foo' and '@foo' in the same object.
names = set()
for p in properties:
wire_name = p.GetTemplateValue('wireName')
no_at_sign = wire_name.replace('@', '')
if no_at_sign in names:
raise ApiException(
'Property name clash in schema %s:'
' %s conflicts with another property' % (name, wire_name))
names.add(no_at_sign)
return schema
@classmethod
def _CreateVariantType(cls, variant, api, name, def_dict,
wire_name, parent):
"""Creates a variant type."""
variants = collections.OrderedDict()
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
discriminant = variant['discriminant']
# Walk over variants building the variant map and register
# variant info on the api.
for variant_entry in variant['map']:
discriminant_value = variant_entry['type_value']
variant_schema = api.DataTypeFromJson(variant_entry, name, parent=parent)
variants[discriminant_value] = variant_schema
# Set variant info. We get the original wire name from the JSON properties
      # via '$ref' since it is not currently accessible via variant_schema.
api.SetVariantInfo(variant_entry.get('$ref'), discriminant,
discriminant_value, schema)
prop = Property(api, schema, discriminant, {'type': 'string'},
key_for_variants=variants)
schema.SetTemplateValue('is_variant_base', True)
schema.SetTemplateValue('discriminant', prop)
schema.SetTemplateValue('properties', [prop])
return schema
@classmethod
def _CreateMapType(cls, additional_props, api, name, wire_name,
class_name, parent):
_LOGGER.debug('Have only additionalProps for %s, dict=%s',
name, additional_props)
# TODO(user): Remove this hack at the next large breaking change
# The "Items" added to the end is unneeded and ugly. This is for
# temporary backwards compatibility. Same for _CreateArrayType().
if additional_props.get('type') == 'array':
name = '%sItem' % name
subtype_name = additional_props.get('id', name + 'Element')
# Note, since this is an interim, non class just to hold the map
# make the parent schema the parent passed in, not myself.
_LOGGER.debug('name:%s, wire_name:%s, subtype name %s', name, wire_name,
subtype_name)
# When there is a parent, we synthesize a wirename when none exists.
# Purpose is to avoid generating an extremely long class name, since we
# don't do so for other nested classes.
if parent and wire_name:
base_wire_name = wire_name + 'Element'
else:
base_wire_name = None
base_type = api.DataTypeFromJson(
additional_props, subtype_name, parent=parent,
wire_name=base_wire_name)
map_type = data_types.MapDataType(name, base_type, parent=parent,
wire_name=wire_name)
map_type.SetTemplateValue('className', class_name)
_LOGGER.debug(' %s is MapOf<string, %s>',
class_name, base_type.class_name)
return map_type
@classmethod
def _CreateSchemaWithoutProperties(cls, api, name, def_dict, wire_name,
parent):
if parent:
# code objects have __getitem__(), but not .get()
try:
pname = parent['id']
except KeyError:
pname = '<unknown>'
name_to_log = '%s.%s' % (pname, name)
else:
name_to_log = name
logging.warning('object without properties %s: %s',
name_to_log, def_dict)
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
return schema
@classmethod
def _CreateArrayType(cls, api, def_dict, wire_name,
class_name, schema_id, parent):
items = def_dict.get('items')
if not items:
raise ApiException('array without items in: %s' % def_dict)
tentative_class_name = class_name
# TODO(user): THIS IS STUPID. We should not rename things items.
# if we have an anonymous type within a map or array, it should be
# called 'Item', and let the namespacing sort it out.
if schema_id:
_LOGGER.debug('Top level schema %s is an array', class_name)
tentative_class_name += 'Items'
base_type = api.DataTypeFromJson(items, tentative_class_name,
parent=parent, wire_name=wire_name)
_LOGGER.debug(' %s is ArrayOf<%s>', class_name, base_type.class_name)
array_type = data_types.ArrayDataType(tentative_class_name, base_type,
wire_name=wire_name,
parent=parent)
if schema_id:
array_type.SetTemplateValue('className', schema_id)
return array_type
@property
def class_name(self):
return self.values['className']
@property
def anonymous(self):
return 'id' not in self.raw
@property
def properties(self):
return self.values['properties']
@property
def isContainerWrapper(self):
"""Is this schema just a simple wrapper around another container.
A schema is just a wrapper for another datatype if it is an object that
contains just a single container datatype and (optionally) a kind and
etag field. This may be used by language generators to create iterators
directly on the schema. E.g. You could have
SeriesList ret = api.GetSomeSeriesMethod(args).Execute();
for (series in ret) { ... }
rather than
for (series in ret->items) { ... }
Returns:
None or ContainerDataType
"""
return self._GetPropertyWhichWeWrap() is not None
@property
def containerProperty(self):
"""If isContainerWrapper, returns the propery which holds the container."""
return self._GetPropertyWhichWeWrap()
def _GetPropertyWhichWeWrap(self):
"""Returns the property which is the type we are wrapping."""
container_property = None
for p in self.values['properties']:
if p.values['wireName'] == 'kind' or p.values['wireName'] == 'etag':
continue
if p.data_type.GetTemplateValue('isContainer'):
if container_property:
return None
container_property = p
else:
return None
return container_property
def __str__(self):
return '<%s Schema {%s}>' % (self.values['wireName'], self.values)
class Property(template_objects.CodeObject):
"""The definition of a schema property.
Example property in the discovery schema:
"id": {"type": "string"}
"""
def __init__(self, api, schema, name, def_dict, key_for_variants=None):
"""Construct a Property.
A Property requires several elements in its template value dictionary which
are set here:
wireName: the string which labels this Property in the JSON serialization.
dataType: the DataType of this property.
Args:
api: (Api) The Api which owns this Property
schema: (Schema) the schema this Property is part of
name: (string) the name for this Property
def_dict: (dict) the JSON schema dictionary
key_for_variants: (dict) if given, maps discriminator values to
variant schemas.
Raises:
ApiException: If we have an array type without object definitions.
"""
super(Property, self).__init__(def_dict, api, wire_name=name)
self.ValidateName(name)
self.schema = schema
self._key_for_variants = key_for_variants
# TODO(user): find a better way to mark a schema as an array type
# so we can display schemas like BlogList in method responses
try:
if self.values['wireName'] == 'items' and self.values['type'] == 'array':
self.schema.values['isList'] = True
except KeyError:
pass
# If the schema value for this property defines a new object directly,
    # rather than referring to another schema, we will have to create a class
# name for it. We create a unique name by prepending the schema we are
# in to the object name.
tentative_class_name = api.NestedClassNameForProperty(name, schema)
self._data_type = api.DataTypeFromJson(def_dict, tentative_class_name,
parent=schema, wire_name=name)
@property
def code_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.code_type
@property
def safe_code_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.safe_code_type
@property
def data_type(self):
return self._data_type
@property
def is_variant_key(self):
return self._key_for_variants
@property
def variant_map(self):
return self._key_for_variants
class Resource(template_objects.CodeObject):
def __init__(self, api, name, def_dict, parent=None):
"""Creates a Resource.
Args:
api: (Api) The Api which owns this Resource.
name: (string) The discovery name of the Resource.
def_dict: (dict) The discovery dictionary for this Resource.
parent: (CodeObject) The resource containing this method, if any. Top
level resources have the API as a parent.
"""
super(Resource, self).__init__(def_dict, api, parent=parent, wire_name=name)
self.ValidateName(name)
class_name = api.ToClassName(name, self, element_type='resource')
self.SetTemplateValue('className', class_name)
# Replace methods dict with Methods
self._methods = []
method_dict = self.values.get('methods') or {}
for name in sorted(method_dict):
self._methods.append(Method(api, name, method_dict[name], parent=self))
self.SetTemplateValue('methods', self._methods)
# Get sub resources
self._resources = []
r_def_dict = self.values.get('resources') or {}
for name in sorted(r_def_dict):
r = Resource(api, name, r_def_dict[name], parent=self)
self._resources.append(r)
self.SetTemplateValue('resources', self._resources)
@property
def methods(self):
return self._methods
@property
def methods_dict(self):
return {method['wireName']: method for method in self._methods}
class AuthScope(template_objects.CodeObject):
"""The definition of an auth scope.
An AuthScope defines these template values
value: The scope url
name: a sanitized version of the value, transformed so it generally can
          be used as an identifier in code. Deprecated, use constantName
description: the description of the scope.
It also provides a template property which can be used after a language
binding is set.
constantName: A transformation of the value so it is suitable as a constant
name in the specific language.
"""
GOOGLE_PREFIX = 'https://www.googleapis.com/auth/'
HTTPS_PREFIX = 'https://'
def __init__(self, api, value, def_dict):
"""Construct an auth scope.
Args:
api: (Api) The Api which owns this Property
value: (string) The unique identifier of this scope, often a URL
def_dict: (dict) The discovery dictionary for this auth scope.
"""
super(AuthScope, self).__init__(def_dict, api, wire_name=value)
self._module = api.module
while value.endswith('/'):
value = value[:-1]
self.SetTemplateValue('value', value)
if 'description' not in self.values:
self.SetTemplateValue('description', value)
# Strip the common prefix to get a unique identifying name
if value.startswith(AuthScope.GOOGLE_PREFIX):
scope_id = value[len(AuthScope.GOOGLE_PREFIX):]
elif value.startswith(AuthScope.HTTPS_PREFIX):
      # some common scopes are just a URL
scope_id = value[len(AuthScope.HTTPS_PREFIX):]
else:
scope_id = value
# We preserve the value stripped of the most common prefixes so we can
# use it for building constantName in templates.
self.SetTemplateValue('lastPart', scope_id)
# replace all non alphanumeric with '_' to form 'name'
name = ''.join([(c if c.isalnum() else '_') for c in scope_id.upper()])
self.SetTemplateValue('name', name)
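    # Illustrative example (hypothetical scope, not taken from a real
    # discovery document): for a value of
    # 'https://www.googleapis.com/auth/books.readonly', lastPart becomes
    # 'books.readonly' and name becomes 'BOOKS_READONLY'.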
@property
def constantName(self): # pylint: disable=g-bad-name
"""Overrides default behavior of constantName."""
return self._language_model.ApplyPolicy('constant', self,
self.values['lastPart'])
class Method(template_objects.CodeObject):
"""The definition of a method."""
def __init__(self, api, name, def_dict, parent=None):
"""Construct a method.
Methods in REST discovery are inside of a resource. Note that the method
name and id are calculable from each other. id will always be equal to
api_name.resource_name[.sub_resource...].method_name. At least it should
be, as that is the transformation Discovery makes from the API definition,
which is essentially a flat list of methods, into a hierarchy of resources.
Args:
api: (Api) The Api which owns this Method.
name: (string) The discovery name of the Method.
def_dict: (dict) The discovery dictionary for this Method.
parent: (CodeObject) The resource containing this Method, if any.
Raises:
ApiException: If the httpMethod type is not one we know how to
handle.
"""
super(Method, self).__init__(def_dict, api, parent=(parent or api))
# TODO(user): Fix java templates to name vs. wireName correctly. Then
# change the __init__ to have wire_name=def_dict.get('id') or name
# then eliminate this line.
self.SetTemplateValue('wireName', name)
self.ValidateName(name)
class_name = api.ToClassName(name, self, element_type='method')
if parent and class_name == parent.values['className']:
# Some languages complain when the collection name is the same as the
# method name.
class_name = '%sRequest' % class_name
    # The name is the key of the dict defining us. The id field is what you
# have to use to call the method via RPC. That is unique, name might not be.
self.SetTemplateValue('name', name)
# Fix up very old discovery, which does not have an id.
if 'id' not in self.values:
self.values['id'] = name
self.SetTemplateValue('className', class_name)
http_method = def_dict.get('httpMethod', 'POST').upper()
self.SetTemplateValue('httpMethod', http_method)
self.SetTemplateValue('rpcMethod',
def_dict.get('rpcMethod') or def_dict['id'])
rest_path = def_dict.get('path') or def_dict.get('restPath')
# TODO(user): if rest_path is not set, raise a good error and fail fast.
self.SetTemplateValue('restPath', rest_path)
# Figure out the input and output types and schemas for this method.
expected_request = self.values.get('request')
if expected_request:
# TODO(user): RequestBody is only used if the schema is anonymous.
# When we go to nested models, this could be a nested class off the
# Method, making it unique without the silly name. Same for ResponseBody.
request_schema = api.DataTypeFromJson(expected_request,
'%sRequestContent' % name,
parent=self)
self.SetTemplateValue('requestType', request_schema)
expected_response = def_dict.get('response') or def_dict.get('returns')
if expected_response:
response_schema = api.DataTypeFromJson(expected_response,
'%sResponse' % name,
parent=self)
if self.values['wireName'] == 'get':
response_schema.values['associatedResource'] = parent
self.SetTemplateValue('responseType', response_schema)
else:
self.SetTemplateValue('responseType', api.void_type)
# Make sure we can handle this method type and do any fixups.
if http_method not in ['DELETE', 'GET', 'OPTIONS', 'PATCH', 'POST', 'PUT',
'PROPFIND', 'PROPPATCH', 'REPORT']:
raise ApiException('Unknown HTTP method: %s' % http_method, def_dict)
if http_method == 'GET':
self.SetTemplateValue('requestType', None)
# Replace parameters dict with Parameters. We try to order them by their
# position in the request path so that the generated code can track the
# more human readable definition, rather than the order of the parameters
# in the discovery doc.
order = self.values.get('parameterOrder', [])
req_parameters = []
opt_parameters = []
for name, def_dict in self.values.get('parameters', {}).iteritems():
param = Parameter(api, name, def_dict, self)
if name == 'alt':
# Treat the alt parameter differently
self.SetTemplateValue('alt', param)
continue
# Standard params are part of the generic request class
# We want to push all parameters that aren't declared inside
# parameterOrder after those that are.
if param.values['wireName'] in order:
req_parameters.append(param)
else:
# optional parameters are appended in the order they're declared.
opt_parameters.append(param)
# pylint: disable=g-long-lambda
req_parameters.sort(lambda x, y: cmp(order.index(x.values['wireName']),
order.index(y.values['wireName'])))
req_parameters.extend(opt_parameters)
self.SetTemplateValue('parameters', req_parameters)
self._InitMediaUpload(parent)
self._InitPageable(api)
api.AddMethod(self)
def _InitMediaUpload(self, parent):
media_upload = self.values.get('mediaUpload')
if media_upload:
if parent:
parent.SetTemplateValue('isMedia', True)
# Get which MIME Media Ranges are accepted for media uploads to this
# method.
accepted_mime_ranges = media_upload.get('accept')
self.SetTemplateValue('accepted_mime_ranges', accepted_mime_ranges)
max_size = media_upload.get('maxSize')
self.SetTemplateValue('max_size', max_size)
self.SetTemplateValue('max_size_bytes',
convert_size.ConvertSize(max_size))
# Find which upload protocols are supported.
upload_protocols = media_upload['protocols']
for upload_protocol in upload_protocols:
self._SetUploadTemplateValues(
upload_protocol, upload_protocols[upload_protocol])
def _InitPageable(self, api):
response_type = self.values.get('responseType')
if (response_type != api.void_type
and self.FindCodeObjectWithWireName(
response_type.values.get('properties'), 'nextPageToken')
and self.FindCodeObjectWithWireName(
self.optional_parameters, 'pageToken')):
self.SetTemplateValue('isPageable', True)
def _SetUploadTemplateValues(self, upload_protocol, protocol_dict):
"""Sets upload specific template values.
Args:
upload_protocol: (str) The name of the upload protocol. Eg: 'simple' or
'resumable'.
protocol_dict: (dict) The dictionary that corresponds to this upload
protocol. It typically contains keys like 'path', 'multipart' etc.
"""
self.SetTemplateValue('%s_upload_supported' % upload_protocol, True)
upload_path = protocol_dict.get('path')
if upload_path:
self.SetTemplateValue('%s_upload_path' % upload_protocol, upload_path)
self.SetTemplateValue('%s_upload_multipart' % upload_protocol,
protocol_dict.get('multipart', False))
@property
def media_upload_parameters(self):
return self.values.get('mediaUpload')
@property
def parameters(self):
return self.values['parameters']
@property
def optional_parameters(self):
return [p for p in self.values['parameters'] if not p.required]
@property
def required_parameters(self):
return [p for p in self.values['parameters'] if p.required]
@property
def path_parameters(self):
return [p for p in self.values['parameters'] if p.location == 'path']
@property
def query_parameters(self):
return [p for p in self.values['parameters'] if p.location == 'query']
@staticmethod
def FindCodeObjectWithWireName(things, wire_name):
"""Looks for an element having the given wire_name.
Args:
things: (array of DataType) List of parameters or properties to search.
wire_name: (str) The wireName we are looking to find.
Returns:
None or element with the given wire_name.
"""
if not things: return None
for e in things:
if e.values['wireName'] == wire_name: return e
return None
#
# Expose some properties with the naming convention we use in templates
#
def optionalParameters(self): # pylint: disable=g-bad-name
return self.optional_parameters
def requiredParameters(self): # pylint: disable=g-bad-name
return self.required_parameters
def pathParameters(self): # pylint: disable=g-bad-name
return self.path_parameters
def queryParameters(self): # pylint: disable=g-bad-name
return self.query_parameters
class Parameter(template_objects.CodeObject):
"""The definition of a method parameter."""
def __init__(self, api, name, def_dict, method):
super(Parameter, self).__init__(def_dict, api, parent=method,
wire_name=name)
self.ValidateName(name)
self.schema = api
# TODO(user): Deal with dots in names better. What we should do is:
# For x.y, x.z create a little class X, with members y and z. Then
# have the constructor method take an X.
self._repeated = self.values.get('repeated', False)
self._required = self.values.get('required', False)
self._location = (self.values.get('location')
or self.values.get('restParameterType')
or 'query')
if self.values.get('enum'):
self._data_type = data_types.Enum(def_dict,
api,
name,
self.values.get('enum'),
self.values.get('enumDescriptions'),
parent=method)
self.SetTemplateValue('enumType', self._data_type)
else:
self._data_type = data_types.PrimitiveDataType(def_dict, api, parent=self)
if self._repeated:
self._data_type = data_types.ArrayDataType(name, self._data_type,
parent=self)
@property
def repeated(self):
return self._repeated
@property
def required(self):
return self._required
@property
def location(self):
return self._location
@property
def code_type(self):
return self._data_type.code_type
@property
def data_type(self):
return self._data_type
|
the-stack_106_14939
|
#
# Copyright 2012 eNovance <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Telemetry Middleware for Swift Proxy
Configuration:
In /etc/swift/proxy-server.conf on the main pipeline add "ceilometer" just
before "proxy-server" and add the following filter in the file:
.. code-block:: python
[filter:ceilometer]
paste.filter_factory = ceilometermiddleware.swift:filter_factory
    # Some optional configuration; this allows publishing additional metadata
metadata_headers = X-TEST
# Set reseller prefix (defaults to "AUTH_" if not set)
reseller_prefix = AUTH_
# Set control_exchange to publish to.
control_exchange = swift
# Set transport url
url = rabbit://me:passwd@host:5672/virtual_host
# set messaging driver
driver = messagingv2
# set topic
topic = notifications
# skip metering of requests from listed project ids
ignore_projects = <proj_uuid>, <proj_uuid2>, <proj_name>
# Whether to send events to messaging driver in a background thread
nonblocking_notify = False
# Queue size for sending notifications in background thread (0=unlimited).
# New notifications will be discarded if the queue is full.
send_queue_size = 1000
# Logging level control
log_level = WARNING
# All keystoneauth1 options can be set to query project name for
    # ignore_projects option, here is just an example:
auth_type = password
auth_url = https://[::1]:5000
project_name = service
project_domain_name = Default
username = user
user_domain_name = Default
password = a_big_secret
interface = public
"""
import datetime
import functools
import logging
from keystoneauth1 import exceptions as ksa_exc
from keystoneauth1.loading import adapter as ksa_adapter
from keystoneauth1.loading import base as ksa_base
from keystoneauth1.loading import session as ksa_session
from keystoneclient.v3 import client as ks_client
from oslo_config import cfg
import oslo_messaging
from oslo_utils import strutils
from pycadf import event as cadf_event
from pycadf.helper import api
from pycadf import measurement as cadf_measurement
from pycadf import metric as cadf_metric
from pycadf import resource as cadf_resource
import queue
import threading
import urllib.parse as urlparse
LOG = logging.getLogger(__name__)
def list_from_csv(comma_separated_str):
if comma_separated_str:
return list(
filter(lambda x: x,
map(lambda x: x.strip(),
comma_separated_str.split(','))))
return []
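# Example behaviour of the helper above (illustrative input only):
#   list_from_csv(" a, b ,, c ") -> ['a', 'b', 'c']
#   list_from_csv(None)          -> []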
def _log_and_ignore_error(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
LOG.exception('An exception occurred processing '
'the API call: %s ', e)
return wrapper
class InputProxy(object):
"""File-like object that counts bytes read.
To be swapped in for wsgi.input for accounting purposes.
Borrowed from swift.common.utils. Duplicated here to avoid
dependency on swift package.
"""
def __init__(self, wsgi_input):
self.wsgi_input = wsgi_input
self.bytes_received = 0
def read(self, *args, **kwargs):
"""Pass read request to the underlying file-like object
Add bytes read to total.
"""
chunk = self.wsgi_input.read(*args, **kwargs)
self.bytes_received += len(chunk)
return chunk
def readline(self, *args, **kwargs):
"""Pass readline request to the underlying file-like object
Add bytes read to total.
"""
line = self.wsgi_input.readline(*args, **kwargs)
self.bytes_received += len(line)
return line
def close(self):
close_method = getattr(self.wsgi_input, 'close', None)
if callable(close_method):
close_method()
class KeystoneClientLoader(ksa_adapter.Adapter):
"""Keystone client adapter loader.
Keystone client and Keystoneauth1 adapter take exactly the same options, so
it's safe to create a keystone client with keystoneauth adapter options.
"""
@property
def plugin_class(self):
return ks_client.Client
class Swift(object):
"""Swift middleware used for counting requests."""
event_queue = None
threadLock = threading.Lock()
DEFAULT_IGNORE_PROJECT_NAMES = ['service']
def __init__(self, app, conf):
self._app = app
self.ignore_projects = self._get_ignore_projects(conf)
oslo_messaging.set_transport_defaults(conf.get('control_exchange',
'swift'))
self._notifier = oslo_messaging.Notifier(
oslo_messaging.get_notification_transport(cfg.CONF,
url=conf.get('url')),
publisher_id='ceilometermiddleware',
driver=conf.get('driver', 'messagingv2'),
topics=[conf.get('topic', 'notifications')])
self.metadata_headers = [h.strip().replace('-', '_').lower()
for h in conf.get(
"metadata_headers",
"").split(",") if h.strip()]
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
self.reseller_prefix += '_'
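        # e.g. an illustrative configured prefix of 'AUTH' is normalized
        # to 'AUTH_' so account names can be stripped consistently later.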
LOG.setLevel(getattr(logging, conf.get('log_level', 'WARNING')))
# NOTE: If the background thread's send queue fills up, the event will
# be discarded
#
# For backward compatibility we default to False and therefore wait for
# sending to complete. This causes swift proxy to hang if the
# destination is unavailable.
self.nonblocking_notify = strutils.bool_from_string(
conf.get('nonblocking_notify', False))
# Initialize the sending queue and thread, but only once
if self.nonblocking_notify and Swift.event_queue is None:
Swift.threadLock.acquire()
if Swift.event_queue is None:
send_queue_size = int(conf.get('send_queue_size', 1000))
Swift.event_queue = queue.Queue(send_queue_size)
self.start_sender_thread()
Swift.threadLock.release()
def _get_ignore_projects(self, conf):
if 'auth_type' not in conf:
LOG.info("'auth_type' is not set assuming ignore_projects are "
"only project uuid.")
return list_from_csv(conf.get('ignore_projects'))
if 'ignore_projects' in conf:
ignore_projects = list_from_csv(conf.get('ignore_projects'))
else:
ignore_projects = self.DEFAULT_IGNORE_PROJECT_NAMES
if not ignore_projects:
return []
def opt_getter(opt):
# TODO(sileht): This method does not support deprecated opt names
val = conf.get(opt.name)
if val is None:
val = conf.get(opt.dest)
return val
auth_type = conf.get('auth_type')
plugin = ksa_base.get_plugin_loader(auth_type)
auth = plugin.load_from_options_getter(opt_getter)
session = ksa_session.Session().load_from_options_getter(
opt_getter, auth=auth)
client = KeystoneClientLoader().load_from_options_getter(
opt_getter, session=session)
projects = []
for name_or_id in ignore_projects:
projects.extend(self._get_keystone_projects(client, name_or_id))
return projects
@staticmethod
def _get_keystone_projects(client, name_or_id):
try:
return [client.projects.get(name_or_id)]
except ksa_exc.NotFound:
pass
if isinstance(name_or_id, bytes):
name_or_id = name_or_id.decode('utf-8', 'strict')
projects = client.projects.list(name=name_or_id)
if not projects:
LOG.warning("fail to find project '%s' in keystone", name_or_id)
return [p.id for p in projects]
def __call__(self, env, start_response):
start_response_args = [None]
input_proxy = InputProxy(env['wsgi.input'])
env['wsgi.input'] = input_proxy
def my_start_response(status, headers, exc_info=None):
start_response_args[0] = (status, list(headers), exc_info)
def iter_response(iterable):
iterator = iter(iterable)
try:
chunk = next(iterator)
while not chunk:
chunk = next(iterator)
except StopIteration:
chunk = ''
if start_response_args[0]:
start_response(*start_response_args[0])
bytes_sent = 0
try:
while chunk:
bytes_sent += len(chunk)
yield chunk
try:
chunk = next(iterator)
except StopIteration:
chunk = ''
finally:
close_method = getattr(iterable, 'close', None)
if callable(close_method):
close_method()
self.emit_event(env, input_proxy.bytes_received, bytes_sent)
try:
iterable = self._app(env, my_start_response)
except Exception:
self.emit_event(env, input_proxy.bytes_received, 0, 'failure')
raise
else:
return iter_response(iterable)
@_log_and_ignore_error
def emit_event(self, env, bytes_received, bytes_sent, outcome='success'):
if (
(env.get('HTTP_X_SERVICE_PROJECT_ID')
or env.get('HTTP_X_PROJECT_ID')
or env.get('HTTP_X_TENANT_ID')) in self.ignore_projects
or env.get('swift.source') is not None):
return
path = urlparse.quote(env.get('swift.backend_path', env['PATH_INFO']))
method = env['REQUEST_METHOD']
headers = {}
for header in env:
if header.startswith('HTTP_') and env[header]:
key = header[5:]
                headers[key] = str(env[header])
try:
container = obj = None
path = path.replace('/', '', 1)
version, account, remainder = path.split('/', 2)
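            # e.g. (illustrative values) a quoted path of
            # 'v1/AUTH_abc123/my-container/my-object' yields version='v1',
            # account='AUTH_abc123', remainder='my-container/my-object'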
except ValueError:
try:
version, account = path.split('/', 1)
remainder = None
except ValueError:
return
try:
if not version or not account:
raise ValueError('Invalid path: %s' % path)
if remainder:
if '/' in remainder:
container, obj = remainder.split('/', 1)
else:
container = remainder
except ValueError:
return
now = datetime.datetime.utcnow().isoformat()
resource_metadata = {
"path": path,
"version": version,
"container": container,
"object": obj,
}
for header in self.metadata_headers:
if header.upper() in headers:
resource_metadata['http_header_%s' % header] = headers.get(
header.upper())
# build object store details
target = cadf_resource.Resource(
typeURI='service/storage/object',
id=account.partition(self.reseller_prefix)[2] or path)
target.metadata = resource_metadata
target.action = method.lower()
# build user details
initiator = cadf_resource.Resource(
typeURI='service/security/account/user',
id=env.get('HTTP_X_USER_ID'))
initiator.project_id = (env.get('HTTP_X_PROJECT_ID')
or env.get('HTTP_X_TENANT_ID'))
# build notification body
event = cadf_event.Event(eventTime=now, outcome=outcome,
action=api.convert_req_action(method),
initiator=initiator, target=target,
observer=cadf_resource.Resource(id='target'))
# measurements
if bytes_received:
event.add_measurement(cadf_measurement.Measurement(
result=bytes_received,
metric=cadf_metric.Metric(
name='storage.objects.incoming.bytes', unit='B')))
if bytes_sent:
event.add_measurement(cadf_measurement.Measurement(
result=bytes_sent,
metric=cadf_metric.Metric(
name='storage.objects.outgoing.bytes', unit='B')))
if self.nonblocking_notify:
try:
Swift.event_queue.put(event, False)
if not Swift.event_sender.is_alive():
Swift.threadLock.acquire()
self.start_sender_thread()
Swift.threadLock.release()
except queue.Full:
LOG.warning('Send queue FULL: Event %s not added', event.id)
else:
Swift.send_notification(self._notifier, event)
def start_sender_thread(self):
Swift.event_sender = SendEventThread(self._notifier)
Swift.event_sender.daemon = True
Swift.event_sender.start()
@staticmethod
def send_notification(notifier, event):
notifier.info({}, 'objectstore.http.request', event.as_dict())
class SendEventThread(threading.Thread):
def __init__(self, notifier):
super(SendEventThread, self).__init__()
self.notifier = notifier
def run(self):
"""Send events without blocking swift proxy."""
while True:
try:
LOG.debug('Wait for event from send queue')
event = Swift.event_queue.get()
LOG.debug('Got event %s from queue - now send it', event.id)
Swift.send_notification(self.notifier, event)
LOG.debug('Event %s sent.', event.id)
except BaseException:
LOG.exception("SendEventThread loop exception")
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def filter(app):
return Swift(app, conf)
return filter
|
the-stack_106_14941
|
import re
import pprint
import os
from subprocess import check_output
from optparse import OptionParser
# Constants
rtl_ext_end = ".dfinish"
rtl_ext = None # e.g. '.c.270r.dfinish'. The number '270' will change with gcc version and is auto-detected by the
# function find_rtl_ext
dir = r'.' # Working directory
su_ext = '.su'
obj_ext = '.o'
manual_ext = '.msu'
read_elf_path = "arm-none-eabi-readelf.exe" # You may need to enter the full path here
stdout_encoding = "utf-8" # System dependant
class Printable:
def __repr__(self):
return "<" + type(self).__name__ + "> " + pprint.pformat(vars(self), indent=4, width=1)
class Symbol(Printable):
pass
def read_symbols(file):
from subprocess import check_output
def to_symbol(read_elf_line):
v = read_elf_line.split()
s2 = Symbol()
s2.value = int(v[1], 16)
s2.size = int(v[2])
s2.type = v[3]
s2.binding = v[4]
if len(v) >= 8:
s2.name = v[7]
else:
s2.name = ""
return s2
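    # A 'readelf -s -W' symbol line looks roughly like (illustrative):
    #     42: 08000199    24 FUNC    GLOBAL DEFAULT    2 main
    # which to_symbol() maps to value=0x08000199, size=24, type='FUNC',
    # binding='GLOBAL', name='main'.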
output = check_output([read_elf_path, "-s", "-W", file]).decode(stdout_encoding)
lines = output.splitlines()[3:]
return [to_symbol(line) for line in lines]
def read_obj(tu, call_graph):
"""
Reads the file tu.o and gets the binding (global or local) for each function
:param tu: name of the translation unit (e.g. for main.c, this would be 'main')
:param call_graph: a object used to store information about each function, results go here
"""
symbols = read_symbols(tu[0:tu.rindex(".")] + obj_ext)
for s in symbols:
if s.type == 'FUNC':
if s.binding == 'GLOBAL':
# Check for multiple declarations
if s.name in call_graph['globals'] or s.name in call_graph['locals']:
raise Exception('Multiple declarations of {}'.format(s.name))
call_graph['globals'][s.name] = {'tu': tu, 'name': s.name, 'binding': s.binding}
elif s.binding == 'LOCAL':
# Check for multiple declarations
if s.name in call_graph['locals'] and tu in call_graph['locals'][s.name]:
raise Exception('Multiple declarations of {}'.format(s.name))
if s.name not in call_graph['locals']:
call_graph['locals'][s.name] = {}
call_graph['locals'][s.name][tu] = {'tu': tu, 'name': s.name, 'binding': s.binding}
elif s.binding == 'WEAK':
if s.name in call_graph['weak']:
raise Exception('Multiple declarations of {}'.format(s.name))
call_graph['weak'][s.name] = {'tu': tu, 'name': s.name, 'binding': s.binding}
else:
raise Exception('Error Unknown Binding "{}" for symbol: {}'.format(s.binding, s.name))
def find_fxn(tu, fxn, call_graph):
"""
Looks up the dictionary associated with the function.
:param tu: The translation unit in which to look for locals functions
:param fxn: The function name
:param call_graph: a object used to store information about each function
:return: the dictionary for the given function or None
"""
if fxn in call_graph['globals']:
return call_graph['globals'][fxn]
else:
try:
return call_graph['locals'][fxn][tu]
except KeyError:
return None
def find_demangled_fxn(tu, fxn, call_graph):
"""
Looks up the dictionary associated with the function.
:param tu: The translation unit in which to look for locals functions
:param fxn: The function name
:param call_graph: a object used to store information about each function
:return: the dictionary for the given function or None
"""
for f in call_graph['globals'].values():
if 'demangledName' in f:
if f['demangledName'] == fxn:
return f
for f in call_graph['locals'].values():
if tu in f:
if 'demangledName' in f[tu]:
if f[tu]['demangledName'] == fxn:
return f[tu]
return None
def read_rtl(tu, call_graph):
"""
    Reads an RTL file, finds the callees of each function, and records whether there are calls via function pointer.
:param tu: the translation unit
:param call_graph: a object used to store information about each function, results go here
"""
# Construct A Call Graph
function = re.compile(r'^;; Function (.*) \((\S+), funcdef_no=\d+(, [a-z_]+=\d+)*\)( \([a-z ]+\))?$')
static_call = re.compile(r'^.*\(call.*"(.*)".*$')
other_call = re.compile(r'^.*call .*$')
for line_ in open(tu + rtl_ext).readlines():
m = function.match(line_)
if m:
fxn_name = m.group(2)
fxn_dict2 = find_fxn(tu, fxn_name, call_graph)
if not fxn_dict2:
pprint.pprint(call_graph)
raise Exception("Error locating function {} in {}".format(fxn_name, tu))
fxn_dict2['demangledName'] = m.group(1)
fxn_dict2['calls'] = set()
fxn_dict2['has_ptr_call'] = False
continue
m = static_call.match(line_)
if m:
fxn_dict2['calls'].add(m.group(1))
# print("Call: {0} -> {1}".format(current_fxn, m.group(1)))
continue
m = other_call.match(line_)
if m:
fxn_dict2['has_ptr_call'] = True
continue
def read_su(tu, call_graph):
"""
Reads the 'local_stack' for each function. Local stack ignores stack used by callees.
:param tu: the translation unit
:param call_graph: a object used to store information about each function, results go here
:return:
"""
su_line = re.compile(r'^([^ :]+):([\d]+):([\d]+):(.+)\t(\d+)\t(\S+)$')
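    # GCC's -fstack-usage output contains lines of the form (illustrative):
    #     main.c:12:5:main\t32\tstatic
    # where group 4 is the function name, group 5 the stack usage in bytes
    # and group 6 the qualifier (e.g. static or dynamic).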
i = 1
for line in open(tu[0:tu.rindex(".")] + su_ext).readlines():
m = su_line.match(line)
if m:
fxn = m.group(4)
fxn_dict2 = find_demangled_fxn(tu, fxn, call_graph)
fxn_dict2['local_stack'] = int(m.group(5))
else:
print("error parsing line {} in file {}".format(i, tu))
i += 1
def read_manual(file, call_graph):
"""
reads the manual stack useage files.
:param file: the file name
:param call_graph: a object used to store information about each function, results go here
"""
for line in open(file).readlines():
fxn, stack_sz = line.split()
if fxn in call_graph:
raise Exception("Redeclared Function {}".format(fxn))
call_graph['globals'][fxn] = {'wcs': int(stack_sz),
                                      'demangledName': fxn,
                                      'calls': set(),
'has_ptr_call': False,
'local_stack': int(stack_sz),
'is_manual': True,
'name': fxn,
'tu': '#MANUAL',
'binding': 'GLOBAL'}
def validate_all_data(call_graph):
"""
Check that every entry in the call graph has the following fields:
    .calls, .has_ptr_call, .local_stack, .name, .tu
"""
def validate_dict(d):
if not ('calls' in d and 'has_ptr_call' in d and 'local_stack' in d
and 'name' in d and 'tu' in d):
print("Error data is missing in fxn dictionary {}".format(d))
# Loop through every global and local function
# and resolve each call, save results in r_calls
for fxn_dict2 in call_graph['globals'].values():
validate_dict(fxn_dict2)
for l_dict in call_graph['locals'].values():
for fxn_dict2 in l_dict.values():
validate_dict(fxn_dict2)
def resolve_all_calls(call_graph):
def resolve_calls(fxn_dict2):
fxn_dict2['r_calls'] = []
fxn_dict2['unresolved_calls'] = set()
for call in fxn_dict2['calls']:
call_dict = find_fxn(fxn_dict2['tu'], call, call_graph)
if call_dict:
fxn_dict2['r_calls'].append(call_dict)
else:
fxn_dict2['unresolved_calls'].add(call)
# Loop through every global and local function
# and resolve each call, save results in r_calls
for fxn_dict in call_graph['globals'].values():
resolve_calls(fxn_dict)
for l_dict in call_graph['locals'].values():
for fxn_dict in l_dict.values():
resolve_calls(fxn_dict)
def calc_all_wcs(call_graph):
def calc_wcs(fxn_dict2, call_graph1, parents):
"""
Calculates the worst case stack for a fxn that is declared (or called from) in a given file.
:param parents: This function gets called recursively through the call graph. If a function has recursion the
tuple file, fxn will be in the parents stack and everything between the top of the stack and the matching entry
has recursion.
:return:
"""
# If the wcs is already known, then nothing to do
if 'wcs' in fxn_dict2:
return
# Check for pointer calls
if fxn_dict2['has_ptr_call']:
fxn_dict2['wcs'] = 'unbounded'
return
# Check for recursion
if fxn_dict2 in parents:
fxn_dict2['wcs'] = 'unbounded'
return
# Calculate WCS
call_max = 0
for call_dict in fxn_dict2['r_calls']:
# Calculate the WCS for the called function
parents.append(fxn_dict2)
calc_wcs(call_dict, call_graph1, parents)
parents.pop()
# If the called function is unbounded, so is this function
if call_dict['wcs'] == 'unbounded':
fxn_dict2['wcs'] = 'unbounded'
return
# Keep track of the call with the largest stack use
call_max = max(call_max, call_dict['wcs'])
# Propagate Unresolved Calls
for unresolved_call in call_dict['unresolved_calls']:
fxn_dict2['unresolved_calls'].add(unresolved_call)
fxn_dict2['wcs'] = call_max + fxn_dict2['local_stack']
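        # i.e. wcs(f) = local_stack(f) + max(wcs(callee) over resolved callees)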
# Loop through every global and local function
# and resolve each call, save results in r_calls
for fxn_dict in call_graph['globals'].values():
calc_wcs(fxn_dict, call_graph, [])
for l_dict in call_graph['locals'].values():
for fxn_dict in l_dict.values():
calc_wcs(fxn_dict, call_graph, [])
def print_all_fxns(call_graph):
def print_fxn(row_format, fxn_dict2):
unresolved = fxn_dict2['unresolved_calls']
stack = str(fxn_dict2['wcs'])
if unresolved:
unresolved_str = '({})'.format(' ,'.join(unresolved))
if stack != 'unbounded':
stack = "unbounded:" + stack
else:
unresolved_str = ''
print(row_format.format(fxn_dict2['tu'], fxn_dict2['demangledName'], stack, unresolved_str))
def get_order(val):
if val == 'unbounded':
return 1
else:
return -val
# Loop through every global and local function
# and resolve each call, save results in r_calls
d_list = []
for fxn_dict in call_graph['globals'].values():
d_list.append(fxn_dict)
for l_dict in call_graph['locals'].values():
for fxn_dict in l_dict.values():
d_list.append(fxn_dict)
d_list.sort(key=lambda item: get_order(item['wcs']))
# Calculate table width
tu_width = max(max([len(d['tu']) for d in d_list]), 16)
name_width = max(max([len(d['name']) for d in d_list]), 13)
row_format = "{:<" + str(tu_width + 2) + "} {:<" + str(name_width + 2) + "} {:>14} {:<17}"
# Print out the table
print("")
print(row_format.format('Translation Unit', 'Function Name', 'Stack', 'Unresolved Dependencies'))
for d in d_list:
print_fxn(row_format, d)
def find_rtl_ext():
# Find the rtl_extension
global rtl_ext
for root, directories, filenames in os.walk('.'):
for f in filenames:
if (f.endswith(rtl_ext_end)):
rtl_ext = f[f[:-len(rtl_ext_end)].rindex("."):]
print("rtl_ext = " + rtl_ext)
return
print("Could not find any files ending with '.dfinish'. Check that the script is being run from the correct "
"directory. Check that the code was compiled with the correct flags")
exit(-1)
def find_files():
tu = []
manual = []
all_files = []
for root, directories, filenames in os.walk(dir):
for filename in filenames:
all_files.append(os.path.join(root,filename))
files = [f for f in all_files if os.path.isfile(f) and f.endswith(rtl_ext)]
for f in files:
base = f[0:-len(rtl_ext)]
short_base = base[0:base.rindex(".")]
if short_base + su_ext in all_files and short_base + obj_ext in all_files:
tu.append(base)
print('Reading: {}{}, {}{}, {}{}'.format(base, rtl_ext, short_base, su_ext, short_base, obj_ext))
files = [f for f in all_files if os.path.isfile(f) and f.endswith(manual_ext)]
for f in files:
manual.append(f)
print('Reading: {}'.format(f))
# Print some diagnostic messages
if not tu:
print("Could not find any translation units to analyse")
exit(-1)
return tu, manual
def main():
# Find the appropriate RTL extension
find_rtl_ext()
# Find all input files
call_graph = {'locals': {}, 'globals': {}, 'weak': {}}
tu_list, manual_list = find_files()
# Read the input files
for tu in tu_list:
read_obj(tu, call_graph) # This must be first
for fxn in call_graph['weak'].values():
if fxn['name'] not in call_graph['globals'].keys():
call_graph['globals'][fxn['name']] = fxn
for tu in tu_list:
read_rtl(tu, call_graph)
for tu in tu_list:
read_su(tu, call_graph)
# Read manual files
for m in manual_list:
read_manual(m, call_graph)
# Validate Data
validate_all_data(call_graph)
# Resolve All Function Calls
resolve_all_calls(call_graph)
# Calculate Worst Case Stack For Each Function
calc_all_wcs(call_graph)
# Print A Nice Message With Each Function and the WCS
print_all_fxns(call_graph)
def ThreadStackStaticAnalysis(env):
print('Start thread stack static analysis...')
import rtconfig
    global read_elf_path
    read_elf_path = rtconfig.EXEC_PATH + r'\readelf.exe'
main()
print('\nThread stack static analysis done!')
return
|
the-stack_106_14943
|
# %%
"""
.. _plot_02_load_1D_NMR_spectrum_Kea:
=====================================
Load two 1D NMR spectra in Kea format
=====================================
In this example we demonstrate how to import two ODNP-enhanced NMR spectra, one recorded with a microwave power of 0 W (off-signal) and one with a microwave power of 2 W (on-signal). The spectra are recorded using a Magritek Kea system.
The example script has three different sections:
#. Load and Process Off-Signal
#. Load and Process On-Signal
#. Create a Figure and Plot On/Off Spectra
"""
# %%
# Make sure to start with importing DNPLab
import dnplab as dnp
# %%
# Load and Process Off-Signal
# -----------------------------
# The next section demonstrates how the FID is imported into DNPLab and processed. Processing involves removing any DC offset, followed by a 15 Hz linewidth apodization, prior to performing the Fourier transformation.
########## OFF Signal (P = 0 W) ##########
data_off = dnp.load("../../data/prospa/10mM_TEMPO_Water/1Pulse_20200929/35/data.1d")
data_off.attrs["experiment_type"] = "nmr_spectrum"
data_off = dnp.remove_background(data_off)
data_off = dnp.apodize(data_off, lw=15)
data_off = dnp.fourier_transform(data_off)
# %%
# Load and Process ON-Signal
# ----------------------------
# Importing the on-signal involves the same steps as importing the off-signal. Once processed, the result is stored in the data_on object.
########## ON Signal (P = 2 W) ##########
data_on = dnp.load("../../data/prospa/10mM_TEMPO_Water/1Pulse_20200929/51/data.1d")
data_on.attrs["experiment_type"] = "nmr_spectrum"
data_on = dnp.remove_background(data_on)
data_on = dnp.apodize(data_on, lw=15)
data_on = dnp.fourier_transform(data_on)
# %%
# Plot Microwave On/Off DNP Spectra
# ---------------------------------
# First plot spectra individually
sampleTag = "10 mM TEMPO in Water"
dnp.plt.figure()
dnp.fancy_plot(data_on, title=sampleTag + ", MW On Spectrum")
dnp.plt.figure()
dnp.fancy_plot(data_off, title=sampleTag + ", MW Off Spectrum")
dnp.plt.show()
# %%
# Next plot both spectra in the same figure
dnp.plt.figure()
dnp.fancy_plot(data_on, xlim=[-20, 20])
dnp.fancy_plot(data_off*50, xlim=[-20, 20])
dnp.plt.title(sampleTag + ", MW ON/OFF(*50)")
dnp.plt.show()
|
the-stack_106_14946
|
_base_ = [
'../../_base_/models/slowonly_r50.py', '../../_base_/default_runtime.py'
]
# model settings
model = dict(backbone=dict(in_channels=2, with_pool2=False))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt'
ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt'
ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt'
img_norm_cfg = dict(mean=[128, 128], std=[128, 128])
train_pipeline = [
dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=4,
frame_interval=16,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=4,
frame_interval=16,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=24,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
modality='Flow',
filename_tmpl='{}_{:05d}.jpg',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
modality='Flow',
filename_tmpl='{}_{:05d}.jpg',
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
modality='Flow',
filename_tmpl='{}_{:05d}.jpg',
pipeline=test_pipeline))
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(
type='SGD', lr=0.06, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
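# If the total batch size changes (different GPU count or videos_per_gpu),
# the learning rate above is typically rescaled linearly with it.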
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr=0,
warmup='linear',
warmup_by_epoch=True,
warmup_iters=34)
total_epochs = 256
# runtime settings
checkpoint_config = dict(interval=4)
work_dir = './work_dirs/slowonly_r50_4x16x1_256e_kinetics400_flow'
find_unused_parameters = False
|
the-stack_106_14947
|
#!/usr/bin/env python
# Copyright (C) 2010 Ion Torrent Systems, Inc. All Rights Reserved
import os
import sys
import time
import argparse
import progressbar
from collections import deque
fields = [
"qname",
"flag",
"rname",
"pos",
"mapq",
"cigar",
"rnext",
"pnext",
"tlen",
"seq",
"qual",
"rg",
"pg",
"md",
"nm",
"as",
"fz",
"xa",
"xs",
"xt",
]
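# For reference, a SAM alignment line is tab-separated and maps onto the
# field names above, e.g. (illustrative record, not from a real run):
#   read1  0  chr1  100  60  8M  *  0  0  ACGTACGT  IIIIIIII  RG:Z:grp1  NM:i:0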
class Record(object):
unNamedRange = (0, 10)
def __init__(self, tokens):
        self._set_attrs(tokens)
        self.fields_len = len(tokens)
"""
self.fields_len = len(tokens)
for x in xrange(len(fields)):
try:
setattr(self, fields[x], tokens[x])
except:
print tokens
print fields
print "len(tokens)=", len(tokens)
print "len(fields)=", len(fields)
sys.exit(1)
"""
def __str__(self):
return "\t".join([getattr(self, fields[x]) for x in xrange(self.fields_len)])
def _set_attrs(self, tokens):
        # The mandatory SAM columns are token indices 0..10 (qname through qual)
        for x in xrange(self.unNamedRange[1] + 1):
            setattr(self, fields[x], tokens[x])
for x in xrange(self.unNamedRange[1] + 1, len(tokens)):
fieldName = tokens[x].split(":")[0].lower()
setattr(self, fieldName, tokens[x])
def remove_hard_clip(self):
attr = getattr(self, "cigar")
found = True
while found:
found = False
i = attr.find("H")
if -1 != i:
found = True
j = i - 1
while 0 <= j and "0" <= attr[j] and attr[j] <= "9":
j = j - 1
if j < 0:
attr = attr[i + 1 : len(attr)]
elif i == len(attr) - 1:
attr = attr[0 : j + 1]
else:
attr = attr[0 : j + 1] + attr[i + 1 : len(attr)]
break
setattr(self, fields[5], attr) # cigar is field 5
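        # e.g. an illustrative cigar of '10M3H' is rewritten to '10M'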
class Sam(object):
def __init__(self, sam, full_qname):
self.buf = deque()
self.buf_l = 10000
self.sam = sam
self.record = None
self.name = None
self.fp = open(self.sam, "r")
self.full_qname = full_qname
for line in self.fp:
if line[0] == "@":
# not supporting headers for now
continue
break
self.get_next()
def get_next(self):
self.record = None
self.name = None
if 0 == len(self.buf):
ctr = 0
for line in self.fp:
line = line.rstrip()
tokens = line.split("\t")
self.buf.append(Record(tokens))
ctr += 1
if self.buf_l == ctr:
break
if 0 < len(self.buf):
self.record = self.buf.popleft()
self.name = self._get_name(self.record.qname, self.full_qname)
if None == self.record and None != self.fp:
self.close()
return self.record
def close(self):
self.fp.close()
self.fp = None
def _get_name(self, name, full_qname):
if full_qname:
return name
else:
return ":".join(name.split(":")[1:])
def diff_field(field1, field2):
"""returns true if field1 == field2"""
return field1 == field2
def main(options):
sam1 = Sam(options.sam1, options.full_qname)
sam2 = Sam(options.sam2, options.full_qname)
fields = options.fields
# fields = options.fields.split(',')
# widgets = [
# "diffing: ",
# progressbar.Percentage(),
# progressbar.Bar()
# ]
# pbar = progressbar.ProgressBar( widgets=widgets, maxval=len( sam1.records) ).start()
# widgets = ['Processed: ', progressbar.Counter(), ' records (', progressbar.Timer(), ')']
# pbar = progressbar.ProgressBar( widgets=widgets).start()
sys.stderr.write("Processing")
counter = 1
while True:
r1 = sam1.get_next()
r2 = sam2.get_next()
if None == r1 and None == r2:
break
elif None == r1 and None != r2:
sys.stderr.write("Error: early EOF on input file #1.\n")
sys.exit(1)
elif None != r1 and None == r2:
sys.stderr.write("Error: early EOF on input file #2.\n")
sys.exit(1)
if sam1.name != sam2.name:
sys.stderr.write(
"Error: read names do not match: [%s] != [%s].\n"
% (sam1.name, sam2.name)
)
sys.exit(1)
if 0 < options.min_mapq:
mapq1 = int(getattr(r1, "mapq"))
mapq2 = int(getattr(r2, "mapq"))
if mapq1 < options.min_mapq and mapq2 < options.min_mapq:
continue
if options.ignore_hard_clip:
r1.remove_hard_clip()
r2.remove_hard_clip()
diff_str = "[%s]" % (sam1.name)
for field in fields:
cont = [
False,
False,
] # var to track these 2 exceptions below to see which one failed
try:
attr1 = getattr(r1, field)
except Exception:
# print sam1.sam, sam1.name, "doesn't have the: ", field, " tag"
cont[0] = True
try:
attr2 = getattr(r2, field)
except Exception:
cont[1] = True
if cont[0] and cont[1]:
continue
elif cont[0] and not cont[1]:
print(
sam1.name,
sam1.sam,
"has the field: ",
field,
" and",
sam2.sam,
"does not",
)
continue
elif not cont[0] and cont[1]:
print(
sam1.name,
sam2.sam,
"has the field: ",
field,
" and",
sam1.sam,
"does not",
)
continue
if not diff_field(attr1, attr2):
diff_str = "%s -- %s[%s]=%s %s[%s]=%s" % (
diff_str,
sam1.sam,
field,
str(attr1),
sam2.sam,
field,
str(attr2),
)
if len(diff_str) > len(sam1.name) + 2:
print(diff_str)
# pbar.update(counter)
if 0 == (counter % 10000):
sys.stderr.write("\rProcessed %d records" % counter)
sys.stderr.flush()
counter = counter + 1
sys.stderr.write("\rProcessed %d records\n" % counter)
# pbar.finish()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Diff two SAM files")
parser.add_argument(
"--sam1",
help="first sam file to diff. will be called sam1 in diff out",
dest="sam1",
)
parser.add_argument(
"--sam2",
help="second sam file to diff. will be called sam2 in diff out",
dest="sam2",
)
parser.add_argument(
"--fields",
help="comma seperated list of fields:%s\t\t\t\t\t to diff between the"
"sam records use names from the same spec. for optional tags"
"use their 2 letter abbreviation. Default: pos" % (str(fields)),
dest="fields",
action="append",
default=[],
)
parser.add_argument(
"--full-qname",
help="keep the full query name",
dest="full_qname",
action="store_true",
default=False,
)
parser.add_argument(
"--min-mapq",
help="examine only those records with a given minimum mapping quality",
type=int,
dest="min_mapq",
default=0,
)
parser.add_argument(
"--ignore-hard-clip",
help="ignore hard clips in the cigar",
dest="ignore_hard_clip",
action="store_true",
default=False,
)
options = parser.parse_args()
if None == options.sam1:
parser.print_help()
sys.stderr.write("Error: --sam1 not given\n")
sys.exit(1)
if None == options.sam2:
parser.print_help()
sys.stderr.write("Error: --sam2 not given\n")
sys.exit(1)
for field in options.fields:
if not field in fields:
sys.stderr.write("Error: field not recognized [%s]\n" % field)
parser.print_help()
sys.exit()
main(options)
|
the-stack_106_14948
|
import subprocess
import csv
import os
import platform
SYSTEM = platform.system()
COMMAND_MAPPING = {
'Linux': {"PASTE_CMD": ['xclip', '-o', '-selection', 'clipboard'], 'COPY_CMD': ['xclip', '-selection', 'clipboard']},
'Darwin': {"PASTE_CMD": ['pbpaste'], 'COPY_CMD': ['pbcopy']},
'Windows': {"PASTE_CMD": ['paste'], 'COPY_CMD': ['clip']},
}
PASTE_CMD = COMMAND_MAPPING.get(SYSTEM).get('PASTE_CMD')
COPY_CMD = COMMAND_MAPPING.get(SYSTEM).get('COPY_CMD')
def paste(selection=None):
with open(os.devnull, 'wb') as devnull:
pipe = subprocess.Popen(PASTE_CMD, stdout=subprocess.PIPE, stderr=devnull)
outdata, errdata = pipe.communicate()
if pipe.returncode:
return False
        else:
            # decode so callers such as paste_table() get text, not bytes
            return outdata.decode('utf-8')
def copy(text):
with open(os.devnull, 'wb') as devnull:
pipe = subprocess.Popen(COPY_CMD, stdin=subprocess.PIPE, stderr=devnull)
        # encode str input for Popen's byte-oriented stdin
        pipe.communicate(text.encode('utf-8') if isinstance(text, str) else text)
if pipe.returncode:
return False
else:
return True
def paste_table():
text = paste()
data = list(csv.reader(text.split('\n'), delimiter='\t'))
return data
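# Minimal usage sketch (illustrative; assumes the platform clipboard tool
# resolved above -- xclip, pbpaste/pbcopy, or clip/Get-Clipboard -- is
# available on PATH):
if __name__ == '__main__':
    if copy('a\tb\nc\td'):
        # round-trip through the system clipboard as a tab-separated table
        print(paste_table())  # typically [['a', 'b'], ['c', 'd']]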
|
the-stack_106_14949
|
from json import dumps, loads
from codecs import encode as c_encode, decode as c_decode
from base64 import b64encode, b64decode
from re import compile as regex
# JSON encoder, converts a python object to a string
def jots(data, readable=False):
kwargs = dict()
# If readable is set, it pretty prints the JSON to be more human-readable
if readable:
# kwargs["sort_keys"] = True
kwargs["indent"] = 4
kwargs["separators"] = (",", ":")
try:
return dumps(data, **kwargs)
except ValueError as e:
return None
# JSON decoder, converts a string to a python object
def jsto(data):
try:
return loads(data)
except ValueError as e:
return None
# Encodes data to base64
def b64e(data, altchars=None, url=False, use_bin=False):
if type(data) is str:
data = data.encode("utf-8")
    if altchars is None and url:
        altchars = b"-_"  # b64encode expects bytes altchars
    base64_data = b64encode(data, altchars)
if use_bin:
return base64_data
return base64_data.decode("utf-8")
# Decodes data from base64
def b64d(base64_data, altchars=None, url=False, use_bin=False):
if type(base64_data) is str:
base64_data = base64_data.encode("utf-8")
    if altchars is None and url:
        altchars = b"-_"  # b64decode expects bytes altchars
data = b64decode(base64_data, altchars)
if use_bin:
return data
return data.decode("utf-8")
# Converts hex to base64 encoding
def h2b64(hex_data, altchars=None, url=False, use_bin=False):
if type(hex_data) is str:
hex_data = c_decode(hex_data, "hex")
return b64e(hex_data, altchars, url, use_bin)
# Decodes base64 and converts to hex
def b642h(base64_data, altchars=None, url=False, use_bin=False):
base64_decoded = b64d(base64_data, altchars, url, use_bin)
hex_data = c_encode(base64_decoded, "hex")
if use_bin:
return hex_data
return hex_data.decode("utf-8")
# str(ClassName) -> "<class '__main__.ClassName'>"
# This function extracts the class name from the str output
def ctos(_class):
pattern = regex(r"[<'>]")
cleaned = pattern.sub("", str(_class))
return cleaned.split("class ", 1)[1].split(".")[-1]
|
the-stack_106_14950
|
# -*- coding: utf-8 -*-
from email.parser import BytesParser
from django.core.cache import cache
from servo.lib.utils import empty
from servo.exceptions import ConfigurationError
from servo.models import Configuration, User, Order, Note, Template
def get_rules():
"""
Get the rules from the JSON file and cache them.
Fail silently if not configured.
@TODO: Need GUI for managing local_rules.json!
"""
import json
    try:
        with open("local_rules.json", "r") as fh:
            rules = json.load(fh)
    except IOError:
        return []
cache.set('rules', rules)
return rules
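# A sketch of the shape local_rules.json is expected to have, inferred from
# how apply_rules() reads each rule below; the event/match/data values here
# are purely illustrative:
#
# [
#     {"event": "set_status", "match": "Waiting for parts",
#      "action": "send_email", "data": {"template": 1}},
#     {"event": "set_queue", "action": "send_sms",
#      "data": "Order {{ order.code }} was moved to a new queue"}
# ]
#
# "action" is one of set_queue, set_priority, send_email or send_sms, and
# "data" is either a Template primary key (as {"template": <pk>}) or a raw
# template string rendered against the order.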
def apply_rules(event):
"""
Applies configured rules to an event
event is the Event object that was triggered
"""
counter = 0
rules = cache.get('rules', get_rules())
order = event.content_object
user = event.triggered_by
for r in rules:
match = r.get('match', event.description)
if (r['event'] == event.action and match == event.description):
if isinstance(r['data'], dict):
tpl_id = r['data']['template']
r['data'] = Template.objects.get(pk=tpl_id).render(order)
else:
r['data'] = Template(content=r['data']).render(order)
if r['action'] == "set_queue":
order.set_queue(r['data'], user)
if r['action'] == "set_priority":
pass
if r['action'] == "send_email":
try:
email = order.customer.valid_email()
except Exception:
continue # skip customers w/o valid emails
note = Note(order=order, created_by=user)
note.body = r['data']
note.recipient = email
note.render_subject({'note': note})
note.save()
try:
note.send_mail(user)
except ValueError as e:
print('Sending email failed (%s)' % e)
if r['action'] == "send_sms":
number = 0
try:
number = order.customer.get_standard_phone()
except Exception:
continue # skip customers w/o valid phone numbers
note = Note(order=order, created_by=user)
note.body = r['data']
note.save()
try:
note.send_sms(number, user)
except ValueError as e:
print('Sending SMS to %s failed (%s)' % (number, e))
counter += 1
return '%d/%d rules processed' % (counter, len(rules))
def batch_process(user, data):
"""
/orders/batch
"""
processed = 0
orders = data['orders'].strip().split("\r\n")
for o in orders:
try:
order = Order.objects.get(code=o)
except Exception as e:
continue
if data['status'] and order.queue:
status = order.queue.queuestatus_set.get(status_id=data['status'])
order.set_status(status, user)
if data['queue']:
order.set_queue(data['queue'], user)
if len(data['sms']) > 0:
try:
number = order.customer.get_standard_phone()
note = Note(order=order, created_by=user, body=data['sms'])
note.render_body({'order': order})
note.save()
try:
note.send_sms(number, user)
except Exception as e:
note.delete()
print("Failed to send SMS to: %s" % number)
except AttributeError as e: # customer has no phone number
continue
if len(data['email']) > 0:
note = Note(order=order, created_by=user, body=data['email'])
note.sender = user.email
try:
note.recipient = order.customer.email
note.render_subject({'note': note})
note.render_body({'order': order})
note.save()
note.send_mail(user)
except Exception as e:
# customer has no email address or some other error...
pass
if len(data['note']) > 0:
note = Note(order=order, created_by=user, body=data['note'])
note.render_body({'order': order})
note.save()
processed += 1
return '%d/%d orders processed' % (processed, len(orders))
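# For reference, batch_process() above expects `data` to carry the form
# fields it reads: "orders" (newline-separated order codes), "status",
# "queue", "sms", "email" and "note"; an empty value simply skips that step.
# This summary is inferred from the function body, not from separate docs.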
def check_mail():
"""
Checks IMAP box for incoming mail
"""
uid = Configuration.conf('imap_act')
if empty(uid):
err = 'User account for incoming messages not configured'
raise ConfigurationError(err)
counter = 0
user = User.objects.get(pk=uid)
server = Configuration.get_imap_server()
typ, data = server.search(None, "UnSeen")
for num in data[0].split():
typ, data = server.fetch(num, "(RFC822)")
        # BytesParser.parsebytes() returns an email.message.Message
msg = BytesParser().parsebytes(data[0][1])
Note.from_email(msg, user)
#server.copy(num, 'servo')
server.store(num, '+FLAGS', '\\Seen')
counter += 1
server.close()
server.logout()
return '%d messages processed' % counter
|
the-stack_106_14954
|
import os
import pickle
import numpy as np
import pytest
from umap import UMAP
from jina.executors import BaseExecutor
from jina.executors.metas import get_default_metas
from .. import UMAPEncoder
@pytest.fixture(scope="function", autouse=True)
def metas(tmpdir):
metas = get_default_metas()
metas['workspace'] = str(tmpdir)
yield metas
@pytest.fixture(scope="function")
def train_data():
"""
Create an array of the given shape and populate it with random samples from a uniform distribution over [0, 1).
:return: a `B x T` numpy ``ndarray``, `B` is the size of the batch
"""
batch_size = 2000
input_dim = 28
train_data = np.random.rand(batch_size, input_dim)
return train_data
@pytest.fixture(scope="function")
def test_data():
"""
Create an array of the given shape and populate it with random samples from a uniform distribution over [0, 1).
:return: a `B x T` numpy ``ndarray``, `B` is the size of the batch
"""
batch_size = 10
input_dim = 28
test_data = np.random.rand(batch_size, input_dim)
return test_data
def get_encoder(metas, train_data, target_output_dim):
tmpdir = metas['workspace']
model_path = os.path.join(tmpdir, 'umap_model.model')
model = UMAP(n_components=target_output_dim, random_state=42)
model.fit(train_data)
pickle.dump(model, open(model_path, 'wb'))
return UMAPEncoder(model_path=model_path)
@pytest.mark.parametrize('target_output_dim', [2])
def test_encoding_results(metas, train_data, test_data, target_output_dim):
expected_batch_size = test_data.shape[0]
encoder = get_encoder(metas, train_data, target_output_dim)
encoded_data = encoder.encode(test_data)
assert encoded_data.shape == (expected_batch_size, target_output_dim)
assert type(encoded_data) is np.ndarray
@pytest.mark.parametrize('target_output_dim', [2])
def test_save_and_load(metas, train_data, test_data, target_output_dim):
encoder = get_encoder(metas, train_data, target_output_dim)
encoded_data_control = encoder.encode(test_data)
encoder.touch()
encoder.save()
assert os.path.exists(encoder.save_abspath)
encoder_loaded = BaseExecutor.load(encoder.save_abspath)
encoded_data_test = encoder_loaded.encode(test_data)
np.testing.assert_array_equal(encoded_data_test, encoded_data_control)
|
the-stack_106_14955
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Editor Widget"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
from __future__ import print_function
import logging
import os
import os.path as osp
import sys
from collections import MutableSequence
import unicodedata
# Third party imports
import qdarkstyle
from qtpy.compat import getsavefilename
from qtpy.QtCore import (QByteArray, QFileInfo, QObject, QPoint, QSize, Qt,
QThread, QTimer, Signal, Slot)
from qtpy.QtGui import QFont
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QHBoxLayout,
QLabel, QMainWindow, QMessageBox, QMenu,
QSplitter, QVBoxLayout, QWidget, QListWidget,
QListWidgetItem)
# Local imports
from spyder.config.base import _, running_under_pytest
from spyder.config.gui import config_shortcut, is_dark_interface, get_shortcut
from spyder.config.utils import (get_edit_filetypes, get_edit_filters,
get_filter, is_kde_desktop, is_anaconda)
from spyder.py3compat import qbytearray_to_str, to_text_string
from spyder.utils import icon_manager as ima
from spyder.utils import (codeanalysis, encoding, sourcecode,
syntaxhighlighters)
from spyder.utils.qthelpers import (add_actions, create_action,
create_toolbutton, MENU_SEPARATOR,
mimedata2url)
from spyder.plugins.outlineexplorer.widgets import OutlineExplorerWidget
from spyder.plugins.outlineexplorer.editor import OutlineExplorerProxyEditor
from spyder.widgets.fileswitcher import FileSwitcher
from spyder.widgets.findreplace import FindReplace
from spyder.plugins.editor.utils.autosave import AutosaveForStack
from spyder.plugins.editor.widgets import codeeditor
from spyder.plugins.editor.widgets.base import TextEditBaseWidget # analysis:ignore
from spyder.plugins.editor.widgets.codeeditor import Printer # analysis:ignore
from spyder.plugins.editor.widgets.codeeditor import get_file_language
from spyder.widgets.status import (CursorPositionStatus, EncodingStatus,
EOLStatus, ReadWriteStatus)
from spyder.widgets.tabs import BaseTabs
from spyder.config.main import CONF
from spyder.plugins.explorer.widgets import show_in_external_file_explorer
logger = logging.getLogger(__name__)
class AnalysisThread(QThread):
"""Analysis thread"""
def __init__(self, parent, checker, source_code):
super(AnalysisThread, self).__init__(parent)
self.checker = checker
self.results = None
self.source_code = source_code
def run(self):
"""Run analysis"""
try:
self.results = self.checker(self.source_code)
except Exception as e:
logger.error(e, exc_info=True)
class ThreadManager(QObject):
"""Analysis thread manager"""
def __init__(self, parent, max_simultaneous_threads=2):
super(ThreadManager, self).__init__(parent)
self.max_simultaneous_threads = max_simultaneous_threads
self.started_threads = {}
self.pending_threads = []
self.end_callbacks = {}
def close_threads(self, parent):
"""Close threads associated to parent_id"""
logger.debug("Call ThreadManager's 'close_threads'")
if parent is None:
# Closing all threads
self.pending_threads = []
threadlist = []
for threads in list(self.started_threads.values()):
threadlist += threads
else:
parent_id = id(parent)
self.pending_threads = [(_th, _id) for (_th, _id)
in self.pending_threads
if _id != parent_id]
threadlist = self.started_threads.get(parent_id, [])
for thread in threadlist:
logger.debug("Waiting for thread %r to finish" % thread)
while thread.isRunning():
# We can't terminate thread safely, so we simply wait...
QApplication.processEvents()
def close_all_threads(self):
"""Close all threads"""
logger.debug("Call ThreadManager's 'close_all_threads'")
self.close_threads(None)
def add_thread(self, checker, end_callback, source_code, parent):
"""Add thread to queue"""
parent_id = id(parent)
thread = AnalysisThread(self, checker, source_code)
self.end_callbacks[id(thread)] = end_callback
self.pending_threads.append((thread, parent_id))
logger.debug("Added thread %r to queue" % thread)
QTimer.singleShot(50, self.update_queue)
def update_queue(self):
"""Update queue"""
started = 0
for parent_id, threadlist in list(self.started_threads.items()):
still_running = []
for thread in threadlist:
if thread.isFinished():
end_callback = self.end_callbacks.pop(id(thread))
if thread.results is not None:
# The thread was executed successfully
end_callback(thread.results)
thread.setParent(None)
thread = None
else:
still_running.append(thread)
started += 1
threadlist = None
if still_running:
self.started_threads[parent_id] = still_running
else:
self.started_threads.pop(parent_id)
logger.debug("Updating queue:")
logger.debug(" started: %d" % started)
logger.debug(" pending: %d" % len(self.pending_threads))
if self.pending_threads and started < self.max_simultaneous_threads:
thread, parent_id = self.pending_threads.pop(0)
thread.finished.connect(self.update_queue)
threadlist = self.started_threads.get(parent_id, [])
self.started_threads[parent_id] = threadlist+[thread]
logger.debug("===>starting: %r" % thread)
thread.start()
class FileInfo(QObject):
"""File properties"""
todo_results_changed = Signal()
text_changed_at = Signal(str, int)
edit_goto = Signal(str, int, str)
send_to_help = Signal(str, str, str, str, bool)
sig_filename_changed = Signal(str)
def __init__(self, filename, encoding, editor, new, threadmanager):
QObject.__init__(self)
self.threadmanager = threadmanager
self._filename = filename
self.newly_created = new
self.default = False # Default untitled file
self.encoding = encoding
self.editor = editor
self.path = []
self.classes = (filename, None, None)
self.todo_results = []
self.lastmodified = QFileInfo(filename).lastModified()
self.editor.textChanged.connect(self.text_changed)
self.sig_filename_changed.connect(self.editor.sig_filename_changed)
@property
def filename(self):
return self._filename
@filename.setter
def filename(self, value):
self._filename = value
self.sig_filename_changed.emit(value)
def text_changed(self):
"""Editor's text has changed"""
self.default = False
self.editor.document().changed_since_autosave = True
self.text_changed_at.emit(self.filename,
self.editor.get_position('cursor'))
def get_source_code(self):
"""Return associated editor source code"""
return to_text_string(self.editor.toPlainText())
def run_todo_finder(self):
"""Run TODO finder"""
if self.editor.is_python():
self.threadmanager.add_thread(codeanalysis.find_tasks,
self.todo_finished,
self.get_source_code(), self)
def todo_finished(self, results):
"""Code analysis thread has finished"""
self.set_todo_results(results)
self.todo_results_changed.emit()
def set_todo_results(self, results):
"""Set TODO results and update markers in editor"""
self.todo_results = results
self.editor.process_todo(results)
def cleanup_todo_results(self):
"""Clean-up TODO finder results"""
self.todo_results = []
class StackHistory(MutableSequence):
"""Handles editor stack history.
Works as a list of numbers corresponding to tab indexes.
    Internally, elements are saved using object ids.
"""
def __init__(self, editor):
self.history = list()
self.id_list = list()
self.editor = editor
def _update_id_list(self):
"""Update list of corresponpding ids and tabs."""
self.id_list = [id(self.editor.tabs.widget(_i))
for _i in range(self.editor.tabs.count())]
def refresh(self):
"""Remove editors that are not longer open."""
self._update_id_list()
for _id in self.history[:]:
if _id not in self.id_list:
self.history.remove(_id)
def __len__(self):
return len(self.history)
def __getitem__(self, i):
self._update_id_list()
try:
return self.id_list.index(self.history[i])
except ValueError:
self.refresh()
raise IndexError
def __delitem__(self, i):
del self.history[i]
def __setitem__(self, i, v):
_id = id(self.editor.tabs.widget(v))
self.history[i] = _id
def __str__(self):
return str(list(self))
def insert(self, i, tab_index):
"""Insert the widget (at tab index) in the position i (index)."""
_id = id(self.editor.tabs.widget(tab_index))
self.history.insert(i, _id)
def remove(self, tab_index):
"""Remove the widget at the corresponding tab_index."""
_id = id(self.editor.tabs.widget(tab_index))
if _id in self.history:
self.history.remove(_id)
def remove_and_append(self, index):
"""Remove previous entrances of a tab, and add it as the latest."""
while index in self:
self.remove(index)
self.append(index)
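# Illustrative note (not part of Spyder): remove_and_append() is what keeps
# the stack history in most-recently-used order. For a plain list of tab
# indexes the effect would be:
#
#   history = [2, 0, 1]      # 1 is the most recently used tab
#   # activating tab 0 removes its old entry and re-appends it last:
#   history = [2, 1, 0]
#
# StackHistory stores widget ids rather than raw indexes so the order stays
# valid when tabs are moved or closed; TabSwitcherWidget below walks this
# history in reverse to offer most-recently-used tab switching.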
class TabSwitcherWidget(QListWidget):
"""Show tabs in mru order and change between them."""
def __init__(self, parent, stack_history, tabs):
QListWidget.__init__(self, parent)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog)
self.editor = parent
self.stack_history = stack_history
self.tabs = tabs
self.setSelectionMode(QListWidget.SingleSelection)
self.itemActivated.connect(self.item_selected)
self.id_list = []
self.load_data()
size = CONF.get('main', 'completion/size')
self.resize(*size)
self.set_dialog_position()
self.setCurrentRow(0)
config_shortcut(lambda: self.select_row(-1), context='Editor',
name='Go to previous file', parent=self)
config_shortcut(lambda: self.select_row(1), context='Editor',
name='Go to next file', parent=self)
def load_data(self):
"""Fill ListWidget with the tabs texts.
Add elements in inverse order of stack_history.
"""
for index in reversed(self.stack_history):
text = self.tabs.tabText(index)
text = text.replace('&', '')
item = QListWidgetItem(ima.icon('TextFileIcon'), text)
self.addItem(item)
def item_selected(self, item=None):
"""Change to the selected document and hide this widget."""
if item is None:
item = self.currentItem()
# stack history is in inverse order
try:
index = self.stack_history[-(self.currentRow()+1)]
except IndexError:
pass
else:
self.editor.set_stack_index(index)
self.editor.current_changed(index)
self.hide()
def select_row(self, steps):
"""Move selected row a number of steps.
        Selection wraps around cyclically.
"""
row = (self.currentRow() + steps) % self.count()
self.setCurrentRow(row)
def set_dialog_position(self):
"""Positions the tab switcher in the top-center of the editor."""
left = self.editor.geometry().width()/2 - self.width()/2
top = self.editor.tabs.tabBar().geometry().height()
self.move(self.editor.mapToGlobal(QPoint(left, top)))
def keyReleaseEvent(self, event):
"""Reimplement Qt method.
Handle "most recent used" tab behavior,
When ctrl is released and tab_switcher is visible, tab will be changed.
"""
if self.isVisible():
qsc = get_shortcut(context='Editor', name='Go to next file')
for key in qsc.split('+'):
key = key.lower()
if ((key == 'ctrl' and event.key() == Qt.Key_Control) or
(key == 'alt' and event.key() == Qt.Key_Alt)):
self.item_selected()
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt method to allow cyclic behavior."""
if event.key() == Qt.Key_Down:
self.select_row(1)
elif event.key() == Qt.Key_Up:
self.select_row(-1)
def focusOutEvent(self, event):
"""Reimplement Qt method to close the widget when loosing focus."""
event.ignore()
# Inspired from CompletionWidget.focusOutEvent() in file
# widgets/sourcecode/base.py line 212
if sys.platform == "darwin":
if event.reason() != Qt.ActiveWindowFocusReason:
self.close()
else:
self.close()
class EditorStack(QWidget):
reset_statusbar = Signal()
readonly_changed = Signal(bool)
encoding_changed = Signal(str)
sig_editor_cursor_position_changed = Signal(int, int)
sig_refresh_eol_chars = Signal(str)
starting_long_process = Signal(str)
ending_long_process = Signal(str)
redirect_stdio = Signal(bool)
exec_in_extconsole = Signal(str, bool)
run_cell_in_ipyclient = Signal(str, str, str, bool)
update_plugin_title = Signal()
editor_focus_changed = Signal()
zoom_in = Signal()
zoom_out = Signal()
zoom_reset = Signal()
sig_open_file = Signal(dict)
sig_close_file = Signal(str, str)
file_saved = Signal(str, str, str)
file_renamed_in_data = Signal(str, str, str)
opened_files_list_changed = Signal()
active_languages_stats = Signal(set)
todo_results_changed = Signal()
update_code_analysis_actions = Signal()
refresh_file_dependent_actions = Signal()
refresh_save_all_action = Signal()
sig_breakpoints_saved = Signal()
text_changed_at = Signal(str, int)
current_file_changed = Signal(str, int)
plugin_load = Signal((str,), ())
edit_goto = Signal(str, int, str)
sig_split_vertically = Signal()
sig_split_horizontally = Signal()
sig_new_file = Signal((str,), ())
sig_save_as = Signal()
sig_prev_edit_pos = Signal()
sig_prev_cursor = Signal()
sig_next_cursor = Signal()
sig_prev_warning = Signal()
sig_next_warning = Signal()
sig_go_to_definition = Signal(str, int, int)
perform_lsp_request = Signal(str, str, dict)
sig_option_changed = Signal(str, object) # config option needs changing
def __init__(self, parent, actions):
QWidget.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.threadmanager = ThreadManager(self)
self.new_window = False
self.horsplit_action = None
self.versplit_action = None
self.close_action = None
self.__get_split_actions()
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.menu = None
self.fileswitcher_dlg = None
# self.filelist_btn = None
# self.previous_btn = None
# self.next_btn = None
self.tabs = None
self.tabs_switcher = None
self.stack_history = StackHistory(self)
self.setup_editorstack(parent, layout)
self.find_widget = None
self.data = []
fileswitcher_action = create_action(self, _("File switcher..."),
icon=ima.icon('filelist'),
triggered=self.open_fileswitcher_dlg)
symbolfinder_action = create_action(self,
_("Find symbols in file..."),
icon=ima.icon('symbol_find'),
triggered=self.open_symbolfinder_dlg)
copy_to_cb_action = create_action(self, _("Copy path to clipboard"),
icon=ima.icon('editcopy'),
triggered=lambda:
QApplication.clipboard().setText(self.get_current_filename()))
close_right = create_action(self, _("Close all to the right"),
triggered=self.close_all_right)
close_all_but_this = create_action(self, _("Close all but this"),
triggered=self.close_all_but_this)
        if sys.platform == 'darwin':
            text = _("Show in Finder")
        else:
            text = _("Show in external file explorer")
external_fileexp_action = create_action(self, text,
triggered=self.show_in_external_file_explorer)
self.menu_actions = actions + [external_fileexp_action,
None, fileswitcher_action,
symbolfinder_action,
copy_to_cb_action, None, close_right,
close_all_but_this]
self.outlineexplorer = None
self.help = None
self.unregister_callback = None
self.is_closable = False
self.new_action = None
self.open_action = None
self.save_action = None
self.revert_action = None
self.tempfile_path = None
self.title = _("Editor")
self.pyflakes_enabled = True
self.pep8_enabled = False
self.todolist_enabled = True
self.realtime_analysis_enabled = False
self.is_analysis_done = False
self.linenumbers_enabled = True
self.blanks_enabled = False
self.scrollpastend_enabled = False
self.edgeline_enabled = True
self.edgeline_columns = (79,)
self.codecompletion_auto_enabled = True
self.codecompletion_case_enabled = False
self.codecompletion_enter_enabled = False
self.calltips_enabled = True
self.go_to_definition_enabled = True
self.close_parentheses_enabled = True
self.close_quotes_enabled = True
self.add_colons_enabled = True
self.auto_unindent_enabled = True
self.indent_chars = " "*4
self.tab_stop_width_spaces = 4
self.show_class_func_dropdown = False
self.help_enabled = False
self.default_font = None
self.wrap_enabled = False
self.tabmode_enabled = False
self.intelligent_backspace_enabled = True
self.highlight_current_line_enabled = False
self.highlight_current_cell_enabled = False
self.occurrence_highlighting_enabled = True
        self.occurrence_highlighting_timeout = 1500
self.checkeolchars_enabled = True
self.always_remove_trailing_spaces = False
self.convert_eol_on_save = False
self.convert_eol_on_save_to = 'LF'
self.focus_to_editor = True
self.run_cell_copy = False
self.create_new_file_if_empty = True
self.indent_guides = False
ccs = 'Spyder'
if ccs not in syntaxhighlighters.COLOR_SCHEME_NAMES:
ccs = syntaxhighlighters.COLOR_SCHEME_NAMES[0]
self.color_scheme = ccs
self.__file_status_flag = False
# Real-time code analysis
self.analysis_timer = QTimer(self)
self.analysis_timer.setSingleShot(True)
self.analysis_timer.setInterval(2000)
self.analysis_timer.timeout.connect(self.analyze_script)
# Update filename label
self.editor_focus_changed.connect(self.update_fname_label)
# Accepting drops
self.setAcceptDrops(True)
# Local shortcuts
self.shortcuts = self.create_shortcuts()
#For opening last closed tabs
self.last_closed_files = []
# Reference to save msgbox and avoid memory to be freed.
self.msgbox = None
# File types and filters used by the Save As dialog
self.edit_filetypes = None
self.edit_filters = None
# For testing
self.save_dialog_on_tests = not running_under_pytest()
        # Autosave component
self.autosave = AutosaveForStack(self)
@Slot()
def show_in_external_file_explorer(self, fnames=None):
"""Show file in external file explorer"""
if fnames is None:
fnames = self.get_current_filename()
show_in_external_file_explorer(fnames)
def create_shortcuts(self):
"""Create local shortcuts"""
# --- Configurable shortcuts
inspect = config_shortcut(self.inspect_current_object, context='Editor',
name='Inspect current object', parent=self)
set_breakpoint = config_shortcut(self.set_or_clear_breakpoint,
context='Editor', name='Breakpoint',
parent=self)
set_cond_breakpoint = config_shortcut(
self.set_or_edit_conditional_breakpoint,
context='Editor',
name='Conditional breakpoint',
parent=self)
gotoline = config_shortcut(self.go_to_line, context='Editor',
name='Go to line', parent=self)
tab = config_shortcut(lambda: self.tab_navigation_mru(forward=False),
context='Editor',
name='Go to previous file', parent=self)
tabshift = config_shortcut(self.tab_navigation_mru, context='Editor',
name='Go to next file', parent=self)
prevtab = config_shortcut(lambda: self.tabs.tab_navigate(-1),
context='Editor',
name='Cycle to previous file', parent=self)
nexttab = config_shortcut(lambda: self.tabs.tab_navigate(1),
context='Editor',
name='Cycle to next file', parent=self)
run_selection = config_shortcut(self.run_selection, context='Editor',
name='Run selection', parent=self)
new_file = config_shortcut(lambda : self.sig_new_file[()].emit(),
context='Editor', name='New file',
parent=self)
open_file = config_shortcut(lambda : self.plugin_load[()].emit(),
context='Editor', name='Open file',
parent=self)
save_file = config_shortcut(self.save, context='Editor',
name='Save file', parent=self)
save_all = config_shortcut(self.save_all, context='Editor',
name='Save all', parent=self)
save_as = config_shortcut(lambda : self.sig_save_as.emit(),
context='Editor', name='Save As',
parent=self)
close_all = config_shortcut(self.close_all_files, context='Editor',
name='Close all', parent=self)
prev_edit_pos = config_shortcut(lambda : self.sig_prev_edit_pos.emit(),
context="Editor",
name="Last edit location",
parent=self)
prev_cursor = config_shortcut(lambda : self.sig_prev_cursor.emit(),
context="Editor",
name="Previous cursor position",
parent=self)
next_cursor = config_shortcut(lambda : self.sig_next_cursor.emit(),
context="Editor",
name="Next cursor position",
parent=self)
zoom_in_1 = config_shortcut(lambda : self.zoom_in.emit(),
context="Editor",
name="zoom in 1",
parent=self)
zoom_in_2 = config_shortcut(lambda : self.zoom_in.emit(),
context="Editor",
name="zoom in 2",
parent=self)
zoom_out = config_shortcut(lambda : self.zoom_out.emit(),
context="Editor",
name="zoom out",
parent=self)
zoom_reset = config_shortcut(lambda: self.zoom_reset.emit(),
context="Editor",
name="zoom reset",
parent=self)
close_file_1 = config_shortcut(self.close_file,
context="Editor",
name="close file 1",
parent=self)
close_file_2 = config_shortcut(self.close_file,
context="Editor",
name="close file 2",
parent=self)
run_cell = config_shortcut(self.run_cell,
context="Editor",
name="run cell",
parent=self)
run_cell_and_advance = config_shortcut(self.run_cell_and_advance,
context="Editor",
name="run cell and advance",
parent=self)
go_to_next_cell = config_shortcut(self.advance_cell,
context="Editor",
name="go to next cell",
parent=self)
go_to_previous_cell = config_shortcut(lambda: self.advance_cell(reverse=True),
context="Editor",
name="go to previous cell",
parent=self)
re_run_last_cell = config_shortcut(self.re_run_last_cell,
context="Editor",
name="re-run last cell",
parent=self)
prev_warning = config_shortcut(lambda: self.sig_prev_warning.emit(),
context="Editor",
name="Previous warning",
parent=self)
next_warning = config_shortcut(lambda: self.sig_next_warning.emit(),
context="Editor",
name="Next warning",
parent=self)
split_vertically = config_shortcut(lambda: self.sig_split_vertically.emit(),
context="Editor",
name="split vertically",
parent=self)
split_horizontally = config_shortcut(lambda: self.sig_split_horizontally.emit(),
context="Editor",
name="split horizontally",
parent=self)
close_split = config_shortcut(self.close_split,
context="Editor",
name="close split panel",
parent=self)
# Return configurable ones
return [inspect, set_breakpoint, set_cond_breakpoint, gotoline, tab,
tabshift, run_selection, new_file, open_file, save_file,
save_all, save_as, close_all, prev_edit_pos, prev_cursor,
next_cursor, zoom_in_1, zoom_in_2, zoom_out, zoom_reset,
close_file_1, close_file_2, run_cell, run_cell_and_advance,
go_to_next_cell, go_to_previous_cell, re_run_last_cell,
prev_warning, next_warning, split_vertically,
split_horizontally, close_split, prevtab, nexttab]
def get_shortcut_data(self):
"""
Returns shortcut data, a list of tuples (shortcut, text, default)
shortcut (QShortcut or QAction instance)
text (string): action/shortcut description
default (string): default key sequence
"""
return [sc.data for sc in self.shortcuts]
def setup_editorstack(self, parent, layout):
"""Setup editorstack's layout"""
layout.setSpacing(1)
self.fname_label = QLabel()
self.fname_label.setStyleSheet(
"QLabel {margin: 0px; padding: 3px;}")
layout.addWidget(self.fname_label)
menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'),
tip=_('Options'))
# Don't show menu arrow and remove padding
if is_dark_interface():
menu_btn.setStyleSheet(
("QToolButton::menu-indicator{image: none;}\n"
"QToolButton{margin: 1px; padding: 3px;}"))
else:
menu_btn.setStyleSheet(
"QToolButton::menu-indicator{image: none;}")
self.menu = QMenu(self)
menu_btn.setMenu(self.menu)
menu_btn.setPopupMode(menu_btn.InstantPopup)
self.menu.aboutToShow.connect(self.__setup_menu)
corner_widgets = {Qt.TopRightCorner: [menu_btn]}
self.tabs = BaseTabs(self, menu=self.menu, menu_use_tooltips=True,
corner_widgets=corner_widgets)
self.tabs.tabBar().setObjectName('plugin-tab')
self.tabs.set_close_function(self.close_file)
self.tabs.tabBar().tabMoved.connect(self.move_editorstack_data)
self.tabs.setMovable(True)
self.stack_history.refresh()
if hasattr(self.tabs, 'setDocumentMode') \
and not sys.platform == 'darwin':
# Don't set document mode to true on OSX because it generates
# a crash when the editor is detached from the main window
# Fixes Issue 561
self.tabs.setDocumentMode(True)
self.tabs.currentChanged.connect(self.current_changed)
if sys.platform == 'darwin':
tab_container = QWidget()
tab_container.setObjectName('tab-container')
tab_layout = QHBoxLayout(tab_container)
tab_layout.setContentsMargins(0, 0, 0, 0)
tab_layout.addWidget(self.tabs)
layout.addWidget(tab_container)
else:
layout.addWidget(self.tabs)
@Slot()
def update_fname_label(self):
"""Upadte file name label."""
filename = to_text_string(self.get_current_filename())
if len(filename) > 100:
shorten_filename = u'...' + filename[-100:]
else:
shorten_filename = filename
self.fname_label.setText(shorten_filename)
def add_corner_widgets_to_tabbar(self, widgets):
self.tabs.add_corner_widgets(widgets)
@Slot()
def close_split(self):
"""Closes the editorstack if it is not the last one opened."""
if self.is_closable:
self.close()
def closeEvent(self, event):
"""Overrides QWidget closeEvent()."""
self.threadmanager.close_all_threads()
self.analysis_timer.timeout.disconnect(self.analyze_script)
# Remove editor references from the outline explorer settings
if self.outlineexplorer is not None:
for finfo in self.data:
self.outlineexplorer.remove_editor(finfo.editor.oe_proxy)
QWidget.closeEvent(self, event)
def clone_editor_from(self, other_finfo, set_current):
fname = other_finfo.filename
enc = other_finfo.encoding
new = other_finfo.newly_created
finfo = self.create_new_editor(fname, enc, "",
set_current=set_current, new=new,
cloned_from=other_finfo.editor)
finfo.set_todo_results(other_finfo.todo_results)
return finfo.editor
def clone_from(self, other):
"""Clone EditorStack from other instance"""
for other_finfo in other.data:
self.clone_editor_from(other_finfo, set_current=True)
self.set_stack_index(other.get_stack_index())
@Slot()
def open_fileswitcher_dlg(self):
"""Open file list management dialog box"""
if not self.tabs.count():
return
if self.fileswitcher_dlg is not None and \
self.fileswitcher_dlg.is_visible:
self.fileswitcher_dlg.hide()
self.fileswitcher_dlg.is_visible = False
return
self.fileswitcher_dlg = FileSwitcher(self, self, self.tabs, self.data,
ima.icon('TextFileIcon'))
self.fileswitcher_dlg.sig_goto_file.connect(self.set_stack_index)
self.fileswitcher_dlg.show()
self.fileswitcher_dlg.is_visible = True
@Slot()
def open_symbolfinder_dlg(self):
self.open_fileswitcher_dlg()
self.fileswitcher_dlg.set_search_text('@')
def get_current_tab_manager(self):
"""Get the widget with the TabWidget attribute."""
return self
def go_to_line(self, line=None):
"""Go to line dialog"""
if line is not None:
            # When this method is called from the fileswitcher, a line
            # number is specified, so there is no need for the dialog.
self.get_current_editor().go_to_line(line)
else:
if self.data:
self.get_current_editor().exec_gotolinedialog()
def set_or_clear_breakpoint(self):
"""Set/clear breakpoint"""
if self.data:
editor = self.get_current_editor()
editor.debugger.toogle_breakpoint()
def set_or_edit_conditional_breakpoint(self):
"""Set conditional breakpoint"""
if self.data:
editor = self.get_current_editor()
editor.debugger.toogle_breakpoint(edit_condition=True)
def inspect_current_object(self):
"""Inspect current object in the Help plugin"""
editor = self.get_current_editor()
editor.sig_display_signature.connect(self.display_signature_help)
line, col = editor.get_cursor_line_column()
editor.request_hover(line, col)
def display_signature_help(self, signature):
editor = self.get_current_editor()
name = editor.get_current_word()
self.help.switch_to_editor_source()
editor.sig_display_signature.disconnect(self.display_signature_help)
self.send_to_help(name, signature, force=True)
#------ Editor Widget Settings
def set_closable(self, state):
"""Parent widget must handle the closable state"""
self.is_closable = state
def set_io_actions(self, new_action, open_action,
save_action, revert_action):
self.new_action = new_action
self.open_action = open_action
self.save_action = save_action
self.revert_action = revert_action
def set_find_widget(self, find_widget):
self.find_widget = find_widget
def set_outlineexplorer(self, outlineexplorer):
self.outlineexplorer = outlineexplorer
self.outlineexplorer.is_visible.connect(self._refresh_outlineexplorer)
def initialize_outlineexplorer(self):
"""This method is called separately from 'set_oulineexplorer' to avoid
doing unnecessary updates when there are multiple editor windows"""
for index in range(self.get_stack_count()):
if index != self.get_stack_index():
self._refresh_outlineexplorer(index=index)
def add_outlineexplorer_button(self, editor_plugin):
oe_btn = create_toolbutton(editor_plugin)
oe_btn.setDefaultAction(self.outlineexplorer.visibility_action)
self.add_corner_widgets_to_tabbar([5, oe_btn])
def set_help(self, help_plugin):
self.help = help_plugin
def set_tempfile_path(self, path):
self.tempfile_path = path
def set_title(self, text):
self.title = text
def set_classfunc_dropdown_visible(self, state):
self.show_class_func_dropdown = state
if self.data:
for finfo in self.data:
if finfo.editor.is_python_like():
finfo.editor.classfuncdropdown.setVisible(state)
def __update_editor_margins(self, editor):
editor.linenumberarea.setup_margins(linenumbers=self.linenumbers_enabled,
markers=self.has_markers())
def __codeanalysis_settings_changed(self, current_finfo):
if self.data:
for finfo in self.data:
self.__update_editor_margins(finfo.editor)
def set_pyflakes_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'code_analysis/pyflakes')
self.pyflakes_enabled = state
self.__codeanalysis_settings_changed(current_finfo)
def set_pep8_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'code_analysis/pep8')
self.pep8_enabled = state
self.__codeanalysis_settings_changed(current_finfo)
def has_markers(self):
"""Return True if this editorstack has a marker margin for TODOs or
code analysis"""
return self.todolist_enabled or self.pyflakes_enabled\
or self.pep8_enabled
def set_todolist_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'todo_list')
self.todolist_enabled = state
if self.data:
for finfo in self.data:
self.__update_editor_margins(finfo.editor)
finfo.cleanup_todo_results()
if state and current_finfo is not None:
if current_finfo is not finfo:
finfo.run_todo_finder()
def set_realtime_analysis_enabled(self, state):
self.realtime_analysis_enabled = state
def set_realtime_analysis_timeout(self, timeout):
self.analysis_timer.setInterval(timeout)
def set_linenumbers_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'line_numbers')
self.linenumbers_enabled = state
if self.data:
for finfo in self.data:
self.__update_editor_margins(finfo.editor)
def set_blanks_enabled(self, state):
self.blanks_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_blanks_enabled(state)
def set_scrollpastend_enabled(self, state):
self.scrollpastend_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_scrollpastend_enabled(state)
def set_edgeline_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'edge_line')
self.edgeline_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.edge_line.set_enabled(state)
def set_edgeline_columns(self, columns):
# CONF.get(self.CONF_SECTION, 'edge_line_column')
self.edgeline_columns = columns
if self.data:
for finfo in self.data:
finfo.editor.edge_line.set_columns(columns)
def set_indent_guides(self, state):
self.indent_guides = state
if self.data:
for finfo in self.data:
finfo.editor.indent_guides.set_enabled(state)
def set_codecompletion_auto_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'codecompletion_auto')
self.codecompletion_auto_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_codecompletion_auto(state)
def set_codecompletion_case_enabled(self, state):
self.codecompletion_case_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_codecompletion_case(state)
def set_codecompletion_enter_enabled(self, state):
self.codecompletion_enter_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_codecompletion_enter(state)
def set_calltips_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'calltips')
self.calltips_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_calltips(state)
def set_go_to_definition_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'go_to_definition')
self.go_to_definition_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_go_to_definition_enabled(state)
def set_close_parentheses_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_parentheses')
self.close_parentheses_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_parentheses_enabled(state)
def set_close_quotes_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_quotes')
self.close_quotes_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_quotes_enabled(state)
def set_add_colons_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'add_colons')
self.add_colons_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_add_colons_enabled(state)
def set_auto_unindent_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'auto_unindent')
self.auto_unindent_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_auto_unindent_enabled(state)
def set_indent_chars(self, indent_chars):
# CONF.get(self.CONF_SECTION, 'indent_chars')
indent_chars = indent_chars[1:-1] # removing the leading/ending '*'
self.indent_chars = indent_chars
if self.data:
for finfo in self.data:
finfo.editor.set_indent_chars(indent_chars)
def set_tab_stop_width_spaces(self, tab_stop_width_spaces):
# CONF.get(self.CONF_SECTION, 'tab_stop_width')
self.tab_stop_width_spaces = tab_stop_width_spaces
if self.data:
for finfo in self.data:
finfo.editor.tab_stop_width_spaces = tab_stop_width_spaces
finfo.editor.update_tab_stop_width_spaces()
def set_help_enabled(self, state):
self.help_enabled = state
def set_default_font(self, font, color_scheme=None):
self.default_font = font
if color_scheme is not None:
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_font(font, color_scheme)
def set_color_scheme(self, color_scheme):
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_color_scheme(color_scheme)
def set_wrap_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'wrap')
self.wrap_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_wrap_mode(state)
def set_tabmode_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'tab_always_indent')
self.tabmode_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_tab_mode(state)
def set_intelligent_backspace_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'intelligent_backspace')
self.intelligent_backspace_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_intelligent_backspace(state)
def set_occurrence_highlighting_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'occurrence_highlighting')
self.occurrence_highlighting_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_occurrence_highlighting(state)
def set_occurrence_highlighting_timeout(self, timeout):
# CONF.get(self.CONF_SECTION, 'occurrence_highlighting/timeout')
self.occurrence_highlighting_timeout = timeout
if self.data:
for finfo in self.data:
finfo.editor.set_occurrence_timeout(timeout)
def set_highlight_current_line_enabled(self, state):
self.highlight_current_line_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_highlight_current_line(state)
def set_highlight_current_cell_enabled(self, state):
self.highlight_current_cell_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_highlight_current_cell(state)
def set_checkeolchars_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'check_eol_chars')
self.checkeolchars_enabled = state
def set_always_remove_trailing_spaces(self, state):
# CONF.get(self.CONF_SECTION, 'always_remove_trailing_spaces')
self.always_remove_trailing_spaces = state
def set_convert_eol_on_save(self, state):
"""If `state` is `True`, saving files will convert line endings."""
# CONF.get(self.CONF_SECTION, 'convert_eol_on_save')
self.convert_eol_on_save = state
def set_convert_eol_on_save_to(self, state):
"""`state` can be one of ('LF', 'CRLF', 'CR')"""
# CONF.get(self.CONF_SECTION, 'convert_eol_on_save_to')
self.convert_eol_on_save_to = state
def set_focus_to_editor(self, state):
self.focus_to_editor = state
def set_run_cell_copy(self, state):
"""If `state` is ``True``, code cells will be copied to the console."""
self.run_cell_copy = state
#------ Stacked widget management
def get_stack_index(self):
return self.tabs.currentIndex()
def get_current_finfo(self):
if self.data:
return self.data[self.get_stack_index()]
def get_current_editor(self):
return self.tabs.currentWidget()
def get_stack_count(self):
return self.tabs.count()
def set_stack_index(self, index, instance=None):
if instance == self or instance == None:
self.tabs.setCurrentIndex(index)
def set_tabbar_visible(self, state):
self.tabs.tabBar().setVisible(state)
def remove_from_data(self, index):
self.tabs.blockSignals(True)
self.tabs.removeTab(index)
self.data.pop(index)
self.tabs.blockSignals(False)
self.update_actions()
def __modified_readonly_title(self, title, is_modified, is_readonly):
if is_modified is not None and is_modified:
title += "*"
if is_readonly is not None and is_readonly:
title = "(%s)" % title
return title
def get_tab_text(self, index, is_modified=None, is_readonly=None):
"""Return tab title."""
files_path_list = [finfo.filename for finfo in self.data]
fname = self.data[index].filename
fname = sourcecode.disambiguate_fname(files_path_list, fname)
return self.__modified_readonly_title(fname,
is_modified, is_readonly)
def get_tab_tip(self, filename, is_modified=None, is_readonly=None):
"""Return tab menu title"""
text = u"%s — %s"
text = self.__modified_readonly_title(text,
is_modified, is_readonly)
if self.tempfile_path is not None\
and filename == encoding.to_unicode_from_fs(self.tempfile_path):
temp_file_str = to_text_string(_("Temporary file"))
return text % (temp_file_str, self.tempfile_path)
else:
return text % (osp.basename(filename), osp.dirname(filename))
def add_to_data(self, finfo, set_current, add_where='end'):
finfo.editor.oe_proxy = None
index = 0 if add_where == 'start' else len(self.data)
self.data.insert(index, finfo)
index = self.data.index(finfo)
editor = finfo.editor
self.tabs.insertTab(index, editor, self.get_tab_text(index))
self.set_stack_title(index, False)
if set_current:
self.set_stack_index(index)
self.current_changed(index)
self.update_actions()
def __repopulate_stack(self):
self.tabs.blockSignals(True)
self.tabs.clear()
for finfo in self.data:
if finfo.newly_created:
is_modified = True
else:
is_modified = None
index = self.data.index(finfo)
tab_text = self.get_tab_text(index, is_modified)
tab_tip = self.get_tab_tip(finfo.filename)
index = self.tabs.addTab(finfo.editor, tab_text)
self.tabs.setTabToolTip(index, tab_tip)
self.tabs.blockSignals(False)
def rename_in_data(self, original_filename, new_filename):
index = self.has_filename(original_filename)
if index is None:
return
finfo = self.data[index]
if osp.splitext(finfo.filename)[1] != osp.splitext(new_filename)[1]:
# File type has changed!
txt = to_text_string(finfo.editor.get_text_with_eol())
language = get_file_language(new_filename, txt)
finfo.editor.set_language(language)
set_new_index = index == self.get_stack_index()
current_fname = self.get_current_filename()
finfo.filename = new_filename
new_index = self.data.index(finfo)
self.__repopulate_stack()
if set_new_index:
self.set_stack_index(new_index)
else:
# Fixes Issue 1287
self.set_current_filename(current_fname)
if self.outlineexplorer is not None:
self.outlineexplorer.file_renamed(
finfo.editor.oe_proxy, finfo.filename)
return new_index
def set_stack_title(self, index, is_modified):
finfo = self.data[index]
fname = finfo.filename
is_modified = (is_modified or finfo.newly_created) and not finfo.default
is_readonly = finfo.editor.isReadOnly()
tab_text = self.get_tab_text(index, is_modified, is_readonly)
tab_tip = self.get_tab_tip(fname, is_modified, is_readonly)
        # Only update the tab text if it has changed, otherwise unwanted
        # scrolling will happen when changing tabs. See Issue #1170.
if tab_text != self.tabs.tabText(index):
self.tabs.setTabText(index, tab_text)
self.tabs.setTabToolTip(index, tab_tip)
#------ Context menu
def __setup_menu(self):
"""Setup tab context menu before showing it"""
self.menu.clear()
if self.data:
actions = self.menu_actions
else:
actions = (self.new_action, self.open_action)
self.setFocus() # --> Editor.__get_focus_editortabwidget
add_actions(self.menu, list(actions) + self.__get_split_actions())
self.close_action.setEnabled(self.is_closable)
#------ Hor/Ver splitting
def __get_split_actions(self):
if self.parent() is not None:
plugin = self.parent().plugin
else:
plugin = None
# New window
if plugin is not None:
self.new_window_action = create_action(
self, _("New window"),
icon=ima.icon('newwindow'),
tip=_("Create a new editor window"),
triggered=plugin.create_new_window)
# Splitting
self.versplit_action = create_action(self, _("Split vertically"),
icon=ima.icon('versplit'),
tip=_("Split vertically this editor window"),
triggered=lambda: self.sig_split_vertically.emit(),
shortcut=get_shortcut(context='Editor', name='split vertically'),
context=Qt.WidgetShortcut)
self.horsplit_action = create_action(self, _("Split horizontally"),
icon=ima.icon('horsplit'),
tip=_("Split horizontally this editor window"),
triggered=lambda: self.sig_split_horizontally.emit(),
shortcut=get_shortcut(context='Editor', name='split horizontally'),
context=Qt.WidgetShortcut)
self.close_action = create_action(self, _("Close this panel"),
icon=ima.icon('close_panel'),
triggered=self.close_split,
shortcut=get_shortcut(context='Editor', name='close split panel'),
context=Qt.WidgetShortcut)
# Regular actions
actions = [MENU_SEPARATOR, self.versplit_action,
self.horsplit_action, self.close_action]
if self.new_window:
window = self.window()
close_window_action = create_action(
self, _("Close window"),
icon=ima.icon('close_pane'),
triggered=window.close)
actions += [MENU_SEPARATOR, self.new_window_action,
close_window_action]
elif plugin is not None:
if plugin.undocked_window is not None:
actions += [MENU_SEPARATOR, plugin.dock_action]
else:
actions += [MENU_SEPARATOR, self.new_window_action,
plugin.undock_action, plugin.close_plugin_action]
return actions
def reset_orientation(self):
self.horsplit_action.setEnabled(True)
self.versplit_action.setEnabled(True)
def set_orientation(self, orientation):
self.horsplit_action.setEnabled(orientation == Qt.Horizontal)
self.versplit_action.setEnabled(orientation == Qt.Vertical)
def update_actions(self):
state = self.get_stack_count() > 0
self.horsplit_action.setEnabled(state)
self.versplit_action.setEnabled(state)
# ------ Accessors
def get_current_filename(self):
if self.data:
return self.data[self.get_stack_index()].filename
def get_filenames(self):
"""
Return a list with the names of all the files currently opened in
the editorstack.
"""
return [finfo.filename for finfo in self.data]
def has_filename(self, filename):
"""Return the self.data index position for the filename.
Args:
filename: Name of the file to search for in self.data.
Returns:
The self.data index for the filename. Returns None
if the filename is not found in self.data.
"""
fixpath = lambda path: osp.normcase(osp.realpath(path))
for index, finfo in enumerate(self.data):
if fixpath(filename) == fixpath(finfo.filename):
return index
return None
def set_current_filename(self, filename, focus=True):
"""Set current filename and return the associated editor instance."""
index = self.has_filename(filename)
if index is not None:
if focus:
self.set_stack_index(index)
editor = self.data[index].editor
if focus:
editor.setFocus()
else:
self.stack_history.remove_and_append(index)
return editor
def is_file_opened(self, filename=None):
"""Return if filename is in the editor stack.
Args:
filename: Name of the file to search for. If filename is None,
then checks if any file is open.
Returns:
True: If filename is None and a file is open.
False: If filename is None and no files are open.
None: If filename is not None and the file isn't found.
integer: Index of file name in editor stack.
"""
if filename is None:
# Is there any file opened?
return len(self.data) > 0
else:
return self.has_filename(filename)
def get_index_from_filename(self, filename):
"""
Return the position index of a file in the tab bar of the editorstack
from its name.
"""
filenames = [d.filename for d in self.data]
return filenames.index(filename)
@Slot(int, int)
def move_editorstack_data(self, start, end):
"""Reorder editorstack.data so it is synchronized with the tab bar when
tabs are moved."""
if start < 0 or end < 0:
return
else:
steps = abs(end - start)
direction = (end-start) // steps # +1 for right, -1 for left
data = self.data
self.blockSignals(True)
for i in range(start, end, direction):
data[i], data[i+direction] = data[i+direction], data[i]
self.blockSignals(False)
self.refresh()
#------ Close file, tabwidget...
def close_file(self, index=None, force=False):
"""Close file (index=None -> close current file)
        Keep the current file index unchanged (unless the file being
        closed is the current one)"""
current_index = self.get_stack_index()
count = self.get_stack_count()
if index is None:
if count > 0:
index = current_index
else:
self.find_widget.set_editor(None)
return
new_index = None
if count > 1:
if current_index == index:
new_index = self._get_previous_file_index()
else:
new_index = current_index
is_ok = force or self.save_if_changed(cancelable=True, index=index)
if is_ok:
finfo = self.data[index]
self.threadmanager.close_threads(finfo)
# Removing editor reference from outline explorer settings:
if self.outlineexplorer is not None:
self.outlineexplorer.remove_editor(finfo.editor.oe_proxy)
filename = self.data[index].filename
self.remove_from_data(index)
finfo.editor.notify_close()
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms
# (see Issue 1094, Issue 1098)
self.sig_close_file.emit(str(id(self)), filename)
self.opened_files_list_changed.emit()
self.update_code_analysis_actions.emit()
self._refresh_outlineexplorer()
self.refresh_file_dependent_actions.emit()
self.update_plugin_title.emit()
editor = self.get_current_editor()
if editor:
editor.setFocus()
if new_index is not None:
if index < new_index:
new_index -= 1
self.set_stack_index(new_index)
self.add_last_closed_file(finfo.filename)
if self.get_stack_count() == 0 and self.create_new_file_if_empty:
self.sig_new_file[()].emit()
return False
self.__modify_stack_title()
return is_ok
def poll_open_file_languages(self):
"""Get list of current opened files' languages"""
languages = []
for index in range(self.get_stack_count()):
languages.append(
self.tabs.widget(index).language.lower())
return set(languages)
def notify_server_ready(self, language, config):
"""Notify language server availability to code editors."""
for index in range(self.get_stack_count()):
editor = self.tabs.widget(index)
if editor.language.lower() == language:
editor.start_lsp_services(config)
def close_all_files(self):
"""Close all opened scripts"""
while self.close_file():
pass
def close_all_right(self):
""" Close all files opened to the right """
num = self.get_stack_index()
n = self.get_stack_count()
for i in range(num, n-1):
self.close_file(num+1)
def close_all_but_this(self):
"""Close all files but the current one"""
self.close_all_right()
for i in range(0, self.get_stack_count()-1 ):
self.close_file(0)
def add_last_closed_file(self, fname):
"""Add to last closed file list."""
if fname in self.last_closed_files:
self.last_closed_files.remove(fname)
self.last_closed_files.insert(0, fname)
if len(self.last_closed_files) > 10:
self.last_closed_files.pop(-1)
def get_last_closed_files(self):
return self.last_closed_files
def set_last_closed_files(self, fnames):
self.last_closed_files = fnames
#------ Save
def save_if_changed(self, cancelable=False, index=None):
"""Ask user to save file if modified.
Args:
cancelable: Show Cancel button.
index: File to check for modification.
Returns:
False when save() fails or is cancelled.
True when save() is successful, there are no modifications,
or user selects No or NoToAll.
This function controls the message box prompt for saving
changed files. The actual save is performed in save() for
each index processed. This function also removes autosave files
corresponding to files the user chooses not to save.
"""
if index is None:
indexes = list(range(self.get_stack_count()))
else:
indexes = [index]
buttons = QMessageBox.Yes | QMessageBox.No
if cancelable:
buttons |= QMessageBox.Cancel
unsaved_nb = 0
for index in indexes:
if self.data[index].editor.document().isModified():
unsaved_nb += 1
if not unsaved_nb:
# No file to save
return True
if unsaved_nb > 1:
buttons |= QMessageBox.YesToAll | QMessageBox.NoToAll
yes_all = no_all = False
for index in indexes:
self.set_stack_index(index)
finfo = self.data[index]
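            # The temporary scratch file is always saved without prompting,
            # as is every remaining file once the user has picked Yes to all.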
if finfo.filename == self.tempfile_path or yes_all:
if not self.save(index):
return False
elif no_all:
self.autosave.remove_autosave_file(finfo)
elif (finfo.editor.document().isModified() and
self.save_dialog_on_tests):
self.msgbox = QMessageBox(
QMessageBox.Question,
self.title,
_("<b>%s</b> has been modified."
"<br>Do you want to save changes?"
) % osp.basename(finfo.filename),
buttons,
parent=self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
if not self.save(index):
return False
elif answer == QMessageBox.No:
self.autosave.remove_autosave_file(finfo)
elif answer == QMessageBox.YesToAll:
if not self.save(index):
return False
yes_all = True
elif answer == QMessageBox.NoToAll:
self.autosave.remove_autosave_file(finfo)
no_all = True
elif answer == QMessageBox.Cancel:
return False
return True
def _write_to_file(self, fileinfo, filename):
"""Low-level function for writing text of editor to file.
Args:
fileinfo: FileInfo object associated to editor to be saved
filename: str with filename to save to
This is a low-level function that only saves the text to file in the
correct encoding without doing any error handling.
"""
txt = to_text_string(fileinfo.editor.get_text_with_eol())
fileinfo.encoding = encoding.write(txt, filename, fileinfo.encoding)
def save(self, index=None, force=False):
"""Write text of editor to a file.
Args:
index: self.data index to save. If None, defaults to
currentIndex().
force: Force save regardless of file state.
Returns:
True upon successful save or when file doesn't need to be saved.
False if save failed.
        If the text isn't modified and it's not newly created, then nothing
        is written and the save is skipped. If the file hasn't been saved
        before, then save_as() is invoked. Otherwise, the file is written
        using the file name currently in self.data. This function doesn't
        change the file name.
"""
if index is None:
# Save the currently edited file
if not self.get_stack_count():
return
index = self.get_stack_index()
finfo = self.data[index]
if not (finfo.editor.document().isModified() or
finfo.newly_created) and not force:
return True
if not osp.isfile(finfo.filename) and not force:
# File has not been saved yet
return self.save_as(index=index)
if self.always_remove_trailing_spaces:
self.remove_trailing_spaces(index)
if self.convert_eol_on_save:
# hack to account for the fact that the config file saves
# CR/LF/CRLF while set_os_eol_chars wants the os.name value.
osname_lookup = {'LF': 'posix', 'CRLF': 'nt', 'CR': 'mac'}
osname = osname_lookup[self.convert_eol_on_save_to]
self.set_os_eol_chars(osname=osname)
try:
self._write_to_file(finfo, finfo.filename)
self.autosave.remove_autosave_file(finfo)
finfo.newly_created = False
self.encoding_changed.emit(finfo.encoding)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms
# (see Issue 1094, Issue 1098)
# The filename is passed instead of an index in case the tabs
# have been rearranged (see issue 5703).
self.file_saved.emit(str(id(self)),
finfo.filename, finfo.filename)
finfo.editor.document().setModified(False)
self.modification_changed(index=index)
self.analyze_script(index)
#XXX CodeEditor-only: re-scan the whole text to rebuild outline
# explorer data from scratch (could be optimized because
# rehighlighting text means searching for all syntax coloring
# patterns instead of only searching for class/def patterns which
            # would be sufficient for outline explorer data).
finfo.editor.rehighlight()
# rehighlight() calls textChanged(), so the change_since_autosave
# flag should be cleared after rehighlight()
finfo.editor.document().changed_since_autosave = False
self._refresh_outlineexplorer(index)
finfo.editor.notify_save()
return True
except EnvironmentError as error:
self.msgbox = QMessageBox(
QMessageBox.Critical,
_("Save Error"),
_("<b>Unable to save file '%s'</b>"
"<br><br>Error message:<br>%s"
) % (osp.basename(finfo.filename),
str(error)),
parent=self)
self.msgbox.exec_()
return False
def file_saved_in_other_editorstack(self, original_filename, filename):
"""
File was just saved in another editorstack, let's synchronize!
        This avoids the file being automatically reloaded.
The original filename is passed instead of an index in case the tabs
on the editor stacks were moved and are now in a different order - see
issue 5703.
Filename is passed in case file was just saved as another name.
"""
index = self.has_filename(original_filename)
if index is None:
return
finfo = self.data[index]
finfo.newly_created = False
finfo.filename = to_text_string(filename)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
def select_savename(self, original_filename):
"""Select a name to save a file.
Args:
original_filename: Used in the dialog to display the current file
path and name.
Returns:
Normalized path for the selected file name or None if no name was
selected.
"""
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
        # Don't use filters on KDE so as not to make the dialog incredibly
        # slow
# Fixes issue 4156
if is_kde_desktop() and not is_anaconda():
filters = ''
selectedfilter = ''
else:
filters = self.edit_filters
selectedfilter = get_filter(self.edit_filetypes,
osp.splitext(original_filename)[1])
self.redirect_stdio.emit(False)
filename, _selfilter = getsavefilename(self, _("Save file"),
original_filename,
filters=filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails)
self.redirect_stdio.emit(True)
if filename:
return osp.normpath(filename)
return None
def save_as(self, index=None):
"""Save file as...
Args:
index: self.data index for the file to save.
Returns:
False if no file name was selected or if save() was unsuccessful.
            True if save() was successful.
Gets the new file name from select_savename(). If no name is chosen,
then the save_as() aborts. Otherwise, the current stack is checked
to see if the selected name already exists and, if so, then the tab
with that name is closed.
The current stack (self.data) and current tabs are updated with the
new name and other file info. The text is written with the new
name using save() and the name change is propagated to the other stacks
via the file_renamed_in_data signal.
"""
if index is None:
# Save the currently edited file
index = self.get_stack_index()
finfo = self.data[index]
# The next line is necessary to avoid checking if the file exists
        # while running __check_file_status
# See issues 3678 and 3026
finfo.newly_created = True
original_filename = finfo.filename
filename = self.select_savename(original_filename)
if filename:
ao_index = self.has_filename(filename)
# Note: ao_index == index --> saving an untitled file
if ao_index is not None and ao_index != index:
if not self.close_file(ao_index):
return
if ao_index < index:
index -= 1
new_index = self.rename_in_data(original_filename,
new_filename=filename)
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms
# (see Issue 1094, Issue 1098)
self.file_renamed_in_data.emit(str(id(self)),
original_filename, filename)
ok = self.save(index=new_index, force=True)
self.refresh(new_index)
self.set_stack_index(new_index)
return ok
else:
return False
def save_copy_as(self, index=None):
"""Save copy of file as...
Args:
index: self.data index for the file to save.
Returns:
False if no file name was selected or if save() was unsuccessful.
            True if save() was successful.
Gets the new file name from select_savename(). If no name is chosen,
then the save_copy_as() aborts. Otherwise, the current stack is
checked to see if the selected name already exists and, if so, then the
tab with that name is closed.
Unlike save_as(), this calls write() directly instead of using save().
The current file and tab aren't changed at all. The copied file is
opened in a new tab.
"""
if index is None:
# Save the currently edited file
index = self.get_stack_index()
finfo = self.data[index]
original_filename = finfo.filename
filename = self.select_savename(original_filename)
if filename:
ao_index = self.has_filename(filename)
# Note: ao_index == index --> saving an untitled file
if ao_index is not None and ao_index != index:
if not self.close_file(ao_index):
return
if ao_index < index:
index -= 1
try:
self._write_to_file(finfo, filename)
# open created copy file
self.plugin_load.emit(filename)
return True
except EnvironmentError as error:
self.msgbox = QMessageBox(
QMessageBox.Critical,
_("Save Error"),
_("<b>Unable to save file '%s'</b>"
"<br><br>Error message:<br>%s"
) % (osp.basename(finfo.filename),
str(error)),
parent=self)
self.msgbox.exec_()
else:
return False
def save_all(self):
"""Save all opened files.
Iterate through self.data and call save() on any modified files.
"""
for index in range(self.get_stack_count()):
if self.data[index].editor.document().isModified():
self.save(index)
#------ Update UI
def start_stop_analysis_timer(self):
self.is_analysis_done = False
if self.realtime_analysis_enabled:
self.analysis_timer.stop()
self.analysis_timer.start()
def analyze_script(self, index=None):
"""Analyze current script with todos"""
if self.is_analysis_done:
return
if index is None:
index = self.get_stack_index()
if self.data:
finfo = self.data[index]
if self.todolist_enabled:
finfo.run_todo_finder()
self.is_analysis_done = True
def set_todo_results(self, filename, todo_results):
"""Synchronize todo results between editorstacks"""
index = self.has_filename(filename)
if index is None:
return
self.data[index].set_todo_results(todo_results)
def get_todo_results(self):
if self.data:
return self.data[self.get_stack_index()].todo_results
def current_changed(self, index):
"""Stack index has changed"""
# count = self.get_stack_count()
# for btn in (self.filelist_btn, self.previous_btn, self.next_btn):
# btn.setEnabled(count > 1)
editor = self.get_current_editor()
if editor.lsp_ready and not editor.document_opened:
editor.document_did_open()
if index != -1:
editor.setFocus()
logger.debug("Set focus to: %s" % editor.filename)
else:
self.reset_statusbar.emit()
self.opened_files_list_changed.emit()
self.stack_history.refresh()
self.stack_history.remove_and_append(index)
# Needed to avoid an error generated after moving/renaming
# files outside Spyder while in debug mode.
# See issue 8749.
try:
logger.debug("Current changed: %d - %s" %
(index, self.data[index].editor.filename))
except IndexError:
pass
self.update_plugin_title.emit()
if editor is not None:
# Needed in order to handle the close of files open in a directory
# that has been renamed. See issue 5157
try:
self.current_file_changed.emit(self.data[index].filename,
editor.get_position('cursor'))
except IndexError:
pass
def _get_previous_file_index(self):
"""Return the penultimate element of the stack history."""
try:
return self.stack_history[-2]
except IndexError:
return None
def tab_navigation_mru(self, forward=True):
"""
Tab navigation with "most recently used" behaviour.
It's fired when pressing 'go to previous file' or 'go to next file'
shortcuts.
forward:
True: move to next file
False: move to previous file
"""
self.tabs_switcher = TabSwitcherWidget(self, self.stack_history,
self.tabs)
self.tabs_switcher.show()
self.tabs_switcher.select_row(1 if forward else -1)
self.tabs_switcher.setFocus()
def focus_changed(self):
"""Editor focus has changed"""
fwidget = QApplication.focusWidget()
for finfo in self.data:
if fwidget is finfo.editor:
self.refresh()
self.editor_focus_changed.emit()
def _refresh_outlineexplorer(self, index=None, update=True, clear=False):
"""Refresh outline explorer panel"""
oe = self.outlineexplorer
if oe is None:
return
if index is None:
index = self.get_stack_index()
if self.data:
finfo = self.data[index]
oe.setEnabled(True)
if finfo.editor.oe_proxy is None:
finfo.editor.oe_proxy = OutlineExplorerProxyEditor(
finfo.editor, finfo.filename)
oe.set_current_editor(finfo.editor.oe_proxy,
update=update, clear=clear)
if index != self.get_stack_index():
# The last file added to the outline explorer is not the
# currently focused one in the editor stack. Therefore,
# we need to force a refresh of the outline explorer to set
# the current editor to the currently focused one in the
# editor stack. See PR #8015.
self._refresh_outlineexplorer(update=False)
return
self._sync_outlineexplorer_file_order()
def _sync_outlineexplorer_file_order(self):
"""
Order the root file items of the outline explorer as in the tabbar
of the current EditorStack.
"""
if self.outlineexplorer is not None:
self.outlineexplorer.treewidget.set_editor_ids_order(
[finfo.editor.get_document_id() for finfo in self.data])
def __refresh_statusbar(self, index):
"""Refreshing statusbar widgets"""
finfo = self.data[index]
self.encoding_changed.emit(finfo.encoding)
# Refresh cursor position status:
line, index = finfo.editor.get_cursor_line_column()
self.sig_editor_cursor_position_changed.emit(line, index)
def __refresh_readonly(self, index):
finfo = self.data[index]
read_only = not QFileInfo(finfo.filename).isWritable()
if not osp.isfile(finfo.filename):
# This is an 'untitledX.py' file (newly created)
read_only = False
finfo.editor.setReadOnly(read_only)
self.readonly_changed.emit(read_only)
def __check_file_status(self, index):
"""Check if file has been changed in any way outside Spyder:
1. removed, moved or renamed outside Spyder
2. modified outside Spyder"""
if self.__file_status_flag:
# Avoid infinite loop: when the QMessageBox.question pops, it
# gets focus and then give it back to the CodeEditor instance,
# triggering a refresh cycle which calls this method
return
self.__file_status_flag = True
finfo = self.data[index]
name = osp.basename(finfo.filename)
if finfo.newly_created:
# File was just created (not yet saved): do nothing
# (do not return because of the clean-up at the end of the method)
pass
elif not osp.isfile(finfo.filename):
# File doesn't exist (removed, moved or offline):
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("<b>%s</b> is unavailable "
"(this file may have been removed, moved "
"or renamed outside Spyder)."
"<br>Do you want to close it?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.close_file(index)
else:
finfo.newly_created = True
finfo.editor.document().setModified(True)
self.modification_changed(index=index)
else:
# Else, testing if it has been modified elsewhere:
lastm = QFileInfo(finfo.filename).lastModified()
if to_text_string(lastm.toString()) \
!= to_text_string(finfo.lastmodified.toString()):
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(
QMessageBox.Question,
self.title,
_("<b>%s</b> has been modified outside Spyder."
"<br>Do you want to reload it and lose all "
"your changes?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.reload(index)
else:
finfo.lastmodified = lastm
else:
self.reload(index)
# Finally, resetting temporary flag:
self.__file_status_flag = False
def __modify_stack_title(self):
for index, finfo in enumerate(self.data):
state = finfo.editor.document().isModified()
self.set_stack_title(index, state)
def refresh(self, index=None):
"""Refresh tabwidget"""
if index is None:
index = self.get_stack_index()
# Set current editor
if self.get_stack_count():
index = self.get_stack_index()
finfo = self.data[index]
editor = finfo.editor
editor.setFocus()
self._refresh_outlineexplorer(index, update=False)
self.update_code_analysis_actions.emit()
self.__refresh_statusbar(index)
self.__refresh_readonly(index)
self.__check_file_status(index)
self.__modify_stack_title()
self.update_plugin_title.emit()
else:
editor = None
# Update the modification-state-dependent parameters
self.modification_changed()
# Update FindReplace binding
self.find_widget.set_editor(editor, refresh=False)
def modification_changed(self, state=None, index=None, editor_id=None):
"""
Current editor's modification state has changed
--> change tab title depending on new modification state
--> enable/disable save/save all actions
"""
if editor_id is not None:
for index, _finfo in enumerate(self.data):
if id(_finfo.editor) == editor_id:
break
# This must be done before refreshing save/save all actions:
# (otherwise Save/Save all actions will always be enabled)
self.opened_files_list_changed.emit()
# --
if index is None:
index = self.get_stack_index()
if index == -1:
return
finfo = self.data[index]
if state is None:
state = finfo.editor.document().isModified() or finfo.newly_created
self.set_stack_title(index, state)
# Toggle save/save all actions state
self.save_action.setEnabled(state)
self.refresh_save_all_action.emit()
# Refreshing eol mode
eol_chars = finfo.editor.get_line_separator()
self.refresh_eol_chars(eol_chars)
self.stack_history.refresh()
def refresh_eol_chars(self, eol_chars):
os_name = sourcecode.get_os_name_from_eol_chars(eol_chars)
self.sig_refresh_eol_chars.emit(os_name)
#------ Load, reload
def reload(self, index):
"""Reload file from disk"""
finfo = self.data[index]
txt, finfo.encoding = encoding.read(finfo.filename)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
position = finfo.editor.get_position('cursor')
finfo.editor.set_text(txt)
finfo.editor.document().setModified(False)
finfo.editor.set_cursor_position(position)
#XXX CodeEditor-only: re-scan the whole text to rebuild outline
# explorer data from scratch (could be optimized because
# rehighlighting text means searching for all syntax coloring
# patterns instead of only searching for class/def patterns which
        # would be sufficient for outline explorer data).
finfo.editor.rehighlight()
# rehighlight() calls textChanged(), so the change_since_autosave
# flag should be cleared after rehighlight()
finfo.editor.document().changed_since_autosave = False
self._refresh_outlineexplorer(index)
def revert(self):
"""Revert file from disk"""
index = self.get_stack_index()
finfo = self.data[index]
filename = finfo.filename
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("All changes to <b>%s</b> will be lost."
"<br>Do you want to revert file from disk?"
) % osp.basename(filename),
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer != QMessageBox.Yes:
return
self.reload(index)
def create_new_editor(self, fname, enc, txt, set_current, new=False,
cloned_from=None, add_where='end'):
"""
Create a new editor instance
Returns finfo object (instead of editor as in previous releases)
"""
editor = codeeditor.CodeEditor(self)
editor.go_to_definition.connect(
lambda fname, line, column: self.sig_go_to_definition.emit(
fname, line, column))
finfo = FileInfo(fname, enc, editor, new, self.threadmanager)
self.add_to_data(finfo, set_current, add_where)
finfo.send_to_help.connect(self.send_to_help)
finfo.todo_results_changed.connect(
lambda: self.todo_results_changed.emit())
finfo.edit_goto.connect(lambda fname, lineno, name:
self.edit_goto.emit(fname, lineno, name))
editor.sig_run_selection.connect(self.run_selection)
editor.sig_run_cell.connect(self.run_cell)
editor.sig_run_cell_and_advance.connect(self.run_cell_and_advance)
editor.sig_re_run_last_cell.connect(self.re_run_last_cell)
editor.sig_new_file.connect(self.sig_new_file.emit)
editor.sig_breakpoints_saved.connect(self.sig_breakpoints_saved)
language = get_file_language(fname, txt)
editor.setup_editor(
linenumbers=self.linenumbers_enabled,
show_blanks=self.blanks_enabled,
scroll_past_end=self.scrollpastend_enabled,
edge_line=self.edgeline_enabled,
edge_line_columns=self.edgeline_columns, language=language,
markers=self.has_markers(), font=self.default_font,
color_scheme=self.color_scheme,
wrap=self.wrap_enabled, tab_mode=self.tabmode_enabled,
intelligent_backspace=self.intelligent_backspace_enabled,
highlight_current_line=self.highlight_current_line_enabled,
highlight_current_cell=self.highlight_current_cell_enabled,
occurrence_highlighting=self.occurrence_highlighting_enabled,
occurrence_timeout=self.occurrence_highlighting_timeout,
codecompletion_auto=self.codecompletion_auto_enabled,
codecompletion_case=self.codecompletion_case_enabled,
codecompletion_enter=self.codecompletion_enter_enabled,
calltips=self.calltips_enabled,
go_to_definition=self.go_to_definition_enabled,
close_parentheses=self.close_parentheses_enabled,
close_quotes=self.close_quotes_enabled,
add_colons=self.add_colons_enabled,
auto_unindent=self.auto_unindent_enabled,
indent_chars=self.indent_chars,
tab_stop_width_spaces=self.tab_stop_width_spaces,
cloned_from=cloned_from,
filename=fname,
show_class_func_dropdown=self.show_class_func_dropdown,
indent_guides=self.indent_guides)
if cloned_from is None:
editor.set_text(txt)
editor.document().setModified(False)
editor.document().changed_since_autosave = False
finfo.text_changed_at.connect(
lambda fname, position:
self.text_changed_at.emit(fname, position))
editor.sig_cursor_position_changed.connect(
self.editor_cursor_position_changed)
editor.textChanged.connect(self.start_stop_analysis_timer)
editor.sig_perform_lsp_request.connect(
lambda lang, method, params: self.perform_lsp_request.emit(
lang, method, params))
editor.modificationChanged.connect(
lambda state: self.modification_changed(state,
editor_id=id(editor)))
editor.focus_in.connect(self.focus_changed)
editor.zoom_in.connect(lambda: self.zoom_in.emit())
editor.zoom_out.connect(lambda: self.zoom_out.emit())
editor.zoom_reset.connect(lambda: self.zoom_reset.emit())
        editor.sig_eol_chars_changed.connect(
            lambda eol_chars: self.refresh_eol_chars(eol_chars))
self.find_widget.set_editor(editor)
self.refresh_file_dependent_actions.emit()
self.modification_changed(index=self.data.index(finfo))
# Needs to reset the highlighting on startup in case the PygmentsSH
# is in use
editor.run_pygments_highlighter()
if cloned_from is None:
options = {
'language': editor.language,
'filename': editor.filename,
'codeeditor': editor
}
self.sig_open_file.emit(options)
if self.get_stack_index() == 0:
self.current_changed(0)
return finfo
def editor_cursor_position_changed(self, line, index):
"""Cursor position of one of the editor in the stack has changed"""
self.sig_editor_cursor_position_changed.emit(line, index)
def send_to_help(self, name, signature, force=False):
"""qstr1: obj_text, qstr2: argpspec, qstr3: note, qstr4: doc_text"""
if not force and not self.help_enabled:
return
if self.help is not None \
and (force or self.help.dockwidget.isVisible()):
signature = to_text_string(signature)
signature = unicodedata.normalize("NFKD", signature)
parts = signature.split('\n\n')
definition = parts[0]
documentation = '\n\n'.join(parts[1:])
args = ''
if '(' in definition:
args = definition[definition.find('('):]
doc = {'obj_text': '', 'name': name,
'argspec': args, 'note': '',
'docstring': documentation}
self.help.set_editor_doc(doc, force_refresh=force)
editor = self.get_current_editor()
editor.setFocus()
def new(self, filename, encoding, text, default_content=False,
empty=False):
"""
Create new filename with *encoding* and *text*
"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
if not empty:
finfo.editor.insert_text(os.linesep)
if default_content:
finfo.default = True
finfo.editor.document().setModified(False)
return finfo
def load(self, filename, set_current=True, add_where='end'):
"""
Load filename, create an editor instance and return it
        *Warning* This loads the file and creates the editor, but does not
        execute the source code analysis -- the analysis must be done by the
        editor plugin (in case multiple editorstack instances are handled).
"""
filename = osp.abspath(to_text_string(filename))
self.starting_long_process.emit(_("Loading %s...") % filename)
text, enc = encoding.read(filename)
finfo = self.create_new_editor(filename, enc, text, set_current,
add_where=add_where)
index = self.data.index(finfo)
self._refresh_outlineexplorer(index, update=True)
self.ending_long_process.emit("")
if self.isVisible() and self.checkeolchars_enabled \
and sourcecode.has_mixed_eol_chars(text):
name = osp.basename(filename)
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("<b>%s</b> contains mixed end-of-line "
"characters.<br>Spyder will fix this "
"automatically.") % name,
QMessageBox.Ok,
self)
self.msgbox.exec_()
self.set_os_eol_chars(index)
self.is_analysis_done = False
return finfo
def set_os_eol_chars(self, index=None, osname=None):
"""Sets the EOL character(s) based on the operating system.
If `osname` is None, then the default line endings for the current
operating system (`os.name` value) will be used.
`osname` can be one of:
('posix', 'nt', 'java')
"""
if osname is None:
osname = os.name
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
eol_chars = sourcecode.get_eol_chars_from_os_name(osname)
finfo.editor.set_eol_chars(eol_chars)
finfo.editor.document().setModified(True)
def remove_trailing_spaces(self, index=None):
"""Remove trailing spaces"""
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.remove_trailing_spaces()
def fix_indentation(self, index=None):
"""Replace tab characters by spaces"""
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.fix_indentation()
#------ Run
def run_selection(self):
"""
Run selected text or current line in console.
If some text is selected, then execute that text in console.
If no text is selected, then execute current line, unless current line
is empty. Then, advance cursor to next line. If cursor is on last line
and that line is not empty, then add a new blank line and move the
cursor there. If cursor is on last line and that line is empty, then do
not move cursor.
"""
text = self.get_current_editor().get_selection_as_executable_code()
if text:
self.exec_in_extconsole.emit(text.rstrip(), self.focus_to_editor)
return
editor = self.get_current_editor()
line = editor.get_current_line()
text = line.lstrip()
if text:
self.exec_in_extconsole.emit(text, self.focus_to_editor)
if editor.is_cursor_on_last_line() and text:
editor.append(editor.get_line_separator())
editor.move_cursor_to_next('line', 'down')
def run_cell(self):
"""Run current cell."""
text, line = self.get_current_editor().get_cell_as_executable_code()
self._run_cell_text(text, line)
def run_cell_and_advance(self):
"""Run current cell and advance to the next one"""
self.run_cell()
self.advance_cell()
def advance_cell(self, reverse=False):
"""Advance to the next cell.
reverse = True --> go to previous cell.
"""
if not reverse:
move_func = self.get_current_editor().go_to_next_cell
else:
move_func = self.get_current_editor().go_to_previous_cell
if self.focus_to_editor:
move_func()
else:
term = QApplication.focusWidget()
move_func()
term.setFocus()
def re_run_last_cell(self):
"""Run the previous cell again."""
text, line = (self.get_current_editor()
.get_last_cell_as_executable_code())
self._run_cell_text(text, line)
def _run_cell_text(self, text, line):
"""Run cell code in the console.
Cell code is run in the console by copying it to the console if
`self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
function.
Parameters
----------
text : str
The code in the cell as a string.
line : int
The starting line number of the cell in the file.
"""
finfo = self.get_current_finfo()
editor = self.get_current_editor()
oe_data = editor.highlighter.get_outlineexplorer_data()
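        # Look up a name for the cell from the outline data of its first
        # line; if nothing is found there, fall back to an unnamed cell.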
try:
cell_name = oe_data.get(line-1).def_name
except AttributeError:
cell_name = ''
if finfo.editor.is_python() and text:
self.run_cell_in_ipyclient.emit(text, cell_name,
finfo.filename,
self.run_cell_copy)
editor.setFocus()
#------ Drag and drop
def dragEnterEvent(self, event):
"""Reimplement Qt method
Inform Qt about the types of data that the widget accepts"""
source = event.mimeData()
# The second check is necessary on Windows, where source.hasUrls()
# can return True but source.urls() is []
# The third check is needed since a file could be dropped from
# compressed files. In Windows mimedata2url(source) returns None
# Fixes issue 5218
if source.hasUrls() and source.urls() and mimedata2url(source):
all_urls = mimedata2url(source)
text = [encoding.is_text_file(url) for url in all_urls]
if any(text):
event.acceptProposedAction()
else:
event.ignore()
elif source.hasText():
event.acceptProposedAction()
elif os.name == 'nt':
# This covers cases like dragging from compressed files,
# which can be opened by the Editor if they are plain
# text, but doesn't come with url info.
# Fixes Issue 2032
event.acceptProposedAction()
else:
event.ignore()
def dropEvent(self, event):
"""Reimplement Qt method
Unpack dropped data and handle it"""
source = event.mimeData()
# The second check is necessary when mimedata2url(source)
# returns None.
# Fixes issue 7742
if source.hasUrls() and mimedata2url(source):
files = mimedata2url(source)
files = [f for f in files if encoding.is_text_file(f)]
files = set(files or [])
for fname in files:
self.plugin_load.emit(fname)
elif source.hasText():
editor = self.get_current_editor()
if editor is not None:
editor.insert_text(source.text())
else:
event.ignore()
event.acceptProposedAction()
class EditorSplitter(QSplitter):
"""QSplitter for editor windows."""
def __init__(self, parent, plugin, menu_actions, first=False,
register_editorstack_cb=None, unregister_editorstack_cb=None):
"""Create a splitter for dividing an editor window into panels.
Adds a new EditorStack instance to this splitter. If it's not
the first splitter, clones the current EditorStack from the plugin.
Args:
parent: Parent widget.
plugin: Plugin this widget belongs to.
menu_actions: QActions to include from the parent.
first: Boolean if this is the first splitter in the editor.
register_editorstack_cb: Callback to register the EditorStack.
Defaults to plugin.register_editorstack() to
register the EditorStack with the Editor plugin.
unregister_editorstack_cb: Callback to unregister the EditorStack.
Defaults to plugin.unregister_editorstack() to
unregister the EditorStack with the Editor plugin.
"""
QSplitter.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setChildrenCollapsible(False)
self.toolbar_list = None
self.menu_list = None
self.plugin = plugin
if register_editorstack_cb is None:
register_editorstack_cb = self.plugin.register_editorstack
self.register_editorstack_cb = register_editorstack_cb
if unregister_editorstack_cb is None:
unregister_editorstack_cb = self.plugin.unregister_editorstack
self.unregister_editorstack_cb = unregister_editorstack_cb
self.menu_actions = menu_actions
self.editorstack = EditorStack(self, menu_actions)
self.register_editorstack_cb(self.editorstack)
if not first:
self.plugin.clone_editorstack(editorstack=self.editorstack)
self.editorstack.destroyed.connect(lambda: self.editorstack_closed())
self.editorstack.sig_split_vertically.connect(
lambda: self.split(orientation=Qt.Vertical))
self.editorstack.sig_split_horizontally.connect(
lambda: self.split(orientation=Qt.Horizontal))
self.addWidget(self.editorstack)
def closeEvent(self, event):
"""Override QWidget closeEvent().
This event handler is called with the given event when Qt
receives a window close request from a top-level widget.
"""
QSplitter.closeEvent(self, event)
def __give_focus_to_remaining_editor(self):
focus_widget = self.plugin.get_focus_widget()
if focus_widget is not None:
focus_widget.setFocus()
def editorstack_closed(self):
logger.debug("method 'editorstack_closed':")
logger.debug(" self : %r" % self)
try:
self.unregister_editorstack_cb(self.editorstack)
self.editorstack = None
close_splitter = self.count() == 1
except RuntimeError:
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
# editorstack just closed was the last widget in this QSplitter
self.close()
return
self.__give_focus_to_remaining_editor()
def editorsplitter_closed(self):
logger.debug("method 'editorsplitter_closed':")
logger.debug(" self : %r" % self)
try:
close_splitter = self.count() == 1 and self.editorstack is None
except RuntimeError:
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
# editorsplitter just closed was the last widget in this QSplitter
self.close()
return
elif self.count() == 2 and self.editorstack:
# back to the initial state: a single editorstack instance,
# as a single widget in this QSplitter: orientation may be changed
self.editorstack.reset_orientation()
self.__give_focus_to_remaining_editor()
def split(self, orientation=Qt.Vertical):
"""Create and attach a new EditorSplitter to the current EditorSplitter.
The new EditorSplitter widget will contain an EditorStack that
is a clone of the current EditorStack.
A single EditorSplitter instance can be split multiple times, but the
orientation will be the same for all the direct splits. If one of
the child splits is split, then that split can have a different
orientation.
"""
self.setOrientation(orientation)
self.editorstack.set_orientation(orientation)
editorsplitter = EditorSplitter(self.parent(), self.plugin,
self.menu_actions,
register_editorstack_cb=self.register_editorstack_cb,
unregister_editorstack_cb=self.unregister_editorstack_cb)
self.addWidget(editorsplitter)
editorsplitter.destroyed.connect(lambda: self.editorsplitter_closed())
current_editor = editorsplitter.editorstack.get_current_editor()
if current_editor is not None:
current_editor.setFocus()
def iter_editorstacks(self):
"""Return the editor stacks for this splitter and every first child.
Note: If a splitter contains more than one splitter as a direct
child, only the first child's editor stack is included.
Returns:
List of tuples containing (EditorStack instance, orientation).
"""
editorstacks = [(self.widget(0), self.orientation())]
if self.count() > 1:
editorsplitter = self.widget(1)
editorstacks += editorsplitter.iter_editorstacks()
return editorstacks
def get_layout_settings(self):
"""Return the layout state for this splitter and its children.
Record the current state, including file names and current line
numbers, of the splitter panels.
Returns:
A dictionary containing keys {hexstate, sizes, splitsettings}.
hexstate: String of saveState() for self.
sizes: List for size() for self.
splitsettings: List of tuples of the form
(orientation, cfname, clines) for each EditorSplitter
and its EditorStack.
orientation: orientation() for the editor
splitter (which may be a child of self).
cfname: EditorStack current file name.
clines: Current line number for each file in the
EditorStack.
"""
splitsettings = []
for editorstack, orientation in self.iter_editorstacks():
clines = []
cfname = ''
# XXX - this overrides value from the loop to always be False?
orientation = False
if hasattr(editorstack, 'data'):
clines = [finfo.editor.get_cursor_line_number()
for finfo in editorstack.data]
cfname = editorstack.get_current_filename()
splitsettings.append((orientation == Qt.Vertical, cfname, clines))
return dict(hexstate=qbytearray_to_str(self.saveState()),
sizes=self.sizes(), splitsettings=splitsettings)
def set_layout_settings(self, settings, dont_goto=None):
"""Restore layout state for the splitter panels.
Apply the settings to restore a saved layout within the editor. If
the splitsettings key doesn't exist, then return without restoring
any settings.
The current EditorSplitter (self) calls split() for each element
in split_settings, thus recreating the splitter panels from the saved
state. split() also clones the editorstack, which is then
iterated over to restore the saved line numbers on each file.
The size and positioning of each splitter panel is restored from
hexstate.
Args:
            settings: A dictionary with keys {hexstate, sizes, splitsettings}
that define the layout for the EditorSplitter panels.
dont_goto: Defaults to None, which positions the cursor to the
end of the editor. If there's a value, positions the
cursor on the saved line number for each editor.
"""
splitsettings = settings.get('splitsettings')
if splitsettings is None:
return
splitter = self
editor = None
for index, (is_vertical, cfname, clines) in enumerate(splitsettings):
if index > 0:
splitter.split(Qt.Vertical if is_vertical else Qt.Horizontal)
splitter = splitter.widget(1)
editorstack = splitter.widget(0)
for index, finfo in enumerate(editorstack.data):
editor = finfo.editor
# TODO: go_to_line is not working properly (the line it jumps
# to is not the corresponding to that file). This will be fixed
# in a future PR (which will fix issue #3857)
if dont_goto is not None:
                    # skip go to line for the first file because it is
                    # already there
pass
else:
try:
editor.go_to_line(clines[index])
except IndexError:
pass
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
sizes = settings.get('sizes')
if sizes is not None:
self.setSizes(sizes)
if editor is not None:
editor.clearFocus()
editor.setFocus()
class EditorWidget(QSplitter):
def __init__(self, parent, plugin, menu_actions, show_fullpath,
show_all_files, group_cells, show_comments,
sort_files_alphabetically):
QSplitter.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
statusbar = parent.statusBar() # Create a status bar
self.readwrite_status = ReadWriteStatus(self, statusbar)
self.eol_status = EOLStatus(self, statusbar)
self.encoding_status = EncodingStatus(self, statusbar)
self.cursorpos_status = CursorPositionStatus(self, statusbar)
self.editorstacks = []
self.plugin = plugin
self.find_widget = FindReplace(self, enable_replace=True)
self.plugin.register_widget_shortcuts(self.find_widget)
self.find_widget.hide()
self.outlineexplorer = OutlineExplorerWidget(
self,
show_fullpath=show_fullpath,
show_all_files=show_all_files,
group_cells=group_cells,
show_comments=show_comments,
sort_files_alphabetically=sort_files_alphabetically)
self.outlineexplorer.edit_goto.connect(
lambda filenames, goto, word:
plugin.load(filenames=filenames, goto=goto, word=word,
editorwindow=self.parent()))
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
editorsplitter = EditorSplitter(self, plugin, menu_actions,
register_editorstack_cb=self.register_editorstack,
unregister_editorstack_cb=self.unregister_editorstack)
self.editorsplitter = editorsplitter
editor_layout.addWidget(editorsplitter)
editor_layout.addWidget(self.find_widget)
splitter = QSplitter(self)
splitter.setContentsMargins(0, 0, 0, 0)
splitter.addWidget(editor_widgets)
splitter.addWidget(self.outlineexplorer)
splitter.setStretchFactor(0, 5)
splitter.setStretchFactor(1, 1)
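        # Give the editor area most of the horizontal space relative to the
        # outline explorer (stretch factors 5 vs. 1 above).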
# Refreshing outline explorer
editorsplitter.editorstack.initialize_outlineexplorer()
def register_editorstack(self, editorstack):
self.editorstacks.append(editorstack)
logger.debug("EditorWidget.register_editorstack: %r" % editorstack)
self.__print_editorstacks()
self.plugin.last_focus_editorstack[self.parent()] = editorstack
editorstack.set_closable( len(self.editorstacks) > 1 )
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
editorstack.reset_statusbar.connect(self.readwrite_status.hide)
editorstack.reset_statusbar.connect(self.encoding_status.hide)
editorstack.reset_statusbar.connect(self.cursorpos_status.hide)
editorstack.readonly_changed.connect(
self.readwrite_status.readonly_changed)
editorstack.encoding_changed.connect(
self.encoding_status.encoding_changed)
editorstack.sig_editor_cursor_position_changed.connect(
self.cursorpos_status.cursor_position_changed)
editorstack.sig_refresh_eol_chars.connect(self.eol_status.eol_changed)
self.plugin.register_editorstack(editorstack)
oe_btn = create_toolbutton(self)
oe_btn.setDefaultAction(self.outlineexplorer.visibility_action)
editorstack.add_corner_widgets_to_tabbar([5, oe_btn])
def __print_editorstacks(self):
logger.debug("%d editorstack(s) in editorwidget:" %
len(self.editorstacks))
for edst in self.editorstacks:
logger.debug(" %r" % edst)
def unregister_editorstack(self, editorstack):
logger.debug("EditorWidget.unregister_editorstack: %r" % editorstack)
self.plugin.unregister_editorstack(editorstack)
self.editorstacks.pop(self.editorstacks.index(editorstack))
self.__print_editorstacks()
class EditorMainWindow(QMainWindow):
def __init__(self, plugin, menu_actions, toolbar_list, menu_list,
show_fullpath, show_all_files, group_cells, show_comments,
sort_files_alphabetically):
QMainWindow.__init__(self)
self.setAttribute(Qt.WA_DeleteOnClose)
self.plugin = plugin
self.window_size = None
self.editorwidget = EditorWidget(self, plugin, menu_actions,
show_fullpath, show_all_files,
group_cells, show_comments,
sort_files_alphabetically)
self.setCentralWidget(self.editorwidget)
# Setting interface theme
if is_dark_interface():
self.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())
# Give focus to current editor to update/show all status bar widgets
editorstack = self.editorwidget.editorsplitter.editorstack
editor = editorstack.get_current_editor()
if editor is not None:
editor.setFocus()
self.setWindowTitle("Spyder - %s" % plugin.windowTitle())
self.setWindowIcon(plugin.windowIcon())
if toolbar_list:
self.toolbars = []
for title, object_name, actions in toolbar_list:
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
add_actions(toolbar, actions)
self.toolbars.append(toolbar)
if menu_list:
quit_action = create_action(self, _("Close window"),
icon="close_panel.png",
tip=_("Close this window"),
triggered=self.close)
self.menus = []
for index, (title, actions) in enumerate(menu_list):
menu = self.menuBar().addMenu(title)
if index == 0:
# File menu
add_actions(menu, actions+[None, quit_action])
else:
add_actions(menu, actions)
self.menus.append(menu)
def get_toolbars(self):
"""Get the toolbars."""
return self.toolbars
def add_toolbars_to_menu(self, menu_title, actions):
"""Add toolbars to a menu."""
# Six is the position of the view menu in menus list
# that you can find in plugins/editor.py setup_other_windows.
view_menu = self.menus[6]
if actions == self.toolbars and view_menu:
toolbars = []
for toolbar in self.toolbars:
action = toolbar.toggleViewAction()
toolbars.append(action)
add_actions(view_menu, toolbars)
def load_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbars:
dic[toolbar.objectName()] = toolbar
toolbar.toggleViewAction().setChecked(False)
toolbar.setVisible(False)
for name in toolbars_names:
if name in dic:
dic[name].toggleViewAction().setChecked(True)
dic[name].setVisible(True)
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.isFullScreen():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
def closeEvent(self, event):
"""Reimplement Qt method"""
if self.plugin.undocked_window is not None:
self.plugin.dockwidget.setWidget(self.plugin)
self.plugin.dockwidget.setVisible(True)
self.plugin.switch_to_plugin()
QMainWindow.closeEvent(self, event)
if self.plugin.undocked_window is not None:
self.plugin.undocked_window = None
def get_layout_settings(self):
"""Return layout state"""
splitsettings = self.editorwidget.editorsplitter.get_layout_settings()
return dict(size=(self.window_size.width(), self.window_size.height()),
pos=(self.pos().x(), self.pos().y()),
is_maximized=self.isMaximized(),
is_fullscreen=self.isFullScreen(),
hexstate=qbytearray_to_str(self.saveState()),
splitsettings=splitsettings)
def set_layout_settings(self, settings):
"""Restore layout state"""
size = settings.get('size')
if size is not None:
self.resize( QSize(*size) )
self.window_size = self.size()
pos = settings.get('pos')
if pos is not None:
self.move( QPoint(*pos) )
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
if settings.get('is_maximized'):
self.setWindowState(Qt.WindowMaximized)
if settings.get('is_fullscreen'):
self.setWindowState(Qt.WindowFullScreen)
splitsettings = settings.get('splitsettings')
if splitsettings is not None:
self.editorwidget.editorsplitter.set_layout_settings(splitsettings)
class EditorPluginExample(QSplitter):
def __init__(self):
QSplitter.__init__(self)
self.dock_action = None
self.undock_action = None
self.close_plugin_action = None
self.undocked_window = None
menu_actions = []
self.editorstacks = []
self.editorwindows = []
self.last_focus_editorstack = {} # fake
self.find_widget = FindReplace(self, enable_replace=True)
self.outlineexplorer = OutlineExplorerWidget(self, show_fullpath=False,
show_all_files=False)
self.outlineexplorer.edit_goto.connect(self.go_to_file)
self.editor_splitter = EditorSplitter(self, self, menu_actions,
first=True)
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
editor_layout.addWidget(self.editor_splitter)
editor_layout.addWidget(self.find_widget)
self.setContentsMargins(0, 0, 0, 0)
self.addWidget(editor_widgets)
self.addWidget(self.outlineexplorer)
self.setStretchFactor(0, 5)
self.setStretchFactor(1, 1)
self.menu_actions = menu_actions
self.toolbar_list = None
self.menu_list = None
self.setup_window([], [])
def go_to_file(self, fname, lineno, text='', start_column=None):
editorstack = self.editorstacks[0]
editorstack.set_current_filename(to_text_string(fname))
editor = editorstack.get_current_editor()
editor.go_to_line(lineno, word=text, start_column=start_column)
def closeEvent(self, event):
for win in self.editorwindows[:]:
win.close()
logger.debug("%d: %r" % (len(self.editorwindows), self.editorwindows))
logger.debug("%d: %r" % (len(self.editorstacks), self.editorstacks))
event.accept()
def load(self, fname):
QApplication.processEvents()
editorstack = self.editorstacks[0]
editorstack.load(fname)
editorstack.analyze_script()
def register_editorstack(self, editorstack):
logger.debug("FakePlugin.register_editorstack: %r" % editorstack)
self.editorstacks.append(editorstack)
if self.isAncestorOf(editorstack):
# editorstack is a child of the Editor plugin
editorstack.set_closable(len(self.editorstacks) > 1)
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
oe_btn = create_toolbutton(self)
oe_btn.setDefaultAction(self.outlineexplorer.visibility_action)
editorstack.add_corner_widgets_to_tabbar([5, oe_btn])
action = QAction(self)
editorstack.set_io_actions(action, action, action, action)
font = QFont("Courier New")
font.setPointSize(10)
editorstack.set_default_font(font, color_scheme='Spyder')
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.plugin_load.connect(self.load)
def unregister_editorstack(self, editorstack):
logger.debug("FakePlugin.unregister_editorstack: %r" % editorstack)
self.editorstacks.pop(self.editorstacks.index(editorstack))
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
def setup_window(self, toolbar_list, menu_list):
self.toolbar_list = toolbar_list
self.menu_list = menu_list
def create_new_window(self):
window = EditorMainWindow(self, self.menu_actions,
self.toolbar_list, self.menu_list,
show_fullpath=False, show_all_files=False,
group_cells=True, show_comments=True,
sort_files_alphabetically=False)
window.resize(self.size())
window.show()
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
def register_editorwindow(self, window):
logger.debug("register_editorwindowQObject*: %r" % window)
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
logger.debug("unregister_editorwindow: %r" % window)
self.editorwindows.pop(self.editorwindows.index(window))
def get_focus_widget(self):
pass
@Slot(str, str)
def close_file_in_all_editorstacks(self, editorstack_id_str, filename):
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.blockSignals(True)
index = editorstack.get_index_from_filename(filename)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
# This method is never called in this plugin example. It's here only
# to show how to use the file_saved signal (see above).
@Slot(str, str, str)
def file_saved_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(original_filename,
filename)
# This method is never called in this plugin example. It's here only
    # to show how to use the file_renamed_in_data signal (see above).
@Slot(str, str, str)
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(original_filename, filename)
def register_widget_shortcuts(self, widget):
"""Fake!"""
pass
def test():
from spyder.utils.qthelpers import qapplication
from spyder.config.base import get_module_path
spyder_dir = get_module_path('spyder')
app = qapplication(test_time=8)
test = EditorPluginExample()
test.resize(900, 700)
test.show()
import time
t0 = time.time()
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"editor.py"))
test.load(osp.join(spyder_dir, "plugins", "explorer", "widgets.py"))
test.load(osp.join(spyder_dir, "plugins", "variableexplorer", "widgets",
"collectionseditor.py"))
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"codeeditor.py"))
print("Elapsed time: %.3f s" % (time.time()-t0)) # spyder: test-skip
sys.exit(app.exec_())
if __name__ == "__main__":
test()
the-stack_106_14956
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Bot to find all pages on the wiki with mixed latin and cyrilic alphabets."""
#
# (C) Pywikibot team, 2006-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, print_function, unicode_literals
__version__ = '$Id: 04c0ac1f355094128848d4f2fb6072c22911f690 $'
import codecs
import itertools
import os
import re
from string import ascii_letters
import sys
import pywikibot
from pywikibot import i18n
from pywikibot.data import api
from pywikibot.tools import first_lower, first_upper, formatter
from scripts.category import CategoryMoveRobot as CategoryMoveBot
if sys.version_info[0] > 2:
xrange = range
class CaseChecker(object):
"""Case checker."""
# These words are always in one language, even though they could be typed
# in both
alwaysInLocal = [u'СССР', u'Как', u'как']
alwaysInLatin = [u'II', u'III']
localUpperLtr = u'ЁІЇЎАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯҐ'
localLowerLtr = u'ёіїўабвгдежзийклмнопрстуфхцчшщъыьэюяґ'
localLtr = localUpperLtr + localLowerLtr
localSuspects = u'АВЕКМНОРСТХІЁЇаеорсухіёї'
latinSuspects = u'ABEKMHOPCTXIËÏaeopcyxiëï'
    # possibly try to fix one-character mistypes in an alternative keyboard
    # layout
localKeyboard = u'йцукенгшщзфывапролдячсмить'
latinKeyboard = u'qwertyuiopasdfghjklzxcvbnm'
romanNumChars = u'IVXLCDM'
# all letters that may be used as suffixes after roman numbers: "Iый"
romannumSuffixes = localLowerLtr
romanNumSfxPtrn = re.compile(
u'^[' + romanNumChars + ']+[' + localLowerLtr + ']+$')
whitelists = {
'ru': u'ВП:КЛ/Проверенные',
}
lclClrFnt = u'<font color=green>'
latClrFnt = u'<font color=brown>'
suffixClr = u'</font>'
colorFormatLocalColor = '{green}'
colorFormatLatinColor = '{red}'
colorFormatSuffix = '{default}'
wordBreaker = re.compile(r'[ _\-/\|#[\]():]')
stripChars = u' \t,'
titles = True
links = False
aplimit = None
apfrom = u''
title = None
replace = False
stopAfter = -1
wikilog = None
wikilogfile = 'wikilog.txt'
failedTitles = 'failedTitles.txt'
nosuggestions = 'nosuggestions.txt'
doFailed = False
titleList = None
autonomous = False
namespaces = []
filterredir = 'nonredirects'
def __init__(self):
"""Constructor with arg parsing."""
for arg in pywikibot.handle_args():
if arg.startswith('-from'):
if arg.startswith('-from:'):
self.apfrom = arg[6:]
else:
self.apfrom = pywikibot.input(u'Which page to start from: ')
elif arg.startswith('-reqsize:'):
self.aplimit = int(arg[9:])
elif arg == '-links':
self.links = True
elif arg == '-linksonly':
self.links = True
self.titles = False
elif arg == '-replace':
self.replace = True
elif arg == '-redir':
self.filterredir = 'all'
elif arg == '-redironly':
self.filterredir = 'redirects'
elif arg.startswith('-limit:'):
self.stopAfter = int(arg[7:])
elif arg == '-autonomous' or arg == '-a':
self.autonomous = True
elif arg.startswith('-ns:'):
self.namespaces.append(int(arg[4:]))
elif arg.startswith('-wikilog:'):
self.wikilogfile = arg[9:]
elif arg.startswith('-failedlog:'):
self.failedTitles = arg[11:]
elif arg == '-failed':
self.doFailed = True
else:
pywikibot.output(u'Unknown argument %s.' % arg)
pywikibot.showHelp()
sys.exit()
if self.namespaces == [] and not self.doFailed:
if self.apfrom == u'':
# 0 should be after templates ns
self.namespaces = [14, 10, 12, 0]
else:
self.namespaces = [0]
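        # Keep batches small when link data is also requested; otherwise let
        # the API return as many pages per request as it allows ('max').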
if self.aplimit is None:
self.aplimit = 200 if self.links else 'max'
if not self.doFailed:
self.queryParams = {'action': 'query',
'generator': 'allpages',
'gaplimit': self.aplimit,
'gapfilterredir': self.filterredir}
else:
self.queryParams = {'action': 'query'}
if self.apfrom != u'':
pywikibot.output(u'Argument "-from" is ignored with "-failed"')
propParam = 'info'
if self.links:
propParam += '|links|categories'
self.queryParams['pllimit'] = 'max'
self.queryParams['cllimit'] = 'max'
self.queryParams['prop'] = propParam
self.site = pywikibot.Site()
if len(self.localSuspects) != len(self.latinSuspects):
raise ValueError(u'Suspects must be the same size')
if len(self.localKeyboard) != len(self.latinKeyboard):
raise ValueError(u'Keyboard info must be the same size')
if not os.path.isabs(self.wikilogfile):
self.wikilogfile = pywikibot.config.datafilepath(self.wikilogfile)
self.wikilog = self.OpenLogFile(self.wikilogfile)
if not os.path.isabs(self.failedTitles):
self.failedTitles = pywikibot.config.datafilepath(self.failedTitles)
if self.doFailed:
with codecs.open(self.failedTitles, 'r', 'utf-8') as f:
self.titleList = [self.Page(t) for t in f]
self.failedTitles += '.failed'
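        # Build translation tables (in the mapping form accepted by
        # str.translate) between look-alike local (Cyrillic) and latin
        # characters, in both directions.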
self.lclToLatDict = dict([(ord(self.localSuspects[i]),
self.latinSuspects[i])
for i in xrange(len(self.localSuspects))])
self.latToLclDict = dict([(ord(self.latinSuspects[i]),
self.localSuspects[i])
for i in xrange(len(self.localSuspects))])
if self.localKeyboard is not None:
self.lclToLatKeybDict = dict(
[(ord(self.localKeyboard[i]),
self.latinKeyboard[i])
for i in xrange(len(self.localKeyboard))])
self.latToLclKeybDict = dict(
[(ord(self.latinKeyboard[i]),
self.localKeyboard[i])
for i in xrange(len(self.localKeyboard))])
else:
self.lclToLatKeybDict = {}
self.latToLclKeybDict = {}
badPtrnStr = u'([%s][%s]|[%s][%s])' \
% (ascii_letters, self.localLtr,
self.localLtr, ascii_letters)
self.badWordPtrn = re.compile(u'[%s%s]*%s[%s%s]*'
% (ascii_letters, self.localLtr,
badPtrnStr, ascii_letters,
self.localLtr))
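        # badWordPtrn matches any "word" that mixes the two alphabets, i.e.
        # one that contains a latin letter directly next to a local letter,
        # e.g. u'Сatalog' written with a leading Cyrillic 'С'.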
# Get whitelist
self.knownWords = set()
self.seenUnresolvedLinks = set()
# TODO: handle "continue"
if self.site.code in self.whitelists:
wlpage = self.whitelists[self.site.code]
pywikibot.output(u'Loading whitelist from %s' % wlpage)
wlparams = {
'action': 'query',
'prop': 'links',
'titles': wlpage,
'redirects': '',
'indexpageids': '',
'pllimit': 'max',
}
req = api.Request(site=self.site, parameters=wlparams)
data = req.submit()
if len(data['query']['pageids']) == 1:
pageid = data['query']['pageids'][0]
links = data['query']['pages'][pageid]['links']
allWords = [nn for n in links
for nn in self.FindBadWords(n['title'])]
self.knownWords = set(allWords)
else:
raise ValueError(u'The number of pageids is not 1')
pywikibot.output(u'Loaded whitelist with %i items'
% len(self.knownWords))
if len(self.knownWords) > 0:
pywikibot.log(u'Whitelist: %s'
% u', '.join([self.MakeLink(i, False)
for i in self.knownWords]))
else:
pywikibot.output(u'Whitelist is not known for language %s'
% self.site.code)
def RunQuery(self, params):
"""API query."""
while True:
# Get data
req = api.Request(**params)
data = req.submit()
# Process received data
yield data
# Clear any continuations first
if 'clcontinue' in params:
del params['clcontinue']
if 'plcontinue' in params:
del params['plcontinue']
if 'query-continue' not in data:
if 'gapcontinue' in params:
del params['gapcontinue']
break
qc = data['query-continue']
# First continue properties only, once done, continue with allpages
if 'categories' in qc or 'links' in qc:
if 'categories' in qc:
params.update(qc['categories'])
if 'links' in qc:
params.update(qc['links'])
elif 'allpages' in qc:
params.update(qc['allpages'])
else:
raise ValueError(u'Unexpected query-continue values: %s' % qc)
continue
def Run(self):
"""Run the bot."""
try:
self.lastLetter = ''
if not self.doFailed:
for namespace in self.namespaces:
self.currentTitle = None
self.queryParams['gapnamespace'] = namespace
self.queryParams['gapfrom'] = self.apfrom
for data in self.RunQuery(self.queryParams):
self.ProcessDataBlock(data)
else:
self.currentTitle = None
batchSize = 10
for batchStart in range(0, len(self.titleList), batchSize):
self.queryParams['titles'] = self.titleList[
batchStart:batchStart + batchSize]
for data in self.RunQuery(self.queryParams):
self.ProcessDataBlock(data)
except:
pywikibot.output(u'Exception at Title = %s, Next = %s'
% (self.currentTitle, self.apfrom))
try:
import traceback
pywikibot.output(traceback.format_exc())
except:
pywikibot.output(u'Unable to print exception info')
raise
def ProcessDataBlock(self, data):
"""Process data block given by RunQuery()."""
if 'query' not in data or 'pages' not in data['query']:
return
firstItem = True
for pageID, page in data['query']['pages'].items():
printed = False
title = page['title']
self.currentTitle = title
if 'missing' in page:
continue
if firstItem:
if self.lastLetter != title[0]:
pywikibot.ui.output('Processing %s\n' % title)
self.lastLetter = title[0]
firstItem = False
if self.titles:
err = self.ProcessTitle(title)
if err:
changed = False
if self.replace:
if len(err[1]) == 1:
newTitle = err[1][0]
editSummary = i18n.twtranslate(
self.site, "casechecker-rename")
dst = self.Page(newTitle)
if 'redirect' in page:
src = self.Page(title)
redir = src.getRedirectTarget()
redirTitle = redir.title(asLink=True,
textlink=True)
if not dst.exists():
src.move(newTitle, editSummary,
movesubpages=True)
changed = True
replErrors = False
for p in src.getReferences(
follow_redirects=False):
if p.namespace() == 2:
continue
oldText = p.text
newText = self.ReplaceLink(oldText, title,
newTitle)
if not self.PutNewPage(
p, newText, [
self.MakeMoveSummary(title,
newTitle)]):
replErrors = True
if not replErrors:
editSummary = i18n.twtranslate(
self.site, "casechecker-delete-summary")
newText = i18n.twtranslate(
self.site,
"casechecker-delete-reason", redirTitle)
if newText:
src.text = u'{{delete}}\n\n' + newText
src.save(editSummary, minor=False)
changed = True
elif not dst.exists():
src = self.Page(title)
if page['ns'] == 14:
dst = self.Page(newTitle)
bot = CategoryMoveBot(
src.title(withNamespace=False),
dst.title(withNamespace=False),
self.autonomous,
editSummary + u' ' +
self.MakeMoveSummary(title, newTitle),
True)
bot.run()
else:
src.move(newTitle, editSummary,
movesubpages=True)
changed = True
if not changed:
if len(err[1]) > 0:
self.AppendLineToLog(self.failedTitles, title)
else:
self.AddNoSuggestionTitle(title)
self.WikiLog(u"* " + err[0])
printed = True
if self.links:
allLinks = None
if 'links' in page:
allLinks = page['links']
if 'categories' in page:
if allLinks:
allLinks = allLinks + page['categories']
else:
allLinks = page['categories']
if allLinks:
pageObj = None
pageTxt = None
msg = []
foundSuggestions = False
for l in allLinks:
ltxt = l['title']
err = self.ProcessTitle(ltxt)
if err:
if len(err[1]) > 0:
foundSuggestions = True
elif self.AddNoSuggestionTitle(ltxt):
continue
newTitle = None
if self.replace:
newTitle = self.PickTarget(title, ltxt, err[1])
if newTitle:
if pageObj is None:
pageObj = self.Page(title)
pageTxt = pageObj.get()
msg.append(self.MakeMoveSummary(ltxt,
newTitle))
pageTxt = self.ReplaceLink(pageTxt, ltxt,
newTitle)
if not newTitle:
if not printed:
self.WikiLog(u"* %s: link to %s"
% (self.MakeLink(title, False),
err[0]))
printed = True
else:
self.WikiLog(u"** link to %s" % err[0])
if pageObj is not None:
if self.PutNewPage(pageObj, pageTxt, msg):
# done, no need to log anything
foundSuggestions = False
if foundSuggestions:
self.AppendLineToLog(self.failedTitles, title)
if self.stopAfter > 0:
self.stopAfter -= 1
if self.stopAfter == 0:
raise ValueError(u'Stopping because we are done')
def WikiLog(self, text):
"""Write log."""
pywikibot.output(text)
self.wikilog.write(text + u'\n')
self.wikilog.flush()
def FindBadWords(self, title):
"""Retrieve bad words."""
for m in self.badWordPtrn.finditer(title):
yield title[m.span()[0]:m.span()[1]]
def ProcessTitle(self, title):
"""Process title."""
badWords = list(self.FindBadWords(title))
if len(badWords) > 0:
# Allow known words, allow any roman numerals with local suffixes
badWords = set([i for i in badWords
if i not in self.knownWords and
self.romanNumSfxPtrn.match(i) is not None])
if len(badWords) == 0 or self.Page(title).isImage():
return
count = 0
ambigBadWords = set()
ambigBadWordsCount = 0
mapLcl = {}
mapLat = {}
for badWord in badWords:
            # See if it would make sense to treat the whole word as either
            # cyrillic or latin
mightBeLat = mightBeLcl = True
for l in badWord:
if l in self.localLtr:
if mightBeLat and l not in self.localSuspects:
mightBeLat = False
else:
if mightBeLcl and l not in self.latinSuspects:
mightBeLcl = False
if l not in ascii_letters:
raise ValueError(u'Assert failed')
            # Some words are well known and are frequently typed with mixed scripts
if mightBeLcl and mightBeLat:
if badWord in self.alwaysInLocal:
mightBeLat = False
elif badWord in self.alwaysInLatin:
mightBeLcl = False
if mightBeLcl:
mapLcl[badWord] = badWord.translate(self.latToLclDict)
if mightBeLat:
mapLat[badWord] = badWord.translate(self.lclToLatDict)
if mightBeLcl and mightBeLat:
ambigBadWords.add(badWord)
# Cannot do len(ambigBadWords) because they might be duplicates
ambigBadWordsCount += 1
if not mightBeLcl and not mightBeLat:
# try to match one of the knownWords
bwLen = len(badWord)
kw = [w for w in self.knownWords if len(w) == bwLen]
for p in xrange(bwLen):
if len(kw) == 0:
break
c = badWord[p]
co = ord(c)
if co in self.latToLclDict:
c2 = self.latToLclDict[co]
elif co in self.lclToLatDict:
c2 = self.lclToLatDict[co]
else:
c2 = None
kw = [w for w in kw if p < len(w) and
(w[p] == c or (c2 is not None and w[p] == c2))]
if len(kw) > 1:
pywikibot.output(u"Word '%s' could be treated as more than "
u"one known words" % badWord)
elif len(kw) == 1:
mapLcl[badWord] = kw[0]
count += 1
infoText = self.MakeLink(title)
possibleAlternatives = []
if len(mapLcl) + len(mapLat) - ambigBadWordsCount < count:
# We cannot auto-translate - offer a list of suggested words
suggestions = list(mapLcl.values()) + list(mapLat.values())
if len(suggestions) > 0:
infoText += u", word suggestions: " + u', '.join(
[self.ColorCodeWord(t) for t in suggestions])
else:
infoText += u", no suggestions"
else:
# Replace all unambiguous bad words
for k, v in mapLat.items() + mapLcl.items():
if k not in ambigBadWords:
title = title.replace(k, v)
if len(ambigBadWords) == 0:
                # There is no ambiguity, we can safely convert
possibleAlternatives.append(title)
infoText += u", convert to " + self.MakeLink(title)
else:
                # Try to pick 0, 1, 2, ..., len(ambiguous words) unique
                # combinations from the bad words list, converting the picked
                # words to cyrillic and all remaining ambiguous words to
                # latin characters.
for itemCntToPick in xrange(0, len(ambigBadWords) + 1):
title2 = title
for uc in itertools.combinations(list(ambigBadWords),
itemCntToPick):
wordsToLat = ambigBadWords.copy()
for bw in uc:
title2 = title2.replace(bw, mapLcl[bw])
wordsToLat.remove(bw)
for bw in wordsToLat:
title2 = title2.replace(bw, mapLat[bw])
possibleAlternatives.append(title2)
if len(possibleAlternatives) > 0:
infoText += u", can be converted to " + u', '.join(
[self.MakeLink(t) for t in possibleAlternatives])
else:
infoText += u", no suggestions"
return (infoText, possibleAlternatives)
def PickTarget(self, title, original, candidates):
"""Pick target from candidates."""
if len(candidates) == 0:
return
if len(candidates) == 1:
return candidates[0]
pagesDontExist = []
pagesRedir = {}
pagesExist = []
for newTitle in candidates:
dst = self.Page(newTitle)
if not dst.exists():
pagesDontExist.append(newTitle)
elif dst.isRedirectPage():
pagesRedir[newTitle] = dst.getRedirectTarget().title()
else:
pagesExist.append(newTitle)
if len(pagesExist) == 1:
return pagesExist[0]
elif len(pagesExist) == 0 and len(pagesRedir) > 0:
if len(pagesRedir) == 1:
return list(pagesRedir.keys())[0]
t = None
for v in pagesRedir.values():
if not t:
t = v # first item
elif t != v:
break
else:
# all redirects point to the same target
# pick the first one, doesn't matter what it is
return list(pagesRedir.keys())[0]
if not self.autonomous:
pywikibot.output(u'Could not auto-decide for page %s. Which link '
u'should be chosen?' % self.MakeLink(title, False))
pywikibot.output(u'Original title: ', newline=False)
self.ColorCodeWord(original + "\n", True)
count = 1
for t in candidates:
if t in pagesDontExist:
msg = u'missing'
elif t in pagesRedir:
msg = u'Redirect to ' + pagesRedir[t]
else:
msg = u'page exists'
self.ColorCodeWord(u' %d: %s (%s)\n' % (count, t, msg), True)
count += 1
answers = [('skip', 's')] + [(str(i), i) for i in range(1, count)]
choice = pywikibot.input_choice(u'Which link to choose?', answers)
if choice != 's':
return candidates[int(choice) - 1]
def ColorCodeWord(self, word, toScreen=False):
"""Colorize code word."""
if not toScreen:
return self._ColorCodeWordHtml(word)
else:
return self._ColorCodeWordScreen(word)
def _ColorCodeWordHtml(self, word):
res = '<b>'
lastIsCyr = word[0] in self.localLtr
if lastIsCyr:
res += self.lclClrFnt
else:
res += self.latClrFnt
for l in word:
if l in self.localLtr:
if not lastIsCyr:
res += self.suffixClr + self.lclClrFnt
lastIsCyr = True
elif l in ascii_letters:
if lastIsCyr:
res += self.suffixClr + self.latClrFnt
lastIsCyr = False
res += l
return res + self.suffixClr + '</b>'
def _ColorCodeWordScreen(self, word):
res = ''
lastIsCyr = word[0] in self.localLtr
if lastIsCyr:
res += self.colorFormatLocalColor
else:
res += self.colorFormatLatinColor
for l in word:
if l in self.localLtr:
if not lastIsCyr:
res += self.colorFormatLocalColor
lastIsCyr = True
elif l in self.latLtr:
if lastIsCyr:
res += self.colorFormatLatinColor
lastIsCyr = False
res += l
return formatter.color_format(res + self.colorFormatSuffix)
def AddNoSuggestionTitle(self, title):
"""Add backlinks to log."""
if title in self.seenUnresolvedLinks:
return True
self.seenUnresolvedLinks.add(title)
params = {
'action': 'query',
'list': 'backlinks',
'bltitle': title,
'bllimit': '50',
}
req = api.Request(site=self.site, parameters=params)
data = req.submit()
cl = 0
redirs = 0
if 'backlinks' in data['query']:
bl = data['query']['backlinks']
cl = len(bl)
redirs = len([i for i in bl if 'redirect' in i])
if cl > 0 and 'query-continue' in data:
count = '50+'
else:
count = str(cl if cl > 0 else 'no backlinks')
self.AppendLineToLog(self.nosuggestions, u'* %s (%s%s)'
% (self.MakeLink(title), count, u', %d redirects'
% redirs if redirs > 0 else u''))
return False
def PutNewPage(self, pageObj, pageTxt, msg):
"""Save new page."""
title = pageObj.title(asLink=True, textlink=True)
coloredMsg = u', '.join([self.ColorCodeWord(m) for m in msg])
if pageObj.text == pageTxt:
self.WikiLog(u"* Error: Text replacement failed in %s (%s)"
% (self.MakeLink(title, False), coloredMsg))
else:
pywikibot.output(u'Case Replacements: %s' % u', '.join(msg))
pageObj.text = pageTxt
try:
pageObj.save(
u'%s: %s'
% (i18n.twtranslate(
self.site,
"casechecker-replacement-summary"),
self.site.mediawiki_message(u"comma-separator").join(msg)))
return True
except KeyboardInterrupt:
raise
except (pywikibot.LockedPage, pywikibot.PageNotSaved):
self.WikiLog(u"* Error: Could not save updated page %s (%s)"
% (self.MakeLink(title, False), coloredMsg))
return False
def MakeMoveSummary(self, fromTitle, toTitle):
"""Move summary from i18n."""
return i18n.twtranslate(self.site, "casechecker-replacement-linklist",
{'source': fromTitle, 'target': toTitle})
def MakeLink(self, title, colorcode=True):
"""Create a colored link string."""
prf = u'' if self.Page(title).namespace() == 0 else u':'
cc = u'|««« %s »»»' % self.ColorCodeWord(title) if colorcode else u''
return u"[[%s%s%s]]" % (prf, title, cc)
def OpenLogFile(self, filename):
"""Open logfile."""
try:
return codecs.open(filename, 'a', 'utf-8')
except IOError:
return codecs.open(filename, 'w', 'utf-8')
def AppendLineToLog(self, filename, text):
"""Write text to logfile."""
with self.OpenLogFile(filename) as f:
f.write(text + u'\n')
def Page(self, title):
"""Create Page object from title."""
return pywikibot.Page(self.site, title)
def ReplaceLink(self, text, oldtxt, newtxt):
"""Replace links."""
frmParts = [s.strip(self.stripChars)
for s in self.wordBreaker.split(oldtxt)]
toParts = [s.strip(self.stripChars)
for s in self.wordBreaker.split(newtxt)]
if len(frmParts) != len(toParts):
raise ValueError(u'Splitting parts do not match counts')
for i in xrange(0, len(frmParts)):
if len(frmParts[i]) != len(toParts[i]):
raise ValueError(u'Splitting parts do not match word length')
if len(frmParts[i]) > 0:
text = text.replace(first_lower(frmParts[i]), first_lower(toParts[i]))
text = text.replace(first_upper(frmParts[i]), first_upper(toParts[i]))
return text
if __name__ == "__main__":
bot = CaseChecker()
bot.Run()
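# Example invocation (an illustrative sketch; only flags parsed above are shown
# and the values are arbitrary):
#     casechecker.py -ns:0 -limit:100 -autonomous -wikilog:wikilog.txt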
|
the-stack_106_14958
|
'''
MobileNetV2 in TensorFlow2.
Reference:
[1] Sandler, Mark, et al.
"Mobilenetv2: Inverted residuals and linear bottlenecks."
Proceedings of the IEEE conference on computer vision and pattern recognition. 2018.
'''
import tensorflow as tf
from tensorflow.keras import layers
class Block(tf.keras.Model):
'''Expand + depthwise & pointwise convolution'''
def __init__(self, in_channels, out_channels, expansion, strides):
super(Block, self).__init__()
self.strides = strides
channels = expansion * in_channels
self.conv1 = layers.Conv2D(channels, kernel_size=1, use_bias=False)
self.bn1 = layers.BatchNormalization()
self.conv2 = layers.Conv2D(channels, kernel_size=3, strides=strides, padding='same',
groups=channels, use_bias=False)
self.bn2 = layers.BatchNormalization()
self.conv3 = layers.Conv2D(out_channels, kernel_size=1, use_bias=False)
self.bn3 = layers.BatchNormalization()
if strides == 1 and in_channels != out_channels:
self.shortcut = tf.keras.Sequential([
layers.Conv2D(out_channels, kernel_size=1, use_bias=False),
layers.BatchNormalization()
])
else:
self.shortcut = lambda x: x
def call(self, x):
out = tf.keras.activations.relu(self.bn1(self.conv1(x)))
out = tf.keras.activations.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = layers.add([self.shortcut(x), out]) if self.strides==1 else out
return out
class MobileNetV2(tf.keras.Model):
# (expansion, out_channels, num_blocks, strides)
config = [(1, 16, 1, 1),
(6, 24, 2, 1), # NOTE: change strides 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes):
super(MobileNetV2, self).__init__()
# NOTE: change conv1 strides 2 -> 1 for CIFAR10
self.conv1 = layers.Conv2D(32, kernel_size=3, padding='same', use_bias=False)
self.bn1 = layers.BatchNormalization()
self.layer = self._make_layers(in_channels=32)
self.conv2 = layers.Conv2D(1280, kernel_size=1, use_bias=False)
self.bn2 = layers.BatchNormalization()
self.avg_pool2d = layers.AveragePooling2D(pool_size=4)
self.flatten = layers.Flatten()
self.fc = layers.Dense(num_classes, activation='softmax')
def call(self, x):
out = tf.keras.activations.relu(self.bn1(self.conv1(x)))
out = self.layer(out)
out = tf.keras.activations.relu(self.bn2(self.conv2(out)))
out = self.avg_pool2d(out)
out = self.flatten(out)
out = self.fc(out)
return out
def _make_layers(self, in_channels):
layer = []
for expansion, out_channels, num_blocks, strides in self.config:
stride = [strides] + [1]*(num_blocks-1)
for s in stride:
layer += [Block(in_channels, out_channels, expansion, s)]
in_channels = out_channels
return tf.keras.Sequential(layer)
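# Minimal usage sketch (illustrative; assumes CIFAR-10 style 32x32x3 inputs and
# 10 classes, with an arbitrary random batch):
if __name__ == '__main__':
    model = MobileNetV2(num_classes=10)
    x = tf.random.uniform((2, 32, 32, 3))  # dummy batch of CIFAR-10 sized images
    y = model(x)                           # forward pass: conv1 -> blocks -> conv2 -> pool -> fc
    print(y.shape)                         # expected: (2, 10) softmax outputs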
|
the-stack_106_14959
|
# -*- coding: utf-8 -*-
'''
Manage Apigateway Rest APIs
===========================
.. versionadded:: 2016.11.0
Create and destroy rest apis depending on a swagger version 2 definition file.
Be aware that this interacts with Amazon's services, and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
This module accepts explicit vpc credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
vpc.keyid: GKTADJGHEIQSXMKKRBJ08H
vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure Apigateway API exists:
boto_apigateway.present:
- name: myfunction
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import hashlib
import logging
import os
import re
# Import Salt Libs
import salt.utils.files
import salt.utils.json
import salt.utils.yaml
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if boto is available.
'''
return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False
def present(name, api_name, swagger_file, stage_name, api_key_required,
lambda_integration_role, lambda_region=None, stage_variables=None,
region=None, key=None, keyid=None, profile=None,
lambda_funcname_format='{stage}_{api}_{resource}_{method}',
authorization_type='NONE', error_response_template=None, response_template=None):
'''
    Ensure the specified api_name with the corresponding swagger file is deployed to the
    given stage_name in AWS ApiGateway.
    This state currently only supports ApiGateway integration with AWS Lambda; CORS support is
    handled through a Mock integration.
There may be multiple deployments for the API object, each deployment is tagged with a description
(i.e. unique label) in pretty printed json format consisting of the following key/values.
.. code-block:: text
{
"api_name": api_name,
"swagger_file": basename_of_swagger_file
"swagger_file_md5sum": md5sum_of_swagger_file,
"swagger_info_object": info_object_content_in_swagger_file
}
Please note that the name of the lambda function to be integrated will be derived
via the provided lambda_funcname_format parameters:
- the default lambda_funcname_format is a string with the following
substitutable keys: "{stage}_{api}_{resource}_{method}". The user can
choose to reorder the known keys.
- the stage key corresponds to the stage_name passed in.
- the api key corresponds to the api_name passed in.
- the resource corresponds to the resource path defined in the passed swagger file.
- the method corresponds to the method for a resource path defined in the passed swagger file.
For the default lambda_funcname_format, given the following input:
.. code-block:: python
api_name = ' Test Service'
stage_name = 'alpha'
basePath = '/api'
path = '/a/{b}/c'
method = 'POST'
We will end up with the following Lambda Function Name that will be looked
    up: 'alpha_test_service_a_b_c_post'
    The canonicalization of these input parameters is done in the following order:
1. lambda_funcname_format is formatted with the input parameters as passed,
2. resulting string is stripped for leading/trailing spaces,
3. path parameter's curly braces are removed from the resource path,
4. consecutive spaces and forward slashes in the paths are replaced with '_'
    5. consecutive '_' are collapsed into a single '_'
Please note that for error response handling, the swagger file must have an error response model
with the following schema. The lambda functions should throw exceptions for any non successful responses.
An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda
to the proper error return status codes.
.. code-block:: yaml
Error:
type: object
properties:
stackTrace:
type: array
items:
type: array
items:
type: string
description: call stack
errorType:
type: string
description: error type
errorMessage:
type: string
description: |
Error message, will be matched based on pattern.
If no pattern is specified, the default pattern used for response mapping will be +*.
name
The name of the state definition
api_name
The name of the rest api that we want to ensure exists in AWS API Gateway
swagger_file
Name of the location of the swagger rest api definition file in YAML format.
stage_name
Name of the stage we want to be associated with the given api_name and swagger_file
definition
api_key_required
True or False - whether the API Key is required to call API methods
lambda_integration_role
The name or ARN of the IAM role that the AWS ApiGateway assumes when it
executes your lambda function to handle incoming requests
lambda_region
The region where we expect to find the lambda functions. This is used to
determine the region where we should look for the Lambda Function for
integration purposes. The region determination is based on the following
priority:
1. lambda_region as passed in (is not None)
2. if lambda_region is None, use the region as if a boto_lambda
function were executed without explicitly specifying lambda region.
3. if region determined in (2) is different than the region used by
boto_apigateway functions, a final lookup will be attempted using
the boto_apigateway region.
stage_variables
A dict with variables and their values, or a pillar key (string) that
contains a dict with variables and their values.
key and values in the dict must be strings. {'string': 'string'}
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
lambda_funcname_format
        Please review the earlier example for the usage. The only substitutable keys in the funcname
        format are {stage}, {api}, {resource}, {method}.
        Any other keys or positional substitution parameters will be flagged as an invalid input.
authorization_type
This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given
swagger spec file. Default is set to 'NONE'
error_response_template
        String value that defines the response template mapping that should be applied in case an error occurs.
Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
If set to None, the following default value is used:
.. code-block:: text
'#set($inputRoot = $input.path(\'$\'))\\n'
'{\\n'
' "errorMessage" : "$inputRoot.errorMessage",\\n'
' "errorType" : "$inputRoot.errorType",\\n'
' "stackTrace" : [\\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\\n'
' [\\n'
'#foreach($elem in $stackTrace)\\n'
' "$elem"\\n'
'#if($foreach.hasNext),#end\\n'
'#end\\n'
' ]\\n'
'#if($foreach.hasNext),#end\\n'
'#end\\n'
' ]\\n'
.. versionadded:: 2017.7.0
response_template
        String value that defines the response template mapping applied in case
        of success (including the OPTIONS method). If set to None, an empty ({})
        template is assumed, which will transfer the response from the lambda
        function as is.
.. versionadded:: 2017.7.0
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
try:
common_args = dict([('region', region),
('key', key),
('keyid', keyid),
('profile', profile)])
        # try to open the swagger file and do basic validation
swagger = _Swagger(api_name, stage_name,
lambda_funcname_format,
swagger_file,
error_response_template, response_template,
common_args)
# retrieve stage variables
stage_vars = _get_stage_variables(stage_variables)
        # verify if the api and stage already exist
ret = swagger.verify_api(ret)
if ret.get('publish'):
# there is a deployment label with signature matching the given api_name,
# swagger file name, swagger file md5 sum, and swagger file info object
# just reassociate the stage_name to the given deployment label.
if __opts__['test']:
ret['comment'] = ('[stage: {0}] will be reassociated to an already available '
'deployment that matched the given [api_name: {1}] '
'and [swagger_file: {2}].\n'
'Stage variables will be set '
'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars))
ret['result'] = None
return ret
return swagger.publish_api(ret, stage_vars)
if ret.get('current'):
# already at desired state for the stage, swagger_file, and api_name
if __opts__['test']:
ret['comment'] = ('[stage: {0}] is already at desired state with an associated '
'deployment matching the given [api_name: {1}] '
'and [swagger_file: {2}].\n'
'Stage variables will be set '
'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars))
ret['result'] = None
return swagger.overwrite_stage_variables(ret, stage_vars)
        # no previous deployment exists for the given swagger_file, so we need
        # to redeploy the content of the swagger file to the api, models, and resources object
        # and finally create a new deployment and tie the stage_name to this new deployment
if __opts__['test']:
ret['comment'] = ('There is no deployment matching the given [api_name: {0}] '
'and [swagger_file: {1}]. A new deployment will be '
'created and the [stage_name: {2}] will then be associated '
'to the newly created deployment.\n'
'Stage variables will be set '
'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars))
ret['result'] = None
return ret
ret = swagger.deploy_api(ret)
if ret.get('abort'):
return ret
ret = swagger.deploy_models(ret)
if ret.get('abort'):
return ret
ret = swagger.deploy_resources(ret,
api_key_required=api_key_required,
lambda_integration_role=lambda_integration_role,
lambda_region=lambda_region,
authorization_type=authorization_type)
if ret.get('abort'):
return ret
ret = swagger.publish_api(ret, stage_vars)
except (ValueError, IOError) as e:
ret['result'] = False
ret['comment'] = '{0}'.format(e.args)
return ret
def _get_stage_variables(stage_variables):
'''
Helper function to retrieve stage variables from pillars/options, if the
input is a string
'''
ret = dict()
if stage_variables is None:
return ret
if isinstance(stage_variables, six.string_types):
if stage_variables in __opts__:
ret = __opts__[stage_variables]
master_opts = __pillar__.get('master', {})
if stage_variables in master_opts:
ret = master_opts[stage_variables]
if stage_variables in __pillar__:
ret = __pillar__[stage_variables]
elif isinstance(stage_variables, dict):
ret = stage_variables
if not isinstance(ret, dict):
ret = dict()
return ret
def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None):
'''
Ensure the stage_name associated with the given api_name deployed by boto_apigateway's
present state is removed. If the currently associated deployment to the given stage_name has
no other stages associated with it, the deployment will also be removed.
name
Name of the swagger file in YAML format
api_name
Name of the rest api on AWS ApiGateway to ensure is absent.
stage_name
Name of the stage to be removed irrespective of the swagger file content.
If the current deployment associated with the stage_name has no other stages associated
with it, the deployment will also be removed.
nuke_api
If True, removes the API itself only if there are no other stages associated with any other
deployments once the given stage_name is removed.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
try:
common_args = dict([('region', region),
('key', key),
('keyid', keyid),
('profile', profile)])
swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args)
if not swagger.restApiId:
ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name)
return ret
if __opts__['test']:
if nuke_api:
                ret['comment'] = ('[stage: {0}] will be deleted; if there are no other '
                                  'active stages, the [api: {1}] will also be '
                                  'deleted.'.format(stage_name, api_name))
else:
ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name))
ret['result'] = None
return ret
ret = swagger.delete_stage(ret)
if ret.get('abort'):
return ret
if nuke_api and swagger.no_more_deployments_remain():
ret = swagger.delete_api(ret)
except (ValueError, IOError) as e:
ret['result'] = False
ret['comment'] = '{0}'.format(e.args)
return ret
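# Example SLS usage of the absent state (an illustrative sketch; the state ID,
# api name and stage below are hypothetical and mirror the present() example in
# the module docstring):
#     Ensure Apigateway API is absent:
#       boto_apigateway.absent:
#         - name: remove_myfunction_dev_stage
#         - api_name: myfunction
#         - stage_name: dev
#         - nuke_api: False
#         - region: us-east-1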
# Helper Swagger Class for swagger version 2.0 API specification
def _gen_md5_filehash(fname, *args):
'''
    helper function to generate an md5 hash of the swagger definition file
any extra argument passed to the function is converted to a string
and participates in the hash calculation
'''
_hash = hashlib.md5()
with salt.utils.files.fopen(fname, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
_hash.update(chunk)
for extra_arg in args:
_hash.update(six.b(str(extra_arg)))
return _hash.hexdigest()
def _dict_to_json_pretty(d, sort_keys=True):
'''
helper function to generate pretty printed json output
'''
return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys)
# Heuristic on whether or not the property name loosely matches given set of 'interesting' factors
# If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match
def _name_matches(name, matches):
'''
Helper function to see if given name has any of the patterns in given matches
'''
for m in matches:
if name.endswith(m):
return True
if name.lower().endswith('_' + m.lower()):
return True
if name.lower() == m.lower():
return True
return False
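# Quick illustration of the matching heuristic above (an added sketch, not used
# anywhere in this module):
#     _name_matches('deployment_id', ('id',))  -> True   (suffix 'id' / '_id')
#     _name_matches('ID', ('id',))             -> True   (case-insensitive exact match)
#     _name_matches('path', ('id',))           -> False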
def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod',
'statusCode', 'Created', 'Deleted',
'Updated', 'Flushed', 'Associated', 'Disassociated')):
'''
Helper function to reduce the amount of information that will be kept in the change log
for API GW related return values
'''
result = {}
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
reduced = v if k == 'variables' else _object_reducer(v, names)
if reduced or _name_matches(k, names):
result[k] = reduced
elif isinstance(v, list):
newlist = []
for val in v:
reduced = _object_reducer(val, names)
if reduced or _name_matches(k, names):
newlist.append(reduced)
if newlist:
result[k] = newlist
else:
if _name_matches(k, names):
result[k] = v
return result
def _log_changes(ret, changekey, changevalue):
'''
For logging create/update/delete operations to AWS ApiGateway
'''
cl = ret['changes'].get('new', [])
cl.append({changekey: _object_reducer(changevalue)})
ret['changes']['new'] = cl
return ret
def _log_error_and_abort(ret, obj):
'''
helper function to update errors in the return structure
'''
ret['result'] = False
ret['abort'] = True
if 'error' in obj:
ret['comment'] = '{0}'.format(obj.get('error'))
return ret
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
    # AWS integration response template mapping to convert the stackTrace part of the error
    # to a uniform format containing strings only. Swagger does not seem to allow defining
    # an array of non-uniform types, so it is not possible to create an error model to match
    # exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
        This is not a full schema compliance check, but rather makes sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
        Helper method to find the deployment id that the stage name is currently associated with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
        If yes, returns with 'current' set in the return value and a comment indicating we are already at the desired state.
        If not, and there are previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
'''
        this method ties the given stage_name to a deployment matching the given swagger_file
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
'''
        this method creates the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
                ret['comment'] = 'Failed to clean up restApiId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
'''
        Method to delete a Rest Api whose name is defined in the swagger file's Info Object's title value.
ret
a dictionary for returning status to Saltstack
'''
exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if exists_response.get('exists'):
if __opts__['test']:
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not delete_api_response.get('deleted'):
ret['result'] = False
ret['abort'] = True
if 'error' in delete_api_response:
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
ret['comment'] = ('api already absent for swagger file: '
'{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.
'''
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other dependencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
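# Illustrative sketch (hypothetical models): given
# models_dict = {'Pet': [], 'PetList': ['Pet']}, the first call returns 'Pet'
# and mutates the dict to {'PetList': []}; the next call then returns 'PetList'.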
def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need updating with data from the swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
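# Illustrative sketch (assuming a funcname format of '{stage}_{api}_{resource}_{method}'):
# stage 'dev', api 'Service API', resource '/pets/{petId}' and method 'GET'
# yield the lambda name 'dev_service_api_pets_petid_get'.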
def _lambda_uri(self, lambda_name, lambda_region):
'''
Helper Method to construct the lambda uri for use in method integration
'''
profile = self._common_aws_args.get('profile')
region = self._common_aws_args.get('region')
lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if lambda_region != apigw_region:
if not lambda_desc.get('function'):
# try look up in the same region as the apigateway as well if previous lookup failed
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if not lambda_desc.get('function'):
raise ValueError('Could not find lambda function {0} in '
'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
lambda_arn = lambda_desc.get('function').get('FunctionArn')
lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
'/functions/{1}/invocations'.format(apigw_region, lambda_arn))
return lambda_uri
def _parse_method_data(self, method_name, method_data):
'''
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.
'''
method_params = {}
method_models = {}
if 'parameters' in method_data:
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
method_params[p.name] = True
if p.schema:
method_models['application/json'] = p.schema
request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
integration_type = "MOCK" if method_name == 'options' else "AWS"
return {'params': method_params,
'models': method_models,
'request_templates': request_templates,
'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
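# Illustrative sketch: for a model schema such as
# {'type': 'object', 'properties': {'id': {'type': 'string', 'pattern': '^\d+$'}}}
# the recursion collects ['^\d+$'].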
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
'''
Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.
'''
method_response_models = {}
method_response_pattern = '.*'
if method_response.schema:
method_response_models['application/json'] = method_response.schema
method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
method_response_params = {}
method_integration_response_params = {}
for header in method_response.headers:
response_header = 'method.response.header.{0}'.format(header)
method_response_params[response_header] = False
header_data = method_response.headers.get(header)
method_integration_response_params[response_header] = (
"'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
response_templates = self._get_response_template(method_name, httpStatus)
return {'params': method_response_params,
'models': method_response_models,
'integration_params': method_integration_response_params,
'pattern': method_response_pattern,
'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type):
'''
Method to create a method for the given resource path, along with its associated
request and response integrations.
ret
a dictionary for returning status to Saltstack
resource_path
the full resource path where the named method_name will be associated with.
method_name
a string that is one of the following values: 'delete', 'get', 'head', 'options',
'patch', 'post', 'put'
method_data
the value dictionary for this method in the swagger definition file.
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
method = self._parse_method_data(method_name.lower(), method_data)
# for options method to enable CORS, api_key_required will be set to False always.
# authorization_type will be set to 'NONE' always.
if method_name.lower() == 'options':
api_key_required = False
authorization_type = 'NONE'
m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
authorizationType=authorization_type,
apiKeyRequired=api_key_required,
requestParameters=method.get('params'),
requestModels=method.get('models'),
**self._common_aws_args)
if not m.get('created'):
ret = _log_error_and_abort(ret, m)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method', m)
lambda_uri = ""
if method_name.lower() != 'options':
lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
lambda_region=lambda_region)
# NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
# about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
integration = (
__salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
integrationType=method.get('integration_type'),
integrationHttpMethod='POST',
uri=lambda_uri,
credentials=lambda_integration_role,
requestTemplates=method.get('request_templates'),
**self._common_aws_args))
if not integration.get('created'):
ret = _log_error_and_abort(ret, integration)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
if 'responses' in method_data:
for response, response_data in six.iteritems(method_data['responses']):
httpStatus = str(response) # future lint: disable=blacklisted-function
method_response = self._parse_method_response(method_name.lower(),
_Swagger.SwaggerMethodResponse(response_data), httpStatus)
mr = __salt__['boto_apigateway.create_api_method_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
responseParameters=method_response.get('params'),
responseModels=method_response.get('models'),
**self._common_aws_args)
if not mr.get('created'):
ret = _log_error_and_abort(ret, mr)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
mir = __salt__['boto_apigateway.create_api_integration_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
selectionPattern=method_response.get('pattern'),
responseParameters=method_response.get('integration_params'),
responseTemplates=method_response.get('response_templates'),
**self._common_aws_args)
if not mir.get('created'):
ret = _log_error_and_abort(ret, mir)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
else:
raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
'''
Method to deploy resources defined in the swagger file.
ret
a dictionary for returning status to Saltstack
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
for path, pathData in self.paths:
resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
path=path,
**self._common_aws_args)
if not resource.get('created'):
ret = _log_error_and_abort(ret, resource)
return ret
ret = _log_changes(ret, 'deploy_resources', resource)
for method, method_data in six.iteritems(pathData):
if method in _Swagger.SWAGGER_OPERATION_NAMES:
ret = self._deploy_method(ret, path, method, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type)
return ret
def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None,
profile=None):
'''
Ensure the specified usage plan with the corresponding metrics is deployed
.. versionadded:: 2017.7.0
name
name of the state
plan_name
[Required] name of the usage plan
throttle
[Optional] throttling parameters expressed as a dictionary.
If provided, at least one of the throttling parameters must be present
rateLimit
rate per second at which capacity bucket is populated
burstLimit
maximum rate allowed
quota
[Optional] quota on the number of api calls permitted by the plan.
If provided, limit and period must be present
limit
[Required] number of calls permitted per quota period
offset
[Optional] number of calls to be subtracted from the limit at the beginning of the period
period
[Required] period to which quota applies. Must be DAY, WEEK or MONTH
.. code-block:: yaml
UsagePlanPresent:
boto_apigateway.usage_plan_present:
- plan_name: my_usage_plan
- throttle:
rateLimit: 70
burstLimit: 100
- quota:
limit: 1000
offset: 0
period: DAY
- profile: my_profile
'''
func_params = locals()
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
try:
common_args = dict([('region', region),
('key', key),
('keyid', keyid),
('profile', profile)])
existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
if 'error' in existing:
ret['result'] = False
ret['comment'] = 'Failed to describe existing usage plans'
return ret
if not existing['plans']:
# plan does not exist, we need to create it
if __opts__['test']:
ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name)
ret['result'] = None
return ret
result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name,
description=description,
throttle=throttle,
quota=quota,
**common_args)
if 'error' in result:
ret['result'] = False
ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error'])
return ret
ret['changes']['old'] = {'plan': None}
ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name)
else:
# need an existing plan modified to match given value
plan = existing['plans'][0]
needs_updating = False
modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period')))
for p, fields in modifiable_params:
for f in fields:
actual_param = {} if func_params.get(p) is None else func_params.get(p)
if plan.get(p, {}).get(f, None) != actual_param.get(f, None):
needs_updating = True
break
if not needs_updating:
ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name)
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = 'usage plan {0} would be updated'.format(plan_name)
ret['result'] = None
return ret
result = __salt__['boto_apigateway.update_usage_plan'](plan['id'],
throttle=throttle,
quota=quota,
**common_args)
if 'error' in result:
ret['result'] = False
ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error'])
return ret
ret['changes']['old'] = {'plan': plan}
ret['comment'] = 'usage plan {0} has been updated'.format(plan_name)
newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
if 'error' in newstate:
ret['result'] = False
ret['comment'] = 'Failed to describe existing usage plans after updates'
return ret
ret['changes']['new'] = {'plan': newstate['plans'][0]}
except (ValueError, IOError) as e:
ret['result'] = False
ret['comment'] = '{0}'.format(e.args)
return ret
def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None):
'''
Ensures usage plan identified by name is no longer present
.. versionadded:: 2017.7.0
name
name of the state
plan_name
name of the plan to remove
.. code-block:: yaml
usage plan absent:
boto_apigateway.usage_plan_absent:
- plan_name: my_usage_plan
- profile: my_profile
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
try:
common_args = dict([('region', region),
('key', key),
('keyid', keyid),
('profile', profile)])
existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
if 'error' in existing:
ret['result'] = False
ret['comment'] = 'Failed to describe existing usage plans'
return ret
if not existing['plans']:
ret['comment'] = 'Usage plan {0} is already absent'.format(plan_name)
return ret
if __opts__['test']:
ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name)
ret['result'] = None
return ret
plan_id = existing['plans'][0]['id']
result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args)
if 'error' in result:
ret['result'] = False
ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result)
return ret
ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name)
ret['changes']['old'] = {'plan': existing['plans'][0]}
ret['changes']['new'] = {'plan': None}
except (ValueError, IOError) as e:
ret['result'] = False
ret['comment'] = '{0}'.format(e.args)
return ret
def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None):
'''
Ensures usage plan identified by name is added to provided api_stages
.. versionadded:: 2017.7.0
name
name of the state
plan_name
name of the plan to use
api_stages
list of dictionaries, where each dictionary consists of the following keys:
apiId
apiId of the api to attach usage plan to
stage
stage name of the api to attach usage plan to
.. code-block:: yaml
UsagePlanAssociationPresent:
boto_apigateway.usage_plan_association_present:
- plan_name: my_plan
- api_stages:
- apiId: 9kb0404ec0
stage: my_stage
- apiId: l9v7o2aj90
stage: my_stage
- profile: my_profile
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
try:
common_args = dict([('region', region),
('key', key),
('keyid', keyid),
('profile', profile)])
existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
if 'error' in existing:
ret['result'] = False
ret['comment'] = 'Failed to describe existing usage plans'
return ret
if not existing['plans']:
ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name)
ret['result'] = False
return ret
if len(existing['plans']) != 1:
ret['comment'] = 'There are multiple usage plans with the same name - it is not supported'
ret['result'] = False
return ret
plan = existing['plans'][0]
plan_id = plan['id']
plan_stages = plan.get('apiStages', [])
stages_to_add = []
for api in api_stages:
if api not in plan_stages:
stages_to_add.append(api)
if not stages_to_add:
ret['comment'] = 'Usage plan is already associated to all api stages'
return ret
result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args)
if 'error' in result:
ret['comment'] = 'Failed to associate a usage plan {0} to the apis {1}, {2}'.format(plan_name,
stages_to_add,
result['error'])
ret['result'] = False
return ret
ret['comment'] = 'successfully associated usage plan to apis'
ret['changes']['old'] = plan_stages
ret['changes']['new'] = result.get('result', {}).get('apiStages', [])
except (ValueError, IOError) as e:
ret['result'] = False
ret['comment'] = '{0}'.format(e.args)
return ret
def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None):
'''
Ensures usage plan identified by name is removed from provided api_stages
If a plan is associated to stages not listed in api_stages parameter,
those associations remain intact.
.. versionadded:: 2017.7.0
name
name of the state
plan_name
name of the plan to use
api_stages
list of dictionaries, where each dictionary consists of the following keys:
apiId
apiId of the api to detach usage plan from
stage
stage name of the api to detach usage plan from
.. code-block:: yaml
UsagePlanAssociationAbsent:
boto_apigateway.usage_plan_association_absent:
- plan_name: my_plan
- api_stages:
- apiId: 9kb0404ec0
stage: my_stage
- apiId: l9v7o2aj90
stage: my_stage
- profile: my_profile
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
try:
common_args = dict([('region', region),
('key', key),
('keyid', keyid),
('profile', profile)])
existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
if 'error' in existing:
ret['result'] = False
ret['comment'] = 'Failed to describe existing usage plans'
return ret
if not existing['plans']:
ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name)
ret['result'] = False
return ret
if len(existing['plans']) != 1:
ret['comment'] = 'There are multiple usage plans with the same name - it is not supported'
ret['result'] = False
return ret
plan = existing['plans'][0]
plan_id = plan['id']
plan_stages = plan.get('apiStages', [])
if not plan_stages:
ret['comment'] = 'Usage plan {0} has no associated stages already'.format(plan_name)
return ret
stages_to_remove = []
for api in api_stages:
if api in plan_stages:
stages_to_remove.append(api)
if not stages_to_remove:
ret['comment'] = 'Usage plan is already not associated to any api stages'
return ret
result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args)
if 'error' in result:
ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name,
stages_to_remove,
result['error'])
ret['result'] = False
return ret
ret['comment'] = 'successfully disassociated usage plan from apis'
ret['changes']['old'] = plan_stages
ret['changes']['new'] = result.get('result', {}).get('apiStages', [])
except (ValueError, IOError) as e:
ret['result'] = False
ret['comment'] = '{0}'.format(e.args)
return ret
|
the-stack_106_14960
|
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt  # used by the plot=True branch of train()
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
class TTrain(nn.Module):
"""Abstract class for Tensor Train models. Use instantiating class.
Parameters:
D (int): bond dimension
d (int): physical dimension (number of categories in data)
dtype ([tensor.dtype]):
tensor.float for real, or tensor.cfloat for complex
"""
def __init__(
self, dataset, d, D, dtype,
homogeneous=True, w_randomization=None, gradient_clipping_threshold=None,
verbose=False):
super().__init__()
self.D = D
self.d = d
self.dtype = dtype
self.verbose = verbose
self.homogeneous = homogeneous
self.dataset = dataset
self.n_datapoints = dataset.shape[0]
self.seqlen = dataset.shape[1]
# choose weight initialization scheme
if w_randomization == 'noisy':
w_init = self.noisy_ones # constant at 1, with some noise
elif w_randomization == 'random_angle':
w_init = self.randomsign_ones # 1 * +/-(/+j/-j)
elif w_randomization == 'gaussian_zeros':
w_init = torch.randn # gaussian centred at 0
elif w_randomization == 'zeros':
w_init = torch.zeros # constant at 0
elif w_randomization == 'ones':
w_init = torch.ones # constant at 1
else:
w_init = torch.ones # constant at 1
# the following are set to nn.Parameters thus are backpropped over
k_core = (d*D*D)**-0.5
k_vectors = (D)**-0.5
if homogeneous: # initialize single core to be repeated
core = k_core * w_init((d, D, D), dtype=dtype)
#core = torch.randn(d, D, D, dtype=dtype)
self.core = nn.Parameter(core)
else: # initialize seqlen different non-homogeneous cores
core = k_core * w_init((self.seqlen, d, D, D), dtype=dtype)
#core = torch.randn(self.seqlen, d, D, D, dtype=dtype)
self.core = nn.Parameter(core)
left_boundary = k_vectors * w_init(D, dtype=dtype)
#left_boundary = torch.randn(D, dtype=dtype)
self.left_boundary = nn.Parameter(left_boundary)
right_boundary = k_vectors * w_init(D, dtype=dtype)
#right_boundary = torch.randn(D, dtype=dtype)
self.right_boundary = nn.Parameter(right_boundary)
if gradient_clipping_threshold:
# clip gradients at gradient_clipping_threshold if not None
self.add_gradient_hook(clipping_threshold=gradient_clipping_threshold)
@staticmethod
def noisy_ones(shape, dtype=torch.float):
"""Fill from gaussian with mean 1, variance hardcoded."""
x = torch.ones(shape, dtype=dtype)
e = 0.5 * torch.randn(shape, dtype=dtype)
return x + e
@staticmethod
def randomsign_ones(shape, dtype=torch.float):
"""Makes a vector of ones with random sign,
or if dtype is torch.cfloat, randomized real or imaginary units"""
x = torch.zeros(shape)
if dtype==torch.cfloat:
random4=torch.randint_like(x,4)
r = x + 1*(random4==0) - 1*(random4==1)
i = x + 1*(random4==2) - 1*(random4==3)
out = torch.complex(r,i)
else:
random2=torch.randint_like(x,2)
out = x + 1*(random2==0) - 1*(random2==1)
return torch.tensor(out, dtype=dtype)
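# Illustrative sketch: randomsign_ones((2, 2)) might return
# tensor([[ 1., -1.], [-1., 1.]]); with dtype=torch.cfloat each entry is one of
# (1+0j), (-1+0j), (0+1j) or (0-1j).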
def mat_norm(self, mat):
"""Our norm for matrices: infinity norm"""
# equivalent to torch.linalg.norm(mat, ord=float('inf')).real
return torch.max(torch.sum(abs(mat), dim=1))
def vec_norm(self, vec):
"""Our norm for vectors: infinity norm"""
# equivalent to torch.linalg.norm(vec, ord=float('inf')).real
return vec.abs().max()
def _contract_at(self, x):
"""Contract network at particular values in the physical dimension,
for computing probability of x.
"""
if self.homogeneous:
# repeat the core seqlen times
w = self.core[None].repeat(self.seqlen, 1, 1, 1)
else:
w = self.core
# contract the network, from the left boundary through to the last core
contracting_tensor = self.left_boundary
for i in range(self.seqlen):
contracting_tensor = torch.einsum(
'i, ij -> j',
contracting_tensor,
w[i, x[i], :, :])
# contract the final bond dimension
output = torch.einsum(
'i, i ->', contracting_tensor, self.right_boundary)
# if self.verbose:
# print("contract_at", output)
return output
def _contract_all(self):
"""Contract network with a copy of itself across physical index,
for computing norm.
"""
if self.homogeneous:
# repeat the core seqlen times
w = self.core[None].repeat(self.seqlen, 1, 1, 1)
else:
w = self.core
# first, left boundary contraction
# (note: if real-valued conj will have no effect)
contracting_tensor = torch.einsum(
'ij, ik -> jk',
torch.einsum(
'j, ijk -> ik', self.left_boundary, w[0, :, :, :]),
torch.einsum(
'j, ijk -> ik', self.left_boundary, w[0, :, :, :].conj())
)
# contract the network
for i in range(1, self.seqlen):
contracting_tensor = torch.einsum(
'ij, ijkl -> kl',
contracting_tensor,
torch.einsum(
'ijk, ilm -> jlkm',
w[i, :, :, :],
w[i, :, :, :].conj()))
# contract the final bond dimension with right boundary vector
output = torch.einsum(
'ij, i, j ->',
contracting_tensor,
self.right_boundary,
self.right_boundary.conj())
# if self.verbose:
# print("contract_all", output)
return output
def _log_contract_at(self, x):
"""Contract network at particular values in the physical dimension,
for computing probability of x.
Uses log norm stability trick.
RETURNS A LOG PROB.
"""
if self.homogeneous:
# repeat the core seqlen times
w = self.core[None].repeat(self.seqlen, 1, 1, 1)
else:
w = self.core
# contract the network, from the left boundary through to the last core
Z = self.vec_norm(self.left_boundary)
contractor_unit = self.left_boundary / Z
accumulated_lognorm = Z.log()
for i in range(self.seqlen):
contractor_temp = torch.einsum(
'i, ij -> j',
contractor_unit,
w[i, x[i], :, :])
Z = self.vec_norm(contractor_temp)
contractor_unit = contractor_temp / Z
accumulated_lognorm += Z.log()
# contract the final bond dimension
output = torch.einsum(
'i, i ->', contractor_unit, self.right_boundary)
output = (accumulated_lognorm.exp()*output).abs().square()
logprob = output.log()
# if self.verbose:
# print("contract_at", output)
return logprob
def clamp_c(self, tensor, clip_min, clip_max):
'''clamp complex or real'''
if tensor.dtype==torch.cfloat:
return torch.complex(
tensor.real.clamp(clip_min, clip_max),
tensor.imag.clamp(clip_min, clip_max))
else:
return torch.clamp(tensor, clip_min, clip_max)
def _log_contract_all(self):
"""Contract network with a copy of itself across physical index,
for computing norm.
"""
if self.homogeneous:
# repeat the core seqlen times
w = self.core[None].repeat(self.seqlen, 1, 1, 1)
else:
w = self.core
# first, left boundary contraction
# (note: if real-valued conj will have no effect)
Z = self.vec_norm(self.left_boundary)
contractor_unit = self.left_boundary / Z
accumulated_lognorm = Z.log()
if not accumulated_lognorm.isfinite():
print("nonfinite lognorm in contract all! clamping")
accumulated_lognorm = self.clamp_c(accumulated_lognorm, -1e-20, None)
contractor_temp = torch.einsum(
'ij, ik -> jk',
torch.einsum(
'j, ijk -> ik', contractor_unit, w[0, :, :, :]),
torch.einsum(
'j, ijk -> ik', contractor_unit.conj(), w[0, :, :, :].conj())
)
Z = self.mat_norm(contractor_temp)
contractor_unit = contractor_temp / Z
accumulated_lognorm += Z.log()
# contract the network
for i in range(1, self.seqlen):
contractor_temp = torch.einsum(
'ij, ijkl -> kl',
contractor_unit,
torch.einsum(
'ijk, ilm -> jlkm',
w[i, :, :, :],
w[i, :, :, :].conj()))
Z = self.mat_norm(contractor_temp)
contractor_unit = contractor_temp / Z
accumulated_lognorm += Z.log()
if not accumulated_lognorm.isfinite():
print("nonfinite lognorm in contract all! clamping")
accumulated_lognorm = self.clamp_c(accumulated_lognorm, -1e-20, None)
# contract the final bond dimension with right boundary vector
output = torch.einsum(
'ij, i, j ->',
contractor_unit,
self.right_boundary,
self.right_boundary.conj())
lognorm = (accumulated_lognorm.exp()*output).abs().log()
# if self.verbose:
# print("contract_all", output)
return lognorm
def _log_contract_at_batch(self, X):
"""Contract network at particular values in the physical dimension,
for computing probability of x, for x in X.
input:
X: tensor batch of observations, size [batch_size, seq_len]
returns:
logprobs: tensor of log probs, size [batch_size]
Uses log norm stability trick.
"""
batch_size = X.shape[0]
if self.homogeneous:
# repeat the core seqlen times, and repeat that batch_size times
w = self.core[(None,)*2].repeat(batch_size, self.seqlen, 1, 1, 1)
else:
# repeat nonhomogenous core batch_size times
w = self.core[None].repeat(batch_size, 1, 1, 1, 1)
# contract the network, from the left boundary through to the last core
left_boundaries = self.left_boundary[None].repeat(batch_size, 1)
right_boundaries = self.right_boundary[None].repeat(batch_size, 1)
# normalizers, one per batch
Zs, _ = left_boundaries.abs().max(axis=1) # do vec_norm on each row (!note infinity norm is hardcoded here)
contractor_unit = left_boundaries / Zs[:,None]
accumulated_lognorms = Zs.log()
if not accumulated_lognorms.isfinite().all():
print("nonfinite lognorm in contract_at! clamping")
accumulated_lognorms = self.clamp_c(accumulated_lognorms, -1e-20, None)
# make one hot encoding of data, and select along physical dimension of weights
Xh = torch.nn.functional.one_hot(X, num_classes=self.d)
w_selected = (w * Xh[:, :, :, None, None]).sum(2) # w_selected shape is [batchsize, seqlen, D, D]
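# e.g. with batch_size=2, seqlen=3, d=4, D=5: X is [2, 3] integers in [0, d),
# Xh is [2, 3, 4], w is [2, 3, 4, 5, 5], and the masked sum over the physical
# dimension leaves w_selected as [2, 3, 5, 5], i.e. one DxD core per site.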
# contract the network, from the left boundary through to the last core
for i in range(self.seqlen):
contractor_temp = torch.einsum(
'bi, bij -> bj',
contractor_unit,
w_selected[:, i, :, :])
Zs, _ = contractor_temp.abs().max(axis=1)
contractor_unit = contractor_temp / Zs[:,None]
accumulated_lognorms += Zs.log()
if not accumulated_lognorms.isfinite().all():
print("nonfinite lognorm in contract_at! clamping")
accumulated_lognorms = self.clamp_c(accumulated_lognorms, -1e-20, None)
# contract the final bond dimension
output = torch.einsum(
'bi, bi -> b', contractor_unit, right_boundaries)
probs = (accumulated_lognorms.exp() * output).abs().square()
logprobs = probs.log()
return logprobs
def _logprob(self, x):
"""Compute log probability of one configuration P(x)
Args:
x : shape (seqlen,)
Returns:
logprob (torch.Tensor): size [1]
"""
pass
def _logprob_batch(self, X):
"""Compute log P(x) for all x in a batch X
Args:
X : shape (batch_size, seqlen)
Returns:
logprobs (torch.Tensor): size [batchsize]
"""
pass
def forward(self, x):
return self._logprob(x)
def forward_batch(self, batch):
logprobs = self._logprob_batch(batch)
return logprobs
@staticmethod
def clip_grad(grad, clip_val, param_name, verbose=False):
"""Clip the gradients, to be used as a hook during training."""
if torch.isnan(grad).any():
print(f"├─NaN value in gradient of {param_name}, {grad.size()}")
if grad.dtype==torch.cfloat:
for ext, v in [("min", grad.real.min()),("max", grad.real.max())]:
if verbose and abs(v) > clip_val:
print(f"│(clipping {param_name} real {ext} {v:.2} to size {clip_val})")
for ext, v in [("min", grad.imag.min()),("max", grad.imag.max())]:
if verbose and abs(v) > clip_val:
print(f"│(clipping {param_name} imag {ext} {1.j*v:.2} to size {clip_val})")
clipped_grad = torch.complex(grad.real.clamp(-clip_val, clip_val),
grad.imag.clamp(-clip_val, clip_val))
else:
for ext, v in [("min", grad.min()),("max", grad.max())]:
if verbose and abs(v) > clip_val:
print(f"│(clipping {param_name} {ext} {v:.2} to size {clip_val})")
clipped_grad = torch.clamp(grad, -clip_val, clip_val)
return clipped_grad
def add_gradient_hook(self, clipping_threshold):
for param_index, p in enumerate(self.parameters()):
pnames = list(self.state_dict().keys())
# bind the name now; a plain closure would late-bind param_index and every hook would report the last parameter
p.register_hook(lambda grad, pname=pnames[param_index]: self.clip_grad(grad, clipping_threshold, pname, verbose=self.verbose))
if torch.isnan(p).any():
print(f"{pnames[param_index]} contains a NaN value!")
def train(
self, batchsize, max_epochs, early_stopping_threshold=0,
plot=False, tqdm=tqdm, device='cpu', batched=False,
verbose=False,
optimizer=torch.optim.Adadelta, clamp_at=None, **optim_kwargs):
dataset = self.dataset
model = self.to(device)
trainloader = DataLoader(dataset, batch_size=batchsize, shuffle=True)
optimizer = optimizer(model.parameters(), **optim_kwargs)
early_stopping_threshold = early_stopping_threshold # 0 for no early stopping
loss_values = [] # store by-epoch avg loss values
print(f'╭───────────────────────────batched={batched}\n│Training {self.name}, on {device}')
print(f'│ batchsize:{batchsize}, {optimizer.__module__}, {optim_kwargs}.')
av_batch_loss_running = -1e4
with tqdm(range(max_epochs), unit="epoch", leave=True) as tepochs:
for epoch in tepochs:
batch_loss_list = []
# with tqdm(trainloader, unit="batch", leave=False, desc=f"epoch {epoch}") as tepoch:
# for batch in tepoch:
for batch_idx, batch in enumerate(trainloader):
for pindex, p in enumerate(model.parameters()):
if torch.isnan(p).any():
pnames = list(self.state_dict().keys())
print("│ loss values:", *(f"{x:.3f}" for x in loss_values))
print(f"└────Stopped before epoch {epoch}. NaN in weights {pnames[pindex]}!")
if plot:
plt.plot(loss_values)
plt.show()
return loss_values
model.zero_grad()
if batched:
logprobs = model.forward_batch(batch.to(device))
if verbose and (logprobs > 0).any():
print(f"├─── Epoch {epoch}, batch {batch_idx}: Warning! logprobs contains positive values (max={logprobs.max()})...")
neglogprob = -logprobs.sum(0)
else:
neglogprob = 0
for x_idx, x in enumerate(batch):
logprob = model(x.to(device))
if verbose and (logprob > 0):
print(f"├─── Batch {batch_idx}[{x_idx}]: Warning! positive logprob...")
neglogprob -= logprob
loss = neglogprob / len(batch)
if clamp_at:
loss = torch.clamp(loss, min=-clamp_at, max=clamp_at)
loss.backward()
# for pindex, p in enumerate(model.parameters()):
# if torch.isnan(p.grad).any():
# pnames = list(self.state_dict().keys())
# print("│ loss values:", *(f"{x:.3f}" for x in loss_values))
# print(f"└────Stopped. NaN value in gradient for {pnames[pindex]}!")
# if plot:
# plt.plot(loss_values)
# plt.show()
# return loss_values
optimizer.step()
# tepoch.set_postfix(loss=loss.item())
batch_loss_list.append(loss.item())
av_batch_loss = torch.Tensor(batch_loss_list).mean().item()
batch_loss_variance = torch.Tensor(batch_loss_list).var().item()
loss_values.append(av_batch_loss)
tepochs.set_postfix(
dict(av_batch_loss=av_batch_loss, batch_loss_variance=batch_loss_variance))
if abs(av_batch_loss_running - av_batch_loss) < early_stopping_threshold:
print(f"├────Early stopping after epoch {epoch}/{max_epochs}.")
break
av_batch_loss_running = av_batch_loss
print("│ loss values:", *(f"{x:.3f}" for x in loss_values))
if plot:
plt.plot(loss_values)
plt.show()
print('│ Finished training.\n╰───────────────────────────\n')
return loss_values
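# Illustrative usage sketch (hypothetical subclass and data; TTrain itself is
# abstract because _logprob/_logprob_batch are left as stubs):
#
# data = torch.randint(0, 4, (128, 16))  # 128 sequences of length 16, d=4
# model = MyTTrainSubclass(data, d=4, D=8, dtype=torch.float,
#                          homogeneous=True, w_randomization='noisy')
# losses = model.train(batchsize=32, max_epochs=10, batched=True, lr=1.0)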
|
the-stack_106_14962
|
# April 2019: it works.
import numpy as np
import matplotlib.pyplot as plt
import random as rand
from scipy.spatial import Delaunay
colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow']
x = 0
y = 1
def orientation(p, q, r):
val = (float(all_point[q,y] - all_point[p,y]) * (all_point[r,x] - all_point[q,x])) - (float(all_point[q,x] - all_point[p,x]) * (all_point[r,y] - all_point[q,y]))
if (val > 0):
# Clockwise orientation
return 1
elif (val < 0):
# Counterclockwise orientation
return 2
else:
# Colinear orientation
return 0
def doIntersect(p1,q1,p2,q2):
# Find the 4 orientations required for
# the general and special cases
o1 = orientation(p1, q1, p2)
o2 = orientation(p1, q1, q2)
o3 = orientation(p2, q2, p1)
o4 = orientation(p2, q2, q1)
if(o1 == 0 or o2 == 0 or o3 == 0 or o4 == 0):
return False
if ((o1 != o2) and (o3 != o4)):
return True
return False
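# Illustrative sketch (indices into the module-level all_point array): with
# all_point = [[0, 0], [2, 2], [0, 2], [2, 0]], doIntersect(0, 1, 2, 3) is True
# because segment (0,0)-(2,2) crosses segment (0,2)-(2,0); collinear or touching
# cases return False by construction.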
filetargets = ['points1', 'HARI SETIAWAN_dt01', 'HARI SETIAWAN_dt02']
for filetarget in filetargets:
filepath = filetarget+'.txt'
fileout = filetarget+'_out.txt'
with open(filepath) as f:
lines = f.read().splitlines()
intersect_lines = lines[-2:]
intersect = []
lines = lines[1:-2]
points = []
all_point = []
for i in lines:
temp = i.split(" ")
points.append([temp[0],temp[1]])
all_point.append([temp[0],temp[1]])
for i in intersect_lines:
temp = i.split(" ")
intersect.append([temp[0],temp[1]])
all_point.append([temp[0],temp[1]])
intersect = np.asarray(intersect).astype(int)
points = np.asarray(points).astype(int)
all_point = np.asarray(all_point).astype(int)
for i in range(len(points)) :
ptxt = str(i) + "(" + str(points[i][0]) + "," + str(points[i][1]) + ")"
plt.annotate( ptxt, (points[i][0], points[i][1]), fontsize=8, fontweight='bold' )
for i in range(len(intersect)) :
ptxt = "P"+str(i) + "(" + str(intersect[i][0]) + "," + str(intersect[i][1]) + ")"
plt.annotate( ptxt, (intersect[i][0], intersect[i][1]), fontsize=8, fontweight='bold' )
tri = Delaunay(points)
with open(fileout, "a") as myfile:
p1 = len(all_point)-2
q1 = len(all_point)-1
for p in tri.simplices :
edges = []
edges.append([p[0],p[1]])
edges.append([p[1],p[2]])
edges.append([p[0],p[2]])
crossing = False
for edge in edges:
p2 = edge[0]
q2 = edge[1]
if doIntersect(p1, q1, p2, q2):
crossing = True
break
if crossing:
plt.fill(all_point[p,x], all_point[p,y], colors[rand.randrange(0,6)], alpha=0.5)
myfile.write(' '.join(p.astype(str)))
myfile.write('\r\n')
myfile.close()
plt.triplot(points[:,0], points[:,1], tri.simplices.copy())
plt.plot(points[:,0], points[:,1], 'o')
plt.plot(intersect[:,0], intersect[:,1], '*-r') #Plot the intersecting line
plt.show()
|
the-stack_106_14963
|
import json, discord, os
from .namedtuples import Presence, AutoMod, Roles, Channels, Colours, Emojis
from .exceptions import InvalidConfig
class Config:
def __init__(self):
self.config = json.load(open("config.json"))
@property
def prefix(self):
return self.config["prefix"]
@property
def token(self):
return self.config["token"]
@property
def server(self):
try:
return int(self.config["server"])
except:
raise InvalidConfig("Server", "int")
@property
def roles(self):
roles = self.config["roles"]
try:
member = roles["member"]
if not isinstance(member, int):
if member.lower() in ("@everyone", "everyone", "default"):
member = "default"
else:
member = int(member)
ping = roles["polls_news_ping"]
if not isinstance(ping, int):
if ping.lower() in ("@everyone", "everyone", "default"):
ping = "default"
else:
ping = int(ping)
return Roles(int(roles["admin"]), int(roles["mod"]), int(roles["muted"]), member, int(roles["offduty"]), int(roles["staff"]), int(roles["support"]), ping)
except:
raise InvalidConfig("Roles", "list of int")
@property
def channels(self):
channels = self.config["channels"]
try:
return Channels(int(channels["user_log"]), int(channels["mod_log"]), int(channels["announcements"]))
except:
raise InvalidConfig("Channels", "list of int")
@property
def emojis(self):
emojis = self.config["emojis"]
try:
return Emojis(int(emojis["online"]), int(emojis["idle"]), int(emojis["dnd"]), int(emojis["offline"]), int(emojis["streaming"]), int(emojis["text_channel"]), int(emojis["voice_channel"]), int(emojis["green_tick"]), int(emojis["red_tick"]), int(emojis["gray_tick"]), int(emojis["bot_tag"]))
except:
raise InvalidConfig("Emojis", "list of int")
@property
def colours(self):
colours = self.config["colours"]
return Colours(colours["embed"], colours["error"], colours["ban"], colours["unban"], colours["mute"], colours["unmute"], colours["kick"])
@property
def presence(self):
presence = self.config["presence"]
if presence["type"].upper() not in ("WATCHING", "PLAYING", "STREAMING", "LISTENING"):
raise ValueError("'type' should be one of 'watching', 'playing', 'streaming' or 'listening', not %s" % presence["type"])
if presence["status"].upper() not in ("ONLINE", "IDLE", "DND", "OFFLINE", "INVISIBLE"):
raise ValueError("'status' should be one of 'online', 'idle', 'dnd', 'offline' or 'invisible', not %s" % presence["status"])
status = {
"ONLINE": discord.Status.online,
"IDLE": discord.Status.idle,
"DND": discord.Status.dnd,
"OFFLINE": discord.Status.offline,
"INVISIBLE": discord.Status.offline
}[presence["status"].upper()]
return Presence({
"WATCHING": discord.Activity(type=discord.ActivityType.watching, name=presence["name"]),
"STREAMING": discord.Streaming(name=presence["name"], url=presence["url"]),
"PLAYING": discord.Game(name=presence["name"]),
"LISTENING": discord.Activity(type=discord.ActivityType.listening, name=presence["name"])
}[presence["type"].upper()], status)
@property
def figlet(self):
return self.config["figlet"]
@property
def directories(self):
return self.config["directories"]
@property
def cogs(self):
return [f"{self.directories['cogs']}.{file[:-3]}" for file in os.listdir(self.directories["cogs"]) if file.endswith(".py")]
@property
def database(self):
return f"{self.directories['database']}.db"
@property
def stderr(self):
return f"{self.directories['stderr']}.log"
@property
def case_insensitive(self):
return self.config["case_insensitive"]
def anti(self, setting="invite"):
auto_mod = self.config["auto_moderator"]["enabled"]
config = self.config["auto_moderator"]["modules"][f"anti_{setting}"]
if not auto_mod or not config["enabled"]:
return False
return config
@property
def auto_mod(self):
config = self.config["auto_moderator"]
if config["punishment"] not in ("kick", "ban", "mute"):
raise InvalidConfig("Auto Mod", "one of kick ban or mute", "Punishment")
return AutoMod(config["enabled"], config["punishment"])
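# Rough shape of the config.json this class expects (a sketch inferred from the
# accessors above; key names match the lookups, values are placeholders and the
# real file may carry extra fields such as per-module anti_* options):
#
# {
#   "prefix": "!", "token": "...", "server": 1234567890,
#   "roles": {"admin": 1, "mod": 2, "muted": 3, "member": "everyone",
#             "offduty": 4, "staff": 5, "support": 6, "polls_news_ping": "everyone"},
#   "channels": {"user_log": 1, "mod_log": 2, "announcements": 3},
#   "emojis": {"online": 1, "idle": 2, "dnd": 3, "offline": 4, "streaming": 5,
#              "text_channel": 6, "voice_channel": 7, "green_tick": 8,
#              "red_tick": 9, "gray_tick": 10, "bot_tag": 11},
#   "colours": {"embed": "...", "error": "...", "ban": "...", "unban": "...",
#               "mute": "...", "unmute": "...", "kick": "..."},
#   "presence": {"type": "watching", "status": "online", "name": "...", "url": "..."},
#   "figlet": "...",
#   "directories": {"cogs": "cogs", "database": "data", "stderr": "logs"},
#   "case_insensitive": true,
#   "auto_moderator": {"enabled": true, "punishment": "mute",
#                      "modules": {"anti_invite": {"enabled": true}}}
# }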
|
the-stack_106_14965
|
from bisect import bisect_left
from collections import defaultdict
from copy import deepcopy
from multiprocessing.pool import Pool
from os import cpu_count
from time import time
from typing import List, Tuple, Dict, Set
from scipy.special import perm
from core.mallows import Mallows
from core.patterns import TwoLabelPattern
class TwoLabelInferencer(object):
"""
It performs inference for a list of 2-label patterns over a Mallows model, i.e., it calculates the probability
that a randomly sampled permutation satisfies at least one pattern.
Note that "L" is better than "H" in a permutation.
"""
def __init__(self, mallows: Mallows):
self.mallows = mallows
self.patterns: List[TwoLabelPattern] = None
self.item_to_pid_and_contributing_lh: Dict[object, Set[Tuple[int, str]]] = None
self.step_to_pids_to_stop_tracking_lhs = None
self.pid_to_sharing_item = {}
self.sharing_item_to_pids = {}
self.is_always_true = False
def estimate_num_states_generated_during_evaluation(self, patterns: List[TwoLabelPattern]):
self.patterns = deepcopy(patterns)
pid_to_label_spans: Dict[int, List[List[int]]] = {}
m = len(self.mallows.center)
for pid, pattern in enumerate(self.patterns):
span = [[m, -1], [m, -1]]
for label, items in pattern.label_to_items.items():
is_a_preferred_label = pattern.is_better_label(label)
for item in items:
step = self.mallows.center.index(item)
if is_a_preferred_label:
new_l = min(span[0][0], step)
new_h = max(span[0][1], step)
span[0] = [new_l, new_h]
else:
new_l = min(span[1][0], step)
new_h = max(span[1][1], step)
span[1] = [new_l, new_h]
new_span = [[span[0][0], span[0][1]], [span[1][0], span[1][1]]]
pid_to_label_spans[pid] = new_span
num_states = 0
for step in range(m):
num_labels = 0
num_both_labels = 0
for span in pid_to_label_spans.values():
positions = 0
if span[0][0] <= step <= span[0][1]:
positions += 1
if span[1][0] <= step <= span[1][1]:
positions += 1
num_labels += positions
if positions == 2:
num_both_labels += 1
num_states += perm(step + 1, len(patterns) * 2) / (2 ** len(patterns))
return num_states
def pre_process(self):
for pid, pattern in enumerate(self.patterns):
better_items = pattern.get_items_in_label(pattern.better_label)
worse_items = pattern.get_items_in_label(pattern.worse_label)
sharing_items = better_items.intersection(worse_items)
if len(sharing_items) == 1:
sharing_item = sharing_items.pop()
self.pid_to_sharing_item[pid] = sharing_item
self.sharing_item_to_pids.setdefault(sharing_item, set()).add(pid)
elif len(sharing_items) == 2:
self.is_always_true = True
break
def solve(self, patterns: List[TwoLabelPattern], threads=None) -> Tuple[float, int]:
start_time = time() # timestamp in seconds since the Epoch
self.patterns = deepcopy(patterns)
self.pre_process() # calculate sharing items and self.is_always_true
if self.is_always_true:
return 1, 0
else:
self.item_to_pid_and_contributing_lh = self.calculate_item_to_pid_and_contributing_lh()
self.step_to_pids_to_stop_tracking_lhs = self.calculate_step_to_pids_to_stop_tracking_lhs()
max_step = max(self.step_to_pids_to_stop_tracking_lhs)
state_to_prob: Dict[State, float] = defaultdict(float)
threads = threads or cpu_count()
for step, item in enumerate(self.mallows.center[:max_step + 1]):
print(f'step {step} / {max_step}, #states = {len(state_to_prob)}')
# initialize state_to_prob by inserting the first item.
if step == 0:
init_state = State()
init_state = init_state.insert(step, item, 0, self)
state_to_prob[init_state] = 1
# after inserting the 1st item, insert the rest items by Dynamic Programming.
else:
tasks = list(state_to_prob.items())
pij_i = self.mallows.pij_matrix[step]
batched_tasks = [(tasks[i::threads], step, item, pij_i) for i in range(threads)]
with Pool(processes=threads) as pool:
res_list = pool.map(self.perform_a_batch_of_tasks, batched_tasks)
state_to_prob.clear()
for res in res_list:
for state, probs in res.items():
state_to_prob[state] += sum(probs)
runtime_ms = int(1000 * (time() - start_time))
return 1 - sum(state_to_prob.values()), runtime_ms
def perform_a_batch_of_tasks(self, task_batch):
state_and_prob_batch: Dict[State, float] = task_batch[0]
step, item, pij_i = task_batch[1:]
state_to_probs = defaultdict(list)
for state, prob in state_and_prob_batch:
for position in state.calculate_insertion_positions_iterator(step, item, self):
state_new = state.insert(step, item, position, self)
state_to_probs[state_new].append(prob * pij_i[position])
return state_to_probs
def calculate_item_to_pid_and_contributing_lh(self):
item_to_pid_and_contributing_lh: Dict[object, Set[Tuple[int, str]]] = {}
for pid, pattern in enumerate(self.patterns):
sharing_item = self.pid_to_sharing_item.get(pid, None)
for label, items in pattern.label_to_items.items():
is_better_label = pattern.is_better_label(label)
for item in items:
if item != sharing_item:
if is_better_label:
# in the negation it is positioned after the other label, so it acts as an H bound
item_to_pid_and_contributing_lh.setdefault(item, set()).add((pid, 'H'))
else:
item_to_pid_and_contributing_lh.setdefault(item, set()).add((pid, 'L'))
return item_to_pid_and_contributing_lh
def calculate_step_to_pids_to_stop_tracking_lhs(self):
pid_to_finishing_steps: Dict[int, List[int]] = {}
for pid, pattern in enumerate(self.patterns):
pid_to_finishing_steps[pid] = [-1, -1]
for label, items in pattern.label_to_items.items():
is_better_label = pattern.is_better_label(label)
for item in items:
step = self.mallows.center.index(item)
if is_better_label:
pid_to_finishing_steps[pid][0] = max(pid_to_finishing_steps[pid][0], step)
else:
pid_to_finishing_steps[pid][1] = max(pid_to_finishing_steps[pid][1], step)
step_to_pids_to_stop_tracking_lhs = {}
for pid, [step_worse, step_better] in pid_to_finishing_steps.items():
step_to_pids_to_stop_tracking_lhs.setdefault(step_worse, {}).setdefault(pid, set()).add('L')
step_to_pids_to_stop_tracking_lhs.setdefault(step_better, {}).setdefault(pid, set()).add('H')
return step_to_pids_to_stop_tracking_lhs
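# Illustrative shape of the returned map (hypothetical pattern 0): an entry like
# {3: {0: {'L'}}, 5: {0: {'H'}}} means that after inserting the reference item at
# step 3 the state no longer needs to track pattern 0's 'L' bound, and after
# step 5 its 'H' bound can be dropped as well (see State.insert).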
class State(object):
def __init__(self):
self.positions = []
self.pid_to_lh_to_pos_rank = {}
def __eq__(self, other):
return isinstance(other, State) and \
(self.positions == other.positions) and \
(self.pid_to_lh_to_pos_rank == other.pid_to_lh_to_pos_rank)
def __hash__(self):
str_1 = str(self.positions)
str_2 = str([sorted(self.pid_to_lh_to_pos_rank[pid].items()) for pid in sorted(self.pid_to_lh_to_pos_rank)])
return hash((str_1, str_2))
def __str__(self):
rank_to_labels = defaultdict(set)
for pid, lh_to_rank in self.pid_to_lh_to_pos_rank.items():
if 'L' in lh_to_rank:
rank_to_labels[lh_to_rank['L']].add(f'{pid}B')
if 'H' in lh_to_rank:
rank_to_labels[lh_to_rank['H']].add(f'{pid}A')
pos, labels = [], []
for rank, sorted_labels in sorted(rank_to_labels.items()):
pos.append(str(self.positions[rank]))
labels.append('+'.join(sorted_labels))
return f"(pos)[{', '.join(pos)}] (label)[{', '.join(labels)}]"
def calculate_insertion_positions_iterator(self, step, item, inferencer: TwoLabelInferencer):
low, high = -1, len(self.positions)
if item in inferencer.item_to_pid_and_contributing_lh:
for pid, contributing_lh in inferencer.item_to_pid_and_contributing_lh[item]:
if contributing_lh == 'L' and self.is_tracking_pid_lh(pid, 'H'):
high = min(high, self.pid_to_lh_to_pos_rank[pid]['H'])
elif contributing_lh == 'H' and self.is_tracking_pid_lh(pid, 'L'):
low = max(low, self.pid_to_lh_to_pos_rank[pid]['L'])
for pid in inferencer.sharing_item_to_pids.get(item, set()):
if self.is_tracking_pid_lh(pid, 'H'):
high = min(high, self.pid_to_lh_to_pos_rank[pid]['H'])
if self.is_tracking_pid_lh(pid, 'L'):
low = max(low, self.pid_to_lh_to_pos_rank[pid]['L'])
if low < 0:
low_bound_pos = 0
else:
low_bound_pos = self.positions[low] + 1
if high < len(self.positions):
high_bound_pos = self.positions[high]
else:
high_bound_pos = step
return range(low_bound_pos, high_bound_pos + 1)
def insert(self, step, item, position, inferencer: TwoLabelInferencer):
state = deepcopy(self)
# index of insertion position in state.positions
pos_idx = bisect_left(self.positions, position)
# increase position values by 1 accordingly
for idx in range(pos_idx, len(self.positions)):
state.positions[idx] += 1
is_an_item_in_pattern = item in inferencer.item_to_pid_and_contributing_lh
is_a_sharing_item = item in inferencer.sharing_item_to_pids
if is_an_item_in_pattern or is_a_sharing_item:
# insert new pos into state.positions
state.positions.insert(pos_idx, position)
# increase rank values by 1 accordingly
for pid, tracking_ranks in self.pid_to_lh_to_pos_rank.items():
if 'L' in tracking_ranks and tracking_ranks['L'] >= pos_idx:
state.pid_to_lh_to_pos_rank[pid]['L'] += 1
if 'H' in tracking_ranks:
state.pid_to_lh_to_pos_rank[pid]['H'] += 1
elif 'H' in tracking_ranks and tracking_ranks['H'] >= pos_idx:
state.pid_to_lh_to_pos_rank[pid]['H'] += 1
# re-calculate representative label ranks
if is_an_item_in_pattern:
for pid, contributing_lh in inferencer.item_to_pid_and_contributing_lh[item]:
if contributing_lh == 'L':
if state.is_tracking_pid_lh(pid, 'L'):
current_l = state.pid_to_lh_to_pos_rank[pid]['L']
state.pid_to_lh_to_pos_rank[pid]['L'] = max(current_l, pos_idx)
else:
state.pid_to_lh_to_pos_rank.setdefault(pid, {})['L'] = pos_idx
else:
if state.is_tracking_pid_lh(pid, 'H'):
current_h = state.pid_to_lh_to_pos_rank[pid]['H']
state.pid_to_lh_to_pos_rank[pid]['H'] = min(current_h, pos_idx)
else:
state.pid_to_lh_to_pos_rank.setdefault(pid, {})['H'] = pos_idx
elif is_a_sharing_item:
for pid in inferencer.sharing_item_to_pids[item]:
if state.is_tracking_pid_lh(pid, 'L'):
current_l = state.pid_to_lh_to_pos_rank[pid]['L']
state.pid_to_lh_to_pos_rank[pid]['L'] = max(current_l, pos_idx)
else:
state.pid_to_lh_to_pos_rank.setdefault(pid, {})['L'] = pos_idx
if state.is_tracking_pid_lh(pid, 'H'):
current_h = state.pid_to_lh_to_pos_rank[pid]['H']
state.pid_to_lh_to_pos_rank[pid]['H'] = max(current_h, pos_idx)
else:
state.pid_to_lh_to_pos_rank.setdefault(pid, {})['H'] = pos_idx
if step in inferencer.step_to_pids_to_stop_tracking_lhs:
for pid, boundaries in inferencer.step_to_pids_to_stop_tracking_lhs[step].items():
if 'L' in boundaries and state.is_tracking_pid_lh(pid, 'L'):
del state.pid_to_lh_to_pos_rank[pid]['L']
if 'H' in boundaries and state.is_tracking_pid_lh(pid, 'H'):
del state.pid_to_lh_to_pos_rank[pid]['H']
if not state.pid_to_lh_to_pos_rank[pid]:
del state.pid_to_lh_to_pos_rank[pid]
state.compact()
return state
def compact(self):
"""
remove positions that no label is tracking.
"""
valid_ranks = set()
for tracking_ranks in self.pid_to_lh_to_pos_rank.values():
valid_ranks.update(tracking_ranks.values())
missing_ranks = [rank for rank in range(len(self.positions) - 1, -1, -1) if rank not in valid_ranks]
for missing_rank in missing_ranks:
self.positions.remove(self.positions[missing_rank])
for pid, tracking_ranks in self.pid_to_lh_to_pos_rank.items():
if 'L' in tracking_ranks and tracking_ranks['L'] > missing_rank:
self.pid_to_lh_to_pos_rank[pid]['L'] -= 1
if 'H' in tracking_ranks:
self.pid_to_lh_to_pos_rank[pid]['H'] -= 1
elif 'H' in tracking_ranks and tracking_ranks['H'] > missing_rank:
self.pid_to_lh_to_pos_rank[pid]['H'] -= 1
def is_tracking_pid_lh(self, pid, lh):
return pid in self.pid_to_lh_to_pos_rank and lh in self.pid_to_lh_to_pos_rank[pid]
|
the-stack_106_14968
|
"""
Test models
"""
from unittest import TestCase, main, skip
import numpy as np
from maml.models import AtomSets
from maml.describers import SiteElementProperty
class TestAtomSets(TestCase):
x = np.array([[0, 1, 0, 1, 0, 1]], dtype=np.int32).reshape((1, -1))
x_vec = np.random.normal(size=(1, 6, 20))
indices = np.array([[0, 0, 0, 1, 1, 1]], dtype=np.int32).reshape((1, -1))
y = np.array([[0.1, 0.2]]).reshape((1, 2, 1))
model1 = AtomSets(
describer=SiteElementProperty(),
is_embedding=True,
symmetry_func="mean",
n_neurons=(8, 8),
n_neurons_final=(4, 4),
n_targets=1,
)
model2 = AtomSets(
input_dim=20,
is_embedding=False,
symmetry_func="set2set",
n_neurons=(4, 4),
n_neurons_final=(4, 4),
T=2,
n_hidden=10,
)
def test_predict(self):
res = self.model1.predict_objs(["H2O", "FeO"])
print(res.shape, " res.shape")
self.assertTrue(res.shape == (2, 1))
res3 = self.model2.model.predict([self.x_vec, np.ones_like(self.indices), self.indices])
self.assertTrue(res3.shape == (1, 2, 1))
if __name__ == "__main__":
main()
|
the-stack_106_14973
|
# -*- coding: UTF-8 -*-
##Author Igor Támara [email protected]
##Use this little program as you wish, if you
#include it in your work, let others know you
#are using it preserving this note, you have
#the right to make derivative works, Use it
#at your own risk.
#Tested to work on(etch testing 13-08-2007):
# Python 2.4.4 (#2, Jul 17 2007, 11:56:54)
# [GCC 4.1.3 20070629 (prerelease) (Debian 4.1.2-13)] on linux2
dependclasses = ["User", "Group", "Permission", "Message"]
import re
import six
import sys
import gzip
import codecs
from xml.dom.minidom import * # NOQA
#Type dictionary: translation of SQL types -> Django field types
tsd = {
"text": "TextField",
"date": "DateField",
"varchar": "CharField",
"int": "IntegerField",
"float": "FloatField",
"serial": "AutoField",
"boolean": "BooleanField",
"numeric": "FloatField",
"timestamp": "DateTimeField",
"bigint": "IntegerField",
"datetime": "DateTimeField",
"date": "DateField",
"time": "TimeField",
"bool": "BooleanField",
"int": "IntegerField",
}
#convert varchar -> CharField
v2c = re.compile(r'varchar\((\d+)\)')
def index(fks, id):
"""Looks for the id on fks, fks is an array of arrays, each array has on [1]
the id of the class in a dia diagram. When not present returns None, else
it returns the position of the class with id on fks"""
for i, j in fks.items():
if fks[i][1] == id:
return i
return None
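# Illustrative sketch (hypothetical dia id): with
# fks = {'Person': [[], 'O0', '\nclass Person(models.Model) :\n', 0]},
# index(fks, 'O0') returns 'Person' and index(fks, 'O7') returns None.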
def addparentstofks(rels, fks):
"""Gets a list of relations, between parents and sons and a dict of
clases named in dia, and modifies the fks to add the parent as fk to get
order on the output of classes and replaces the base class of the son, to
put the class parent name.
"""
for j in rels:
son = index(fks, j[1])
parent = index(fks, j[0])
fks[son][2] = fks[son][2].replace("models.Model", parent)
if parent not in fks[son][0]:
fks[son][0].append(parent)
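# A minimal sketch of the structure passed around as fks/clases after
# addparentstofks has run (the class names below are illustrative
# assumptions):
#
#     clases = {
#         "Person": [[], "O1", "\nclass Person(models.Model) :\n ...", 0],
#         "Student": [["Person"], "O2", "\nclass Student(Person) :\n ...", 0],
#     }
#
# i.e. each value is [list of related class names, dia object id,
# generated class source, reference counter].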
def dia2django(archivo):
models_txt = ''
f = codecs.open(archivo, "rb")
#dia files are gzipped
data = gzip.GzipFile(fileobj=f).read()
ppal = parseString(data)
#diagram -> layer -> object -> UML - Class -> name, (attribs : composite -> name,type)
datos = ppal.getElementsByTagName("dia:diagram")[0].getElementsByTagName("dia:layer")[0].getElementsByTagName("dia:object")
clases = {}
herit = []
imports = six.u("")
for i in datos:
#Look for the classes
if i.getAttribute("type") == "UML - Class":
myid = i.getAttribute("id")
for j in i.childNodes:
if j.nodeType == Node.ELEMENT_NODE and j.hasAttributes():
if j.getAttribute("name") == "name":
actclas = j.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
myname = "\nclass %s(models.Model) :\n" % actclas
clases[actclas] = [[], myid, myname, 0]
if j.getAttribute("name") == "attributes":
for l in j.getElementsByTagName("dia:composite"):
if l.getAttribute("type") == "umlattribute":
#Look for the attribute name and type
for k in l.getElementsByTagName("dia:attribute"):
if k.getAttribute("name") == "name":
nc = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
elif k.getAttribute("name") == "type":
tc = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
elif k.getAttribute("name") == "value":
val = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
if val == '##':
val = ''
elif k.getAttribute("name") == "visibility" and k.getElementsByTagName("dia:enum")[0].getAttribute("val") == "2":
if tc.replace(" ", "").lower().startswith("manytomanyfield("):
#If we find a class not in our model that is marked as belonging to another model
newc = tc.replace(" ", "")[16:-1]
if dependclasses.count(newc) == 0:
dependclasses.append(newc)
if tc.replace(" ", "").lower().startswith("foreignkey("):
#If we find a class not in our model that is marked as belonging to another model
newc = tc.replace(" ", "")[11:-1]
if dependclasses.count(newc) == 0:
dependclasses.append(newc)
#Mapping SQL types to Django
varch = v2c.search(tc)
if tc.replace(" ", "").startswith("ManyToManyField("):
myfor = tc.replace(" ", "")[16:-1]
if actclas == myfor:
#In case of a recursive type, we use 'self'
tc = tc.replace(myfor, "'self'")
elif clases[actclas][0].count(myfor) == 0:
#Adding related class
if myfor not in dependclasses:
#In case we are using Auth classes or external via protected dia visibility
clases[actclas][0].append(myfor)
tc = "models." + tc
if len(val) > 0:
tc = tc.replace(")", "," + val + ")")
elif tc.find("Field") != -1:
if tc.count("()") > 0 and len(val) > 0:
tc = "models.%s" % tc.replace(")", "," + val + ")")
else:
tc = "models.%s(%s)" % (tc, val)
elif tc.replace(" ", "").startswith("ForeignKey("):
myfor = tc.replace(" ", "")[11:-1]
if actclas == myfor:
#In case of a recursive type, we use 'self'
tc = tc.replace(myfor, "'self'")
elif clases[actclas][0].count(myfor) == 0:
#Adding foreign classes
if myfor not in dependclasses:
#In case we are using Auth classes
clases[actclas][0].append(myfor)
tc = "models." + tc
if len(val) > 0:
tc = tc.replace(")", "," + val + ")")
elif varch is None:
tc = "models." + tsd[tc.strip().lower()] + "(" + val + ")"
else:
tc = "models.CharField(max_length=" + varch.group(1) + ")"
if len(val) > 0:
tc = tc.replace(")", ", " + val + " )")
if not (nc == "id" and tc == "AutoField()"):
clases[actclas][2] = clases[actclas][2] + (" %s = %s\n" % (nc, tc))
elif i.getAttribute("type") == "UML - Generalization":
mycons = ['A', 'A']
a = i.getElementsByTagName("dia:connection")
for j in a:
if len(j.getAttribute("to")):
mycons[int(j.getAttribute("handle"))] = j.getAttribute("to")
print(mycons)
if 'A' not in mycons:
herit.append(mycons)
elif i.getAttribute("type") == "UML - SmallPackage":
a = i.getElementsByTagName("dia:string")
for j in a:
if len(j.childNodes[0].data[1:-1]):
imports += six.u("from %s.models import *" % j.childNodes[0].data[1:-1])
addparentstofks(herit, clases)
#Ordering the appearance of classes
#First we make a list of the classes each class is related to.
ordered = []
for j, k in six.iteritems(clases):
k[2] = k[2] + "\n def __unicode__(self):\n return u\"\"\n"
for fk in k[0]:
if fk not in dependclasses:
clases[fk][3] += 1
ordered.append([j] + k)
i = 0
while i < len(ordered):
mark = i
j = i + 1
while j < len(ordered):
if ordered[i][0] in ordered[j][1]:
mark = j
j += 1
if mark == i:
i += 1
else:
# swap ordered[i] and ordered[mark] so that ordered[i] ends up later in the list
if ordered[i][0] in ordered[mark][1] and ordered[mark][0] in ordered[i][1]:
#Resolving simplistic circular ForeignKeys
print("Not able to resolve circular ForeignKeys between %s and %s" % (ordered[i][1], ordered[mark][0]))
break
a = ordered[i]
ordered[i] = ordered[mark]
ordered[mark] = a
if i == len(ordered) - 1:
break
ordered.reverse()
if imports:
models_txt = str(imports)
for i in ordered:
models_txt += '%s\n' % str(i[3])
return models_txt
if __name__ == '__main__':
if len(sys.argv) == 2:
dia2django(sys.argv[1])
else:
print(" Use:\n \n " + sys.argv[0] + " diagram.dia\n\n")
|
the-stack_106_14975
|
""" Auto Augment
Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
Papers: https://arxiv.org/abs/1805.09501 and https://arxiv.org/abs/1906.11172
Hacked together by Ross Wightman
"""
import random
import math
from PIL import Image, ImageOps, ImageEnhance
import PIL
import numpy as np
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
_HPARAMS_DEFAULT = dict(
translate_const=250,
img_mean=_FILL,
)
_RANDOM_INTERPOLATION = (Image.NEAREST, Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop('resample', Image.NEAREST)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
kwargs.pop('fillcolor')
kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
elif _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
bits_to_keep = max(1, bits_to_keep) # prevent all 0 images
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate(level)
return (level,)
def _enhance_level_to_arg(level):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg(level, translate_const):
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return (level,)
def _translate_rel_level_to_arg(level):
# range [-0.45, 0.45]
level = (level / _MAX_LEVEL) * 0.45
level = _randomly_negate(level)
return (level,)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Invert': lambda level: (),
'Rotate': _rotate_level_to_arg,
# FIXME these are both different from original impl as I believe there is a bug,
# not sure what is the correct alternative, hence 2 options that look better
'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4) + 4,), # range [4, 8]
'Posterize2': lambda level: (4 - int((level / _MAX_LEVEL) * 4),), # range [4, 0]
'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),), # range [0, 256]
'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),), # range [0, 110]
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'TranslateX': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
'TranslateY': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
'TranslateXRel': lambda level: _translate_rel_level_to_arg(level),
'TranslateYRel': lambda level: _translate_rel_level_to_arg(level),
}
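# A worked example of the level-to-argument mapping above (values follow
# directly from the formulas, with _MAX_LEVEL = 10): at magnitude 9,
#   'Rotate'   -> (9/10) * 30        = +/-27 degrees
#   'ShearX'   -> (9/10) * 0.3       = +/-0.27
#   'Color'    -> (9/10) * 1.8 + 0.1 = 1.72 (enhancement factor)
#   'Solarize' -> int((9/10) * 256)  = 230 (threshold)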
NAME_TO_OP = {
'AutoContrast': auto_contrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Posterize2': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x_abs,
'TranslateY': translate_y_abs,
'TranslateXRel': translate_x_rel,
'TranslateYRel': translate_y_rel,
}
class AutoAugmentOp:
def __init__(self, name, prob, magnitude, hparams={}):
self.aug_fn = NAME_TO_OP[name]
self.level_fn = level_to_arg(hparams)[name]
self.prob = prob
self.magnitude = magnitude
# If std deviation of magnitude is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from normal dist
# with mean magnitude and std-dev of magnitude_std.
# NOTE This is being tested as it's not in paper or reference impl.
self.magnitude_std = 0.5 # FIXME add arg/hparam
self.kwargs = {
'fillcolor': hparams['img_mean'] if 'img_mean' in hparams else _FILL,
'resample': hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION
}
def __call__(self, img):
if self.prob < random.random():
return img
magnitude = self.magnitude
if self.magnitude_std and self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(_MAX_LEVEL, max(0, magnitude))
level_args = self.level_fn(magnitude)
return self.aug_fn(img, *level_args, **self.kwargs)
def auto_augment_policy_v0(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from TPU EfficientNet impl, cannot find
# a paper reference.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_original(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from https://arxiv.org/abs/1805.09501
policy = [
[('Posterize', 0.4, 8), ('Rotate', 0.6, 9)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
[('Posterize', 0.6, 7), ('Posterize', 0.6, 6)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
[('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
[('Posterize', 0.8, 5), ('Equalize', 1.0, 2)],
[('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
[('Equalize', 0.6, 8), ('Posterize', 0.4, 6)],
[('Rotate', 0.8, 8), ('Color', 0.4, 0)],
[('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Rotate', 0.8, 8), ('Color', 1.0, 2)],
[('Color', 0.8, 8), ('Solarize', 0.8, 7)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
[('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
[('Color', 0.4, 0), ('Equalize', 0.6, 3)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy(name='v0', hparams=_HPARAMS_DEFAULT):
if name == 'original':
return auto_augment_policy_original(hparams)
elif name == 'v0':
return auto_augment_policy_v0(hparams)
else:
assert False, 'Unknown AA policy (%s)' % name
class AutoAugment:
def __init__(self, policy):
self.policy = policy
def __call__(self, img):
sub_policy = random.choice(self.policy)
for op in sub_policy:
img = op(img)
return img
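# A minimal usage sketch (not part of the original module; the random test
# image below is purely an illustrative assumption):
#
#     policy = auto_augment_policy('v0')
#     aa = AutoAugment(policy)
#     img = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
#     augmented = aa(img)  # applies one randomly chosen two-op sub-policy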
|
the-stack_106_14976
|
import tensorflow as tf
import numpy as np
import tflib as lib
import tflib.ops.linear
import tflib.ops.cond_batchnorm
import tflib.ops.conv2d
import tflib.ops.deconv2d
import tflib.ops.batchnorm
import tflib.ops.layernorm
import tflib.ops.concat
import functools
def nonlinearity(x):
return tf.nn.relu(x)
def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
def ReLULayer(name, n_in, n_out, inputs):
# for 32p initialization was not set!
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs, initialization='he')
return tf.nn.relu(output)
def LeakyReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs, initialization='he')
return LeakyReLU(output)
def Normalize(cfg, name, inputs, labels=None, is_training=True):
"""This is messy, but basically it chooses between batchnorm, layernorm,
their conditional variants, or nothing, depending on the value of `name` and
the global hyperparam flags."""
if (not cfg.CONDITIONAL) or cfg.LAYER_COND:
labels = None
if cfg.CONDITIONAL and cfg.ACGAN and ('Discriminator' in name):
labels = None
if ('Discriminator' in name) and cfg.NORMALIZATION_D:
if labels is not None:
# todo: fix (does not work)
# return lib.ops.layernorm.Layernorm_cond(name,[1,2,3],inputs,labels=labels,n_labels=N_LABELS)
return lib.ops.cond_batchnorm.Batchnorm(name, [0, 2, 3], inputs, labels=labels, n_labels=cfg.N_LABELS)
elif cfg.MODE == 'wgan-gp':
return lib.ops.layernorm.Layernorm(name,[1,2,3],inputs)
else:
return tf.layers.batch_normalization(inputs, axis=1, training=is_training, fused=True)
elif ('Generator' in name) and cfg.NORMALIZATION_G:
if labels is not None:
return lib.ops.cond_batchnorm.Batchnorm(name, [0,2,3], inputs,labels=labels, n_labels=cfg.N_LABELS)
else:
# return lib.ops.batchnorm.Batchnorm(name,[0,2,3], inputs, fused=True,
# is_training=is_training, stats_iter=stats_iter,
# update_moving_stats=update_moving_stats)
return tf.layers.batch_normalization(inputs, axis=1, training=is_training, fused=True)
else:
return inputs
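# Summary of the branches above:
#   Discriminator + NORMALIZATION_D: conditional batchnorm when labels are
#     given, layernorm for 'wgan-gp', plain batchnorm otherwise.
#   Generator + NORMALIZATION_G: conditional batchnorm when labels are
#     given, plain batchnorm otherwise.
#   Anything else: inputs are returned unchanged.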
def pixcnn_gated_nonlinearity(a, b):
return tf.sigmoid(a) * tf.tanh(b)
def SubpixelConv2D(*args, **kwargs):
kwargs['output_dim'] = 4*kwargs['output_dim']
output = lib.ops.conv2d.Conv2D(*args, **kwargs)
output = tf.transpose(output, [0,2,3,1])
output = tf.depth_to_space(output, 2)
output = tf.transpose(output, [0,3,1,2])
return output
def ConvMeanPool(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, inputs, he_init=he_init, biases=biases)
output = tf.add_n([output[:,:,::2,::2], output[:,:,1::2,::2], output[:,:,::2,1::2], output[:,:,1::2,1::2]]) / 4.
return output
def MeanPoolConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
output = inputs
output = tf.add_n([output[:,:,::2,::2], output[:,:,1::2,::2], output[:,:,::2,1::2], output[:,:,1::2,1::2]]) / 4. # half the size and averaging
output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)
return output
def UpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
output = inputs
output = tf.concat([output, output, output, output], axis=1)
output = tf.transpose(output, [0,2,3,1])
output = tf.depth_to_space(output, 2)
output = tf.transpose(output, [0,3,1,2])
output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)
return output
def ResidualBlock(cfg, name, input_dim, output_dim, filter_size, inputs, resample=None, no_dropout=False, labels=None,
is_training=True):
"""
resample: None, 'down', or 'up'
"""
if cfg.LAYER_COND:
y = labels
add_dim = cfg.N_LABELS
yb = tf.reshape(y, [-1, cfg.N_LABELS, 1, 1]) # labels reshaped to (batch, N_LABELS, 1, 1) maps for concatenation
else:
add_dim = 0
if resample=='down':
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim + add_dim, output_dim=input_dim)
conv_2 = functools.partial(ConvMeanPool, input_dim=input_dim + add_dim, output_dim=output_dim)
conv_shortcut = ConvMeanPool
elif resample=='up':
conv_1 = functools.partial(UpsampleConv, input_dim=input_dim + add_dim, output_dim=output_dim)
conv_shortcut = UpsampleConv
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim + add_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim + add_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim + add_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, he_init=False, biases=True, inputs=inputs)
output = inputs
output = Normalize(cfg, name+'.N1', output, labels=labels, is_training=is_training)
output = nonlinearity(output)
if cfg.LAYER_COND:
output = tflib.ops.concat.conv_cond_concat(output, yb) # after normalization and the nonlinearity, append the labels to the feature maps
output = conv_1(name+'.Conv1', filter_size=filter_size, inputs=output)
output = Normalize(cfg, name+'.N2', output, labels=labels, is_training=is_training)
output = nonlinearity(output)
if cfg.LAYER_COND:
output = tflib.ops.concat.conv_cond_concat(output, yb)
output = conv_2(name+'.Conv2', filter_size=filter_size, inputs=output)
return shortcut + output # finally, the 1x1 shortcut output is added elementwise to the 3x3 conv output
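# Shape behaviour of ResidualBlock in NCHW format, following the conv
# helpers above:
#   resample='down': (N, input_dim, H, W) -> (N, output_dim, H/2, W/2)
#   resample='up'  : (N, input_dim, H, W) -> (N, output_dim, 2H, 2W)
#   resample=None  : (N, input_dim, H, W) -> (N, output_dim, H, W)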
def OptimizedResBlockDisc1(cfg, inputs, labels):
if cfg.LAYER_COND:
y = labels
add_dim = cfg.N_LABELS
yb = tf.reshape(y, [-1, cfg.N_LABELS, 1, 1])
else:
add_dim = 0
#(functools.partial binds each conv to some fixed parameters, which simplifies applying them later):
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=3 + add_dim, output_dim=cfg.DIM_D) # because the input image has 3 channels (RGB)
conv_2 = functools.partial(ConvMeanPool, input_dim=cfg.DIM_D + add_dim, output_dim=cfg.DIM_D)
conv_shortcut = MeanPoolConv
shortcut = conv_shortcut('Discriminator.1.Shortcut', input_dim=3, output_dim=cfg.DIM_D, filter_size=1,
he_init=False, biases=True, inputs=inputs)
output = inputs
if cfg.LAYER_COND:
output = tflib.ops.concat.conv_cond_concat(output, yb)
output = conv_1('Discriminator.1.Conv1', filter_size=3, inputs=output)
output = nonlinearity(output)
if cfg.LAYER_COND:
output = tflib.ops.concat.conv_cond_concat(output, yb)
output = conv_2('Discriminator.1.Conv2', filter_size=3, inputs=output)
return shortcut + output
def BottleneckResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if resample=='down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim/2)
conv_1b = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim/2, output_dim=output_dim/2, stride=2)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim/2, output_dim=output_dim)
elif resample=='up':
conv_shortcut = SubpixelConv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim/2)
conv_1b = functools.partial(lib.ops.deconv2d.Deconv2D, input_dim=input_dim/2, output_dim=output_dim/2)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim/2, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim/2)
conv_1b = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim/2, output_dim=output_dim/2)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim/2, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample is None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
he_init=False, biases=True, inputs=inputs)
output = inputs
output = tf.nn.relu(output)
output = conv_1(name+'.Conv1', filter_size=1, inputs=output, he_init=he_init)
output = tf.nn.relu(output)
output = conv_1b(name+'.Conv1B', filter_size=filter_size, inputs=output, he_init=he_init)
output = tf.nn.relu(output)
output = conv_2(name+'.Conv2', filter_size=1, inputs=output, he_init=he_init, biases=False)
output = Normalize(name+'.BN', [0,2,3], output) # todo: this call does not match the Normalize(cfg, name, inputs, ...) signature defined above
return shortcut + (0.3*output)
def ScaledUpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
output = inputs
output = lib.ops.concat.concat([output, output, output, output], axis=1)
output = tf.transpose(output, [0,2,3,1])
output = tf.depth_to_space(output, 2)
output = tf.transpose(output, [0,3,1,2])
output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases, gain=0.5)
return output
|
the-stack_106_14977
|
"""ClipTools clipboard manager and text processing tools
with a lines based GUI interface
Data loader, searching for available personal data.
WARNING, python file will be executed!
When writing a personal python file, take care not to allow uncontrolled changes!
yaml is safer from this point of view.
Note: logging is not part of ClipTools yet. Only minimalistic feedback is given.
If the personal text file is not found, the tool silently reads the default.
If loading the file raises an error, a message is printed to the console (if available).
"""
import pathlib
from .. import config
def load_data():
"""Load available personal or sample text data
data is an OrderedDict or similar structure,
where keys are the names and values are lists of texts.
"""
ext_data = pathlib.Path(config.EXTERNAL_DATA)
if not ext_data.is_absolute():
ext_data = pathlib.Path.home() / ext_data
if ext_data.exists():
try:
return load_ext_data(ext_data)
except Exception as exc: # pylint: disable=broad-except
# fallback to sample data
print('Cannot load: {}, exception: {}'.format(config.EXTERNAL_DATA, exc))
return load_sample_data()
def load_ext_data(ext_data):
"""Load external data, raise exception if something is not ok."""
if ext_data.suffix.lower() == '.py':
return load_ext_py_data(ext_data)
if ext_data.suffix.lower() == '.yml':
return load_ext_yml_data(ext_data)
raise RuntimeError('Type not supported')
def load_ext_py_data(ext_data):
"""Load external python data.
WARNING, python file will be executed, take care not to allow uncontrolled changes!
raise exception if something is not ok."""
content = ext_data.read_text(encoding='utf-8')
glo = dict()
loc = dict()
exec(content, glo, loc) # pylint: disable=exec-used
return loc['DEFINED_TEXTS']
def load_ext_yml_data(ext_data):
"""Load external yaml data,
raise exception if something is not ok."""
import strictyaml # pylint: disable=import-outside-toplevel
content = ext_data.read_text(encoding='utf-8')
return strictyaml.load(content).data
def load_sample_data():
"""Load provided sample data"""
try:
from .. import text_data # pylint: disable=import-outside-toplevel
except Exception: # pylint: disable=broad-except
# No data at all, return an empty dictionary
return dict()
return text_data.DEFINED_TEXTS
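# A minimal sketch of what a personal data file is expected to provide (the
# group names and texts below are illustrative assumptions; only the
# DEFINED_TEXTS name comes from the loaders above). A python file would
# contain:
#
#     DEFINED_TEXTS = {
#         "greetings": ["Hello,", "Best regards,"],
#         "snippets": ["TODO", "FIXME"],
#     }
#
# A yaml file read by strictyaml would hold the same mapping of group names
# to lists of texts.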
|
the-stack_106_14979
|
#!/usr/bin/python3
import importlib
import json
import os
import re
import shutil
import sys
import warnings
import zipfile
from base64 import b64encode
from hashlib import sha1
from io import BytesIO
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Iterator, KeysView, List, Optional, Set, Tuple, Union
from urllib.parse import urlparse
import requests
import yaml
from semantic_version import Version
from solcx.exceptions import SolcNotInstalled
from tqdm import tqdm
from vvm.exceptions import VyperNotInstalled
from brownie._config import (
CONFIG,
REQUEST_HEADERS,
_get_data_folder,
_load_project_compiler_config,
_load_project_config,
_load_project_dependencies,
_load_project_envvars,
_load_project_structure_config,
)
from brownie._expansion import expand_posix_vars
from brownie.exceptions import (
BadProjectName,
BrownieEnvironmentWarning,
InvalidPackage,
PragmaError,
ProjectAlreadyLoaded,
ProjectNotFound,
)
from brownie.network import web3
from brownie.network.contract import (
Contract,
ContractContainer,
InterfaceContainer,
ProjectContract,
)
from brownie.network.state import _add_contract, _remove_contract, _revert_register
from brownie.project import compiler, ethpm
from brownie.project.build import BUILD_KEYS, INTERFACE_KEYS, Build
from brownie.project.ethpm import get_deployment_addresses, get_manifest
from brownie.project.sources import Sources, get_pragma_spec
from brownie.utils import notify
BUILD_FOLDERS = ["contracts", "deployments", "interfaces"]
MIXES_URL = "https://github.com/brownie-mix/{}-mix/archive/{}.zip"
GITIGNORE = """__pycache__
.env
.history
.hypothesis/
build/
reports/
"""
GITATTRIBUTES = """*.sol linguist-language=Solidity
*.vy linguist-language=Python
"""
_loaded_projects = []
class _ProjectBase:
_path: Optional[Path]
_build_path: Optional[Path]
_sources: Sources
_build: Build
def _compile(self, contract_sources: Dict, compiler_config: Dict, silent: bool) -> None:
compiler_config.setdefault("solc", {})
allow_paths = None
cwd = os.getcwd()
if self._path is not None:
_install_dependencies(self._path)
allow_paths = self._path.as_posix()
os.chdir(self._path)
try:
build_json = compiler.compile_and_format(
contract_sources,
solc_version=compiler_config["solc"].get("version", None),
vyper_version=compiler_config["vyper"].get("version", None),
optimize=compiler_config["solc"].get("optimize", None),
runs=compiler_config["solc"].get("runs", None),
evm_version=compiler_config["evm_version"],
silent=silent,
allow_paths=allow_paths,
remappings=compiler_config["solc"].get("remappings", []),
optimizer=compiler_config["solc"].get("optimizer", None),
)
finally:
os.chdir(cwd)
for alias, data in build_json.items():
if self._build_path is not None and not data["sourcePath"].startswith("interface"):
# interfaces should generate artifact in /build/interfaces/ not /build/contracts/
if alias == data["contractName"]:
# if the alias == contract name, this is a part of the core project
path = self._build_path.joinpath(f"contracts/{alias}.json")
else:
# otherwise, this is an artifact from an external dependency
path = self._build_path.joinpath(f"contracts/dependencies/{alias}.json")
for parent in list(path.parents)[::-1]:
parent.mkdir(exist_ok=True)
with path.open("w") as fp:
json.dump(data, fp, sort_keys=True, indent=2, default=sorted)
if alias == data["contractName"]:
# only add artifacts from the core project for now
self._build._add_contract(data)
def _create_containers(self) -> None:
# create container objects
self.interface = InterfaceContainer(self)
self._containers: Dict = {}
for key, data in self._build.items():
if data["type"] == "interface":
self.interface._add(data["contractName"], data["abi"])
if data.get("bytecode"):
container = ContractContainer(self, data)
self._containers[key] = container
setattr(self, container._name, container)
def __getitem__(self, key: str) -> ContractContainer:
return self._containers[key]
def __iter__(self) -> Iterator[ContractContainer]:
return iter(self._containers[i] for i in sorted(self._containers))
def __len__(self) -> int:
return len(self._containers)
def __contains__(self, item: ContractContainer) -> bool:
return item in self._containers
def dict(self) -> Dict:
return dict(self._containers)
def keys(self) -> KeysView[Any]:
return self._containers.keys()
class Project(_ProjectBase):
"""
Top level dict-like container that holds data and objects related to
a brownie project.
Attributes:
_path: Path object, absolute path to the project
_name: Name that the project is loaded as
_sources: project Source object
_build: project Build object
"""
def __init__(self, name: str, project_path: Path) -> None:
self._path: Path = project_path
self._envvars = _load_project_envvars(project_path)
self._structure = expand_posix_vars(
_load_project_structure_config(project_path), self._envvars
)
self._build_path: Path = project_path.joinpath(self._structure["build"])
self._name = name
self._active = False
self.load()
def load(self) -> None:
"""Compiles the project contracts, creates ContractContainer objects and
populates the namespace."""
if self._active:
raise ProjectAlreadyLoaded("Project is already active")
contract_sources = _load_sources(self._path, self._structure["contracts"], False)
interface_sources = _load_sources(self._path, self._structure["interfaces"], True)
self._sources = Sources(contract_sources, interface_sources)
self._build = Build(self._sources)
contract_list = self._sources.get_contract_list()
for path in list(self._build_path.glob("contracts/*.json")):
try:
with path.open() as fp:
build_json = json.load(fp)
except json.JSONDecodeError:
build_json = {}
if not set(BUILD_KEYS).issubset(build_json) or path.stem not in contract_list:
path.unlink()
continue
if isinstance(build_json["allSourcePaths"], list):
# this handles the format change in v1.7.0, it can be removed in a future release
path.unlink()
test_path = self._build_path.joinpath("tests.json")
if test_path.exists():
test_path.unlink()
continue
if not self._path.joinpath(build_json["sourcePath"]).exists():
path.unlink()
continue
self._build._add_contract(build_json)
interface_hashes = {}
interface_list = self._sources.get_interface_list()
for path in list(self._build_path.glob("interfaces/*.json")):
try:
with path.open() as fp:
build_json = json.load(fp)
except json.JSONDecodeError:
build_json = {}
if not set(INTERFACE_KEYS).issubset(build_json) or path.stem not in interface_list:
path.unlink()
continue
self._build._add_interface(build_json)
interface_hashes[path.stem] = build_json["sha1"]
self._compiler_config = expand_posix_vars(
_load_project_compiler_config(self._path), self._envvars
)
# compile updated sources, update build
changed = self._get_changed_contracts(interface_hashes)
self._compile(changed, self._compiler_config, False)
self._compile_interfaces(interface_hashes)
self._load_dependency_artifacts()
self._create_containers()
self._load_deployments()
# add project to namespaces, apply import blackmagic
name = self._name
self.__all__ = list(self._containers) + ["interface"]
sys.modules[f"brownie.project.{name}"] = self # type: ignore
sys.modules["brownie.project"].__dict__[name] = self
sys.modules["brownie.project"].__all__.append(name) # type: ignore
sys.modules["brownie.project"].__console_dir__.append(name) # type: ignore
self._namespaces = [
sys.modules["__main__"].__dict__,
sys.modules["brownie.project"].__dict__,
]
# register project for revert and reset
_revert_register(self)
self._active = True
_loaded_projects.append(self)
def _get_changed_contracts(self, compiled_hashes: Dict) -> Dict:
# get list of changed interfaces and contracts
new_hashes = self._sources.get_interface_hashes()
# remove outdated build artifacts
for name in [k for k, v in new_hashes.items() if compiled_hashes.get(k, None) != v]:
self._build._remove_interface(name)
contracts = set(i for i in self._sources.get_contract_list() if self._compare_build_json(i))
for contract_name in list(contracts):
contracts.update(self._build.get_dependents(contract_name))
# remove outdated build artifacts
for name in contracts:
self._build._remove_contract(name)
# get final list of changed source paths
changed_set: Set = set(self._sources.get_source_path(i) for i in contracts)
return {i: self._sources.get(i) for i in changed_set}
def _compare_build_json(self, contract_name: str) -> bool:
config = self._compiler_config
# confirm that this contract was previously compiled
try:
source = self._sources.get(contract_name)
build_json = self._build.get(contract_name)
except KeyError:
return True
# compare source hashes
if build_json["sha1"] != sha1(source.encode()).hexdigest():
return True
# compare compiler settings
if _compare_settings(config, build_json["compiler"]):
return True
if build_json["language"] == "Solidity":
# compare solc-specific compiler settings
solc_config = config["solc"].copy()
solc_config["remappings"] = None
if _compare_settings(solc_config, build_json["compiler"]):
return True
# compare solc pragma against compiled version
if Version(build_json["compiler"]["version"]) not in get_pragma_spec(source):
return True
return False
def _compile_interfaces(self, compiled_hashes: Dict) -> None:
new_hashes = self._sources.get_interface_hashes()
changed_paths = [
self._sources.get_source_path(k, True)
for k, v in new_hashes.items()
if compiled_hashes.get(k, None) != v
]
if not changed_paths:
return
print("Generating interface ABIs...")
changed_sources = {i: self._sources.get(i) for i in changed_paths}
abi_json = compiler.get_abi(
changed_sources,
solc_version=self._compiler_config["solc"].get("version", None),
allow_paths=self._path.as_posix(),
remappings=self._compiler_config["solc"].get("remappings", []),
)
for name, abi in abi_json.items():
with self._build_path.joinpath(f"interfaces/{name}.json").open("w") as fp:
json.dump(abi, fp, sort_keys=True, indent=2, default=sorted)
self._build._add_interface(abi)
def _load_dependency_artifacts(self) -> None:
dep_build_path = self._build_path.joinpath("contracts/dependencies/")
for path in list(dep_build_path.glob("**/*.json")):
contract_alias = path.relative_to(dep_build_path).with_suffix("").as_posix()
if self._build.get_dependents(contract_alias):
with path.open() as fp:
build_json = json.load(fp)
self._build._add_contract(build_json, contract_alias)
else:
path.unlink()
def _load_deployments(self) -> None:
if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
return
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
path = self._build_path.joinpath(f"deployments/{chainid}")
path.mkdir(exist_ok=True)
deployments = list(path.glob("*.json"))
deployments.sort(key=lambda k: k.stat().st_mtime)
deployment_map = self._load_deployment_map()
for build_json in deployments:
with build_json.open() as fp:
build = json.load(fp)
contract_name = build["contractName"]
if contract_name not in self._containers:
build_json.unlink()
continue
if "pcMap" in build:
contract = ProjectContract(self, build, build_json.stem)
else:
contract = Contract.from_abi( # type: ignore
contract_name, build_json.stem, build["abi"]
)
contract._project = self
container = self._containers[contract_name]
_add_contract(contract)
container._contracts.append(contract)
# update deployment map for the current chain
instances = deployment_map.setdefault(chainid, {}).setdefault(contract_name, [])
if build_json.stem in instances:
instances.remove(build_json.stem)
instances.insert(0, build_json.stem)
self._save_deployment_map(deployment_map)
def _load_deployment_map(self) -> Dict:
deployment_map: Dict = {}
map_path = self._build_path.joinpath("deployments/map.json")
if map_path.exists():
with map_path.open("r") as fp:
deployment_map = json.load(fp)
return deployment_map
def _save_deployment_map(self, deployment_map: Dict) -> None:
with self._build_path.joinpath("deployments/map.json").open("w") as fp:
json.dump(deployment_map, fp, sort_keys=True, indent=2, default=sorted)
def _remove_from_deployment_map(self, contract: ProjectContract) -> None:
if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
return
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
deployment_map = self._load_deployment_map()
try:
deployment_map[chainid][contract._name].remove(contract.address)
if not deployment_map[chainid][contract._name]:
del deployment_map[chainid][contract._name]
if not deployment_map[chainid]:
del deployment_map[chainid]
except (KeyError, ValueError):
pass
self._save_deployment_map(deployment_map)
def _add_to_deployment_map(self, contract: ProjectContract) -> None:
if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
return
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
deployment_map = self._load_deployment_map()
try:
deployment_map[chainid][contract._name].remove(contract.address)
except (ValueError, KeyError):
pass
deployment_map.setdefault(chainid, {}).setdefault(contract._name, []).insert(
0, contract.address
)
self._save_deployment_map(deployment_map)
def _update_and_register(self, dict_: Any) -> None:
dict_.update(self)
if "interface" not in dict_:
dict_["interface"] = self.interface
self._namespaces.append(dict_)
def _add_to_main_namespace(self) -> None:
# temporarily adds project objects to the main namespace
brownie: Any = sys.modules["brownie"]
if "interface" not in brownie.__dict__:
brownie.__dict__["interface"] = self.interface
brownie.__dict__.update(self._containers)
brownie.__all__.extend(self.__all__)
def _remove_from_main_namespace(self) -> None:
# removes project objects from the main namespace
brownie: Any = sys.modules["brownie"]
if brownie.__dict__.get("interface") == self.interface:
del brownie.__dict__["interface"]
for key in self._containers:
brownie.__dict__.pop(key, None)
for key in self.__all__:
if key in brownie.__all__:
brownie.__all__.remove(key)
def __repr__(self) -> str:
return f"<Project '{self._name}'>"
def load_config(self) -> None:
"""Loads the project config file settings"""
if isinstance(self._path, Path):
_load_project_config(self._path)
def close(self, raises: bool = True) -> None:
"""Removes pointers to the project's ContractContainer objects and this object."""
if not self._active:
if not raises:
return
raise ProjectNotFound("Project is not currently loaded.")
# remove objects from namespace
for dict_ in self._namespaces:
for key in [
k
for k, v in dict_.items()
if v == self or (k in self and v == self[k]) # type: ignore
]:
del dict_[key]
# remove contracts
for contract in [x for v in self._containers.values() for x in v._contracts]:
_remove_contract(contract)
for container in self._containers.values():
container._contracts.clear()
self._containers.clear()
# undo black-magic
self._remove_from_main_namespace()
name = self._name
del sys.modules[f"brownie.project.{name}"]
sys.modules["brownie.project"].__all__.remove(name) # type: ignore
sys.modules["brownie.project"].__console_dir__.remove(name) # type: ignore
self._active = False
_loaded_projects.remove(self)
# clear paths
try:
sys.path.remove(str(self._path))
except ValueError:
pass
def _clear_dev_deployments(self, height: int) -> None:
path = self._build_path.joinpath("deployments/dev")
if path.exists():
deployment_map = self._load_deployment_map()
for deployment in path.glob("*.json"):
if height == 0:
deployment.unlink()
else:
with deployment.open("r") as fp:
deployment_artifact = json.load(fp)
block_height = deployment_artifact["deployment"]["blockHeight"]
address = deployment_artifact["deployment"]["address"]
contract_name = deployment_artifact["contractName"]
if block_height > height:
deployment.unlink()
try:
deployment_map["dev"][contract_name].remove(address)
except (KeyError, ValueError):
pass
if "dev" in deployment_map and (height == 0 or not deployment_map["dev"]):
del deployment_map["dev"]
shutil.rmtree(path)
self._save_deployment_map(deployment_map)
def _revert(self, height: int) -> None:
self._clear_dev_deployments(height)
def _reset(self) -> None:
self._clear_dev_deployments(0)
class TempProject(_ProjectBase):
"""Simplified Project class used to hold temporary contracts that are
compiled via project.compile_source"""
def __init__(self, name: str, contract_sources: Dict, compiler_config: Dict) -> None:
self._path = None
self._build_path = None
self._name = name
self._sources = Sources(contract_sources, {})
self._build = Build(self._sources)
self._compile(contract_sources, compiler_config, True)
self._create_containers()
def __repr__(self) -> str:
return f"<TempProject '{self._name}'>"
def check_for_project(path: Union[Path, str] = ".") -> Optional[Path]:
"""Checks for a Brownie project."""
path = Path(path).resolve()
for folder in [path] + list(path.parents):
structure_config = _load_project_structure_config(folder)
contracts = folder.joinpath(structure_config["contracts"])
interfaces = folder.joinpath(structure_config["interfaces"])
scripts = folder.joinpath(structure_config["scripts"])
tests = folder.joinpath(structure_config["tests"])
if next((i for i in contracts.glob("**/*") if i.suffix in (".vy", ".sol")), None):
return folder
if next((i for i in interfaces.glob("**/*") if i.suffix in (".json", ".vy", ".sol")), None):
return folder
if next((i for i in scripts.glob("**/*") if i.suffix in (".py",)), None):
return folder
if contracts.is_dir() and tests.is_dir():
return folder
return None
def get_loaded_projects() -> List["Project"]:
"""Returns a list of currently loaded Project objects."""
return _loaded_projects.copy()
def new(
project_path_str: str = ".", ignore_subfolder: bool = False, ignore_existing: bool = False
) -> str:
"""Initializes a new project.
Args:
project_path: Path to initialize the project at. If it does not exist, it will be created.
ignore_subfolder: (deprecated)
ignore_existing: If True, will not raise when initiating in a non-empty directory.
Returns the path to the project as a string.
"""
project_path = Path(project_path_str).resolve()
if not ignore_existing and project_path.exists() and list(project_path.glob("*")):
raise FileExistsError(f"Directory is not empty: {project_path}")
project_path.mkdir(exist_ok=True)
_create_folders(project_path)
_create_gitfiles(project_path)
_add_to_sys_path(project_path)
return str(project_path)
def from_brownie_mix(
project_name: str, project_path: Union[Path, str] = None, ignore_subfolder: bool = False
) -> str:
"""Initializes a new project via a template. Templates are downloaded from
https://www.github.com/brownie-mix
Args:
project_path: Path to initialize the project at.
ignore_subfolders: (deprecated)
Returns the path to the project as a string.
"""
project_name = str(project_name).lower().replace("-mix", "")
headers = REQUEST_HEADERS.copy()
headers.update(_maybe_retrieve_github_auth())
default_branch = _get_mix_default_branch(project_name, headers)
url = MIXES_URL.format(project_name, default_branch)
if project_path is None:
project_path = Path(".").joinpath(project_name)
project_path = Path(project_path).resolve()
if project_path.exists() and list(project_path.glob("*")):
raise FileExistsError(f"Folder already exists - {project_path}")
print(f"Downloading from {url}...")
_stream_download(url, str(project_path.parent), headers)
project_path.parent.joinpath(f"{project_name}-mix-{default_branch}").rename(project_path)
_create_folders(project_path)
_create_gitfiles(project_path)
_add_to_sys_path(project_path)
return str(project_path)
def from_ethpm(uri: str) -> "TempProject":
"""
Generates a TempProject from an ethPM package.
"""
manifest = get_manifest(uri)
compiler_config = {
"evm_version": None,
"solc": {"version": None, "optimize": True, "runs": 200},
"vyper": {"version": None},
}
project = TempProject(manifest["package_name"], manifest["sources"], compiler_config)
if web3.isConnected():
for contract_name in project.keys():
for address in get_deployment_addresses(manifest, contract_name):
project[contract_name].at(address)
return project
def compile_source(
source: str,
solc_version: Optional[str] = None,
vyper_version: Optional[str] = None,
optimize: bool = True,
runs: Optional[int] = 200,
evm_version: Optional[str] = None,
) -> "TempProject":
"""
Compile the given source code string and return a TempProject container with
the ContractContainer instances.
"""
compiler_config: Dict = {"evm_version": evm_version, "solc": {}, "vyper": {}}
# if no compiler version was given, first try to find a Solidity pragma
if solc_version is None and vyper_version is None:
try:
solc_version = compiler.solidity.find_best_solc_version(
{"<stdin>": source}, install_needed=True, silent=False
)
except (PragmaError, SolcNotInstalled):
pass
if vyper_version is None:
# if no vyper compiler version is given, try to compile using solidity
compiler_config["solc"] = {
"version": solc_version or str(compiler.solidity.get_version().truncate()),
"optimize": optimize,
"runs": runs,
}
try:
return TempProject("TempSolcProject", {"<stdin>.sol": source}, compiler_config)
except Exception as exc:
# if compilation fails, raise when a solc version was given or we found a pragma
if solc_version is not None:
raise exc
if vyper_version is None:
# if no vyper compiler version was given, try to find a pragma
try:
vyper_version = compiler.vyper.find_best_vyper_version(
{"<stdin>": source}, install_needed=True, silent=False
)
except (PragmaError, VyperNotInstalled):
pass
compiler_config["vyper"] = {"version": vyper_version or compiler.vyper.get_version()}
try:
return TempProject("TempVyperProject", {"<stdin>.vy": source}, compiler_config)
except Exception as exc:
if solc_version is None and vyper_version is None:
raise PragmaError(
"No compiler version specified, no pragma statement in the source, "
"and compilation failed with both solc and vyper"
) from None
raise exc
def load(project_path: Union[Path, str, None] = None, name: Optional[str] = None) -> "Project":
"""Loads a project and instantiates various related objects.
Args:
project_path: Path of the project to load. If None, will attempt to
locate a project using check_for_project()
name: Name to assign to the project. If None, the name is generated
from the name of the project folder
Returns a Project object.
"""
# checks
if project_path is None:
project_path = check_for_project(".")
if project_path is not None and project_path != Path(".").absolute():
warnings.warn(
f"Loaded project has a root folder of '{project_path}' "
"which is different from the current working directory",
BrownieEnvironmentWarning,
)
else:
project_path = Path(project_path)
if project_path.resolve() != check_for_project(project_path):
packages_path = _get_data_folder().joinpath("packages")
if not project_path.is_absolute() and packages_path.joinpath(project_path).exists():
project_path = packages_path.joinpath(project_path)
else:
project_path = None
if project_path is None:
raise ProjectNotFound("Could not find Brownie project")
project_path = Path(project_path).resolve()
if name is None:
name = project_path.name
if not name.lower().endswith("project"):
name += " project"
if not name[0].isalpha():
raise BadProjectName("Project must start with an alphabetic character")
name = "".join(i for i in name.title() if i.isalnum())
if next((True for i in _loaded_projects if i._name == name), False):
raise ProjectAlreadyLoaded("There is already a project loaded with this name")
# paths
_create_folders(project_path)
_add_to_sys_path(project_path)
# load sources and build
return Project(name, project_path)
def _install_dependencies(path: Path) -> None:
for package_id in _load_project_dependencies(path):
try:
install_package(package_id)
except FileExistsError:
pass
def install_package(package_id: str) -> str:
"""
Install a package.
Arguments
---------
package_id : str
Package ID or ethPM URI.
Returns
-------
str
ID of the installed package.
"""
if urlparse(package_id).scheme in ("erc1319", "ethpm"):
return _install_from_ethpm(package_id)
else:
return _install_from_github(package_id)
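# e.g. install_package("OpenZeppelin/[email protected]") for a Github package,
# or an ethpm:// / erc1319:// URI for an ethPM package.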
def _install_from_ethpm(uri: str) -> str:
manifest = get_manifest(uri)
org = manifest["meta_brownie"]["registry_address"]
repo = manifest["package_name"]
version = manifest["version"]
install_path = _get_data_folder().joinpath(f"packages/{org}")
install_path.mkdir(exist_ok=True)
install_path = install_path.joinpath(f"{repo}@{version}")
if install_path.exists():
raise FileExistsError("Package is aleady installed")
try:
new(str(install_path), ignore_existing=True)
ethpm.install_package(install_path, uri)
project = load(install_path)
project.close()
except Exception as e:
shutil.rmtree(install_path)
raise e
return f"{org}/{repo}@{version}"
def _maybe_retrieve_github_auth() -> Dict[str, str]:
"""Returns appropriate github authorization headers.
Otherwise returns an empty dict if no auth token is present.
"""
token = os.getenv("GITHUB_TOKEN")
if token:
auth = b64encode(token.encode()).decode()
return {"Authorization": f"Basic {auth}"}
return {}
def _latest_tag(tags: List[str]) -> str:
if not tags:
raise ValueError("empty")
# Regexp taken from https://semver.org/ .
pattern = re.compile(r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)")
def f(x: str) -> int:
match = pattern.match(x.strip().lstrip("v"))
if match:
a = int(match.group(1)) * 1_000_000
b = int(match.group(2)) * 1_000
c = int(match.group(3)) * 1
return a + b + c
return 0
xs = sorted(tags, key=f)
return xs[-1]
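# A worked example of _latest_tag (the tags are illustrative):
#     _latest_tag(["v1.2.0", "1.10.3", "0.9.1"])  ->  "1.10.3"
# because f() maps the tags to 1002000, 1010003 and 9001 respectively and the
# highest score wins; tags that do not match the semver pattern map to 0.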
def _install_from_github(package_id: str) -> str:
try:
path, version = package_id.split("@")
org, repo = path.split("/")
except ValueError:
raise ValueError(
"Invalid package ID. Must be given as [ORG]/[REPO]@[VERSION]"
"\ne.g. 'OpenZeppelin/[email protected]'"
) from None
headers = REQUEST_HEADERS.copy()
headers.update(_maybe_retrieve_github_auth())
if re.match(r"^[0-9a-f]+$", version):
download_url = f"https://api.github.com/repos/{org}/{repo}/zipball/{version}"
else:
# If version==latest, we also update the `version` variable to
# the latest tag available.
download_url, version = _get_download_url_from_tag(org, repo, version, headers)
# Once version is eventually updated, we form the install path.
base_install_path = _get_data_folder().joinpath("packages")
install_path = base_install_path.joinpath(f"{org}")
install_path.mkdir(exist_ok=True)
install_path = install_path.joinpath(f"{repo}@{version}")
if install_path.exists():
raise FileExistsError("Package is aleady installed")
existing = list(install_path.parent.iterdir())
_stream_download(download_url, str(install_path.parent), headers)
installed = next(i for i in install_path.parent.iterdir() if i not in existing)
shutil.move(installed, install_path)
try:
if not install_path.joinpath("brownie-config.yaml").exists():
brownie_config: Dict = {"project_structure": {}}
contract_paths = set(
i.relative_to(install_path).parts[0] for i in install_path.glob("**/*.sol")
)
contract_paths.update(
i.relative_to(install_path).parts[0] for i in install_path.glob("**/*.vy")
)
if not contract_paths:
raise InvalidPackage(f"{package_id} does not contain any .sol or .vy files")
if install_path.joinpath("contracts").is_dir():
brownie_config["project_structure"]["contracts"] = "contracts"
elif len(contract_paths) == 1:
brownie_config["project_structure"]["contracts"] = contract_paths.pop()
else:
raise InvalidPackage(
f"{package_id} has no `contracts/` subdirectory, and "
"multiple directories containing source files"
)
with install_path.joinpath("brownie-config.yaml").open("w") as fp:
yaml.dump(brownie_config, fp)
project = load(install_path)
project.close()
except InvalidPackage:
shutil.rmtree(install_path)
raise
except Exception as e:
notify(
"WARNING",
f"Unable to compile {package_id} due to a {type(e).__name__} - you may still be able to"
" import sources from the package, but will be unable to load the package directly.\n",
)
return f"{org}/{repo}@{version}"
def _get_download_url_from_tag(org: str, repo: str, version: str, headers: dict) -> Tuple[str, str]:
response = requests.get(
f"https://api.github.com/repos/{org}/{repo}/tags?per_page=100", headers=headers
)
if response.status_code != 200:
msg = "Status {} when getting package versions from Github: '{}'".format(
response.status_code, response.json()["message"]
)
if response.status_code in (403, 404):
msg += (
"\n\nMissing or forbidden.\n"
"If this issue persists, generate a Github API token and store"
" it as the environment variable `GITHUB_TOKEN`:\n"
"https://github.blog/2013-05-16-personal-api-tokens/"
)
raise ConnectionError(msg)
data = response.json()
if not data:
raise ValueError("Github repository has no tags set")
org, repo = data[0]["zipball_url"].split("/")[3:5]
tags = [i["name"].lstrip("v") for i in data]
# In case version is latest, set it to the latest tag.
if version == "latest":
version = _latest_tag(tags)
if version not in tags:
raise ValueError(
"Invalid version for this package. Available versions are:\n" + ", ".join(tags)
) from None
url = next(i["zipball_url"] for i in data if i["name"].lstrip("v") == version)
return url, version
def _create_gitfiles(project_path: Path) -> None:
gitignore = project_path.joinpath(".gitignore")
if not gitignore.exists():
with gitignore.open("w") as fp:
fp.write(GITIGNORE)
gitattributes = project_path.joinpath(".gitattributes")
if not gitattributes.exists():
with gitattributes.open("w") as fp:
fp.write(GITATTRIBUTES)
def _create_folders(project_path: Path) -> None:
structure = _load_project_structure_config(project_path)
for path in structure.values():
project_path.joinpath(path).mkdir(exist_ok=True)
build_path = project_path.joinpath(structure["build"])
for path in BUILD_FOLDERS:
build_path.joinpath(path).mkdir(exist_ok=True)
def _add_to_sys_path(project_path: Path) -> None:
project_path_string = str(project_path)
if project_path_string in sys.path:
return
sys.path.insert(0, project_path_string)
def _compare_settings(left: Dict, right: Dict) -> bool:
return next(
(True for k, v in left.items() if v and not isinstance(v, dict) and v != right.get(k)),
False,
)
def _load_sources(project_path: Path, subfolder: str, allow_json: bool) -> Dict:
contract_sources: Dict = {}
suffixes: Tuple = (".sol", ".vy")
if allow_json:
suffixes = suffixes + (".json",)
# one day this will be a beautiful plugin system
hooks: Optional[ModuleType] = None
if project_path.joinpath("brownie_hooks.py").exists():
hooks = importlib.import_module("brownie_hooks")
for path in project_path.glob(f"{subfolder}/**/*"):
if path.suffix not in suffixes:
continue
if next((i for i in path.relative_to(project_path).parts if i.startswith("_")), False):
continue
with path.open() as fp:
source = fp.read()
if hasattr(hooks, "brownie_load_source"):
source = hooks.brownie_load_source(path, source) # type: ignore
path_str: str = path.relative_to(project_path).as_posix()
contract_sources[path_str] = source
return contract_sources
def _stream_download(
download_url: str, target_path: str, headers: Dict[str, str] = REQUEST_HEADERS
) -> None:
response = requests.get(download_url, stream=True, headers=headers)
if response.status_code == 404:
raise ConnectionError(
f"404 error when attempting to download from {download_url} - "
"are you sure this is a valid mix? https://github.com/brownie-mix"
)
if response.status_code != 200:
raise ConnectionError(
f"Received status code {response.status_code} when attempting "
f"to download from {download_url}"
)
total_size = int(response.headers.get("content-length", 0))
progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
content = bytes()
for data in response.iter_content(1024, decode_unicode=True):
progress_bar.update(len(data))
content += data
progress_bar.close()
with zipfile.ZipFile(BytesIO(content)) as zf:
zf.extractall(target_path)
def _get_mix_default_branch(mix_name: str, headers: Dict[str, str] = REQUEST_HEADERS) -> str:
"""Get the default branch for a brownie-mix repository.
Arguments
---------
mix_name : str
Name of a brownie-mix repository without -mix appended.
Returns
-------
str
The default branch name on github.
"""
REPO_GH_API = f"https://api.github.com/repos/brownie-mix/{mix_name}-mix"
r = requests.get(REPO_GH_API, headers=headers)
if r.status_code != 200:
status, repo, message = r.status_code, f"brownie-mix/{mix_name}", r.json()["message"]
msg = f"Status {status} when retrieving repo {repo} information from GHAPI: '{message}'"
if r.status_code in (403, 404):
msg_lines = (
msg,
"\n\nMissing or forbidden.\n",
"If this issue persists, generate a Github API token and store",
" it as the environment variable `GITHUB_TOKEN`:\n",
"https://github.blog/2013-05-16-personal-api-tokens/",
)
msg = "".join(msg_lines)
raise ConnectionError(msg)
elif "default_branch" not in r.json():
msg = f"API results did not include {mix_name}'s default branch"
raise KeyError(msg)
return r.json()["default_branch"]
|
the-stack_106_14980
|
"""Utiliy functions for tensors."""
import gym.spaces
import numpy as np
import scipy.signal
import torch
def discount_cumsum(x, discount):
"""Discounted cumulative sum.
See https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html#difference-equation-filtering # noqa: E501
Here, we have y[t] - discount*y[t+1] = x[t]
or rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
Args:
        x (np.ndarray): Input.
        discount (float): Discount factor.
    Returns:
        np.ndarray: Discounted cumulative sum.
"""
if torch.is_tensor(x):
        x = x.cpu().numpy()
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1],
axis=0)[::-1]
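# Minimal usage sketch (added for illustration; values are easy to verify by hand):
# with discount 0.5 each entry becomes x[t] + 0.5 * x[t+1] + 0.25 * x[t+2] + ...
#
#   discount_cumsum(np.array([1.0, 2.0, 3.0]), 0.5)
#   # -> [2.75, 3.5, 3.0]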
def explained_variance_1d(ypred, y, valids=None):
"""Explained variation for 1D inputs.
It is the proportion of the variance in one variable that is explained or
predicted from another variable.
Args:
ypred (np.ndarray): Sample data from the first variable.
Shape: :math:`(N, max_path_length)`.
y (np.ndarray): Sample data from the second variable.
Shape: :math:`(N, max_path_length)`.
valids (np.ndarray): Optional argument. Array indicating valid indices.
If None, it assumes the entire input array are valid.
Shape: :math:`(N, max_path_length)`.
Returns:
float: The explained variance.
"""
if valids is not None:
ypred = ypred[valids.astype(np.bool)]
y = y[valids.astype(np.bool)]
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
if np.isclose(vary, 0):
if np.var(ypred) > 0:
return 0
return 1
return 1 - np.var(y - ypred) / (vary + 1e-8)
def flatten_tensors(tensors):
"""Flatten a list of tensors.
Args:
tensors (list[numpy.ndarray]): List of tensors to be flattened.
Returns:
numpy.ndarray: Flattened tensors.
"""
if tensors:
return np.concatenate([np.reshape(x, [-1]) for x in tensors])
return np.asarray([])
def unflatten_tensors(flattened, tensor_shapes):
"""Unflatten a flattened tensors into a list of tensors.
Args:
flattened (numpy.ndarray): Flattened tensors.
tensor_shapes (tuple): Tensor shapes.
Returns:
list[numpy.ndarray]: Unflattened list of tensors.
"""
tensor_sizes = list(map(np.prod, tensor_shapes))
indices = np.cumsum(tensor_sizes)[:-1]
return [
np.reshape(pair[0], pair[1])
for pair in zip(np.split(flattened, indices), tensor_shapes)
]
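# Round-trip sketch for flatten_tensors/unflatten_tensors (illustrative shapes only):
#
#   tensors = [np.zeros((2, 3)), np.ones(4)]
#   flat = flatten_tensors(tensors)                      # shape (10,)
#   restored = unflatten_tensors(flat, [(2, 3), (4,)])   # shapes (2, 3) and (4,)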
def pad_tensor(x, max_len, mode='zero'):
"""Pad tensors.
Args:
x (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
numpy.ndarray: Padded tensor.
"""
padding = np.zeros_like(x[0])
if mode == 'last':
padding = x[-1]
return np.concatenate(
[x, np.tile(padding, (max_len - len(x), ) + (1, ) * np.ndim(x[0]))])
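# Illustrative example (not from the original tests):
#
#   pad_tensor(np.array([1, 2, 3]), 5)               # -> array([1, 2, 3, 0, 0])
#   pad_tensor(np.array([1, 2, 3]), 5, mode='last')  # -> array([1, 2, 3, 3, 3])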
def pad_tensor_n(xs, max_len):
"""Pad array of tensors.
Args:
xs (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
"""
ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
for idx, x in enumerate(xs):
ret[idx][:len(x)] = x
return ret
def pad_tensor_dict(tensor_dict, max_len, mode='zero'):
"""Pad dictionary of tensors.
Args:
tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
dict[numpy.ndarray]: Padded tensor.
"""
keys = list(tensor_dict.keys())
ret = dict()
for k in keys:
if isinstance(tensor_dict[k], dict):
ret[k] = pad_tensor_dict(tensor_dict[k], max_len, mode=mode)
else:
ret[k] = pad_tensor(tensor_dict[k], max_len, mode=mode)
return ret
def stack_tensor_dict_list(tensor_dict_list):
"""Stack a list of dictionaries of {tensors or dictionary of tensors}.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = stack_tensor_dict_list(dict_list)
else:
v = np.array(dict_list)
ret[k] = v
return ret
def stack_and_pad_tensor_dict_list(tensor_dict_list, max_len):
"""Stack and pad array of list of tensors.
Input paths are a list of N dicts, each with values of shape
:math:`(D, S^*)`. This function stack and pad the values with the input
key with max_len, so output will be shape :math:`(N, D, S^*)`.
Args:
tensor_dict_list (list[dict]): List of dict to be stacked and padded.
Value of each dict will be shape of :math:`(D, S^*)`.
max_len (int): Maximum length for padding.
Returns:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}. Shape: :math:`(N, D, S^*)`
where N is the len of input paths.
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = stack_and_pad_tensor_dict_list(dict_list, max_len)
else:
v = pad_tensor_n(np.array(dict_list), max_len)
ret[k] = v
return ret
def concat_tensor_dict_list(tensor_dict_list):
"""Concatenate dictionary of list of tensor.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = concat_tensor_dict_list(dict_list)
else:
v = np.concatenate(dict_list, axis=0)
ret[k] = v
return ret
def split_tensor_dict_list(tensor_dict):
"""Split dictionary of list of tensor.
Args:
tensor_dict (dict[numpy.ndarray]): a dictionary of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
keys = list(tensor_dict.keys())
ret = None
for k in keys:
vals = tensor_dict[k]
if isinstance(vals, dict):
vals = split_tensor_dict_list(vals)
if ret is None:
ret = [{k: v} for v in vals]
else:
for v, cur_dict in zip(vals, ret):
cur_dict[k] = v
return ret
def truncate_tensor_dict(tensor_dict, truncated_len):
"""Truncate dictionary of list of tensor.
Args:
tensor_dict (dict[numpy.ndarray]): a dictionary of {tensors or
dictionary of tensors}.
truncated_len (int): Length to truncate.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
ret = dict()
for k, v in tensor_dict.items():
if isinstance(v, dict):
ret[k] = truncate_tensor_dict(v, truncated_len)
else:
ret[k] = v[:truncated_len]
return ret
def normalize_pixel_batch(env_spec, observations):
"""Normalize the observations (images).
If the input are images, it normalized into range [0, 1].
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Normalized observations.
"""
if isinstance(env_spec.observation_space, gym.spaces.Box):
if len(env_spec.observation_space.shape) == 3:
return [obs.astype(np.float32) / 255.0 for obs in observations]
return observations
def slice_nested_dict(dict_or_array, start, stop):
"""Slice a dictionary containing arrays (or dictionaries).
This function is primarily intended for un-batching env_infos and
action_infos.
Args:
dict_or_array (dict[str, dict or np.ndarray] or np.ndarray): A nested
dictionary should only contain dictionaries and numpy arrays
(recursively).
start (int): First index to be included in the slice.
stop (int): First index to be excluded from the slice. In other words,
these are typical python slice indices.
Returns:
dict or np.ndarray: The input, but sliced.
"""
if isinstance(dict_or_array, dict):
return {
k: slice_nested_dict(v, start, stop)
for (k, v) in dict_or_array.items()
}
else:
# It *should* be a numpy array (unless someone ignored the type
# signature).
return dict_or_array[start:stop]
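# Hedged example of slicing batched env_infos (the key names here are made up):
#
#   infos = {'success': np.arange(5), 'extra': {'steps': np.arange(5) * 2}}
#   slice_nested_dict(infos, 1, 3)
#   # -> {'success': array([1, 2]), 'extra': {'steps': array([2, 4])}}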
|
the-stack_106_14981
|
import os
import sys
import magic
import re
from PIL import Image, ExifTags
import exiftool
import shutil
import datetime
from pathlib import Path
import platform
import calendar
import filecmp
class ExtractionException(Exception):
pass
current_year = datetime.datetime.now().year
years = list(range(1980, current_year))
duplicate_count = 0
def move_file(src, dst):
# Move without overwriting
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if os.path.dirname(src) == os.path.dirname(dst):
# condition to ensure that file already present in the correct destination directory
# does not get renamed as a duplicate in the next condition
print("File already exists in the correct directory")
return
if os.path.exists(dst):
if filecmp.cmp(src, dst, shallow=False):
print("deleting identical file..")
os.remove(src)
global duplicate_count
duplicate_count += 1
return
dst_basename = "duplicate_" + os.path.basename(dst)
dst_dirname = os.path.dirname(dst)
while os.path.exists(os.path.join(dst_dirname, dst_basename)):
dst_basename = "duplicate_" + dst_basename
dst = os.path.join(dst_dirname, dst_basename)
shutil.move(src, dst)
def creation_date(path_to_file):
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux.
# No easy way to get creation dates here, so we'll settle for when its content was last modified.
return stat.st_mtime
def is_video(fpath):
mime = magic.Magic(mime=True)
filename = mime.from_file(fpath)
if filename.find('video') != -1:
return 1
return 0
def get_exif_date_time(fpath):
try:
if is_video(fpath):
with exiftool.ExifTool() as et:
metadata = et.get_metadata(fpath)
dt = metadata['QuickTime:MediaCreateDate']
else:
img = Image.open(fpath)
exif = {ExifTags.TAGS[k]: v for k, v in img._getexif().items() if k in ExifTags.TAGS}
dt = exif['DateTimeOriginal']
return (datetime.datetime.strptime(dt, "%Y:%m:%d %H:%M:%S").strftime("%b"),
datetime.datetime.strptime(dt, "%Y:%m:%d %H:%M:%S").strftime("%Y"))
except Exception as e:
raise ExtractionException("Error in extracting date time from file: " + str(e))
def segregate_based_on_exif(fpath):
try:
(month, year) = get_exif_date_time(fpath)
new_path = os.path.join('.', year, month)
Path(new_path).mkdir(parents=True, exist_ok=True)
move_file(fpath, os.path.join(new_path, os.path.basename(fpath)))
except ExtractionException:
# pass/send exception to caller
raise ExtractionException
def segregate_based_on_file_name(fpath):
basename = str(os.path.basename(fpath))
dirname = str(os.path.dirname(fpath))
for year in years:
if str(year) in dirname:
# Already Segregated
return 1
if str(year) in basename:
date_regex = re.compile(str(year) + r'[-_]?(\d{1,2})')
# Year and month may be separated by hyphen or underscore
mo = date_regex.search(basename)
if not mo or int(mo.group(1)) > 12 or int(mo.group(1)) < 1:
exif_flag = 0
try:
exif_month, exif_year = get_exif_date_time(fpath)
except ExtractionException:
exif_month = None
exif_year = None
exif_flag = 1
if not exif_flag and int(exif_year) == year:
month = exif_month
print("Year and Month for {} have been guessed as {}-{}".format(fpath, year, month))
new_path = os.path.join('.', str(year), month)
Path(new_path).mkdir(parents=True, exist_ok=True)
move_file(fpath, os.path.join(new_path, basename))
return 1
print("Year for {} has been guessed as {}".format(fpath, year))
new_path = os.path.join('.', str(year))
Path(new_path).mkdir(parents=True, exist_ok=True)
move_file(fpath, os.path.join(new_path, basename))
return 1
else:
month = str(mo.group(1))
month = calendar.month_abbr[int(month)]
print("Year and Month for {} have been guessed as {}-{}".format(fpath, year, month))
new_path = os.path.join('.', str(year), month)
Path(new_path).mkdir(parents=True, exist_ok=True)
move_file(fpath, os.path.join(new_path, basename))
return 1
return 0
def segregate_based_on_creation_date(fpath):
try:
year = datetime.datetime.fromtimestamp(creation_date(fpath)).strftime('%Y')
month = datetime.datetime.fromtimestamp(creation_date(fpath)).strftime('%b')
except Exception:
raise ExtractionException
# return 0
print("Year and month for {} have been guessed as {},{}".format(fpath, year, month))
new_path = os.path.join('.', year, month)
Path(new_path).mkdir(parents=True, exist_ok=True)
move_file(fpath, os.path.join(new_path, os.path.basename(fpath)))
return 1
def main():
if len(sys.argv) < 2:
print("Usage: python3 segregator.py parent_dir")
sys.exit(-1)
os.chdir(sys.argv[1])
# Argument must specify parent directory containing the files to be segregated; this program preserves all directories
assumptions_file_name = "assumptions.txt"
assumptions_file_pointer = open(assumptions_file_name, 'w') # Stores names of files whose date were guessed
num_files = sum([len(files) for r, d, files in os.walk(".")]) - 1
i = 0
files_list = []
for (root, dirs, files) in os.walk('.', topdown=True):
for img_file_path in files:
files_list.append(os.path.abspath(os.path.join(root, img_file_path)))
for img_file_path in files_list:
if i:
print("{} out of {} files have been segregated!".format(i, num_files))
i += 1
if assumptions_file_name in img_file_path:
continue
flag = segregate_based_on_file_name(img_file_path)
if flag:
continue
try:
segregate_based_on_exif(img_file_path)
except ExtractionException:
print("Not able to get actual timestamp of {}, segregating based on creation date...".format(img_file_path))
assumptions_file_pointer.write(img_file_path + '\n')
try:
segregate_based_on_creation_date(img_file_path)
except ExtractionException:
print("Year for {} could not be guessed".format(img_file_path))
move_file(img_file_path, os.path.join(os.path.abspath(sys.argv[1]), os.path.basename(img_file_path)))
# print("{} out of {} files have been segregated!".format(i, num_files))
print("{} identical files deleted".format(duplicate_count))
assumptions_file_pointer.close()
if __name__ == '__main__':
# todo merge program and GUI
main()
|
the-stack_106_14982
|
import logging
from intent.utils.token import tag_tokenizer, tokenize_string
TAGLOG = logging.getLogger("TEST_TAGGERS")
#logging.basicConfig(level=logging.DEBUG)
from argparse import ArgumentParser
from tempfile import NamedTemporaryFile
import sys
from intent.eval.pos_eval import slashtags_eval
from intent.interfaces.stanford_tagger import train_postagger, StanfordPOSTagger, test_postagger
from intent.utils.argutils import existsfile
def remove_tags(source_path, target_path):
source_f = open(source_path, 'r', encoding='utf-8')
target_f = open(target_path, 'w', encoding='utf-8')
for line in source_f:
tokens = tokenize_string(line, tokenizer=tag_tokenizer)
target_f.write(tokens.text()+'\n')
source_f.close()
target_f.close()
if __name__ == '__main__':
p = ArgumentParser()
p.add_argument('--train', help='Training file.', type=existsfile)
p.add_argument('--tagger', help='Path to a pre-trained tagger.', type=existsfile)
p.add_argument('--test', help='File to evaluate against.', required=True, type=existsfile)
p.add_argument('--delimiter', help='Token to separate tags from words (default "/")', default='/')
p.add_argument('--output', help='Optionally, save the tagger output to this path.')
args = p.parse_args()
    if not (args.train or args.tagger):
sys.stderr.write("Either a training file or a pre-trained tagger is required.")
p.print_help()
sys.exit(11)
if args.train and args.tagger:
sys.stderr.write("WARNING: Both a training file and a tagger were specified. The tagger will take precedence.")
# =============================================================================
# First, train the tagger.
# =============================================================================
if args.train and not args.tagger:
print('Training tagger from "{}"'.format(args.train))
tagger_file = NamedTemporaryFile('w')
tagger = train_postagger(args.train, tagger_file.name)
print("Tagger training complete.")
tagger_path = tagger_file.name
else:
print('Loading tagger from "{}"'.format(args.tagger))
tagger_path = args.tagger
# =============================================================================
# Next, strip the tags from the test file into a temporary file.
# =============================================================================
raw_tmp = NamedTemporaryFile()
remove_tags(args.test, raw_tmp.name)
# =============================================================================
# Figure out if we want to save the output path
# =============================================================================
if args.output:
outpath = args.output
else:
output_file = NamedTemporaryFile('w', encoding='utf-8')
outpath = output_file.name
print('Running tagger on "{}"'.format(args.test))
test_postagger(raw_tmp.name, tagger_path, outpath)
slashtags_eval(args.test, outpath, args.delimiter, matrix=True)
|
the-stack_106_14984
|
"""
Name: Durgapal
References: Durgapal, J. Phys. A, v15, p2637-2644, (1982)
Coordinates: Spherical
Symmetry:
- Spherical
- Static
Notes: n = 4
"""
from sympy import Rational, sin, symbols, zeros
coords = symbols("t r theta phi", real=True)
variables = symbols("A C K", constant=True)
functions = ()
t, r, th, ph = coords
A, C, K = variables
metric = zeros(4)
metric[0, 0] = -(A ** 2) * (1 + C * r ** 2) ** 4
metric[1, 1] = (
7
* (1 + C * r ** 2) ** 2
* (1 + 5 * C * r ** 2) ** Rational(2, 5)
/ (
(7 - 10 * C * r ** 2 - C ** 2 * r ** 4) * (1 + 5 * C * r ** 2) ** Rational(2, 5)
+ 7 * K * C * r ** 2
)
)
metric[2, 2] = r ** 2
metric[3, 3] = r ** 2 * sin(th) ** 2
|
the-stack_106_14985
|
'''
Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).
For example,
S = "ADOBECODEBANC"
T = "ABC"
Minimum window is "BANC".
Note:
If there is no such window in S that covers all characters in T, return the empty string "".
If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.
'''
from collections import defaultdict
class Solution(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
MAX_INT = 2147483647
start = end = 0
        char_need = defaultdict(int)  # count of chars needed by the current window; a negative value means the window has the char but does not need it
count_need = len(t) # count of chars not in current window but in t
min_length = MAX_INT
min_start = 0
for i in t:
char_need[i] += 1 # current window needs all char in t
while end < len(s):
if char_need[s[end]] > 0:
count_need -= 1
char_need[s[end]] -= 1 # current window contains s[end] now, so does not need it any more
end += 1
while count_need == 0:
if min_length > end - start:
min_length = end - start
min_start = start
char_need[s[start]] += 1 # current window does not contain s[start] any more
if char_need[s[start]] > 0: # when some count in char_need is positive, it means there is char in t but not current window
count_need += 1
start += 1
return "" if min_length == MAX_INT else s[min_start:min_start + min_length]
if __name__ == "__main__":
assert Solution().minWindow("ADOBECODEBANC", "ABC") == "BANC"
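    # additional sanity checks (added examples, not part of the original problem statement)
    assert Solution().minWindow("a", "aa") == ""
    assert Solution().minWindow("aa", "aa") == "aa"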
|
the-stack_106_14988
|
from peewee import SQL, CharField, DateTimeField, ForeignKeyField, IntegerField
from ..db import db
from .base import Base
from .product_category import ProductCategory
from .product_gender import ProductGender
from .size_range import SizeRange
from .user import User
class Product(db.Model):
base = ForeignKeyField(
column_name="camp_id",
field="id",
model=Base,
null=True,
on_update="CASCADE",
constraints=[SQL("UNSIGNED")],
)
category = ForeignKeyField(
column_name="category_id",
field="id",
model=ProductCategory,
null=True,
constraints=[SQL("UNSIGNED")],
)
comments = CharField(null=True)
created_on = DateTimeField(column_name="created", null=True)
created_by = ForeignKeyField(
column_name="created_by",
field="id",
model=User,
null=True,
on_delete="SET NULL",
on_update="CASCADE",
constraints=[SQL("UNSIGNED")],
)
deleted = DateTimeField(null=True, default=None)
gender = ForeignKeyField(
column_name="gender_id",
field="id",
model=ProductGender,
on_update="CASCADE",
)
last_modified_on = DateTimeField(column_name="modified", null=True)
last_modified_by = ForeignKeyField(
column_name="modified_by",
field="id",
model=User,
null=True,
on_delete="SET NULL",
on_update="CASCADE",
constraints=[SQL("UNSIGNED")],
)
name = CharField()
size_range = ForeignKeyField(
column_name="sizegroup_id",
field="id",
model=SizeRange,
null=True,
on_update="CASCADE",
constraints=[SQL("UNSIGNED")],
)
in_shop = IntegerField(
column_name="stockincontainer", constraints=[SQL("DEFAULT 0")]
)
price = IntegerField(column_name="value", constraints=[SQL("DEFAULT 0")])
class Meta:
table_name = "products"
|
the-stack_106_14989
|
import time
import requests
from bs4 import BeautifulSoup
from checks import AgentCheck
from hashlib import md5
class MTACheck(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances=instances)
self.lines_running = 0
self.last_ping = time.time()
self.saved_line_statuses = {
'onetwothree_line': 'first check',
'fourfivesix_line': 'first check',
'seven_line': 'first check',
'ace_line': 'first check',
'bdfm_line': 'first check',
'g_line': 'first check',
'jz_line': 'first check',
'l_line': 'first check',
'nqr_line': 'first check',
'shuttle_line': 'first check',
'sir_line': 'first check'
}
def mta_site_check(self, instance, tags):
url = self.init_config.get('mta_url', "http://www.mta.info/")
timeout = float(self.init_config.get('timeout', 5))
start_time = time.time()
aggregation_key = md5(url).hexdigest()
try:
self.log.debug("Connecting to MTA site at '{0}'".format(url))
r = requests.get(url, timeout=timeout)
end_time = time.time()
except requests.exceptions.Timeout as e:
            # If there's a timeout, send event plus failure service check.
            # No response object exists here, so no status_code tag can be added.
self.timeout_event(url, timeout, aggregation_key, tags)
self.service_check('mta_site.can_connect', 2)
return
tags.append("status_code:{0}".format(r.status_code))
        if r.status_code != 200:
self.status_code_event(url, r, aggregation_key, tags)
# service check status = warning
check_status = 1
else:
# service check status = success
check_status = 0
timing = end_time - start_time
self.gauge('mta_site.response_time', timing, tags=tags)
self.service_check('mta_site.can_connect', status=check_status)
def status_code_event(self, url, r, aggregation_key, tags):
self.event({
'timestamp': int(time.time()),
'event_type': 'mta_site_check',
'alert_type': 'warning',
'msg_title': 'MTA site: Invalid response code',
'msg_text': '%s returned a status of %s' % (url, r.status_code),
'aggregation_key': aggregation_key,
'tags': tags
})
def _find_line_name(self, title):
# onetwothree_status
idx = title.find('_line')
name = title[:idx]
if 'one' in name:
name = '1/2/3'
elif 'four' in name:
name = '4/5/6'
elif name == 'seven':
name = '7'
elif name == 'sir':
name = name.upper()
elif name == 'ace' or name == 'bdfm' or name == 'nqr' or name == 'jz':
            name = "/".join(name.upper())
else:
name = name.capitalize()
return name
# for service check
def _status_convertor_sc(self, status):
status = status.lower()
if 'good' in status:
self.lines_running += 1
return 0
else:
return 2
# to get metric value
def _status_convertor_metric(self, status):
status = status.lower()
if 'good' in status:
return 1
else:
return 0
def _status_to_tag(self, status):
tag = "status:"
status = status.lower().replace(" ", "_")
tag = tag + status
return tag
def _get_status_link(self, line):
line = line.replace('/', "").lower()
if line.lower() == 'shuttle':
line = 's'
return 'http://www.mta.info/status/subway/%s' % (line)
def timeout_event(self, url, timeout, aggregation_key, tags):
self.event({
'timestamp': int(time.time()),
'event_type': 'mta_site_check',
'alert_type': 'error',
'msg_title': 'MTA site: timeout',
'msg_text': '%s timed out after %s seconds.' % (url, timeout),
'aggregation_key': aggregation_key,
'tags': tags
})
def check(self, instance):
self.lines_running = 0
tags = self.init_config.get('tags', [])
self.web_scraper()
self.gauge('mta.lines_running', self.lines_running)
self.mta_site_check(instance, tags)
self.last_ping = time.time()
def web_scraper(self):
now = time.time()
mta_status_page = self.init_config.get('mta_status_page', 'http://web.mta.info/status/serviceStatus.txt')
self.log.debug("Connecting to MTA status at '{0}'".format(mta_status_page))
page = requests.get(mta_status_page)
html_page = BeautifulSoup(page.text, 'html.parser')
# send metric about how many seconds since last update
last_updated = html_page.find('timestamp').text
time_since_updated = time.mktime(time.strptime(last_updated, "%m/%d/%Y %I:%M:%S %p"))
        # account for timezone differences: time.mktime interpreted the timestamp using the
        # local system's clock, so remove the host's UTC offset (in seconds) here and add the
        # MTA's UTC-4/UTC-5 offset below
        timezone_offset = -time.timezone
        time_since_updated += timezone_offset
        # very rough calculation to account for daylight savings time
is_dst = not(time.localtime().tm_mon <= 3 and time.localtime().tm_mday <= 10) and not(time.localtime().tm_mon >= 11 and time.localtime().tm_mday >= 1)
if is_dst:
time_since_updated += 14400
else:
time_since_updated += 18000
seconds_since_updated = (now - time_since_updated)
self.gauge('mta.time_since_status_update', seconds_since_updated)
lines = html_page.find_all('line')
# create dict with value being the position that the line is in the lines index, gets around how python dict are itereated over randomly in a loop
new_line_statuses = {
'onetwothree_line': 0,
'fourfivesix_line': 1,
'seven_line': 2,
'ace_line': 3,
'bdfm_line': 4,
'g_line': 5,
'jz_line': 6,
'l_line': 7,
'nqr_line': 8,
'shuttle_line': 9,
'sir_line': 10
}
# update new_line_statuses with new statuses from the update
for line, status in new_line_statuses.items():
idx = status
new_line_statuses[line] = lines[idx].contents[3].text
for line, saved_status in self.saved_line_statuses.iteritems():
# new_status = locals()[line] # uses function's local variables
new_status = new_line_statuses[line]
line_name = self._find_line_name(line)
event_tags=["status:{0}".format(new_status.replace(" ", "_"))]
# check if the saved status of the lines is the same as the most recent one
if saved_status.lower() != new_status.lower():
self.log.debug('Updating status for {0} to {1}'.format(line, new_status))
event_tags.append("line:{0}".format(line_name))
if "good" not in new_status.lower():
alert = "warning"
else:
alert="success"
self.event({
'timestamp': int(time.time()),
'event_type': 'mta_status_update',
'alert_type': alert,
'tags': event_tags,
'msg_title': '[MTA] {0} service update: {1}'.format(line_name, new_status.lower()),
'msg_text': '''
The MTA has updated the service status for %s from '%s' to '%s'
Check the status page for more information: %s
''' % (line_name, saved_status.lower(), new_status.lower(), self._get_status_link(line_name))
})
self.saved_line_statuses[line] = new_status
else:
self.log.debug('No update on {}'.format(line))
# submit service checks for all lines
line_tag = "line:{0}".format(line_name)
self.service_check('mta.line_up', status=self._status_convertor_sc(new_status), tags=[line_tag])
# use _status_convertor to find if good service(1) then send a 1 or 0 for a metric to determine uptime later on
self.gauge('mta.line_service', value=self._status_convertor_metric(new_status), tags=[line_tag, self._status_to_tag(new_status)])
|
the-stack_106_14990
|
import os
import sys
import time
import pytest
import logging
from abc import ABCMeta
from ccmlib.common import get_version_from_build, is_win
from dtest import Tester, create_ks
logger = logging.getLogger(__name__)
def switch_jdks(major_version_int):
"""
Changes the jdk version globally, by setting JAVA_HOME = JAVA[N]_HOME.
This means the environment must have JAVA[N]_HOME set to switch to jdk version N.
"""
new_java_home = 'JAVA{}_HOME'.format(major_version_int)
try:
os.environ[new_java_home]
except KeyError:
raise RuntimeError("You need to set {} to run these tests!".format(new_java_home))
# don't change if the same version was requested
current_java_home = os.environ.get('JAVA_HOME')
if current_java_home != os.environ[new_java_home]:
logger.debug("Switching jdk to version {} (JAVA_HOME is changing from {} to {})".format(major_version_int, current_java_home or 'undefined', os.environ[new_java_home]))
os.environ['JAVA_HOME'] = os.environ[new_java_home]
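# Example environment setup this helper assumes (the paths are illustrative):
#
#   export JAVA8_HOME=/usr/lib/jvm/java-8-openjdk
#   export JAVA11_HOME=/usr/lib/jvm/java-11-openjdk
#
# switch_jdks(8) would then point JAVA_HOME at JAVA8_HOME.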
@pytest.mark.upgrade_test
@pytest.mark.skipif(sys.platform == 'win32', reason='Skip upgrade tests on Windows')
class UpgradeTester(Tester, metaclass=ABCMeta):
"""
When run in 'normal' upgrade mode without specifying any version to run,
this will test different upgrade paths depending on what version of C* you
are testing. When run on 2.1 or 2.2, this will test the upgrade to 3.0.
When run on 3.0, this will test the upgrade path to trunk. When run on
versions above 3.0, this will test the upgrade path from 3.0 to HEAD.
"""
NODES, RF, __test__, CL, UPGRADE_PATH = 2, 1, False, None, None
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
# known non-critical bug during teardown:
# https://issues.apache.org/jira/browse/CASSANDRA-12340
if fixture_dtest_setup.dtest_config.cassandra_version_from_build < '2.2':
_known_teardown_race_error = (
'ScheduledThreadPoolExecutor$ScheduledFutureTask@[0-9a-f]+ '
'rejected from org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor'
)
fixture_dtest_setup.ignore_log_patterns = fixture_dtest_setup.ignore_log_patterns \
+ [_known_teardown_race_error]
fixture_dtest_setup.ignore_log_patterns = fixture_dtest_setup.ignore_log_patterns + [
r'RejectedExecutionException.*ThreadPoolExecutor has shut down', # see CASSANDRA-12364
]
@pytest.fixture(autouse=True)
def around_test(self):
self.validate_class_config()
logger.info("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
.format(self.UPGRADE_PATH.starting_version, self.UPGRADE_PATH.starting_meta.java_version))
previous_java_home = os.environ['JAVA_HOME']
previous_cassandra_version = os.environ['CASSANDRA_VERSION'] if 'CASSANDRA_VERSION' in os.environ else None
switch_jdks(self.UPGRADE_PATH.starting_meta.java_version)
os.environ['CASSANDRA_VERSION'] = self.UPGRADE_PATH.starting_version
yield
os.environ['JAVA_HOME'] = previous_java_home
if previous_cassandra_version:
os.environ['CASSANDRA_VERSION'] = previous_cassandra_version
# Ignore errors before upgrade on Windows
# We ignore errors from 2.1, because windows 2.1
# support is only beta. There are frequent log errors,
# related to filesystem interactions that are a direct result
# of the lack of full functionality on 2.1 Windows, and we dont
# want these to pollute our results.
if is_win() and self.cluster.version() <= '2.2':
self.cluster.nodelist()[1].mark_log_for_errors()
def prepare(self, ordered=False, create_keyspace=True, use_cache=False, use_thrift=False,
nodes=None, rf=None, protocol_version=None, cl=None, extra_config_options=None, **kwargs):
nodes = self.NODES if nodes is None else nodes
rf = self.RF if rf is None else rf
cl = self.CL if cl is None else cl
self.CL = cl # store for later use in do_upgrade
        assert nodes >= 2, "backwards compatibility tests require at least two nodes"
self.protocol_version = protocol_version
cluster = self.cluster
cluster.set_install_dir(version=self.UPGRADE_PATH.starting_version)
self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
if ordered:
cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")
if use_cache:
cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})
if use_thrift:
cluster.set_configuration_options(values={'start_rpc': 'true'})
start_rpc = kwargs.pop('start_rpc', False)
if start_rpc:
cluster.set_configuration_options(values={'start_rpc': True})
cluster.set_configuration_options(values={'internode_compression': 'none'})
if extra_config_options:
cluster.set_configuration_options(values=extra_config_options)
cluster.populate(nodes)
cluster.start()
node1 = cluster.nodelist()[0]
time.sleep(0.2)
if cl:
session = self.patient_cql_connection(node1, protocol_version=protocol_version, consistency_level=cl, **kwargs)
else:
session = self.patient_cql_connection(node1, protocol_version=protocol_version, **kwargs)
if create_keyspace:
create_ks(session, 'ks', rf)
return session
def do_upgrade(self, session, use_thrift=False, return_nodes=False, **kwargs):
"""
Upgrades the first node in the cluster and returns a list of
(is_upgraded, Session) tuples. If `is_upgraded` is true, the
Session is connected to the upgraded node. If `return_nodes`
is True, a tuple of (is_upgraded, Session, Node) will be
returned instead.
"""
session.cluster.shutdown()
node1 = self.cluster.nodelist()[0]
node2 = self.cluster.nodelist()[1]
# stop the nodes, this can fail due to https://issues.apache.org/jira/browse/CASSANDRA-8220 on MacOS
# for the tests that run against 2.0. You will need to run those in Linux.
node1.drain()
node1.stop(gently=True)
# Ignore errors before upgrade on Windows
# We ignore errors from 2.1, because windows 2.1
# support is only beta. There are frequent log errors,
# related to filesystem interactions that are a direct result
# of the lack of full functionality on 2.1 Windows, and we dont
# want these to pollute our results.
if is_win() and self.cluster.version() <= '2.2':
node1.mark_log_for_errors()
logger.debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))
switch_jdks(self.UPGRADE_PATH.upgrade_meta.java_version)
node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)
# this is a bandaid; after refactoring, upgrades should account for protocol version
new_version_from_build = get_version_from_build(node1.get_install_dir())
# Check if a since annotation with a max_version was set on this test.
# The since decorator can only check the starting version of the upgrade,
# so here we check to new version of the upgrade as well.
if hasattr(self, 'max_version') and self.max_version is not None and new_version_from_build >= self.max_version:
pytest.skip("Skipping test, new version {} is equal to or higher than "
"max version {}".format(new_version_from_build, self.max_version))
if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
pytest.skip('Protocol version {} incompatible '
'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
node1.set_log_level(logging.getLevelName(logging.root.level))
node1.set_configuration_options(values={'internode_compression': 'none'})
if use_thrift and node1.get_cassandra_version() < '4':
node1.set_configuration_options(values={'start_rpc': 'true'})
node1.start(wait_for_binary_proto=True)
sessions_and_meta = []
if self.CL:
session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version, consistency_level=self.CL, **kwargs)
else:
session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version, **kwargs)
session.set_keyspace('ks')
if return_nodes:
sessions_and_meta.append((True, session, node1))
else:
sessions_and_meta.append((True, session))
# open a second session with the node on the old version
if self.CL:
session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version, consistency_level=self.CL, **kwargs)
else:
session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version, **kwargs)
session.set_keyspace('ks')
if return_nodes:
sessions_and_meta.append((False, session, node2))
else:
sessions_and_meta.append((False, session))
# Let the nodes settle briefly before yielding connections in turn (on the upgraded and non-upgraded alike)
# CASSANDRA-11396 was the impetus for this change, wherein some apparent perf noise was preventing
# CL.ALL from being reached. The newly upgraded node needs to settle because it has just barely started, and each
# non-upgraded node needs a chance to settle as well, because the entire cluster (or isolated nodes) may have been doing resource intensive activities
# immediately before.
for s in sessions_and_meta:
time.sleep(5)
yield s
def get_version(self):
node1 = self.cluster.nodelist()[0]
return node1.get_cassandra_version()
def get_node_versions(self):
return [n.get_cassandra_version() for n in self.cluster.nodelist()]
def node_version_above(self, version):
return min(self.get_node_versions()) >= version
def get_node_version(self, is_upgraded):
"""
Used in places where is_upgraded was used to determine if the node version was >=2.2.
"""
node_versions = self.get_node_versions()
assert len({v.vstring for v in node_versions}) <= 2
return max(node_versions) if is_upgraded else min(node_versions)
def validate_class_config(self):
# check that an upgrade path is specified
subclasses = self.__class__.__subclasses__()
no_upgrade_path_error = (
'No upgrade path specified. {klaus} may not be configured to run as a test.'.format(klaus=self.__class__) +
(' Did you mean to run one of its subclasses, such as {sub}?'.format(sub=subclasses[0])
if subclasses else
'')
)
assert self.UPGRADE_PATH is not None, no_upgrade_path_error
def upgrade_version_family(self):
"""
Returns a hopefully useful version string that can be compared
to tune test behavior. For trunk this returns trunk, for an earlier
version like github:apache/cassandra-3.11 it returns a version number
as a string
"""
return self.UPGRADE_PATH.upgrade_meta.family
def upgrade_is_version_4_or_greater(self):
upgrade_version = self.upgrade_version_family()
return upgrade_version == 'trunk' or upgrade_version >= '4.0'
|
the-stack_106_14991
|
from unittest.mock import patch
def mocked_execute(remote_executor, command, *args, **kwargs):
from .test_assets import TestAsset
return TestAsset.REMOTE_HOST_MOCKS[remote_executor.hostname].execute(
command
)
class PatchRemoteHostMeta(type):
"""
can be used as a metaclass for a TestCase to patch relevant methods, required to mock a RemoteHost
"""
MOCKED_EXECUTE = mocked_execute
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
patch('remote_execution.remote_execution.SshRemoteExecutor.connect', lambda self: None)(self)
patch('remote_execution.remote_execution.SshRemoteExecutor.close', lambda self: None)(self)
patch('remote_execution.remote_execution.SshRemoteExecutor.is_connected', lambda self: True)(self)
patch(
'remote_execution.remote_execution.SshRemoteExecutor._execute',
PatchRemoteHostMeta.MOCKED_EXECUTE
)(self)
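# Hedged usage sketch (the test class and module names are illustrative):
#
#   class RemoteHostTestCase(TestCase, metaclass=PatchRemoteHostMeta):
#       def test_executes_against_mock(self):
#           ...  # SshRemoteExecutor._execute is already replaced by mocked_execute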
class PatchTrackedRemoteExecutionMeta(PatchRemoteHostMeta):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.executed_commands = set()
def tracked_mocked_execute(remote_host, command, *args, **kwargs):
self.executed_commands.add(command)
return PatchRemoteHostMeta.MOCKED_EXECUTE(remote_host, command)
patch(
'remote_execution.remote_execution.SshRemoteExecutor._execute',
tracked_mocked_execute
)(self)
|
the-stack_106_14992
|
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of cinder."""
import copy
import uuid
from cinder.openstack.common import local
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
roles=None, project_name=None, remote_address=None,
timestamp=None, request_id=None, auth_token=None,
overwrite=True, quota_class=None, service_catalog=None,
domain=None, user_domain=None, project_domain=None,
**kwargs):
"""Initialize RequestContext.
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
*only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
self.user_id = user_id
self.project_id = project_id
self.domain = domain
self.user_domain = user_domain
self.project_domain = project_domain
self.roles = roles or []
self.project_name = project_name
self.is_admin = is_admin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self.roles)
elif self.is_admin and 'admin' not in self.roles:
self.roles.append('admin')
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, basestring):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
self.quota_class = quota_class
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('compute',)]
else:
# if list is empty or none
self.service_catalog = []
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def update_store(self):
local.store.context = self
def to_dict(self):
user_idt = (
self.user_idt_format.format(user=self.user or '-',
tenant=self.tenant or '-',
domain=self.domain or '-',
user_domain=self.user_domain or '-',
p_domain=self.project_domain or '-'))
return {'user_id': self.user_id,
'project_id': self.project_id,
'project_name': self.project_name,
'domain': self.domain,
'user_domain': self.user_domain,
'project_domain': self.project_domain,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'quota_class': self.quota_class,
'service_catalog': self.service_catalog,
'tenant': self.tenant,
'user': self.user,
'user_identity': user_idt}
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
def deepcopy(self):
return copy.deepcopy(self)
# NOTE(sirp): the openstack/common version of RequestContext uses
# tenant/user whereas the Cinder version uses project_id/user_id. We need
# this shim in order to use context-aware code from openstack/common, like
# logging, until we make the switch to using openstack/common's version of
# RequestContext.
@property
def tenant(self):
return self.project_id
@property
def user(self):
return self.user_id
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
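# Minimal usage sketch (illustrative only, not part of the original module):
#
#   ctx = get_admin_context()
#   ctx.is_admin              # True
#   scoped = ctx.elevated(read_deleted='yes')
#   scoped.read_deleted       # 'yes'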
|
the-stack_106_14993
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import typing as tp
import numpy as np
from nevergrad.common.typetools import ArrayLike
from . import discretization
from . import utils
from . import core
from .container import Tuple
from .data import Array
# weird pylint issue on "Descriptors"
# pylint: disable=no-value-for-parameter
C = tp.TypeVar("C", bound="Choice")
T = tp.TypeVar("T", bound="TransitionChoice")
class BaseChoice(core.Dict):
def __init__(
self,
*,
choices: tp.Iterable[tp.Any],
repetitions: tp.Optional[int] = None,
**kwargs: tp.Any
) -> None:
assert repetitions is None or isinstance(repetitions, int) # avoid silent issues
self._repetitions = repetitions
assert not isinstance(choices, Tuple)
lchoices = list(choices) # for iterables
if not lchoices:
            raise ValueError(f"{self.__class__.__name__} received an empty list of options.")
super().__init__(choices=Tuple(*lchoices), **kwargs)
def _compute_descriptors(self) -> utils.Descriptors:
deterministic = getattr(self, "_deterministic", True)
ordered = not hasattr(self, "_deterministic")
internal = utils.Descriptors(deterministic=deterministic, continuous=not deterministic, ordered=ordered)
return self.choices.descriptors & internal
def __len__(self) -> int:
"""Number of choices
"""
return len(self.choices)
@property
def index(self) -> int: # delayed choice
"""Index of the chosen option
"""
assert self.indices.size == 1
return int(self.indices[0])
@property
def indices(self) -> np.ndarray:
"""Indices of the chosen options
"""
raise NotImplementedError # TODO remove index?
@property
def choices(self) -> Tuple:
"""The different options, as a Tuple Parameter
"""
return self["choices"] # type: ignore
@property
def value(self) -> tp.Any:
return self._get_value()
@value.setter
def value(self, value: tp.Any) -> None:
self._find_and_set_value(value)
def _get_value(self) -> tp.Any:
if self._repetitions is None:
return core.as_parameter(self.choices[self.index]).value
return tuple(core.as_parameter(self.choices[ind]).value for ind in self.indices)
def _find_and_set_value(self, values: tp.List[tp.Any]) -> np.ndarray:
"""Must be adapted to each class
        This handles a list of values, not just one
        """  # TODO this is currently very messy, may need some improvement
values = [values] if self._repetitions is None else values
self._check_frozen()
indices: np.ndarray = -1 * np.ones(len(values), dtype=int)
nums = sorted(int(k) for k in self.choices._content)
# try to find where to put this
for i, value in enumerate(values):
for k in nums:
choice = self.choices[k]
try:
choice.value = value
indices[i] = k
break
except Exception: # pylint: disable=broad-except
pass
if indices[i] == -1:
raise ValueError(f"Could not figure out where to put value {value}")
return indices
def get_value_hash(self) -> tp.Hashable:
hashes: tp.List[tp.Hashable] = []
for ind in self.indices:
c = self.choices[int(ind)]
const = isinstance(c, core.Constant) or not isinstance(c, core.Parameter)
hashes.append(int(ind) if const else (int(ind), c.get_value_hash()))
return tuple(hashes) if len(hashes) > 1 else hashes[0]
class Choice(BaseChoice):
"""Unordered categorical parameter, randomly choosing one of the provided choice options as a value.
    The choices can be Parameters, in which case their value will be returned instead.
The chosen parameter is drawn randomly from the softmax of weights which are
updated during the optimization.
Parameters
----------
choices: list
a list of possible values or Parameters for the variable.
repetitions: None or int
set to an integer :code:`n` if you want :code:`n` similar choices sampled independently (each with its own distribution)
This is equivalent to :code:`Tuple(*[Choice(options) for _ in range(n)])` but can be
30x faster for large :code:`n`.
deterministic: bool
        whether to always draw the most likely choice (hence avoiding the stochastic behavior, but losing
continuity)
Note
----
- Since the chosen value is drawn randomly, the use of this variable makes deterministic
functions become stochastic, hence "adding noise"
- the "mutate" method only mutates the weights and the chosen Parameter (if it is not constant),
leaving others untouched
Examples
--------
>>> print(Choice(["a", "b", "c", "e"]).value)
"c"
>>> print(Choice(["a", "b", "c", "e"], repetitions=3).value)
("b", "b", "c")
"""
def __init__(
self,
choices: tp.Iterable[tp.Any],
repetitions: tp.Optional[int] = None,
deterministic: bool = False,
) -> None:
assert not isinstance(choices, Tuple)
lchoices = list(choices)
rep = 1 if repetitions is None else repetitions
super().__init__(choices=lchoices, repetitions=repetitions,
weights=Array(shape=(rep, len(lchoices)), mutable_sigma=False))
self._deterministic = deterministic
self._indices: tp.Optional[np.ndarray] = None
def _get_name(self) -> str:
name = super()._get_name()
cls = self.__class__.__name__
assert name.startswith(cls)
if self._deterministic:
name = cls + "{det}" + name[len(cls):]
return name
@property
def indices(self) -> np.ndarray: # delayed choice
"""Index of the chosen option
"""
if self._indices is None:
self._draw(deterministic=self._deterministic)
assert self._indices is not None
return self._indices
@property
def weights(self) -> Array:
"""The weights used to draw the value
"""
return self["weights"] # type: ignore
@property
def probabilities(self) -> np.ndarray:
"""The probabilities used to draw the value
"""
exp = np.exp(self.weights.value)
return exp / np.sum(exp)
def _find_and_set_value(self, values: tp.Any) -> np.ndarray:
indices = super()._find_and_set_value(values)
self._indices = indices
# force new probabilities
arity = self.weights.value.shape[1]
coeff = discretization.weight_for_reset(arity)
self.weights._value.fill(0.0) # reset since there is no reference
out = np.array(self.weights._value, copy=True) # just a zero matrix
out[np.arange(indices.size), indices] = coeff
self.weights.set_standardized_data(out.ravel(), deterministic=True)
return indices
def _draw(self, deterministic: bool = True) -> None:
encoder = discretization.Encoder(self.weights.value, rng=self.random_state)
self._indices = encoder.encode(deterministic=deterministic or self._deterministic)
def _internal_set_standardized_data(self: C, data: np.ndarray, reference: C, deterministic: bool = False) -> None:
super()._internal_set_standardized_data(data, reference=reference, deterministic=deterministic)
self._draw(deterministic=deterministic)
def mutate(self) -> None:
# force random_state sync
self.random_state # pylint: disable=pointless-statement
self.weights.mutate()
self._draw(deterministic=self._deterministic)
indices = set(self.indices)
for ind in indices:
self.choices[ind].mutate()
def _internal_spawn_child(self: C) -> C:
choices = (y for x, y in sorted(self.choices.spawn_child()._content.items()))
child = self.__class__(choices=choices, deterministic=self._deterministic, repetitions=self._repetitions)
child._content["weights"] = self.weights.spawn_child()
return child
class TransitionChoice(BaseChoice):
"""Ordered categorical parameter, choosing one of the provided choice options as a value, with continuous transitions.
    The choices can be Parameters, in which case their value will be returned instead.
The chosen parameter is drawn using transitions between current choice and the next/previous ones.
Parameters
----------
choices: list
a list of possible values or Parameters for the variable.
transitions: np.ndarray or Array
the transition weights. During transition, the direction (forward or backward will be drawn with
equal probabilities), then the transitions weights are normalized through softmax, the 1st value gives
the probability to remain in the same state, the second to move one step (backward or forward) and so on.
Note
----
- the "mutate" method only mutates the weights and the chosen Parameter (if it is not constant),
leaving others untouched
- in order to support export to standardized space, the index is encoded as a scalar. A normal distribution N(O,1)
on this scalar yields a uniform choice of index. This may come to evolve for simplicity's sake.
- currently, transitions are computed through softmax, this may evolve since this is somehow impractical
"""
def __init__(
self,
choices: tp.Iterable[tp.Any],
transitions: tp.Union[ArrayLike, Array] = (1.0, 1.0),
repetitions: tp.Optional[int] = None,
) -> None:
choices = list(choices)
positions = Array(init=len(choices) / 2.0 * np.ones((repetitions if repetitions is not None else 1,)))
positions.set_bounds(0, len(choices), method="gaussian")
super().__init__(choices=choices,
repetitions=repetitions,
positions=positions,
transitions=transitions if isinstance(transitions, Array) else np.array(transitions, copy=False))
assert self.transitions.value.ndim == 1
@property
def indices(self) -> np.ndarray:
return np.minimum(len(self) - 1e-9, self.positions.value).astype(int)
def _find_and_set_value(self, values: tp.Any) -> np.ndarray:
indices = super()._find_and_set_value(values) # only one value for this class
self._set_index(indices)
return indices
def _set_index(self, indices: np.ndarray) -> None:
self.positions.value = indices + 0.5
@property
def transitions(self) -> Array:
"""The weights used to draw the step to the next value
"""
return self["transitions"] # type: ignore
@property
def position(self) -> Array:
"""The continuous version of the index (used when working with standardized space)
"""
warnings.warn("position is replaced by positions in order to allow for repetitions", DeprecationWarning)
return self.positions
@property
def positions(self) -> Array:
"""The continuous version of the index (used when working with standardized space)
"""
return self["positions"] # type: ignore
def mutate(self) -> None:
# force random_state sync
self.random_state # pylint: disable=pointless-statement
transitions = core.as_parameter(self.transitions)
transitions.mutate()
rep = 1 if self._repetitions is None else self._repetitions
#
enc = discretization.Encoder(np.ones((rep, 1)) * np.log(self.transitions.value),
self.random_state)
moves = enc.encode()
signs = self.random_state.choice([-1, 1], size=rep)
new_index = np.clip(self.indices + signs * moves, 0, len(self) - 1)
self._set_index(new_index.ravel())
# mutate corresponding parameter
indices = set(self.indices)
for ind in indices:
self.choices[ind].mutate()
def _internal_spawn_child(self: T) -> T:
choices = (y for x, y in sorted(self.choices.spawn_child()._content.items()))
child = self.__class__(choices=choices, repetitions=self._repetitions)
child._content["positions"] = self.positions.spawn_child()
child._content["transitions"] = self.transitions.spawn_child()
return child
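
# A minimal usage sketch of TransitionChoice (not part of the original module; it assumes
# this module is importable as part of the surrounding parametrization package and that
# `.value` resolves to the currently selected option, as in nevergrad's public API):
#
#     choice = TransitionChoice(["low", "medium", "high"], transitions=[1.0, 1.0])
#     choice.mutate()          # steps at most a few positions away from the current index
#     print(choice.indices)    # integer index (or indices, with repetitions) of the selection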
|
the-stack_106_14994
|
# Ep03 - PYTHON TO COMMUNICATE WITH SENSORS
import serial
from serial.tools import list_ports
# list the available serial (Arduino) ports
for port in list_ports.comports():
    print("Device: {} - port: {}".format(port.description, port.device))
conexao = serial.Serial('COM3', 115200)  # port on Linux: '/dev/ttyUSB0'
acao = input("<L> to turn On \n<D> to turn Off \nChoice: ").upper()
while acao == "L" or acao == "D":
    if acao == "L":
        conexao.write(b'1')
    else:
        conexao.write(b'0')
    acao = input("<L> to turn On \n<D> to turn Off \nChoice: ").upper()
conexao.close()
print("Connection closed.")
|
the-stack_106_14996
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_evpn_vni
version_added: "2.2"
short_description: Manages Cisco EVPN VXLAN Network Identifier (VNI).
description:
- Manages Cisco Ethernet Virtual Private Network (EVPN) VXLAN Network
Identifier (VNI) configurations of a Nexus device.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- default, where supported, restores params default value.
- RD override is not permitted. You should set it to the default values
first and then reconfigure it.
- C(route_target_both), C(route_target_import) and
C(route_target_export valid) values are a list of extended communities,
(i.e. ['1.2.3.4:5', '33:55']) or the keywords 'auto' or 'default'.
- The C(route_target_both) property is discouraged due to the inconsistent
behavior of the property across Nexus platforms and image versions.
For this reason it is recommended to use explicit C(route_target_export)
and C(route_target_import) properties instead of C(route_target_both).
- RD valid values are a string in one of the route-distinguisher formats,
the keyword 'auto', or the keyword 'default'.
options:
vni:
description:
- The EVPN VXLAN Network Identifier.
required: true
default: null
route_distinguisher:
description:
- The VPN Route Distinguisher (RD). The RD is combined with
the IPv4 or IPv6 prefix learned by the PE router to create a
globally unique address.
required: true
default: null
route_target_both:
description:
- Enables/Disables route-target settings for both import and
export target communities using a single property.
required: false
default: null
route_target_import:
description:
- Sets the route-target 'import' extended communities.
required: false
default: null
route_target_export:
description:
            - Sets the route-target 'export' extended communities.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_evpn_vni:
vni: 6000
route_distinguisher: "60:10"
route_target_import:
- "5000:10"
- "4100:100"
route_target_export: auto
route_target_both: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"route_target_import": ["5000:10", "4100:100",
"5001:10"],"vni": "6000"}
existing:
description: k/v pairs of existing EVPN VNI configuration
returned: verbose mode
type: dict
sample: {"route_distinguisher": "70:10", "route_target_both": [],
"route_target_export": [], "route_target_import": [
"4100:100", "5000:10"], "vni": "6000"}
end_state:
description: k/v pairs of EVPN VNI configuration after module execution
returned: verbose mode
type: dict
sample: {"route_distinguisher": "70:10", "route_target_both": [],
"route_target_export": [], "route_target_import": [
"4100:100", "5000:10", "5001:10"], "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evpn", "vni 6000 l2", "route-target import 5001:10"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
PARAM_TO_COMMAND_KEYMAP = {
'vni': 'vni',
'route_target_both': 'route-target both',
'route_target_import': 'route-target import',
'route_target_export': 'route-target export',
'route_distinguisher': 'rd'
}
WARNINGS = []
import time
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
def get_route_target_value(arg, config, module):
splitted_config = config.splitlines()
value_list = []
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
for line in splitted_config:
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in line.strip():
value = REGEX.search(line).group('value')
value_list.append(value)
return value_list
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg != 'vni':
if arg == 'route_distinguisher':
existing[arg] = get_value(arg, config, module)
else:
existing[arg] = get_route_target_value(arg, config, module)
existing_fix = dict((k, v) for k, v in existing.items() if v)
if existing_fix:
existing['vni'] = module.params['vni']
else:
existing = existing_fix
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def state_present(module, existing, proposed):
commands = list()
parents = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if key.startswith('route-target'):
if value == ['default']:
existing_value = existing_commands.get(key)
if existing_value:
for target in existing_value:
commands.append('no {0} {1}'.format(key, target))
else:
if not isinstance(value, list):
value = [value]
for target in value:
if existing:
if target not in existing.get(key.replace('-', '_').replace(' ', '_')):
commands.append('{0} {1}'.format(key, target))
else:
commands.append('{0} {1}'.format(key, target))
else:
if value == 'default':
existing_value = existing_commands.get(key)
if existing_value:
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
return commands, parents
def state_absent(module, existing, proposed):
commands = ['no vni {0} l2'.format(module.params['vni'])]
parents = ['evpn']
return commands, parents
def execute_config(module, candidate):
result = {}
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
return result
def main():
argument_spec = dict(
vni=dict(required=True, type='str'),
route_distinguisher=dict(required=False, type='str'),
route_target_both=dict(required=False, type='list'),
route_target_import=dict(required=False, type='list'),
route_target_export=dict(required=False, type='list'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
state = module.params['state']
args = [
'vni',
'route_distinguisher',
'route_target_both',
'route_target_import',
'route_target_export'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'vni':
if value == 'true':
value = True
elif value == 'false':
value = False
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
candidate = CustomNetworkConfig(indent=3)
commands, parents = invoke('state_%s' % state, module, existing,
proposed)
if commands:
if (existing.get('route_distinguisher') and
proposed.get('route_distinguisher')):
if (existing['route_distinguisher'] != proposed[
'route_distinguisher'] and
proposed['route_distinguisher'] != 'default'):
WARNINGS.append('EVPN RD {0} was automatically removed. '
'It is highly recommended to use a task '
'(with default as value) to explicitly '
'unconfigure it.'.format(
existing['route_distinguisher']))
remove_commands = ['no rd {0}'.format(
existing['route_distinguisher'])]
candidate.add(remove_commands, parents=parents)
result = execute_config(module, candidate)
time.sleep(30)
candidate = CustomNetworkConfig(indent=3)
candidate.add(commands, parents=parents)
result = execute_config(module, candidate)
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
|
the-stack_106_14998
|
from ExtractAttackPattern import is_valid_attack_pattern
from CommonServerPython import *
import ExtractAttackPattern as eap
def test_extract_existing_mitre_ids(mocker):
"""
Given
- MITRE IDs to extract
When
- we need to get its value (name).
Then
- run the ExtractAttackPattern script
    Validate that the name was extracted successfully from the ID.
"""
mocker.patch.object(eap, 'get_mitre_results', return_value=[
{'id': 'T1530', 'value': 'Data from Cloud Storage Object'},
{'id': 'T1602', 'value': 'Data from Configuration Repository'}
])
indicators = is_valid_attack_pattern(['T1530', 'T1602'])
assert indicators == ['Data from Cloud Storage Object', 'Data from Configuration Repository']
def test_extract_non_existing_mitre_ids(mocker):
mocker.patch.object(eap, 'get_mitre_results', return_value=[])
indicators = is_valid_attack_pattern(['T1111', 'T2222'])
assert indicators == []
def test_extract_existing_mitre_id(mocker):
"""
Given
- MITRE ID to extract
When
- we need to get its value (name).
Then
- run the ExtractAttackPattern script
    Validate that the name was extracted successfully from the ID.
"""
mocker.patch.object(eap, 'get_mitre_results', return_value=[{
'id': 'T1530', 'value': 'Data from Cloud Storage Object'}])
indicators = is_valid_attack_pattern(['T1530'])
assert indicators == ['Data from Cloud Storage Object']
def test_extract_with_value_error(mocker):
"""
Given
- MITRE ID to extract
When
    - we need to get its value (name), but the MITRE integration is disabled.
Then
- run the ExtractAttackPattern script
    Validate that the result is empty.
    Validate the error message.
"""
mocker.patch.object(demisto, 'info')
mocker.patch.object(eap, 'get_mitre_results', side_effect=ValueError(
'verify you have proper integration enabled to support it'))
mocker.patch.object(demisto, 'info')
result = is_valid_attack_pattern(['T1530', 'T1602'])
assert not result
assert demisto.info.call_args[0][0] == 'Unsupported Command : mitre-get-indicator-name, ' \
'verify you have proper integration (MITRE ATTACK v2) enabled to support it. ' \
'This Is needed in order to auto extract MITRE IDs and translate them to Attack Pattern IOCs'
mocker.patch.object(eap, 'get_mitre_results', side_effect=ValueError(
'Something went wrong'))
mocker.patch.object(demisto, 'info')
result = is_valid_attack_pattern(['T1530', 'T1602'])
assert not result
assert demisto.info.call_args[0][0] == 'MITRE Attack formatting script, Something went wrong'
def test_extract_with_unknown_error(mocker):
"""
Given
- MITRE ID to extract
When
- we need to get its value (name), but there is some error.
Then
- run the ExtractAttackPattern script
    Validate that the result is empty.
    Validate the error message.
"""
mocker.patch.object(eap, 'get_mitre_results', side_effect=TypeError(
'Something went wrong'))
mocker.patch.object(demisto, 'info')
result = is_valid_attack_pattern(['T1530', 'T1602'])
assert not result
assert demisto.info.call_args[0][0] == "MITRE Attack formatting script, Something went wrong"
|
the-stack_106_15000
|
# This program uses a dictionary to keep friends'
# names and birthdays.
LOOK_UP = 1
ADD = 2
CHANGE = 3
DELETE = 4
QUIT = 5
def main():
birthdays = {}
choice = 0
while choice != QUIT:
choice = get_menu_choice()
if choice == LOOK_UP:
look_up(birthdays)
elif choice == ADD:
add(birthdays)
elif choice == CHANGE:
change(birthdays)
elif choice == DELETE:
delete(birthdays)
def get_menu_choice():
print()
print('Friends and Their Birthdays')
print('---------------------------')
print('1. Look up a birthday')
print('2. Add a new birthday')
print('3. Change a birthday')
print('4. Delete a birthday')
    print('5. Quit the program')
choice = int(input('Enter your choice: '))
while choice < LOOK_UP or choice > QUIT:
choice = int(input('Enter a valid choice: '))
return choice
def look_up(birthdays):
name = input('Enter a name: ')
print(birthdays.get(name,'Not found.'))
def add(birthdays):
name = input('Enter a name: ')
bday = input('Enter a birthday: ')
if name not in birthdays:
birthdays[name] = bday
else:
print('That entry already exists.')
def change(birthdays):
name = input('Enter a name: ')
if name in birthdays:
bday = input('Enter the new birthday: ')
birthdays[name] = bday
else:
print('That name is not found.')
def delete(birthdays):
name = input('Enter a name: ')
if name in birthdays:
del birthdays[name]
else:
print('That name is not found.')
if __name__ == '__main__':
main()
# End
|
the-stack_106_15002
|
#!/usr/bin/env python3
import re
import torch
import logging
import speechbrain as sb
from speechbrain.utils.data_utils import undo_padding
from cl.base_asr_model import BaseASR
logger = logging.getLogger(__name__)
# Define training procedure
class ASR_Old(BaseASR):
def __init__(self, modules=None, opt_class=None, hparams=None, run_opts=None,
checkpointer=None, sorting=None, train_set=None, tokenizer=None,
train_loader_kwargs=None, *args, **kwargs):
        super().__init__(
modules=modules,
opt_class=opt_class,
hparams=hparams,
run_opts=run_opts,
checkpointer=checkpointer,
sorting=sorting,
train_set=train_set,
train_loader_kwargs=train_loader_kwargs,
*args, **kwargs,
)
self.tokenizer = tokenizer
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Forward pass
feats = self.hparams.compute_features(wavs)
feats = self.modules.normalize(feats, wav_lens)
## Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
feats = self.hparams.augmentation(feats)
x = self.modules.enc(feats.detach())
e_in = self.modules.emb(tokens_bos) # y_in bos + tokens
h, _ = self.modules.dec(e_in, x, wav_lens)
# Output layer for seq2seq log-probabilities
logits = self.modules.seq_lin(h)
p_seq = self.hparams.log_softmax(logits)
# Compute outputs
if stage == sb.Stage.TRAIN:
current_epoch = self.hparams.epoch_counter.current
if current_epoch <= self.hparams.number_of_ctc_epochs:
# Output layer for ctc log-probabilities
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
out = p_seq, p_ctc, wav_lens
else:
out = p_seq, wav_lens
# For metric-based curriculum we also need to decode in order to later get
# the predicted tokens and calculate wer/cer.
if self.sorting in getattr(self.train_set, "METRIC_SORTERS", []):
p_tokens, scores = self.hparams.beam_searcher(x, wav_lens)
out += (p_tokens, scores)
return out
else:
p_tokens, scores = self.hparams.beam_searcher(x, wav_lens)
return p_seq, wav_lens, p_tokens, scores
def compute_objectives(
self,
predictions,
batch,
stage,
):
"""Computes the loss (CTC+NLL) given predictions and targets."""
if stage != sb.Stage.TRAIN:
# p_seq, wav_lens, predicted_tokens = predictions
# Needed in beam searcher
# predicted_tokens = [h[0] for h in predicted_tokens]
predicted_tokens = [h[0] for h in predictions[2]]
ids = batch.id
loss = self.compute_loss(
predictions,
batch,
stage=stage,
reduction="mean",
weight=self.hparams.ctc_weight
)
if stage != sb.Stage.TRAIN:
tokens, tokens_lens = batch.tokens
# Decode token terms to words
# logging.info(f"predicted tokens: {predicted_tokens}")
predicted_words = self.tokenizer(
predicted_tokens, task="decode_from_list"
)
# Convert indices to words
target_words = undo_padding(tokens, tokens_lens)
target_words = self.tokenizer(target_words, task="decode_from_list")
# Process predictions and truth so that they don't contain special tokens (.br, .fr etc)
            predicted_words = [re.sub(r"\.\w+|-", "", ' '.join(txt)).strip().split() for txt in predicted_words]
            target_words = [re.sub(r"\.\w+|-", "", ' '.join(txt)).strip().split() for txt in target_words]
# import random
# if random.random() > 0.99:
# print(" preds-truth pairs:", list(zip(predicted_words, target_words))[:1])
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.optimizer.step()
self.optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_valid_test_stage_start(self, stage):
"""Gets called before validation or testing"""
assert stage != sb.Stage.TRAIN
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
super().on_stage_end(stage, stage_loss, epoch)
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
if hasattr(self.hparams, 'cer_file'):
with open(self.hparams.cer_file, "w") as c:
self.cer_metric.write_stats(c)
|
the-stack_106_15003
|
import numpy as np
import pickle
class BasisSet():
"""
This object contains the Basis Set data of the molecule
"""
def __init__(self,
basis_set_name,
atomic_numbers,
atomic_symbols,
shell_type,
n_primitives,
atom_map,
p_exponents,
c_coefficients,
p_c_coefficients):
"""
:param basis_set_name: the name of basis set
:param atomic_numbers: atomic numbers
:param atomic_symbols: the symbols
:param shell_type: types of shell (check typeList)
:param n_primitives: number of primitives
:param atom_map: map shell - atom
:param p_exponents: exponents of basis functions
:param c_coefficients: coefficients of basis functions
:param p_c_coefficients: coefficients of P functions in SP shells
:return:
"""
typeList = {'0': ['s', 1],
'1': ['p', 3],
'2': ['d', 6],
'3': ['f', 10],
'-1': ['sp', 4],
'-2': ['d_', 5],
'-3': ['f_', 7]}
atomic_numbers = [int(an) for an in atomic_numbers]
atom_map = np.array(atom_map, dtype=int)
# print(atom_map)
self._basis_set = {'name': basis_set_name,
'primitive_type': 'gaussian'}
shell_type_index = [0] + np.cumsum([typeList['{}'.format(s)][1]
for s in shell_type]).tolist()
prim_from_shell_index = [0] + np.cumsum(np.array(n_primitives, dtype=int)).tolist()
# print(shell_type_index)
# print(prim_from_shell_index)
atoms_data = []
for iatom, atomic_number in enumerate(atomic_numbers):
symbol = str(atomic_symbols[iatom])
shell_from_atom_counts = np.unique(atom_map, return_counts=True)[1]
shell_from_atom_index = np.unique(atom_map, return_index=True)[1]
# print(shell_from_atom_counts)
# print('atom_indexes', shell_from_atom_index)
# print('atom_number', iatom)
# print('shells index', shell_from_atom_index[iatom])
# print('number of shells', shell_from_atom_counts[iatom])
shells_data = []
for ishell in range(shell_from_atom_counts[iatom]):
st = typeList['{}'.format(shell_type[shell_from_atom_index[iatom] + ishell])]
# print(st, ishell)
ini_prim = prim_from_shell_index[shell_from_atom_index[iatom] + ishell]
fin_prim = prim_from_shell_index[shell_from_atom_index[iatom] + ishell + 1]
# print(ini_prim)
# print(fin_prim)
shells_data.append({
'shell_type': st[0],
'functions': st[1],
'p_exponents': p_exponents[ini_prim: fin_prim],
'con_coefficients': c_coefficients[ini_prim: fin_prim],
'p_con_coefficients': p_c_coefficients[ini_prim: fin_prim],
})
atoms_data.append({'shells': shells_data,
'symbol': symbol,
'atomic_number': atomic_number})
self._basis_set['atoms'] = atoms_data
def __hash__(self):
return hash(pickle.dumps(self._basis_set, protocol=2))
def __eq__(self, other):
return hash(other) == hash(self)
def get_dictionary(self):
return self._basis_set
def get_qc_input_txt(self):
"""
        Return the basis set as plain text in the Q-Chem/Gaussian input format
:return: the basis set
"""
basis_txt = ''
for atom in self._basis_set['atoms']:
basis_txt += atom['symbol'] + '\n'
for shell in atom['shells']:
basis_txt += '{} {} {}\n'.format(shell['shell_type'].upper(), len(shell['p_exponents']), 1.00)
for p, c, pc in zip(shell['p_exponents'], shell['con_coefficients'], shell['p_con_coefficients']):
if shell['shell_type'].upper() in ['SP']:
basis_txt += '{:15.10e} {:15.10e} {:15.10e} \n'.format(p, c, pc)
else:
basis_txt += '{:15.10e} {:15.10e} \n'.format(p, c)
basis_txt += '****\n'
return basis_txt
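
# A minimal usage sketch (not part of the original module): builds a single-atom BasisSet
# for hydrogen with one s shell of three primitives. The exponents and coefficients below
# are illustrative values approximating STO-3G, and the 1-based atom_map follows the
# fchk-style convention assumed here.
if __name__ == '__main__':
    bs = BasisSet(basis_set_name='STO-3G',
                  atomic_numbers=[1],
                  atomic_symbols=['H'],
                  shell_type=[0],
                  n_primitives=[3],
                  atom_map=[1],
                  p_exponents=[3.42525091, 0.62391373, 0.16885540],
                  c_coefficients=[0.15432897, 0.53532814, 0.44463454],
                  p_c_coefficients=[0.0, 0.0, 0.0])
    print(bs.get_qc_input_txt())  # prints the shell block in Q-Chem/Gaussian format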
|
the-stack_106_15007
|
import os
import time
from tensorflow.keras.layers import LSTM
# Window size or the sequence length
N_STEPS = 70
# Lookup step, 1 is the next day
LOOKUP_STEP = 1
# test ratio size, 0.2 is 20%
TEST_SIZE = 0.2
# features to use
FEATURE_COLUMNS = ["adjclose", "volume", "open", "high", "low"]
# date now
date_now = time.strftime("%Y-%m-%d")
### model parameters
N_LAYERS = 3
# LSTM cell
CELL = LSTM
# 256 LSTM neurons
UNITS = 256
# 40% dropout
DROPOUT = 0.4
# whether to use bidirectional RNNs
BIDIRECTIONAL = False
### training parameters
# mean absolute error loss
# LOSS = "mae"
# huber loss
LOSS = "huber_loss"
OPTIMIZER = "adam"
BATCH_SIZE = 64
EPOCHS = 400
# Tesla stock market
ticker = "TSLA"
ticker_data_filename = os.path.join("data", f"{ticker}_{date_now}.csv")
# model name to save, making it as unique as possible based on parameters
model_name = f"{date_now}_{ticker}-{LOSS}-{OPTIMIZER}-{CELL.__name__}-seq-{N_STEPS}-step-{LOOKUP_STEP}-layers-{N_LAYERS}-units-{UNITS}"
if BIDIRECTIONAL:
model_name += "-b"
|
the-stack_106_15010
|
import math
from typing import List

import torch
import torch.nn as nn
import torch.nn.functional as F

from .embedding import TimeEmbedding
class _Sparsemax(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
"""
Args:
ctx: autograd context
input (torch.Tensor): 2-D tensor, (N, C).
Returns:
torch.Tensor: (N, C).
"""
dim = 1
# translate input by max for numerical stability.
input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(input)
z_sorted = torch.sort(input, dim=dim, descending=True)[0]
input_size = input.size()[dim]
range_values = torch.arange(1, input_size + 1).to(input.device)
range_values = range_values.expand_as(z_sorted)
# Determine sparsity of projection
range_ = torch.arange(
1, input.size(dim) + 1, dtype=input.dtype, device=input.device
)
bound = 1.0 + range_ * z_sorted
cumsum_zs = torch.cumsum(z_sorted, dim)
is_gt = torch.gt(bound, cumsum_zs)
k = torch.max(is_gt * range_, dim=dim, keepdim=True)[0]
zs_sparse = is_gt * z_sorted
taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k
taus = taus.expand_as(input)
output = (input - taus).clamp(min=0.0)
ctx.save_for_backward(output)
return output
@staticmethod
def backward(ctx, grad_output):
output, = ctx.saved_tensors
dim = 1
nonzeros = output != 0.0
sum_grad = torch.sum(grad_output * nonzeros, dim=dim, keepdim=True) / torch.sum(
nonzeros, dim=dim, keepdim=True
)
return nonzeros * (grad_output - sum_grad.expand_as(grad_output))
sparsemax = _Sparsemax.apply
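# Illustrative behaviour of sparsemax (not part of the original module): unlike softmax,
# small logits are pushed exactly to zero while each row still sums to 1, e.g.
#   sparsemax(torch.tensor([[0.5, 1.0, 1.2]])) ≈ tensor([[0.0, 0.4, 0.6]])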
class GLU(nn.Module):
def forward(self, input):
return F.glu(input)
class GhostBatchNorm(nn.Module):
def __init__(self, num_features: int, momentum: float, ghost_batch_size: int):
super(GhostBatchNorm,self).__init__()
self.bn = nn.BatchNorm1d(num_features, momentum=momentum)
self.ghost_batch_size = ghost_batch_size
def forward(self, input_tensor):
batch_size = input_tensor.size(0)
chunks = input_tensor.chunk((batch_size - 1) // self.ghost_batch_size + 1, dim=0)
normalized_chunks = [self.bn(chunk) for chunk in chunks]
return torch.cat(normalized_chunks, dim=0)
class SharedFeatureTransformer(nn.Module):
def __init__(self,
in_channels: int,
hidden_size: int,
bn_momentum: float,
ghost_batch_size: int,
):
super(SharedFeatureTransformer,self).__init__()
self.block = nn.Sequential(
nn.Linear(in_channels, hidden_size * 2, bias=False),
GhostBatchNorm(
hidden_size * 2, momentum=bn_momentum, ghost_batch_size=ghost_batch_size
),
GLU(),
)
self.residual_block = nn.Sequential(
nn.Linear(hidden_size, hidden_size * 2, bias=False),
GhostBatchNorm(
hidden_size * 2, momentum=bn_momentum, ghost_batch_size=ghost_batch_size
),
GLU(),
)
def forward(self, input_tensor):
"""
Args:
input (torch.Tensor): (N, C)
Returns:
torch.Tensor: (N, C)
"""
x = self.block(input_tensor)
return (x + self.residual_block(x)) * math.sqrt(0.5)
class FeatureTransformer(nn.Module):
def __init__(self, in_channels: int, bn_momentum: float, ghost_batch_size: int):
super().__init__()
self.residual_block = nn.Sequential(
nn.Linear(in_channels, in_channels * 2, bias=False),
GhostBatchNorm(
in_channels * 2, momentum=bn_momentum, ghost_batch_size=ghost_batch_size
),
GLU(),
)
def forward(self, input_tensor):
"""
Args:
input (torch.Tensor): (N, C)
Returns:
torch.Tensor: (N, C)
"""
return (input_tensor + self.residual_block(input_tensor)) * math.sqrt(0.5)
class TabNet(nn.Module):
def __init__(self,
ent_vocab_size:int,
ent_size: int,
time_size:int,
value_size:int,
ent_dim=80,
time_embedding_dim=30,
year_span = 101,
feature_channels = 10,
emb_dropout=0.2,
n_decision_steps:int=4,
bn_momentum: float = 0.1,
n_d: int = 16,
n_a: int = 16,
relaxation_factor: float = 2.0,
ghost_batch_size: int = 256,
):
"""
Args:
dense_channels: number of dense features.
ent_size: entity feature cardinalities.
out_channels: number of output channels.
n_decision_steps: number of decision step layers.
cat_emb_dim: categorical feature embedding size.
bn_momentum: batch normalization momentum.
n_d: hidden size of decision output.
n_a: hidden size of attentive transformer.
relaxation_factor: relaxation parameter of feature selection regularization.
ghost_batch_size: ghost batch size for GhostBatchNorm.
"""
super(TabNet,self).__init__()
        # Embedding region
self.ent_vocab_size = ent_vocab_size
self.ent_size = ent_size
self.time_size = time_size
self.value_size = value_size
self.out_channels = 1
self.year_span = year_span
self.ent_dim = ent_dim
self.feature_channels = feature_channels
self.time_embedding_dim = time_embedding_dim
self.time_embed = TimeEmbedding(years_num=year_span,embedding_dim=time_embedding_dim,
emb_dropout=emb_dropout)
self.ent_embed = nn.Embedding(self.ent_vocab_size,self.ent_dim)
nn.init.xavier_uniform_(self.ent_embed.weight, gain=nn.init.calculate_gain('relu'))
self.n_d = n_d
self.n_a = n_a
self.bais_wet = nn.Linear(self.ent_size*self.ent_dim,feature_channels)
self.bais_wtt = nn.Linear(self.time_size*self.time_embedding_dim,feature_channels)
self.bais_wvt = nn.Linear(self.value_size*self.feature_channels,feature_channels)
self.n_decision_steps = n_decision_steps
self.relaxation_factor = relaxation_factor
self.dense_bn = nn.BatchNorm1d(feature_channels, momentum=bn_momentum)
hidden_size = n_d + n_a
shared_feature_transformer = SharedFeatureTransformer(
feature_channels, hidden_size, bn_momentum, ghost_batch_size
)
self.feature_transformers = nn.ModuleList(
[
nn.Sequential(
shared_feature_transformer,
FeatureTransformer(hidden_size, bn_momentum, ghost_batch_size),
FeatureTransformer(hidden_size, bn_momentum, ghost_batch_size),
)
for _ in range(n_decision_steps)
]
)
self.attentive_transformers = nn.ModuleList(
[
nn.Sequential(
nn.Linear(n_a, feature_channels, bias=False),
GhostBatchNorm(
feature_channels,
momentum=bn_momentum,
ghost_batch_size=ghost_batch_size,
),
)
for _ in range(n_decision_steps - 1)
]
)
self.month_span = 12
self.day_span = 31
self.fc_year = nn.Linear(n_d, self.year_span, bias=False)
self.fc_month= nn.Linear(n_d, self.month_span, bias=False)
self.fc_day = nn.Linear(n_d, self.day_span, bias=False)
self.fc_price = nn.Linear(n_d, self.out_channels, bias=False)
def forward(self,ent_tensor,val_tensor,year_tensor,month_tensor,day_tensor):
batch_size = ent_tensor.shape[0]
ent_embs = self.ent_embed(ent_tensor)
time_embs = self.time_embed(year_tensor,month_tensor,day_tensor)
val_tensor = val_tensor.unsqueeze(1).repeat(1,self.feature_channels,1)
device = ent_tensor.device
        # align the entity, time and value features to a common dimension
hid_ent = torch.relu(self.bais_wet(ent_embs.reshape(batch_size,-1)))
hid_time = torch.relu(self.bais_wtt(time_embs.reshape(batch_size,-1)))
hid_value = torch.relu(self.bais_wvt(val_tensor.reshape(batch_size,-1)))
feature = hid_ent + hid_time + hid_value
aggregated_output = torch.zeros(
batch_size, self.n_d, dtype=torch.float, device=device
) # (N,n_d)
masked_feature = feature # (N,linear_dim)
prior_scale_term = torch.ones(
batch_size, feature.size(1), dtype=torch.float, device=device
) # (N,linear_dim)
mask = torch.zeros_like(prior_scale_term) # (N,linear_dim)
masks: List[torch.Tensor] = []
aggregated_masks = torch.zeros_like(prior_scale_term) # (N,linear_dim)
sparsity_regularization = torch.tensor(0.0).to(dtype=torch.float, device=device)
for step in range(self.n_decision_steps):
x = self.feature_transformers[step](masked_feature) # (N, hidden_size)
decision_out, coef_out = x.split(self.n_d, dim=1) # (N, n_d), (N, n_a)
if step != 0:
decision_out = F.relu(decision_out)
aggregated_output += decision_out
# For visualization and interpretability, aggregate feature mask values for all steps.
scale = decision_out.sum(1, keepdim=True) / (self.n_decision_steps - 1)
aggregated_masks += scale * mask
if step != self.n_decision_steps - 1:
# Prepare mask values for the next decision step.
mask = self.attentive_transformers[step](coef_out)
mask = mask * prior_scale_term
mask = sparsemax(mask)
# Update prior scale term to regulate feature selection
prior_scale_term = prior_scale_term * (self.relaxation_factor - mask)
# Update sparsity regularization
sparsity_regularization += (mask * (mask + 1e-5).log()).sum(1).mean(
0
) / (self.n_decision_steps - 1)
masked_feature = mask * feature
masks.append(mask)
        # softmax over the class dimension (dim=-1), not the batch dimension
        year_logits = F.softmax(self.fc_year(aggregated_output), dim=-1).squeeze()
        month_logits = F.softmax(self.fc_month(aggregated_output), dim=-1).squeeze()
        day_logits = F.softmax(self.fc_day(aggregated_output), dim=-1).squeeze()
price_value = self.fc_price(aggregated_output).squeeze()
# , masks, sparsity_regularization
return (year_logits,month_logits,day_logits),price_value
class CombineLoss(nn.Module):
def __init__(self,v_lambda):
super(CombineLoss,self).__init__()
        assert 0.0 <= v_lambda <= 1.0
self.v_lambda = v_lambda
self.cross_entropy_loss = nn.CrossEntropyLoss()
self.mse_loss = nn.MSELoss()
def forward(self,time_logits,price_value,target_time,target_value):
loss = 0.0
for y_target,y_pred in zip(target_time,time_logits):
loss += self.cross_entropy_loss(y_pred,y_target)
loss = loss*self.v_lambda+(1-self.v_lambda)*self.mse_loss(price_value,target_value)
return loss
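
# A minimal training-step sketch (not part of the original module; tensor shapes and
# hyper-parameters are illustrative and depend on the TimeEmbedding implementation):
#
#     model = TabNet(ent_vocab_size=1000, ent_size=4, time_size=3, value_size=6)
#     criterion = CombineLoss(v_lambda=0.5)
#     (year_logits, month_logits, day_logits), price = model(ent, val, year, month, day)
#     loss = criterion((year_logits, month_logits, day_logits), price,
#                      (year_target, month_target, day_target), price_target)
#     loss.backward()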
|
the-stack_106_15013
|
import requests
import logging
from main.client.cryptostats.crypto_stats_client import CryptoStatsClient
from main.entities.crypto_stats import CryptoStats
class CoinbaseCryptoStatsClient(CryptoStatsClient):
"""Fetch crypto trading stats from Coinbase"""
def __init__ (self, base_url: str = 'https://api-public.sandbox.pro.coinbase.com'):
self.base_url = base_url
async def get_crypto_trading_stats(self, product_id: str) -> CryptoStats:
# Make the request to Coinbase Pro API
logging.debug('Sending stats request to ' + self.base_url + ' for ' + product_id.lower())
response = requests.get(self.base_url + '/products/' + product_id.lower() + '/stats')
# Check response
if response.status_code != 200:
            raise Exception('An error occurred during request')
# Extract JSON payload
payload = response.json()
# Return response
return CryptoStats(
open=float(payload.get('open', 0.0)),
high=float(payload.get('high', 0.0)),
low=float(payload.get('low', 0.0)),
volume=float(payload.get('volume', 0.0)),
last=float(payload.get('last', 0.0)),
volume30d=float(payload.get('volume_30day', 0.0)))
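
# A minimal usage sketch (not part of the original module; requires network access and an
# asyncio event loop; 'BTC-USD' is an illustrative product id):
#
#     import asyncio
#     client = CoinbaseCryptoStatsClient()
#     stats = asyncio.run(client.get_crypto_trading_stats('BTC-USD'))
#     print(stats.last, stats.volume)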
|
the-stack_106_15014
|
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import paddle
import paddle.fluid as fluid
import utils.utility as utility
def _calc_label_smoothing_loss(softmax_out, label, class_dim, epsilon):
"""Calculate label smoothing loss
Returns:
label smoothing loss
"""
label_one_hot = fluid.layers.one_hot(input=label, depth=class_dim)
smooth_label = fluid.layers.label_smooth(
label=label_one_hot, epsilon=epsilon, dtype="float32")
loss = fluid.layers.cross_entropy(
input=softmax_out, label=smooth_label, soft_label=True)
return loss
def _basic_model(data, model, args, is_train):
image = data[0]
label = data[1]
net_out = model.net(input=image, class_dim=args.class_dim)
softmax_out = fluid.layers.softmax(net_out, use_cudnn=False)
if is_train and args.use_label_smoothing:
cost = _calc_label_smoothing_loss(softmax_out, label, args.class_dim,
args.label_smoothing_epsilon)
else:
cost = fluid.layers.cross_entropy(input=softmax_out, label=label)
avg_cost = fluid.layers.mean(cost)
acc_top1 = fluid.layers.accuracy(input=softmax_out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(
input=softmax_out, label=label, k=min(5, args.class_dim))
return [avg_cost, acc_top1, acc_top5]
def _googlenet_model(data, model, args, is_train):
"""GoogLeNet model output, include avg_cost, acc_top1 and acc_top5
Returns:
GoogLeNet model output
"""
image = data[0]
label = data[1]
out0, out1, out2 = model.net(input=image, class_dim=args.class_dim)
cost0 = fluid.layers.cross_entropy(input=out0, label=label)
cost1 = fluid.layers.cross_entropy(input=out1, label=label)
cost2 = fluid.layers.cross_entropy(input=out2, label=label)
avg_cost0 = fluid.layers.mean(x=cost0)
avg_cost1 = fluid.layers.mean(x=cost1)
avg_cost2 = fluid.layers.mean(x=cost2)
avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
acc_top5 = fluid.layers.accuracy(
input=out0, label=label, k=min(5, args.class_dim))
return [avg_cost, acc_top1, acc_top5]
def _mixup_model(data, model, args, is_train):
"""output of Mixup processing network, include avg_cost
"""
image = data[0]
y_a = data[1]
y_b = data[2]
lam = data[3]
net_out = model.net(input=image, class_dim=args.class_dim)
softmax_out = fluid.layers.softmax(net_out, use_cudnn=False)
if not args.use_label_smoothing:
loss_a = fluid.layers.cross_entropy(input=softmax_out, label=y_a)
loss_b = fluid.layers.cross_entropy(input=softmax_out, label=y_b)
else:
loss_a = _calc_label_smoothing_loss(softmax_out, y_a, args.class_dim,
args.label_smoothing_epsilon)
loss_b = _calc_label_smoothing_loss(softmax_out, y_b, args.class_dim,
args.label_smoothing_epsilon)
loss_a_mean = fluid.layers.mean(x=loss_a)
loss_b_mean = fluid.layers.mean(x=loss_b)
cost = lam * loss_a_mean + (1 - lam) * loss_b_mean
avg_cost = fluid.layers.mean(x=cost)
return [avg_cost]
def create_model(model, args, is_train):
"""Create model, include basic model, googlenet model and mixup model
"""
data_loader, data = utility.create_data_loader(is_train, args)
if args.model == "GoogLeNet":
loss_out = _googlenet_model(data, model, args, is_train)
else:
if args.use_mixup and is_train:
loss_out = _mixup_model(data, model, args, is_train)
else:
loss_out = _basic_model(data, model, args, is_train)
return data_loader, loss_out
|
the-stack_106_15015
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import os
import time
from prometheus_client import start_http_server, Gauge, Counter
from loguru import logger
# Log System imports
logger.info(f'[Exporter][✓] System imports')
import asyncio
import discord
# Log Discord imports
logger.info(f'[Exporter][✓] Discord imports')
# Exporter variables
DISCORD_TOKEN = os.getenv('DISCORD_TOKEN', None)
if DISCORD_TOKEN is None:
logger.error(f'[Exporter][✗] ENV var DISCORD_TOKEN not found')
exit()
EXPORTER_PORT = int(os.getenv('EXPORTER_PORT', '8080'))
POLLING_INTERVAL = int(os.getenv('POLLING_INTERVAL', 10))
logger.info(f'[Exporter][✓] Listening on :{EXPORTER_PORT}')
logger.info(f'[Exporter][✓] Polling interval {POLLING_INTERVAL}s')
# Metrics definition
# Gauges
DISCORD_PING = Gauge('discord_latency',
'The time in milliseconds that discord took to respond to a REST request.')
DISCORD_MEMBERS_REGISTERED = Gauge('discord_members_registered',
'The number of connected members on a Guild.',
['guild'])
DISCORD_MEMBERS_ONLINE = Gauge('discord_members_online',
'The number of online members on a Guild.',
['guild'])
DISCORD_BOTS_REGISTERED = Gauge('discord_bots_registered',
'The number of connected bots on a Guild.',
['guild'])
DISCORD_BOTS_ONLINE = Gauge('discord_bots_online',
'The number of online bots on a Guild.',
['guild'])
DISCORD_BOOSTS = Gauge('discord_boosts',
'The number of Server Boosts on a Guild.',
['guild'])
# Counters
DISCORD_MESSAGES = Counter('discord_messages',
'The number of messages sent on a Guild by a Member.',
['guild','member'])
DISCORD_REACTIONS = Counter('discord_reactions',
'The number of messages sent on a Guild by a Member.',
['guild','member'])
logger.info(f'[Exporter][✓] Metrics defined')
try:
    # Intents have been required since 2020 for Member and Message info
    # They need to be activated in the bot preferences in the Discord developer portal
intents = discord.Intents.default()
intents.members = True
intents.presences = True
client = discord.Client(intents=intents)
except Exception as e:
logger.error(f'[Exporter][✗] Connection failed')
else:
logger.info(f'[Exporter][✓] Connection successed')
#
# Tasks definition
#
async def request_ping(timer):
while client.is_ready:
        logger.trace(f'[Exporter][✓] Entering loop')
try:
latency = client.latency
except Exception as e:
logger.error(f'[Exporter] Unable to retrieve data [{e}]')
else:
try:
DISCORD_PING.set(latency)
except Exception as e:
logger.error(f'[Exporter] Unable to set DISCORD_PING')
await asyncio.sleep(timer)
async def request_registered(timer):
while client.is_ready:
        logger.trace(f'[Exporter][✓] Entering loop')
try:
if client.guilds:
members_registered = 0
bots_registered = 0
for guild in client.guilds:
for member in guild.members:
if member.bot is False:
members_registered += 1
else:
bots_registered += 1
DISCORD_BOTS_REGISTERED.labels(guild = guild).set(bots_registered)
DISCORD_MEMBERS_REGISTERED.labels(guild = guild).set(members_registered)
except Exception as e:
logger.error(f'[Exporter] Unable to retrieve data [{e}]')
await asyncio.sleep(timer)
async def request_online(timer):
while client.is_ready:
        logger.trace(f'[Exporter][✓] Entering loop')
try:
if client.guilds:
members_online = 0
bots_online = 0
for guild in client.guilds:
for member in guild.members:
if member.bot is False:
if member.status is not discord.Status.offline:
members_online += 1
else:
if member.status is not discord.Status.offline:
bots_online += 1
DISCORD_BOTS_ONLINE.labels(guild = guild).set(bots_online)
DISCORD_MEMBERS_ONLINE.labels(guild = guild).set(members_online)
except Exception as e:
logger.error(f'[Exporter] Unable to retrieve data [{e}]')
await asyncio.sleep(timer)
async def request_boost(timer):
while client.is_ready:
        logger.trace(f'[Exporter][✓] Entering loop')
try:
if client.guilds:
for guild in client.guilds:
DISCORD_BOOSTS.labels(guild = guild).set(guild.premium_subscription_count)
except Exception as e:
logger.error(f'[Exporter] Unable to retrieve data [{e}]')
await asyncio.sleep(timer)
# Scheduled Tasks (Launched every POLLING_INTERVAL seconds)
client.loop.create_task(request_ping(POLLING_INTERVAL))
client.loop.create_task(request_registered(POLLING_INTERVAL))
client.loop.create_task(request_online(POLLING_INTERVAL))
client.loop.create_task(request_boost(POLLING_INTERVAL))
start_http_server(EXPORTER_PORT)
@client.event
async def on_message(ctx):
try:
if ctx.author.bot is False:
DISCORD_MESSAGES.labels(guild = ctx.guild, member = ctx.author).inc()
except Exception as e:
logger.error(f'[Exporter][on_message] Unable to retrieve data [{e}]')
@client.event
async def on_reaction_add(reaction, member):
try:
if member.bot is False:
DISCORD_REACTIONS.labels(guild = member.guild, member = member).inc()
except Exception as e:
logger.error(f'[Exporter][on_reaction_add] Unable to retrieve data [{e}]')
# Run Discord client
iter = 0
while iter < 5:
try:
client.run(DISCORD_TOKEN)
break
except:
logger.error(f'[Exporter][✗] Discord client.run failed (Attempt: {iter+1}/5) ')
iter += 1
time.sleep(5)
continue
|
the-stack_106_15016
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sum tree data structure.
Used for prioritized experience replay. See prioritized_replay_buffer.py
and Schaul et al. (2015).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
class SumTree(object):
"""A sum tree data structure for storing replay priorities.
A sum tree is a complete binary tree whose leaves contain values called
priorities. Internal nodes maintain the sum of the priorities of all leaf
nodes in their subtree.
For capacity = 4, the tree may look like this:
+---+
|2.5|
+-+-+
|
+-------+--------+
| |
+-+-+ +-+-+
|1.5| |1.0|
+-+-+ +-+-+
| |
+----+----+ +----+----+
| | | |
+-+-+ +-+-+ +-+-+ +-+-+
|0.5| |1.0| |0.5| |0.5|
+---+ +---+ +---+ +---+
This is stored in a list of numpy arrays:
self.nodes = [ [2.5], [1.5, 1], [0.5, 1, 0.5, 0.5] ]
For conciseness, we allocate arrays as powers of two, and pad the excess
elements with zero values.
This is similar to the usual array-based representation of a complete binary
tree, but is a little more user-friendly.
"""
def __init__(self, capacity):
"""Creates the sum tree data structure for the given replay capacity.
Args:
capacity: int, the maximum number of elements that can be stored in this
data structure.
Raises:
ValueError: If requested capacity is not positive.
"""
assert isinstance(capacity, int)
if capacity <= 0:
raise ValueError('Sum tree capacity should be positive. Got: {}'.
format(capacity))
self.nodes = []
tree_depth = int(math.ceil(np.log2(capacity)))
level_size = 1
for _ in range(tree_depth + 1):
nodes_at_this_depth = np.zeros(level_size)
self.nodes.append(nodes_at_this_depth)
level_size *= 2
self.max_recorded_priority = 1.0
def _total_priority(self):
"""Returns the sum of all priorities stored in this sum tree.
Returns:
float, sum of priorities stored in this sum tree.
"""
return self.nodes[0][0]
def sample(self, query_value=None):
"""Samples an element from the sum tree.
Each element has probability p_i / sum_j p_j of being picked, where p_i is
the (positive) value associated with node i (possibly unnormalized).
Args:
query_value: float in [0, 1], used as the random value to select a
sample. If None, will select one randomly in [0, 1).
Returns:
int, a random element from the sum tree.
Raises:
Exception: If the sum tree is empty (i.e. its node values sum to 0), or if
the supplied query_value is larger than the total sum.
"""
if self._total_priority() == 0.0:
raise Exception('Cannot sample from an empty sum tree.')
if query_value and (query_value < 0. or query_value > 1.):
raise ValueError('query_value must be in [0, 1].')
# Sample a value in range [0, R), where R is the value stored at the root.
query_value = random.random() if query_value is None else query_value
query_value *= self._total_priority()
# Now traverse the sum tree.
node_index = 0
for nodes_at_this_depth in self.nodes[1:]:
# Compute children of previous depth's node.
left_child = node_index * 2
left_sum = nodes_at_this_depth[left_child]
# Each subtree describes a range [0, a), where a is its value.
if query_value < left_sum: # Recurse into left subtree.
node_index = left_child
else: # Recurse into right subtree.
node_index = left_child + 1
# Adjust query to be relative to right subtree.
query_value -= left_sum
return node_index
def stratified_sample(self, batch_size):
"""Performs stratified sampling using the sum tree.
Let R be the value at the root (total value of sum tree). This method will
divide [0, R) into batch_size segments, pick a random number from each of
those segments, and use that random number to sample from the sum_tree. This
is as specified in Schaul et al. (2015).
Args:
batch_size: int, the number of strata to use.
Returns:
list of batch_size elements sampled from the sum tree.
Raises:
Exception: If the sum tree is empty (i.e. its node values sum to 0).
"""
if self._total_priority() == 0.0:
raise Exception('Cannot sample from an empty sum tree.')
bounds = np.linspace(0., 1., batch_size + 1)
assert len(bounds) == batch_size + 1
segments = [(bounds[i], bounds[i+1]) for i in range(batch_size)]
query_values = [random.uniform(x[0], x[1]) for x in segments]
return [self.sample(query_value=x) for x in query_values]
def get(self, node_index):
"""Returns the value of the leaf node corresponding to the index.
Args:
node_index: The index of the leaf node.
Returns:
The value of the leaf node.
"""
return self.nodes[-1][node_index]
def set(self, node_index, value):
"""Sets the value of a leaf node and updates internal nodes accordingly.
This operation takes O(log(capacity)).
Args:
node_index: int, the index of the leaf node to be updated.
value: float, the value which we assign to the node. This value must be
nonnegative. Setting value = 0 will cause the element to never be
sampled.
Raises:
ValueError: If the given value is negative.
"""
if value < 0.0:
raise ValueError('Sum tree values should be nonnegative. Got {}'.
format(value))
self.max_recorded_priority = max(value, self.max_recorded_priority)
delta_value = value - self.nodes[-1][node_index]
# Now traverse back the tree, adjusting all sums along the way.
for nodes_at_this_depth in reversed(self.nodes):
# Note: Adding a delta leads to some tolerable numerical inaccuracies.
nodes_at_this_depth[node_index] += delta_value
node_index //= 2
assert node_index == 0, ('Sum tree traversal failed, final node index '
'is not 0.')
|
the-stack_106_15017
|
"""!
@brief History and callback update functions
@author Efthymios Tzinis {[email protected]}
@copyright University of illinois at Urbana Champaign
"""
def values_update(list_of_pairs,
history_dic,
update_mode='batch'):
"""! Update the history dictionary for each key, value pair
INPLACE and stores values for batch and epoch
:param update_mode: In batch mode the values of the specific key
would be summed and in epoch mode would be averaged throughout
the batches.
:param list_of_pairs: list of tuples e.g. [('loss', 0.9987), ...,]
:param history_dic: a dictionary that we want to keep track for
a metric under all epochs
:return: history_dic updated with all the appropriate values for
batch and epoch
"""
if update_mode == 'batch':
for k, v in list_of_pairs:
if not k+"_batch_total" in history_dic:
history_dic[k] = []
history_dic[k+"_batch_total"] = v
history_dic[k + '_batch_counter'] = 1
else:
history_dic[k + "_batch_total"] += v
history_dic[k+'_batch_counter'] += 1
elif update_mode == 'epoch':
for k, v in list_of_pairs:
history_dic[k].append(history_dic[k + "_batch_total"] /
history_dic[k + '_batch_counter'])
history_dic[k + "_batch_total"] = 0.
history_dic[k + '_batch_counter'] = 0
else:
raise NotImplementedError('Please use an update mode of epoch '
'or batch')
return history_dic
def update_best_performance(performance_dic,
epoch,
history_dic,
buffer_size=0):
"""! Update the history dictionary for the best performance so far
INPLACE and stores them in a list which has length equal to the
predefined buffer size
:return: history_dic updated with all the appropriate values for
the best performance so far
"""
if 'best_performances' not in history_dic:
history_dic['best_performances'] = [(performance_dic, epoch)]
else:
history_dic['best_performances'].append((performance_dic,
epoch))
history_dic['best_performances'] = \
sorted(history_dic['best_performances'],
key=lambda x: x[0]['sdr'])[::-1][:buffer_size]
return history_dic
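
# A minimal usage sketch of values_update (not part of the original module; the loss
# values are illustrative):
if __name__ == '__main__':
    history = {}
    values_update([('loss', 1.0)], history, update_mode='batch')
    values_update([('loss', 0.5)], history, update_mode='batch')
    values_update([('loss', 0.0)], history, update_mode='epoch')  # epoch mode ignores the value
    print(history['loss'])  # [0.75] -> average of the two batch values above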
|
the-stack_106_15021
|
from collections import defaultdict
from toolz import merge
from tornado import gen
from distributed import Queue
from distributed.utils import sync
import tensorflow as tf
def start_and_attach_server(spec, job_name=None, task_index=None, dask_worker=None):
server = tf.train.Server(spec, job_name=job_name, task_index=task_index)
dask_worker.tensorflow_server = server
dask_worker.tensorflow_queue = Queue()
return 'OK'
@gen.coroutine
def _start_tensorflow(client, **job_counts):
info = yield client.scheduler.identity()
if not info['workers']:
return
if not job_counts:
job_counts = {'worker': len(info['workers'])}
if sum(job_counts.values()) > len(info['workers']):
raise ValueError("Dask cluster not large enough."
"Need %d workers, have %d"
% (sum(job_counts.values()), len(info['workers'])))
ports = defaultdict(lambda: 2221)
tf_spec = {job_name: [] for job_name in job_counts}
dask_spec = {job_name: [] for job_name in job_counts}
job_names = {}
task_index = {}
workers = iter(info['workers'])
for job_name, count in job_counts.items():
for i in range(count):
w = next(workers)
host = w.split('://')[-1].rsplit(':')[0]
ports[host] += 1
tf_name = '%s:%d' % (host, ports[host])
tf_spec[job_name].append(tf_name)
dask_spec[job_name].append(w)
task_index[w] = i
job_names[w] = job_name
tf_spec = tf.train.ClusterSpec(tf_spec)
resp = yield {w: client._run(start_and_attach_server, tf_spec,
job_name=job_names[w],
task_index=task_index[w],
workers=[w]) for w in task_index}
resp = merge(resp.values())
if not all(v == 'OK' for v in resp.values()):
raise ValueError("Setup did not succeed")
raise gen.Return((tf_spec, dask_spec))
def start_tensorflow(client, **kwargs):
""" Start Tensorflow on Dask Cluster
This launches Tensorflow Servers alongside Dask workers
Examples
--------
>>> client = Client('dask-scheduler-address:8786')
>>> tf_spec, dask_spec = start_tensorflow(client)
>>> tf_spec.as_dict()
{'worker': ['192.168.1.100:2222', '192.168.1.101:2222']}
Specify desired number of jobs types as keyword args
>>> tf_spec, dask_spec = start_tensorflow(client, ps=2, worker=4)
>>> tf_spec.as_dict()
{'worker': ['192.168.1.100:2222', '192.168.1.101:2222',
'192.168.1.102:2222', '192.168.1.103:2222'],
'ps': ['192.168.1.104:2222', '192.168.1.105:2222']}
"""
return sync(client.loop, _start_tensorflow, client, **kwargs)
|
the-stack_106_15022
|
# -*- coding: utf-8 -*-
"""
@inproceedings{DBLP:conf/iclr/GordonBBNT19,
author = {Jonathan Gordon and
John Bronskill and
Matthias Bauer and
Sebastian Nowozin and
Richard E. Turner},
title = {Meta-Learning Probabilistic Inference for Prediction},
booktitle = {7th International Conference on Learning Representations, {ICLR} 2019,
New Orleans, LA, USA, May 6-9, 2019},
year = {2019},
url = {https://openreview.net/forum?id=HkxStoC5F7}
}
https://openreview.net/forum?id=HkxStoC5F7
Adapted from https://github.com/Gordonjo/versa.
"""
import torch
import torch.nn as nn
from core.utils import accuracy
from .meta_model import MetaModel
class Predictor(nn.Module):
def __init__(self, feat_dim, hid_dim, out_dim):
super(Predictor, self).__init__()
self.layers = nn.Sequential(
nn.Linear(feat_dim, hid_dim),
nn.ELU(),
nn.Linear(hid_dim, hid_dim),
nn.ELU(),
nn.Linear(hid_dim, out_dim),
)
def forward(self, x):
out = self.layers(x)
return out
class VERSALayer(nn.Module):
def __init__(self, sample_num):
super(VERSALayer, self).__init__()
self.sample_num = sample_num
self.loss_func = nn.CrossEntropyLoss(reduction="none")
def forward(
self,
way_num,
query_feat,
query_target,
weight_mean,
weight_logvar,
bias_mean,
bias_logvar,
):
query_target = query_target.contiguous().reshape(-1)
episode_size = query_feat.size(0)
logits_mean_query = torch.matmul(query_feat, weight_mean) + bias_mean
logits_log_var_query = torch.log(
torch.matmul(query_feat ** 2, torch.exp(weight_logvar)) + torch.exp(bias_logvar)
)
logits_sample_query = (
self.sample_normal(logits_mean_query, logits_log_var_query, self.sample_num)
.contiguous()
.reshape(-1, way_num)
)
query_label_tiled = query_target.repeat(self.sample_num)
loss = -self.loss_func(logits_sample_query, query_label_tiled)
# FIXME nan
loss = (
loss.contiguous()
.reshape(episode_size, self.sample_num, -1)
.permute([1, 0, 2])
.contiguous()
.reshape(self.sample_num, -1)
)
task_score = torch.logsumexp(loss, dim=0) - torch.log(
torch.as_tensor(self.sample_num, dtype=torch.float).to(query_feat.device)
)
# loss = -torch.mean(task_score, dim=0)
logits_sample_query = logits_sample_query.contiguous().reshape(self.sample_num, -1, way_num)
averaged_prediction = torch.logsumexp(logits_sample_query, dim=0) - torch.log(
torch.as_tensor(self.sample_num, dtype=torch.float).to(query_feat.device)
)
return averaged_prediction, task_score
def sample_normal(self, mu, log_variance, num_samples):
shape = torch.cat([torch.as_tensor([num_samples]), torch.as_tensor(mu.size())])
eps = torch.randn(shape.cpu().numpy().tolist()).to(log_variance.device)
return mu + eps * torch.sqrt(torch.exp(log_variance))
class VERSA(MetaModel):
def __init__(self, feat_dim, sample_num, d_theta=256, drop_rate=0.0, **kwargs):
super(VERSA, self).__init__(**kwargs)
self.feat_dim = feat_dim
self.sample_num = sample_num
self.h = nn.Sequential(
nn.Linear(feat_dim, d_theta), nn.BatchNorm1d(d_theta), nn.ReLU(), nn.Dropout(drop_rate)
)
self.weight_mean = Predictor(d_theta, d_theta, d_theta)
self.weight_logvar = Predictor(d_theta, d_theta, d_theta)
self.bias_mean = Predictor(d_theta, d_theta, 1)
self.bias_logvar = Predictor(d_theta, d_theta, 1)
self.head = VERSALayer(sample_num)
def set_forward(self, batch):
image, global_target = batch
image = image.to(self.device)
feat = self.emb_func(image)
feat = self.h(feat)
support_feat, query_feat, support_target, query_target = self.split_by_episode(feat, mode=1)
episode_size = support_feat.size(0)
query_target = query_target.contiguous().reshape(episode_size, -1)
class_feat = torch.mean(
support_feat.contiguous().reshape(episode_size, self.way_num, self.shot_num, -1),
dim=2,
keepdim=False,
)
weight_mean = self.weight_mean(class_feat).permute((0, 2, 1))
weight_logvar = self.weight_logvar(class_feat).permute((0, 2, 1))
bias_mean = self.bias_mean(class_feat).permute((0, 2, 1))
bias_logvar = self.bias_logvar(class_feat).permute((0, 2, 1))
output, _ = self.head(
self.way_num,
query_feat,
query_target,
weight_mean,
weight_logvar,
bias_mean,
bias_logvar,
)
acc = accuracy(output, query_target.reshape(-1))
return output, acc
def set_forward_loss(self, batch):
image, global_target = batch
image = image.to(self.device)
feat = self.emb_func(image)
feat = self.h(feat)
support_feat, query_feat, support_target, query_target = self.split_by_episode(feat, mode=1)
episode_size = support_feat.size(0)
query_target = query_target.contiguous().reshape(episode_size, -1)
class_feat = torch.mean(
support_feat.contiguous().reshape(episode_size, self.way_num, self.shot_num, -1),
dim=2,
keepdim=False,
)
weight_mean = self.weight_mean(class_feat).permute((0, 2, 1))
weight_logvar = self.weight_logvar(class_feat).permute((0, 2, 1))
bias_mean = self.bias_mean(class_feat).permute((0, 2, 1))
bias_logvar = self.bias_logvar(class_feat).permute((0, 2, 1))
output, task_score = self.head(
self.way_num,
query_feat,
query_target,
weight_mean,
weight_logvar,
bias_mean,
bias_logvar,
)
acc = accuracy(output, query_target.reshape(-1))
loss = -torch.mean(task_score, dim=0)
return output, acc, loss
def set_forward_adaptation(self, *args, **kwargs):
raise NotImplementedError
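# Illustrative check (not part of the original project): sample_normal above is the standard
# reparameterization trick, mu + eps * sigma with eps ~ N(0, I). Call this manually to verify
# the sampled shape; the sizes used here are arbitrary.
def _sample_normal_shape_check():
    layer = VERSALayer(sample_num=10)
    mu = torch.zeros(2, 3)
    log_variance = torch.zeros(2, 3)  # i.e. unit variance
    samples = layer.sample_normal(mu, log_variance, num_samples=1000)
    assert samples.shape == (1000, 2, 3)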
|
the-stack_106_15024
|
#!/usr/bin/env python
import logging
from optparse import OptionParser
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django import contrib
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TransactionTestCase, TestCase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from django.utils._os import upath
from django.utils import six
warnings.simplefilter("default", RemovedInDjango19Warning)
warnings.simplefilter("default", RemovedInDjango20Warning)
CONTRIB_MODULE_PATH = 'django.contrib'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(upath(contrib.__file__))
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
SUBDIRS_TO_SKIP = [
'data',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.admindocs',
'django.contrib.staticfiles',
'django.contrib.humanize',
'staticfiles_tests',
'staticfiles_tests.apps.test',
'staticfiles_tests.apps.no_label',
'servers.another_app',
]
def get_test_modules():
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
(CONTRIB_MODULE_PATH, CONTRIB_DIR)
]
if HAS_SPATIAL_DB:
discovery_paths.append(
('django.contrib.gis.tests', os.path.join(CONTRIB_DIR, 'gis', 'tests'))
)
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
f.startswith('sql') or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
if not connection.vendor == 'postgresql' and f == 'postgres_tests':
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')
if bits[:2] == ['django', 'contrib']:
bits = bits[:3]
else:
bits = bits[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TEMP_DIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TEMP_DIR))
except OSError:
print('Failed to remove temp directory: %s' % TEMP_DIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, test_labels):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
)
# Catch warnings thrown in test DB setup -- remove in Django 1.9
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
"Custom SQL location '<app_label>/models/sql' is deprecated, "
"use '<app_label>/sql' instead.",
RemovedInDjango19Warning
)
failures = test_runner.run_tests(
test_labels or get_installed(), extra_tests=extra_tests)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(int(options.verbosity), test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(int(options.verbosity), test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
usage = "%prog [options] [module module module ...]"
parser = OptionParser(usage=usage)
parser.add_option(
'-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all '
'output')
parser.add_option(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_option(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_option(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
'variable will be used.')
parser.add_option(
'--bisect', action='store', dest='bisect', default=None,
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_option(
'--pair', action='store', dest='pair', default=None,
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_option(
'--liveserver', action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_option(
'--selenium', action='store_true', dest='selenium',
default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
options, args = parser.parse_args()
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, args)
elif options.pair:
paired_tests(options.pair, options, args)
else:
failures = django_tests(int(options.verbosity), options.interactive,
options.failfast, args)
if failures:
sys.exit(bool(failures))
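# Example invocations (illustrative; upstream this script is named runtests.py, and the
# test labels shown below are placeholders -- use any app labels discovered above):
#   python runtests.py --settings=test_sqlite basic
#   python runtests.py --settings=test_sqlite --bisect=model_forms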
|
the-stack_106_15025
|
"""
"""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
entry_points={
'console_scripts': [
'run=pet_ct.run:run',
'create=pet_ct.run:create',
'connect=pet_ct.run:connect'
]
},
name="fdg-pet-ct",
version="0.0.1",
author="Geoff Angus and Sabri Eyuboglu",
author_email="[email protected]",
description="Research software for AIMI fdg-pet-ct project.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/seyuboglu/fdg-pet-ct",
packages=setuptools.find_packages(include=['pet_ct']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'torch', 'torchvision', 'h5py', 'numpy', 'pandas', 'scipy', 'scikit-learn', 'statsmodels',
'opencv-python', 'pydicom', 'tqdm', 'Pillow', 'click', 'matplotlib', 'networkx', 'jsmin',
'ipywidgets', 'nltk', 'sentencepiece', 'plotly', 'tensorboardX', 'pytorch-pretrained-bert',
'snorkel-metal', 'py-rouge', 'seaborn', 'colorlover'
]
)
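# Typical development install (illustrative): run `pip install -e .` from the repository
# root; the `run`, `create`, and `connect` console scripts declared in entry_points above
# then become available on the command line.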
|
the-stack_106_15026
|
"""
This algorithm tries to match the pattern at every position of the
main string. If the pattern is found at position i, that position is
appended to the answer, and the search continues from position i+1.
Complexity : O(n*m)
n = length of the main string
m = length of the pattern string
"""
def naivePatternSearch(mainString,pattern):
patLen=len(pattern)
strLen=len(mainString)
position=[]
for i in range(strLen-patLen+1):
match_found=True
for j in range(patLen):
if mainString[i+j]!=pattern[j]:
match_found=False
break
if match_found:
position.append(i)
return position
mainString="ABAAABCDBBABCDDEBCABC"
pattern="ABC"
position=naivePatternSearch(mainString,pattern)
print("Pattern found in position ")
for x in position:
print(x)
|
the-stack_106_15028
|
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1 / (1 + np.exp(-x))
return s
def load_planar_dataset():
np.random.seed(1)
m = 400 # number of examples
N = int(m / 2) # number of points per class
D = 2 # dimensionality
X = np.zeros((m, D)) # data matrix where each row is a single example
Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 4 # maximum ray of the flower
for j in range(2):
ix = range(N * j, N * (j + 1))
t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2 # theta
r = a * np.sin(4 * t) + np.random.randn(N) * 0.2 # radius
X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def load_extra_datasets():
N = 200
noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2,
n_classes=2, shuffle=True, random_state=None)
no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
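# Illustrative usage (a sketch mirroring the course exercise these helpers accompany;
# the choice of classifier here is an assumption, not part of this module): fit a plain
# logistic regression on the flower dataset and plot its linear decision boundary.
if __name__ == '__main__':
    X, Y = load_planar_dataset()                      # X: (2, 400), Y: (1, 400)
    clf = sklearn.linear_model.LogisticRegressionCV()
    clf.fit(X.T, Y.ravel())
    plot_decision_boundary(lambda grid: clf.predict(grid), X, Y.ravel())
    plt.title("Logistic Regression on the flower dataset")
    plt.show()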
|
the-stack_106_15029
|
"""
This file implements a wrapper for facilitating compatibility with OpenAI gym.
This is useful when using these environments with code that assumes a gym-like
interface.
"""
import numpy as np
from gym import spaces
from robosuite.wrappers import Wrapper
import robosuite.utils.transform_utils as T
class GymWrapper(Wrapper):
env = None
def __init__(self, env, keys=None, generalized_goal=False):
"""
Initializes the Gym wrapper.
Args:
env (MujocoEnv instance): The environment to wrap.
keys (list of strings): If provided, each observation will
consist of concatenated keys from the wrapped environment's
observation dictionary. Defaults to robot-state and object-state.
"""
self.env = env
#print("mujoco env obs size: ", self.env._get_observation().keys())
#countingstates = 0
#for kay in self.env._get_observation().keys():
# countingstates += self.env._get_observation()[kay].shape[0]
# print(kay, self.env._get_observation()[kay].shape[0] )
#print("number of counted states: ", countingstates)
if keys is None:
assert self.env.use_object_obs, "Object observations need to be enabled."
keys = ["robot-state", "object-state"]
if generalized_goal:
keys.append("lift_reach_reward")
self.keys = keys
# set up observation and action spaces
flat_ob = self._flatten_obs(self.env.reset(), verbose=True)
self.obs_dim = flat_ob.size
#print("flattened dimensions of fed in obs: ",self.obs_dim)
high = np.inf * np.ones(self.obs_dim)
low = -high
self.observation_space = spaces.Box(low=low, high=high)
#print("# of joint positions and # of joint vel and # of gripper joint pos eef pos and eef quat: \n ", self.env._ref_joint_pos_indexes , self.env._ref_joint_vel_indexes, self.env._ref_gripper_joint_pos_indexes, self.env.sim.data.site_xpos[self.env.eef_site_id], T.convert_quat(
# self.env.sim.data.get_body_xquat("right_hand"), to="xyzw"
# ) )
#print("object state: cube_pos, cube_quat, gripper to cube dist : \n",
# np.array(self.env.sim.data.body_xpos[self.env.cube_body_id]) ,
# T.convert_quat(
# np.array(self.sim.data.body_xquat[self.cube_body_id]), to="xyzw"
# ),
# np.array(self.sim.data.site_xpos[self.eef_site_id]) - np.array(self.sim.data.body_xpos[self.cube_body_id])
# )
#print("gym wrapper obs space size: ",self.observation_space.shape) # for debugging, ends up as 40
low, high = self.env.action_spec
self.action_space = spaces.Box(low=low, high=high)
#print("gym wrapper high and low values of env: ",high , low)
# Set up a reward range, seed, spec and metadata for compatibility with baseline
self.reward_range = (-float('inf'), float('inf'))
self.metadata = {'render.modes': []} # figure out if this is useful at a later point
self.spec = None
def _flatten_obs(self, obs_dict, verbose=False):
"""
Filters keys of interest out and concatenate the information.
Args:
obs_dict: ordered dictionary of observations
"""
ob_lst = []
for key in obs_dict:
if key in self.keys:
if verbose:
print("adding key: {}".format(key))
ob_lst.append(obs_dict[key])
return np.concatenate(ob_lst)
def reset(self):
ob_dict = self.env.reset()
return self._flatten_obs(ob_dict)
def step(self, action):
# Forward the action to the wrapped environment; note that no clipping to the action space is applied here.
ob_dict, reward, done, info = self.env.step(action)
return self._flatten_obs(ob_dict), reward, done, info
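# Hypothetical usage sketch -- the environment name and constructor options below are
# assumptions about the robosuite version this wrapper targets; adjust them to the
# installed robosuite API before running.
if __name__ == "__main__":
    import robosuite
    demo_env = GymWrapper(
        robosuite.make(
            "SawyerLift",
            has_renderer=False,
            use_camera_obs=False,
            use_object_obs=True,
            reward_shaping=True,
        )
    )
    obs = demo_env.reset()
    obs, reward, done, info = demo_env.step(demo_env.action_space.sample())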
|
the-stack_106_15030
|
from dnnv.properties import *
import numpy as np
VAE = Network("VAE")
DNN = Network("DNN")
N = DNN[2:].compose(VAE)
N_prob_coll = N[:-2, 1]
N_steer_angle = N[:-1, 0]
logit = lambda x: np.log(x / (1 - x))
P_coll_min = logit(0.2)
P_coll_max = logit(0.3)
steer_max = 20 * np.pi / 180
Forall(
x,
Implies(
And(-3 <= x <= 3, P_coll_min < N_prob_coll(x) <= P_coll_max),
-steer_max <= N_steer_angle(x) <= steer_max,
),
)
|
the-stack_106_15033
|
# -*- coding: utf-8 -*-
"""Decorators for labeling test objects.
Decorators that merely return a modified version of the original function
object are straightforward. Decorators that return a new function object need
to use nose.tools.make_decorator(original_function)(decorator) in returning the
decorator, in order to preserve metadata such as function name, setup and
teardown functions and so on - see nose.tools for more information.
This module provides a set of useful decorators meant to be ready to use in
your own tests. See the bottom of the file for the ready-made ones, and if you
find yourself writing a new one that may be of generic use, add it here.
Included decorators:
Lightweight testing that remains unittest-compatible.
- An @as_unittest decorator can be used to tag any normal parameter-less
function as a unittest TestCase. Then, both nose and normal unittest will
recognize it as such. This will make it easier to migrate away from Nose if
we ever need/want to while maintaining very lightweight tests.
NOTE: This file contains IPython-specific decorators. Using the machinery in
IPython.external.decorators, we import either numpy.testing.decorators if numpy is
available, OR use equivalent code in IPython.external._decorators, which
we've copied verbatim from numpy.
Authors
-------
- Fernando Perez <[email protected]>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
import sys
import os
import tempfile
import unittest
# Third-party imports
# This is Michele Simionato's decorator module, kept verbatim.
from IPython.external.decorator import decorator
# Expose the unittest-driven decorators
from .ipunittest import ipdoctest, ipdocstring
# Grab the numpy-specific decorators which we keep in a file that we
# occasionally update from upstream: decorators.py is a copy of
# numpy.testing.decorators, we expose all of it here.
from IPython.external.decorators import *
# For onlyif_cmd_exists decorator
from IPython.utils.process import is_cmd_found
from IPython.utils.py3compat import string_types
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Simple example of the basic idea
def as_unittest(func):
"""Decorator to make a simple function into a normal test via unittest."""
class Tester(unittest.TestCase):
def test(self):
func()
Tester.__name__ = func.__name__
return Tester
# Utility functions
def apply_wrapper(wrapper,func):
"""Apply a wrapper to a function for decoration.
This mixes Michele Simionato's decorator tool with nose's make_decorator,
to apply a wrapper in a decorator so that all nose attributes, as well as
function signature and other properties, survive the decoration cleanly.
This will ensure that wrapped functions can still be well introspected via
IPython, for example.
"""
import nose.tools
return decorator(wrapper,nose.tools.make_decorator(func)(wrapper))
def make_label_dec(label,ds=None):
"""Factory function to create a decorator that applies one or more labels.
Parameters
----------
label : string or sequence
One or more labels that will be applied by the decorator to the functions
it decorates. Labels are attributes of the decorated function with their
value set to True.
ds : string
An optional docstring for the resulting decorator. If not given, a
default docstring is auto-generated.
Returns
-------
A decorator.
Examples
--------
A simple labeling decorator:
>>> slow = make_label_dec('slow')
>>> slow.__doc__
"Labels a test as 'slow'."
And one that uses multiple labels and a custom docstring:
>>> rare = make_label_dec(['slow','hard'],
... "Mix labels 'slow' and 'hard' for rare tests.")
>>> rare.__doc__
"Mix labels 'slow' and 'hard' for rare tests."
Now, let's test using this one:
>>> @rare
... def f(): pass
...
>>>
>>> f.slow
True
>>> f.hard
True
"""
if isinstance(label, string_types):
labels = [label]
else:
labels = label
# Validate that the given label(s) are OK for use in setattr() by doing a
# dry run on a dummy function.
tmp = lambda : None
for label in labels:
setattr(tmp,label,True)
# This is the actual decorator we'll return
def decor(f):
for label in labels:
setattr(f,label,True)
return f
# Apply the user's docstring, or autogenerate a basic one
if ds is None:
ds = "Labels a test as %r." % label
decor.__doc__ = ds
return decor
# Inspired by numpy's skipif, but uses the full apply_wrapper utility to
# preserve function metadata better and allows the skip condition to be a
# callable.
def skipif(skip_condition, msg=None):
''' Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if callable(skip_condition):
skip_val = skip_condition
else:
skip_val = lambda : skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None: out = 'Test skipped due to test condition.'
else: out = msg
return "Skipping test: %s. %s" % (func.__name__,out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual generator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
# A version with the condition set to true, common case just to attach a message
# to a skip decorator
def skip(msg=None):
"""Decorator factory - mark a test function for skipping from test suite.
Parameters
----------
msg : string
Optional message to be added.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised, with the optional message added.
"""
return skipif(True,msg)
def onlyif(condition, msg):
"""The reverse from skipif, see skipif for details."""
if callable(condition):
skip_condition = lambda : not condition()
else:
skip_condition = lambda : not condition
return skipif(skip_condition, msg)
#-----------------------------------------------------------------------------
# Utility functions for decorators
def module_not_available(module):
"""Can module be imported? Returns true if module does NOT import.
This is used to make a decorator to skip tests that require module to be
available, but delay the 'import numpy' to test execution time.
"""
try:
mod = __import__(module)
mod_not_avail = False
except ImportError:
mod_not_avail = True
return mod_not_avail
def decorated_dummy(dec, name):
"""Return a dummy function decorated with dec, with the given name.
Examples
--------
import IPython.testing.decorators as dec
setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__)
"""
dummy = lambda: None
dummy.__name__ = name
return dec(dummy)
#-----------------------------------------------------------------------------
# Decorators for public use
# Decorators to skip certain tests on specific platforms.
skip_win32 = skipif(sys.platform == 'win32',
"This test does not run under Windows")
skip_linux = skipif(sys.platform.startswith('linux'),
"This test does not run under Linux")
skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
# Decorators to skip tests if not on specific platforms.
skip_if_not_win32 = skipif(sys.platform != 'win32',
"This test only runs under Windows")
skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
"This test only runs under Linux")
skip_if_not_osx = skipif(sys.platform != 'darwin',
"This test only runs under OSX")
_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
os.environ.get('DISPLAY', '') == '')
_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
# not a decorator itself, returns a dummy function to be used as setup
def skip_file_no_x11(name):
return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None
# Other skip decorators
# generic skip without module
skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
skipif_not_numpy = skip_without('numpy')
skipif_not_matplotlib = skip_without('matplotlib')
skipif_not_sympy = skip_without('sympy')
skip_known_failure = knownfailureif(True,'This test is known to fail')
known_failure_py3 = knownfailureif(sys.version_info[0] >= 3,
'This test is known to fail on Python 3.')
# A null 'decorator', useful to make more readable code that needs to pick
# between different decorators based on OS or other conditions
null_deco = lambda f: f
# Some tests only run where we can use unicode paths. Note that we can't just
# check os.path.supports_unicode_filenames, which is always False on Linux.
try:
f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
except UnicodeEncodeError:
unicode_paths = False
else:
unicode_paths = True
f.close()
onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
"where we can use unicode in filenames."))
def onlyif_cmds_exist(*commands):
"""
Decorator to skip test when at least one of `commands` is not found.
"""
for cmd in commands:
try:
if not is_cmd_found(cmd):
return skip("This test runs only if command '{0}' "
"is installed".format(cmd))
except ImportError as e:
# is_cmd_found uses pywin32 on windows, which might not be available
if sys.platform == 'win32' and 'pywin32' in str(e):
return skip("This test runs only if pywin32 and command '{0}' "
"is installed".format(cmd))
raise e
return null_deco
|
the-stack_106_15035
|
import re
import json
from collections import defaultdict
from mrjob.job import MRJob
from mrjob.protocol import RawValueProtocol
class MR_EThOS_WF(MRJob):
'''
Takes the EThOS data and replaces content with a word frequency map.
'''
# Only output the values (no key)
OUTPUT_PROTOCOL = RawValueProtocol
# A simple mapper that parses each document and reduces the content to word frequencies
def mapper(self, _, line):
doc = json.loads(line)
if 'content' in doc:
d = defaultdict(int)
for word in doc['content'].split():
# Drop anything that is pure punctuation:
if re.match(r'^\W+$', word):
continue
word = word.lower()
d[word] += 1
# Filter out low-frequency words:
wf = {}
for word in d.keys():
if d[word] > 1:
wf[word] = d[word]
# Store the word frequencies instead of the content.
doc.pop('content')
doc['word_freq'] = wf
yield None, json.dumps(doc)
def reducer(self, key, values):
for value in values:
yield None, value
if __name__ == '__main__':
MR_EThOS_WF.run()
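# Typical local invocation for an mrjob job like this one (the file names are illustrative):
#   python mr_ethos_wf.py ethos_docs.jsonl > ethos_word_freq.jsonl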
|
the-stack_106_15036
|
import paddlex as pdx
from paddlex import transforms as T
# Download and extract the vegetable classification dataset
veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
pdx.utils.download_and_decompress(veg_dataset, path='./')
# Define the transforms used for training and validation
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/transforms/operators.py
train_transforms = T.Compose(
[T.RandomCrop(crop_size=224), T.RandomHorizontalFlip(), T.Normalize()])
eval_transforms = T.Compose([
T.ResizeByShort(short_size=256), T.CenterCrop(crop_size=224), T.Normalize()
])
# Define the datasets used for training and validation
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/datasets/imagenet.py#L21
train_dataset = pdx.datasets.ImageNet(
data_dir='vegetables_cls',
file_list='vegetables_cls/train_list.txt',
label_list='vegetables_cls/labels.txt',
transforms=train_transforms,
shuffle=True)
eval_dataset = pdx.datasets.ImageNet(
data_dir='vegetables_cls',
file_list='vegetables_cls/val_list.txt',
label_list='vegetables_cls/labels.txt',
transforms=eval_transforms)
# Initialize the model and start training
# Training metrics can be visualized with VisualDL, see https://github.com/PaddlePaddle/PaddleX/tree/release/2.0-rc/tutorials/train#visualdl可视化训练指标
num_classes = len(train_dataset.labels)
model = pdx.cls.MobileNetV3_large(num_classes=num_classes)
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/95c53dec89ab0f3769330fa445c6d9213986ca5f/paddlex/cv/models/classifier.py#L153
# Parameter descriptions and tuning guide: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
model.train(
num_epochs=10,
train_dataset=train_dataset,
train_batch_size=32,
eval_dataset=eval_dataset,
lr_decay_epochs=[4, 6, 8],
learning_rate=0.025,
save_dir='output/mobilenet_v2',
use_vdl=True)
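# Illustrative follow-up (the checkpoint and image paths below are assumptions; PaddleX
# saves a 'best_model' directory under save_dir during training): reload the best weights
# and run a single-image prediction.
best_model = pdx.load_model('output/mobilenet_v2/best_model')
print(best_model.predict('vegetables_cls/bocai/100.jpg'))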
|
the-stack_106_15038
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
import mock
from h.views.api.helpers import links
from h.views.api.helpers.angular import AngularRouteTemplater
class TestServiceLink(object):
@pytest.mark.parametrize(
"name,route_name,method,description,expected_method",
[
("Create Foo", "foo.create", "POST", None, "POST"),
("Create Foo", "foo.create", ("POST", "PATCH"), None, "POST"),
("Create Foo", "foo.create", "GET", "Forever and a Day", "GET"),
],
)
def test_primary_method_returns_correct_HTTP_method(
self, name, route_name, method, description, expected_method
):
assert (
links.ServiceLink(name, route_name, method, description).primary_method()
== expected_method
)
class TestRegisterLink(object):
def test_it_creates_attrs_on_registry_if_not_present(
self, versions, pyramid_config
):
links.register_link(_service_link(), versions, pyramid_config.registry)
assert hasattr(pyramid_config.registry, "api_links")
assert "v1" in pyramid_config.registry.api_links
assert "v2" in pyramid_config.registry.api_links
def test_it_registers_link_for_every_version(self, versions, pyramid_config):
link = _service_link()
links.register_link(link, versions, pyramid_config.registry)
assert link in pyramid_config.registry.api_links["v1"]
assert link in pyramid_config.registry.api_links["v2"]
def test_it_does_not_register_link_for_unsupported_versions(
self, versions, pyramid_config
):
first_service = _service_link()
second_service = _service_link("doodad")
links.register_link(first_service, versions, pyramid_config.registry)
links.register_link(second_service, ["v1"], pyramid_config.registry)
assert first_service in pyramid_config.registry.api_links["v2"]
assert second_service not in pyramid_config.registry.api_links["v2"]
class TestFormatNestedLinks(object):
def test_it_formats_link_objects_as_dicts(self, templater):
link = _service_link(name="flat")
formatted = links.format_nested_links([link], "v1", templater)
assert "flat" in formatted
assert formatted["flat"] == {
"method": link.primary_method(),
"url": templater.route_template(link.route_name),
"desc": link.description,
}
def test_it_nests_links_based_on_service_name_split_on_periods(self, templater):
api_links = [
_service_link(name="1"),
_service_link(name="1.2"),
_service_link(name="1.2.3"),
_service_link(name="1.2.A"),
_service_link(name="1.B"),
]
formatted = links.format_nested_links(api_links, "v1", templater)
assert "1" in formatted
assert "2" in formatted["1"]
assert "B" in formatted["1"]
assert "3" in formatted["1"]["2"]
assert "A" in formatted["1"]["2"]
def _service_link(name="api.example_service"):
return links.ServiceLink(
name="name",
route_name="api.example_service",
method="POST",
description="Create a new Foo",
)
@pytest.fixture
def templater():
return mock.create_autospec(AngularRouteTemplater, spec_set=True, instance=True)
@pytest.fixture
def versions():
return ["v1", "v2"]
|
the-stack_106_15039
|
# pyOCD debugger
# Copyright (c) 2015-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ..core.target import Target
from ..core.options import OPTIONS_INFO
from ..utility.compatibility import to_str_safe
LOG = logging.getLogger(__name__)
def split_command_line(cmd_line):
"""! @brief Split command line by whitespace, supporting quoted strings."""
result = []
if type(cmd_line) is str:
args = [cmd_line]
else:
args = cmd_line
for cmd in args:
state = 0
word = ''
open_quote = ''
for c in cmd:
if state == 0:
if c in (' ', '\t', '\r', '\n'):
if word:
result.append(word)
word = ''
elif c in ('"', "'"):
open_quote = c
state = 1
else:
word += c
elif state == 1:
if c == open_quote:
result.append(word)
word = ''
state = 0
else:
word += c
if word:
result.append(word)
return result
## Map of vector char characters to masks.
VECTOR_CATCH_CHAR_MAP = {
'h': Target.CATCH_HARD_FAULT,
'b': Target.CATCH_BUS_FAULT,
'm': Target.CATCH_MEM_FAULT,
'i': Target.CATCH_INTERRUPT_ERR,
's': Target.CATCH_STATE_ERR,
'c': Target.CATCH_CHECK_ERR,
'p': Target.CATCH_COPROCESSOR_ERR,
'r': Target.CATCH_CORE_RESET,
'a': Target.CATCH_ALL,
'n': Target.CATCH_NONE,
}
def convert_vector_catch(value):
"""! @brief Convert a vector catch string to a mask.
@exception ValueError Raised if an invalid vector catch character is encountered.
"""
# Make case insensitive.
value = to_str_safe(value).lower()
# Handle special vector catch options.
if value == 'all':
return Target.CATCH_ALL
elif value == 'none':
return Target.CATCH_NONE
# Convert options string to mask.
try:
return sum([VECTOR_CATCH_CHAR_MAP[c] for c in value])
except KeyError as e:
# Reraise an error with a more helpful message.
raise ValueError("invalid vector catch option '{}'".format(e.args[0]))
def convert_session_options(option_list):
"""! @brief Convert a list of session option settings to a dictionary."""
options = {}
if option_list is not None:
for o in option_list:
if '=' in o:
name, value = o.split('=', 1)
name = name.strip().lower()
value = value.strip()
else:
name = o.strip().lower()
value = None
# Check for and strip "no-" prefix before we validate the option name.
if (value is None) and (name.startswith('no-')):
name = name[3:]
had_no_prefix = True
else:
had_no_prefix = False
# Look for this option.
try:
info = OPTIONS_INFO[name]
except KeyError:
LOG.warning("ignoring unknown session option '%s'", name)
continue
# Handle bool options without a value specially.
if value is None:
if info.type is bool:
value = not had_no_prefix
else:
LOG.warning("non-boolean option '%s' requires a value", name)
continue
# Convert string value to option type.
elif info.type is bool:
value = value in ("true", "1", "yes", "on")
elif info.type is int:
try:
value = int(value, base=0)
except ValueError:
LOG.warning("invalid value for option '%s'", name)
continue
options[name] = value
return options
## Map to convert from reset type names to enums.
RESET_TYPE_MAP = {
'default': None,
'hw': Target.ResetType.HW,
'sw': Target.ResetType.SW,
'hardware': Target.ResetType.HW,
'software': Target.ResetType.SW,
'sw_sysresetreq': Target.ResetType.SW_SYSRESETREQ,
'sw_vectreset': Target.ResetType.SW_VECTRESET,
'sw_emulated': Target.ResetType.SW_EMULATED,
'sysresetreq': Target.ResetType.SW_SYSRESETREQ,
'vectreset': Target.ResetType.SW_VECTRESET,
'emulated': Target.ResetType.SW_EMULATED,
}
def convert_reset_type(value):
"""! @brief Convert a reset_type session option value to the Target.ResetType enum.
@param value The value of the reset_type session option.
@exception ValueError Raised if an unknown reset_type value is passed.
"""
value = value.lower()
if value not in RESET_TYPE_MAP:
raise ValueError("unexpected value for reset_type option ('%s')" % value)
return RESET_TYPE_MAP[value]
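# Illustrative self-check, not part of pyOCD itself: exercises the helpers above with
# made-up inputs. Call it manually after importing this module.
def _illustrative_selfcheck():
    assert split_command_line('reset halt "break main"') == ['reset', 'halt', 'break main']
    assert convert_vector_catch('hb') == (Target.CATCH_HARD_FAULT | Target.CATCH_BUS_FAULT)
    assert convert_reset_type('sysresetreq') is Target.ResetType.SW_SYSRESETREQ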
|
the-stack_106_15041
|
"""This file implements the gym environment of humanoid deepmimic using PyBullet.
"""
import math
import time
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import gym
from gym import spaces
from gym.utils import seeding
import random
import numpy as np
import pybullet
import pybullet_data
from pybullet_envs.deep_mimic.humanoid import Humanoid
from pkg_resources import parse_version
from pybullet_utils import bullet_client
from pybullet_envs.deep_mimic.motion_capture_data import MotionCaptureData
RENDER_HEIGHT = 360
RENDER_WIDTH = 480
class HumanoidDeepMimicGymEnv(gym.Env):
"""The gym environment for the humanoid deep mimic.
"""
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 100
}
def __init__(self,
urdf_root=pybullet_data.getDataPath(),
render=False):
"""Initialize the gym environment.
Args:
urdf_root: The path to the urdf data folder.
render: Whether to render the simulation.
Raises:
ValueError: If the urdf_version is not supported.
"""
# Set up logging.
self._urdf_root = urdf_root
self._observation = []
self._env_step_counter = 0
self._is_render = render
self._cam_dist = 1.0
self._cam_yaw = 0
self._cam_pitch = -30
self._ground_id = None
self._pybullet_client = None
self._humanoid = None
self._control_time_step = 8.*(1./240.)#0.033333
self.seed()
observation_high = (self._get_observation_upper_bound())
observation_low = (self._get_observation_lower_bound())
action_dim = 36
self._action_bound = 3.14 #todo: check this
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high, dtype=np.float32)
self.observation_space = spaces.Box(observation_low, observation_high, dtype=np.float32)
def close(self):
self._humanoid = None
self._pybullet_client.disconnect()
def reset(self):
if (self._pybullet_client==None):
if self._is_render:
self._pybullet_client = bullet_client.BulletClient(
connection_mode=pybullet.GUI)
else:
self._pybullet_client = bullet_client.BulletClient()
self._pybullet_client.setAdditionalSearchPath(pybullet_data.getDataPath())
self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_Y_AXIS_UP,1)
self._motion=MotionCaptureData()
motionPath = pybullet_data.getDataPath()+"/motions/humanoid3d_walk.txt"#humanoid3d_spinkick.txt"#/motions/humanoid3d_backflip.txt"
self._motion.Load(motionPath)
self._pybullet_client.configureDebugVisualizer(
self._pybullet_client.COV_ENABLE_RENDERING, 0)
self._pybullet_client.resetSimulation()
self._pybullet_client.setGravity(0,-9.8,0)
y2zOrn = self._pybullet_client.getQuaternionFromEuler([-1.57,0,0])
self._ground_id = self._pybullet_client.loadURDF(
"%s/plane.urdf" % self._urdf_root, [0,0,0], y2zOrn)
#self._pybullet_client.changeVisualShape(self._ground_id,-1,rgbaColor=[1,1,1,0.8])
#self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_PLANAR_REFLECTION,self._ground_id)
shift=[0,0,0]
self._humanoid = Humanoid(self._pybullet_client,self._motion,shift)
self._humanoid.Reset()
simTime = random.randint(0,self._motion.NumFrames()-2)
self._humanoid.SetSimTime(simTime)
self._initial_frame = simTime
pose = self._humanoid.InitializePoseFromMotionData()
self._humanoid.ApplyPose(pose, True, True, self._humanoid._humanoid,self._pybullet_client)
self._env_step_counter = 0
self._objectives = []
self._pybullet_client.resetDebugVisualizerCamera(
self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])
self._pybullet_client.configureDebugVisualizer(
self._pybullet_client.COV_ENABLE_RENDERING, 1)
return self._get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""Step forward the simulation, given the action.
Args:
action: A list of desired pose targets for the humanoid's actuated joints.
Returns:
observations: The humanoid state vector returned by Humanoid.GetState().
reward: The reward for the current state-action pair.
done: Whether the episode has ended.
info: A dictionary that stores diagnostic information.
Raises:
ValueError: The action dimension is not the same as the number of motors.
ValueError: The magnitude of actions is out of bounds.
"""
self._last_base_position = self._humanoid.GetBasePosition()
if self._is_render:
# Sleep, otherwise the computation takes less time than real time,
# which will make the visualization like a fast-forward video.
#time_spent = time.time() - self._last_frame_time
#self._last_frame_time = time.time()
#time_to_sleep = self._control_time_step - time_spent
#if time_to_sleep > 0:
# time.sleep(time_to_sleep)
base_pos = self._humanoid.GetBasePosition()
# Keep the previous orientation of the camera set by the user.
[yaw, pitch,
dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]
self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch,
base_pos)
self._humanoid.ApplyAction(action)
for s in range (8):
#print("step:",s)
self._pybullet_client.stepSimulation()
self._initial_frame = self._initial_frame + self._control_time_step
self._humanoid.SetSimTime(self._initial_frame)
reward = self._reward()
done = self._termination()
self._env_step_counter += 1
return np.array(self._get_observation()), reward, done, {}
def render(self, mode="rgb_array", close=False):
if mode == "human":
self._is_render = True
if mode != "rgb_array":
return np.array([])
base_pos = self._humanoid.GetBasePosition()
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=1)
proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(
fov=60,
aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
nearVal=0.1,
farVal=100.0)
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=RENDER_WIDTH,
height=RENDER_HEIGHT,
renderer=self._pybullet_client.ER_BULLET_HARDWARE_OPENGL,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix)
rgb_array = np.array(px)
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _termination(self):
if (self._humanoid):
term = self._humanoid.Terminates()
return term
return False
def _reward(self):
reward = 0
if (self._humanoid):
reward = self._humanoid.GetReward()
return reward
def get_objectives(self):
return self._objectives
@property
def objective_weights(self):
"""Accessor for the weights for all the objectives.
Returns:
List of floating points that corresponds to weights for the objectives in
the order that objectives are stored.
"""
return self._objective_weights
def _get_observation(self):
"""Get observation of this environment.
"""
observation = []
if (self._humanoid):
observation = self._humanoid.GetState()
else:
observation = [0]*197
self._observation = observation
return self._observation
def _get_observation_upper_bound(self):
"""Get the upper bound of the observation.
Returns:
The upper bound of an observation. See GetObservation() for the details
of each element of an observation.
"""
upper_bound = np.zeros(self._get_observation_dimension())
upper_bound[0] = 10 #height
upper_bound[1:107] = math.pi # Joint angle.
upper_bound[107:197] = 10 #joint velocity, check it
return upper_bound
def _get_observation_lower_bound(self):
"""Get the lower bound of the observation."""
return -self._get_observation_upper_bound()
def _get_observation_dimension(self):
"""Get the length of the observation list.
Returns:
The length of the observation list.
"""
return len(self._get_observation())
def configure(self, args):
pass
if parse_version(gym.__version__) < parse_version('0.9.6'):
_render = render
_reset = reset
_seed = seed
_step = step
_close = close
@property
def pybullet_client(self):
return self._pybullet_client
@property
def ground_id(self):
return self._ground_id
@ground_id.setter
def ground_id(self, new_ground_id):
self._ground_id = new_ground_id
@property
def env_step_counter(self):
return self._env_step_counter
|
the-stack_106_15048
|
import argparse
import glob
import json
import os
import shutil
from pathlib import Path
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import (
coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, rotate_non_max_suppression,
xyxy2xywh, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class, set_logging)
from utils.torch_utils import select_device, time_synchronized
def test(data,
weights=None,
batch_size=16,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
plots=True):
# Initialize/load model and set device
# Check whether test() is being called from train.py; if so, reuse the training device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
save_txt = opt.save_txt # save *.txt labels
if save_txt:
out = Path('inference/output')
if os.path.exists(out):
shutil.rmtree(out) # delete output folder
os.makedirs(out) # make new output folder
# Remove previous
# Remove previous test_batch*.jpg images (e.g. test_batch0_gt.jpg and test_batch0_pred.jpg)
for f in glob.glob(str(save_dir / 'test_batch*.jpg')):
os.remove(f)
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
# If the device is not CPU, convert the model from FP32 to FP16 to speed up the forward pass
half = device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
# Put the model in evaluation mode
model.eval()
with open(data) as f:
data = yaml.load(f, Loader=yaml.FullLoader) # model dict
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
    # Set IoU thresholds from 0.5 to 0.95 in steps of 0.05
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    # Number of IoU thresholds
    niou = iouv.numel()
# Dataloader
if not training:
        # Run a forward pass on an all-zeros tensor to check that inference works
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
        # Get the image paths
path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
        # Create the dataloader
        # Note that rect=True here: YOLOv5 evaluation is based on rectangular inference
dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt,
hyp=None, augment=False, cache=False, pad=0.5, rect=True)[0]
    # Initialize the number of images tested
seen = 0
    # Get the class names
names = model.names if hasattr(model, 'names') else model.module.names
"""
获取coco数据集的类别索引
这里要说明一下,coco数据集有80个类别(索引范围应该为0~79),
但是他的索引却属于0~90(笔者是通过查看coco数据测试集的json文件发现的,具体原因不知)
coco80_to_coco91_class()就是为了与上述索引对应起来,返回一个范围在0~90的索引数组
"""
coco91class = coco80_to_coco91_class()
    # Set the header string shown by the tqdm progress bar
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    # Initialize metrics and timers
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    # Initialize the test-set loss
loss = torch.zeros(4, device=device)
    # Initialize the JSON output list, statistics and AP containers
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        '''
        batch_i : index of the current batch
        img     : torch.Size([batch_size, 3, height, width])
        targets : torch.Size = (number of targets in this batch, [image index within the batch, class, xywh, theta])
        paths   : list of image paths, len(paths) == batch_size
        shapes  : original image shapes
        '''
img = img.to(device, non_blocking=True)
        # Images are also converted from Float32 to Float16
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
whwh = torch.Tensor([width, height, width, height]).to(device)
# Disable gradients
with torch.no_grad():
# Run model
t = time_synchronized()
            '''
            Output of the Detect layer: (z, x)
            if training:
                x list: [small_forward, medium_forward, large_forward], e.g. small_forward.size = (batch_size, 3 anchor scales, size1, size2, no)
            else:
                (z, x)
                z tensor: [small+medium+large inference], size = (batch_size, 3 * (small_size1*small_size2 + medium_size1*medium_size2 + large_size1*large_size2), no), in real image coordinates
                x list: [small_forward, medium_forward, large_forward], e.g. small_forward.size = (batch_size, 3 anchor scales, size1, size2, no)
            '''
inf_out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if training: # if model has loss hyperparameters
loss += compute_loss([x.float() for x in train_out], targets, model)[1][:4] # box, obj, cls, angle
# Run NMS
t = time_synchronized()
# output : size = (batch_size, num_conf_nms, [xywhθ,conf,classid]) θ∈[0,179]
#output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)
output = rotate_non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(output):
            '''
            targets : torch.Size = (number of targets in this batch, [image index within the batch, class, xywh, θ]), θ ∈ [0, 179]
            pred    : shape = (num_conf_nms, [xywhθ, conf, classid]), θ ∈ [0, 179]
            si      : index of the image within the batch
            '''
# labels: shape= (num, [class, xywh, θ])
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
seen += 1
if pred is None:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# # Append to text file
# if save_txt:
# gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
# x = pred.clone()
# x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1]) # to original
# for *xyxy, conf, cls in x:
# xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
# with open(str(out / Path(paths[si]).stem) + '.txt', 'a') as f:
# f.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format
# Clip boxes to image bounds
# clip_coords(pred, (height, width))
# Append to pycocotools JSON dictionary
# if save_json:
# # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
# image_id = Path(paths[si]).stem
# box = pred[:, :4].clone() # xyxy
# scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape
# box = xyxy2xywh(box) # xywh
# box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
# for p, b in zip(pred.tolist(), box.tolist()):
# jdict.append({'image_id': int(image_id) if image_id.isnumeric() else image_id,
# 'category_id': coco91class[int(p[5])],
# 'bbox': [round(x, 3) for x in b],
# 'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
# pred : shape=(num_conf_nms, [xywhθ,conf,classid]) θ∈[0,179]
# labels: shape= (num, [class, xywh, θ])
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0] # torch.size(num)
                # target boxes -> original shape
                tbox = labels[:, 1:5] * whwh  # torch.size(num, [xywh]), scaled to 1024*1024 (square, so w/h order does not matter)
#ttheta = labels[:, 5] # torch.size(num,[Θ])
# Per target class
                for cls in torch.unique(tcls_tensor):  # torch.unique drops duplicate entries and returns the sorted unique class values
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices
pi = (cls == pred[:, 6]).nonzero(as_tuple=False).view(-1) # prediction indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
#rious, i = rbox_iou(pred[:, :4], pred[:, 4].unsqueeze(1), tbox, ttheta.unsqueeze(1)).max(1) # best rious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 5].cpu(), pred[:, 6].cpu(), tcls))
# Plot images
if plots and batch_i < 1:
f = save_dir / ('test_batch%g_gt.jpg' % batch_i) # filename
plot_images(img, targets, paths, str(f), names) # ground truth
f = save_dir / ('test_batch%g_pred.jpg' % batch_i)
plot_images(img, output_to_target(output, width, height), paths, str(f), names) # predictions
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, fname=save_dir / 'precision-recall_curve.png')
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)  # [P, R, mAP@0.5, mAP@0.5:0.95]
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if verbose and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Save JSON
if save_json and len(jdict):
f = 'detections_val2017_%s_results.json' % \
(weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '') # filename
print('\nCOCO mAP with pycocotools... saving %s...' % f)
with open(f, 'w') as file:
json.dump(jdict, file)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO ground truth api
cocoDt = cocoGt.loadRes(f) # initialize COCO pred api
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.params.imgIds = imgIds # image IDs to evaluate
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
            map, map50 = cocoEval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print('ERROR: pycocotools unable to run: %s' % e)
# Return results
model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
"""
opt参数详解
weights:测试的模型权重文件
data:数据集配置文件,数据集路径,类名等
batch-size:前向传播时的批次, 默认32
img-size:输入图片分辨率大小, 默认640
conf-thres:筛选框的时候的置信度阈值, 默认0.001
iou-thres:进行NMS的时候的IOU阈值, 默认0.65
save-json:是否按照coco的json格式保存预测框,并且使用cocoapi做评估(需要同样coco的json格式的标签), 默认False
task:设置测试形式, 默认val, 具体可看下面代码解析注释
device:测试的设备,cpu;0(表示一个gpu设备cuda:0);0,1,2,3(多个gpu设备)
single-cls:数据集是否只有一个类别,默认False
augment:测试时是否使用TTA(test time augmentation), 默认False
merge:在进行NMS时,是否通过合并方式获得预测框, 默认False
verbose:是否打印出每个类别的mAP, 默认False
save-txt:是否以txt文件的形式保存模型预测的框坐标, 默认False
"""
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='../rotation-yolov5/runs/rotated_trainDOTA_0/weights/last.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=8, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=1024, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
parser.add_argument('--device', default='2,3', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
    # check_file verifies that the file exists
opt.data = check_file(opt.data) # check file
print(opt)
    # When task is 'val' or 'test', evaluate the validation/test set normally
if opt.task in ['val', 'test']: # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose)
    # When task == 'study', evaluate the yolov5 family (and yolov3-spp) models over a range of image sizes and plot the results
elif opt.task == 'study': # run over a range of settings and save/plot
for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
x = list(range(320, 800, 64)) # x axis
y = [] # y axis
for i in x: # img-size
print('\nRunning %s point %s...' % (f, i))
r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
# utils.general.plot_study_txt(f, x) # plot
|
the-stack_106_15049
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import itertools
import os
import re
import sys
import llnl.util.tty as tty
import spack.platforms
import spack.util.executable
from spack.build_environment import dso_suffix
from spack.operating_systems.mac_os import macos_sdk_path, macos_version
from spack import *
class Gcc(AutotoolsPackage, GNUMirrorPackage):
"""The GNU Compiler Collection includes front ends for C, C++, Objective-C,
Fortran, Ada, and Go, as well as libraries for these languages."""
homepage = 'https://gcc.gnu.org'
gnu_mirror_path = 'gcc/gcc-9.2.0/gcc-9.2.0.tar.xz'
git = 'git://gcc.gnu.org/git/gcc.git'
list_url = 'https://ftp.gnu.org/gnu/gcc/'
list_depth = 1
maintainers = ['michaelkuhn', 'alalazo']
version('master', branch='master')
version('11.2.0', sha256='d08edc536b54c372a1010ff6619dd274c0f1603aa49212ba20f7aa2cda36fa8b')
version('11.1.0', sha256='4c4a6fb8a8396059241c2e674b85b351c26a5d678274007f076957afa1cc9ddf')
version('10.3.0', sha256='64f404c1a650f27fc33da242e1f2df54952e3963a49e06e73f6940f3223ac344')
version('10.2.0', sha256='b8dd4368bb9c7f0b98188317ee0254dd8cc99d1e3a18d0ff146c855fe16c1d8c')
version('10.1.0', sha256='b6898a23844b656f1b68691c5c012036c2e694ac4b53a8918d4712ad876e7ea2')
version('9.4.0', sha256='c95da32f440378d7751dd95533186f7fc05ceb4fb65eb5b85234e6299eb9838e')
version('9.3.0', sha256='71e197867611f6054aa1119b13a0c0abac12834765fe2d81f35ac57f84f742d1')
version('9.2.0', sha256='ea6ef08f121239da5695f76c9b33637a118dcf63e24164422231917fa61fb206')
version('9.1.0', sha256='79a66834e96a6050d8fe78db2c3b32fb285b230b855d0a66288235bc04b327a0')
version('8.5.0', sha256='d308841a511bb830a6100397b0042db24ce11f642dab6ea6ee44842e5325ed50')
version('8.4.0', sha256='e30a6e52d10e1f27ed55104ad233c30bd1e99cfb5ff98ab022dc941edd1b2dd4')
version('8.3.0', sha256='64baadfe6cc0f4947a84cb12d7f0dfaf45bb58b7e92461639596c21e02d97d2c')
version('8.2.0', sha256='196c3c04ba2613f893283977e6011b2345d1cd1af9abeac58e916b1aab3e0080')
version('8.1.0', sha256='1d1866f992626e61349a1ccd0b8d5253816222cdc13390dcfaa74b093aa2b153')
version('7.5.0', sha256='b81946e7f01f90528a1f7352ab08cc602b9ccc05d4e44da4bd501c5a189ee661')
version('7.4.0', sha256='eddde28d04f334aec1604456e536416549e9b1aa137fc69204e65eb0c009fe51')
version('7.3.0', sha256='832ca6ae04636adbb430e865a1451adf6979ab44ca1c8374f61fba65645ce15c')
version('7.2.0', sha256='1cf7adf8ff4b5aa49041c8734bbcf1ad18cc4c94d0029aae0f4e48841088479a')
version('7.1.0', sha256='8a8136c235f64c6fef69cac0d73a46a1a09bb250776a050aec8f9fc880bebc17')
version('6.5.0', sha256='7ef1796ce497e89479183702635b14bb7a46b53249209a5e0f999bebf4740945')
version('6.4.0', sha256='850bf21eafdfe5cd5f6827148184c08c4a0852a37ccf36ce69855334d2c914d4')
version('6.3.0', sha256='f06ae7f3f790fbf0f018f6d40e844451e6bc3b7bc96e128e63b09825c1f8b29f')
version('6.2.0', sha256='9944589fc722d3e66308c0ce5257788ebd7872982a718aa2516123940671b7c5')
version('6.1.0', sha256='09c4c85cabebb971b1de732a0219609f93fc0af5f86f6e437fd8d7f832f1a351')
version('5.5.0', sha256='530cea139d82fe542b358961130c69cfde8b3d14556370b65823d2f91f0ced87')
version('5.4.0', sha256='608df76dec2d34de6558249d8af4cbee21eceddbcb580d666f7a5a583ca3303a')
version('5.3.0', sha256='b84f5592e9218b73dbae612b5253035a7b34a9a1f7688d2e1bfaaf7267d5c4db')
version('5.2.0', sha256='5f835b04b5f7dd4f4d2dc96190ec1621b8d89f2dc6f638f9f8bc1b1014ba8cad')
version('5.1.0', sha256='b7dafdf89cbb0e20333dbf5b5349319ae06e3d1a30bf3515b5488f7e89dca5ad')
version('4.9.4', sha256='6c11d292cd01b294f9f84c9a59c230d80e9e4a47e5c6355f046bb36d4f358092')
version('4.9.3', sha256='2332b2a5a321b57508b9031354a8503af6fdfb868b8c1748d33028d100a8b67e')
version('4.9.2', sha256='2020c98295856aa13fda0f2f3a4794490757fc24bcca918d52cc8b4917b972dd')
version('4.9.1', sha256='d334781a124ada6f38e63b545e2a3b8c2183049515a1abab6d513f109f1d717e')
version('4.8.5', sha256='22fb1e7e0f68a63cee631d85b20461d1ea6bda162f03096350e38c8d427ecf23')
version('4.8.4', sha256='4a80aa23798b8e9b5793494b8c976b39b8d9aa2e53cd5ed5534aff662a7f8695')
version('4.7.4', sha256='92e61c6dc3a0a449e62d72a38185fda550168a86702dea07125ebd3ec3996282')
version('4.6.4', sha256='35af16afa0b67af9b8eb15cafb76d2bc5f568540552522f5dc2c88dd45d977e8')
version('4.5.4', sha256='eef3f0456db8c3d992cbb51d5d32558190bc14f3bc19383dd93acc27acc6befc')
# We specifically do not add 'all' variant here because:
# (i) Ada, Go, Jit, and Objective-C++ are not default languages.
# In that respect, the name 'all' is rather misleading.
# (ii) Languages other than c,c++,fortran are prone to configure bug in GCC
# For example, 'java' appears to ignore custom location of zlib
# (iii) meaning of 'all' changes with GCC version, i.e. 'java' is not part
# of gcc7. Correctly specifying conflicts() and depends_on() in such a
# case is a PITA.
variant('languages',
default='c,c++,fortran',
values=('ada', 'brig', 'c', 'c++', 'fortran',
'go', 'java', 'jit', 'lto', 'objc', 'obj-c++'),
multi=True,
description='Compilers and runtime libraries to build')
variant('binutils',
default=False,
description='Build via binutils')
variant('piclibs',
default=False,
description='Build PIC versions of libgfortran.a and libstdc++.a')
variant('strip',
default=False,
description='Strip executables to reduce installation size')
variant('nvptx',
default=False,
description='Target nvptx offloading to NVIDIA GPUs')
variant('bootstrap',
default=True,
description='Enable 3-stage bootstrap')
variant('graphite',
default=False,
description='Enable Graphite loop optimizations (requires ISL)')
depends_on('flex', type='build', when='@master')
# https://gcc.gnu.org/install/prerequisites.html
    depends_on('gmp@4.3.2:')
    # mawk is not sufficient for go support
    depends_on('gawk@3.1.5:', type='build')
    depends_on('texinfo@4.7:', type='build')
depends_on('libtool', type='build')
# dependencies required for git versions
    depends_on('m4@1.4.6:', when='@master', type='build')
    depends_on('automake@1.15.1:', when='@master', type='build')
    depends_on('autoconf@2.69:', when='@master', type='build')
# GCC 7.3 does not compile with newer releases on some platforms, see
# https://github.com/spack/spack/issues/6902#issuecomment-433030376
    depends_on('mpfr@2.4.2:3.1.6', when='@:9.9')
    depends_on('mpfr@3.1.0:', when='@10:')
    depends_on('mpc@0.8.1:', when='@4.5:')
# Already released GCC versions do not support any newer version of ISL
# GCC 5.4 https://github.com/spack/spack/issues/6902#issuecomment-433072097
# GCC 7.3 https://github.com/spack/spack/issues/6902#issuecomment-433030376
# GCC 9+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86724
with when('+graphite'):
        depends_on('isl@0.14', when='@5.0:5.2')
        depends_on('isl@0.15', when='@5.3:5.9')
        depends_on('isl@0.15:0.18', when='@6:8.9')
        depends_on('isl@0.15:0.20', when='@9:9.9')
        depends_on('isl@0.15:', when='@10:')
depends_on('zlib', when='@6:')
depends_on('zstd', when='@10:')
depends_on('diffutils', type='build')
depends_on('iconv', when='platform=darwin')
depends_on('gnat', when='languages=ada')
depends_on('binutils+gas+ld+plugins~libiberty', when='+binutils', type=('build', 'link', 'run'))
depends_on('zip', type='build', when='languages=java')
# The server is sometimes a bit slow to respond
timeout = {'timeout': 60}
# TODO: integrate these libraries.
# depends_on('ppl')
# depends_on('cloog')
# https://gcc.gnu.org/install/test.html
    depends_on('dejagnu@1.4.4', type='test')
depends_on('expect', type='test')
depends_on('tcl', type='test')
    depends_on('autogen@5.5.4:', type='test')
    depends_on('guile@1.4.1:', type='test')
# See https://golang.org/doc/install/gccgo#Releases
with when('languages=go'):
provides('golang', when='@4.6:')
provides('golang@:1', when='@4.7.1:')
provides('golang@:1.1', when='@4.8:')
provides('golang@:1.1.2', when='@4.8.2:')
provides('golang@:1.2', when='@4.9:')
provides('golang@:1.4', when='@5:')
provides('golang@:1.6.1', when='@6:')
provides('golang@:1.8', when='@7:')
# GCC 4.6 added support for the Go programming language.
# See https://gcc.gnu.org/gcc-4.6/changes.html
conflicts('@:4.5', msg='support for Go has been added in GCC 4.6')
# Go is not supported on macOS
conflicts('platform=darwin', msg='Go not supported on MacOS')
# For a list of valid languages for a specific release,
# run the following command in the GCC source directory:
# $ grep ^language= gcc/*/config-lang.in
# See https://gcc.gnu.org/install/configure.html
# Support for processing BRIG 1.0 files was added in GCC 7
# BRIG is a binary format for HSAIL:
# (Heterogeneous System Architecture Intermediate Language).
# See https://gcc.gnu.org/gcc-7/changes.html
conflicts('languages=brig', when='@:6')
# BRIG does not seem to be supported on macOS
conflicts('languages=brig', when='platform=darwin')
# GCC 4.8 added a 'c' language. I'm sure C was always built,
# but this is the first version that accepts 'c' as a valid language.
conflicts('languages=c', when='@:4.7')
# The GCC Java frontend and associated libjava runtime library
# have been removed from GCC as of GCC 7.
# See https://gcc.gnu.org/gcc-7/changes.html
conflicts('languages=java', when='@7:')
# GCC 5 added the ability to build GCC as a Just-In-Time compiler.
# See https://gcc.gnu.org/gcc-5/changes.html
conflicts('languages=jit', when='@:4')
with when('+nvptx'):
depends_on('cuda')
resource(
name='newlib',
url='ftp://sourceware.org/pub/newlib/newlib-3.0.0.20180831.tar.gz',
sha256='3ad3664f227357df15ff34e954bfd9f501009a647667cd307bf0658aefd6eb5b',
destination='newlibsource',
fetch_options=timeout
)
# nvptx-tools does not seem to work as a dependency,
# but does fine when the source is inside the gcc build directory
# nvptx-tools doesn't have any releases, so grabbing the last commit
resource(
name='nvptx-tools',
git='https://github.com/MentorEmbedded/nvptx-tools',
commit='d0524fbdc86dfca068db5a21cc78ac255b335be5',
)
# NVPTX offloading supported in 7 and later by limited languages
conflicts('@:6', msg='NVPTX only supported in gcc 7 and above')
conflicts('languages=ada')
conflicts('languages=brig')
conflicts('languages=go')
conflicts('languages=java')
conflicts('languages=jit')
conflicts('languages=objc')
conflicts('languages=obj-c++')
# NVPTX build disables bootstrap
conflicts('+bootstrap')
# Binutils can't build ld on macOS
conflicts('+binutils', when='platform=darwin')
# Bootstrap comparison failure:
# see https://github.com/spack/spack/issues/23296
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100340
# on XCode 12.5
conflicts('+bootstrap', when='@:11.1 %[email protected]')
# aarch64/M1 is supported in GCC 12+
conflicts('@:11', when='target=aarch64: platform=darwin',
msg='Only GCC 12 and newer support macOS M1 (aarch64)')
# Newer binutils than RHEL's is required to run `as` on some instructions
# generated by new GCC (see https://github.com/spack/spack/issues/12235)
conflicts('~binutils', when='@7: os=rhel6',
msg='New GCC cannot use system assembler on RHEL6')
# GCC 11 requires GCC 4.8 or later (https://gcc.gnu.org/gcc-11/changes.html)
conflicts('%gcc@:4.7', when='@11:')
if sys.platform == 'darwin':
# Fix parallel build on APFS filesystem
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81797
if macos_version() >= Version('10.13'):
patch('darwin/apfs.patch', when='@5.5.0,6.1:6.4,7.1:7.3')
# from homebrew via macports
# https://trac.macports.org/ticket/56502#no1
# see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83531
patch('darwin/headers-10.13-fix.patch', when='@5.5.0')
if macos_version() >= Version('10.14'):
# Fix system headers for Mojave SDK:
# https://github.com/Homebrew/homebrew-core/pull/39041
patch('https://raw.githubusercontent.com/Homebrew/formula-patches/b8b8e65e/gcc/8.3.0-xcode-bug-_Atomic-fix.patch',
sha256='33ee92bf678586357ee8ab9d2faddf807e671ad37b97afdd102d5d153d03ca84',
when='@6:8.3')
if macos_version() >= Version('10.15'):
# Fix system headers for Catalina SDK
# (otherwise __OSX_AVAILABLE_STARTING ends up undefined)
patch('https://raw.githubusercontent.com/Homebrew/formula-patches/b8b8e65e/gcc/9.2.0-catalina.patch',
sha256='0b8d14a7f3c6a2f0d2498526e86e088926671b5da50a554ffa6b7f73ac4f132b', when='@9.2.0')
# See https://raw.githubusercontent.com/Homebrew/homebrew-core/3b7db4457ac64a31e3bbffc54b04c4bd824a4a4a/Formula/gcc.rb
patch('https://github.com/iains/gcc-darwin-arm64/commit/20f61faaed3b335d792e38892d826054d2ac9f15.patch?full_index=1',
sha256='c0605179a856ca046d093c13cea4d2e024809ec2ad4bf3708543fc3d2e60504b', when='@11.2.0')
# Use -headerpad_max_install_names in the build,
# otherwise updated load commands won't fit in the Mach-O header.
# This is needed because `gcc` avoids the superenv shim.
patch('darwin/gcc-7.1.0-headerpad.patch', when='@5:11')
patch('darwin/gcc-6.1.0-jit.patch', when='@5:7')
patch('darwin/gcc-4.9.patch1', when='@4.9.0:4.9.3')
patch('darwin/gcc-4.9.patch2', when='@4.9.0:4.9.3')
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92061
patch('darwin/clang13.patch', when='@:11.1 %apple-clang@13')
patch('piclibs.patch', when='+piclibs')
patch('gcc-backport.patch', when='@4.7:4.9.3,5:5.3')
# Backport libsanitizer patch for glibc >= 2.31 and 5.3.0 <= gcc <= 9.2.0
# https://bugs.gentoo.org/708346
patch('glibc-2.31-libsanitizer-1.patch', when='@7.1.0:7.5.0,8.1.0:8.3.0,9.0.0:9.2.0')
patch('glibc-2.31-libsanitizer-1-gcc-6.patch', when='@5.3.0:5.5.0,6.1.0:6.5.0')
patch('glibc-2.31-libsanitizer-2.patch', when='@8.1.0:8.3.0,9.0.0:9.2.0')
patch('glibc-2.31-libsanitizer-2-gcc-6.patch', when='@5.3.0:5.5.0,6.1.0:6.5.0')
patch('glibc-2.31-libsanitizer-2-gcc-7.patch', when='@7.1.0:7.5.0')
patch('https://gcc.gnu.org/git/?p=gcc.git;a=patch;h=2b40941d23b1570cdd90083b58fa0f66aa58c86e', sha256='b48e48736062e64a6da7cbe7e21a6c1c89422d1f49ef547c73b479a3f3f4935f', when='@6.5.0,7.4.0:7.5.0,8.2.0:9.3.0')
patch('https://gcc.gnu.org/git/?p=gcc.git;a=patch;h=745dae5923aba02982563481d75a21595df22ff8', sha256='eaa00c91e08a5e767f023911a49bc1b2d1a3eea38703b745ab260f90e8da41aa', when='@10.1.0:11.1.0')
# Older versions do not compile with newer versions of glibc
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81712
patch('ucontext_t.patch', when='@4.9,5.1:5.4,6.1:6.4,7.1')
patch('ucontext_t-java.patch', when='@4.9,5.1:5.4,6.1:6.4 languages=java')
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81066
patch('stack_t-4.9.patch', when='@4.9')
patch('stack_t.patch', when='@5.1:5.4,6.1:6.4,7.1')
# https://bugs.busybox.net/show_bug.cgi?id=10061
patch('signal.patch', when='@4.9,5.1:5.4')
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85835
patch('sys_ustat.h.patch', when='@5.0:6.4,7.0:7.3,8.1')
patch('sys_ustat-4.9.patch', when='@4.9')
# this patch removes cylades support from gcc-5 and allows gcc-5 to be built
# with newer glibc versions.
patch('glibc-2.31-libsanitizer-3-gcc-5.patch', when='@5.3.0:5.5.0')
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95005
patch('zstd.patch', when='@10')
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100102
patch('https://gcc.gnu.org/git/?p=gcc.git;a=patch;h=fc930b3010bd0de899a3da3209eab20664ddb703',
sha256='28c5ab3b564d83dd7e6e35b9c683141a4cb57ee886c5367e54a0828538b3c789', when='@10.1:10.3')
patch('https://gcc.gnu.org/git/?p=gcc.git;a=patch;h=f1feb74046e0feb0596b93bbb822fae02940a90e',
sha256='3e5029489b79fc0d47fd6719f3d5c9d3bbc727a4a0cbff161a5517e8a3c98cb6', when='@11.1')
build_directory = 'spack-build'
@property
def executables(self):
names = [r'gcc', r'[^\w]?g\+\+', r'gfortran']
suffixes = [r'', r'-mp-\d+\.\d', r'-\d+\.\d', r'-\d+', r'\d\d']
return [r''.join(x) for x in itertools.product(names, suffixes)]
@classmethod
def filter_detected_exes(cls, prefix, exes_in_prefix):
result = []
for exe in exes_in_prefix:
# On systems like Ubuntu we might get multiple executables
# with the string "gcc" in them. See:
# https://helpmanual.io/packages/apt/gcc/
basename = os.path.basename(exe)
substring_to_be_filtered = [
'c99-gcc',
'c89-gcc',
'-nm',
'-ar',
'ranlib',
'clang' # clang++ matches g++ -> clan[g++]
]
if any(x in basename for x in substring_to_be_filtered):
continue
# Filter out links in favor of real executables on
# all systems but Cray
host_platform = str(spack.platforms.host())
if os.path.islink(exe) and host_platform != 'cray':
continue
result.append(exe)
return result
@classmethod
def determine_version(cls, exe):
try:
output = spack.compiler.get_compiler_version_output(
exe, '--version'
)
except Exception:
output = ''
# Apple's gcc is actually apple clang, so skip it.
# Users can add it manually to compilers.yaml at their own risk.
if 'Apple' in output:
return None
version_regex = re.compile(r'([\d\.]+)')
for vargs in ('-dumpfullversion', '-dumpversion'):
try:
output = spack.compiler.get_compiler_version_output(exe, vargs)
match = version_regex.search(output)
if match:
return match.group(1)
except spack.util.executable.ProcessError:
pass
except Exception as e:
tty.debug(e)
return None
@classmethod
def determine_variants(cls, exes, version_str):
languages, compilers = set(), {}
for exe in exes:
basename = os.path.basename(exe)
if 'g++' in basename:
languages.add('c++')
compilers['cxx'] = exe
elif 'gfortran' in basename:
languages.add('fortran')
compilers['fortran'] = exe
elif 'gcc' in basename:
languages.add('c')
compilers['c'] = exe
variant_str = 'languages={0}'.format(','.join(languages))
return variant_str, {'compilers': compilers}
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
# For GCC 'compilers' is a mandatory attribute
msg = ('the extra attribute "compilers" must be set for '
'the detected spec "{0}"'.format(spec))
assert 'compilers' in extra_attributes, msg
compilers = extra_attributes['compilers']
for constraint, key in {
'languages=c': 'c',
'languages=c++': 'cxx',
'languages=fortran': 'fortran'
}.items():
if spec.satisfies(constraint, strict=True):
msg = '{0} not in {1}'
assert key in compilers, msg.format(key, spec)
@property
def cc(self):
msg = "cannot retrieve C compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('c', None)
result = None
if 'languages=c' in self.spec:
result = str(self.spec.prefix.bin.gcc)
return result
@property
def cxx(self):
msg = "cannot retrieve C++ compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('cxx', None)
result = None
if 'languages=c++' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'g++')
return result
@property
def fortran(self):
msg = "cannot retrieve Fortran compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('fortran', None)
result = None
if 'languages=fortran' in self.spec:
result = str(self.spec.prefix.bin.gfortran)
return result
def url_for_version(self, version):
# This function will be called when trying to fetch from url, before
# mirrors are tried. It takes care of modifying the suffix of gnu
# mirror path so that Spack will also look for the correct file in
# the mirrors
if (version < Version('6.4.0') and version != Version('5.5.0')) \
or version == Version('7.1.0'):
self.gnu_mirror_path = self.gnu_mirror_path.replace('xz', 'bz2')
return super(Gcc, self).url_for_version(version)
def patch(self):
spec = self.spec
prefix = self.spec.prefix
# Fix a standard header file for OS X Yosemite that
# is GCC incompatible by replacing non-GCC compliant macros
if 'yosemite' in spec.architecture:
if os.path.isfile('/usr/include/dispatch/object.h'):
new_dispatch_dir = join_path(prefix, 'include', 'dispatch')
mkdirp(new_dispatch_dir)
new_header = join_path(new_dispatch_dir, 'object.h')
install('/usr/include/dispatch/object.h', new_header)
filter_file(r'typedef void \(\^dispatch_block_t\)\(void\)',
'typedef void* dispatch_block_t',
new_header)
# Use installed libz
if self.version >= Version('6'):
filter_file('@zlibdir@',
'-L{0}'.format(spec['zlib'].prefix.lib),
'gcc/Makefile.in')
filter_file('@zlibinc@',
'-I{0}'.format(spec['zlib'].prefix.include),
'gcc/Makefile.in')
if spec.satisfies('+nvptx'):
# backport of 383400a6078d upstream to allow support of cuda@11:
filter_file('#define ASM_SPEC "%{misa=*:-m %*}"',
'#define ASM_SPEC "%{misa=*:-m %*; :-m sm_35}"',
'gcc/config/nvptx/nvptx.h',
string=True)
filter_file('Target RejectNegative ToLower Joined '
'Enum(ptx_isa) Var(ptx_isa_option) Init(PTX_ISA_SM30)',
'Target RejectNegative ToLower Joined '
'Enum(ptx_isa) Var(ptx_isa_option) Init(PTX_ISA_SM35)',
'gcc/config/nvptx/nvptx.opt',
string=True)
# https://gcc.gnu.org/install/configure.html
def configure_args(self):
spec = self.spec
# Generic options to compile GCC
options = [
# Distributor options
'--with-pkgversion=Spack GCC',
'--with-bugurl=https://github.com/spack/spack/issues',
# Xcode 10 dropped 32-bit support
'--disable-multilib',
'--enable-languages={0}'.format(
','.join(spec.variants['languages'].value)),
# Drop gettext dependency
'--disable-nls'
]
# Use installed libz
if self.version >= Version('6'):
options.append('--with-system-zlib')
if 'zstd' in spec:
options.append('--with-zstd-include={0}'.format(
spec['zstd'].headers.directories[0]))
options.append('--with-zstd-lib={0}'.format(
spec['zstd'].libs.directories[0]))
# Enabling language "jit" requires --enable-host-shared.
if 'languages=jit' in spec:
options.append('--enable-host-shared')
# Binutils
if spec.satisfies('+binutils'):
binutils = spec['binutils'].prefix.bin
options.extend([
'--with-gnu-ld',
'--with-ld=' + binutils.ld,
'--with-gnu-as',
'--with-as=' + binutils.join('as'),
])
# enable_bootstrap
if spec.satisfies('+bootstrap'):
options.extend([
'--enable-bootstrap',
])
else:
options.extend([
'--disable-bootstrap',
])
# Configure include and lib directories explicitly for these
# dependencies since the short GCC option assumes that libraries
# are installed in "/lib" which might not be true on all OS
# (see #10842)
#
# More info at: https://gcc.gnu.org/install/configure.html
for dep_str in ('mpfr', 'gmp', 'mpc', 'isl'):
if dep_str not in spec:
options.append('--without-{0}'.format(dep_str))
continue
dep_spec = spec[dep_str]
include_dir = dep_spec.headers.directories[0]
lib_dir = dep_spec.libs.directories[0]
options.extend([
'--with-{0}-include={1}'.format(dep_str, include_dir),
'--with-{0}-lib={1}'.format(dep_str, lib_dir)
])
# nvptx-none offloading for host compiler
if spec.satisfies('+nvptx'):
options.extend(['--enable-offload-targets=nvptx-none',
'--with-cuda-driver-include={0}'.format(
spec['cuda'].prefix.include),
'--with-cuda-driver-lib={0}'.format(
spec['cuda'].libs.directories[0]),
'--disable-bootstrap',
'--disable-multilib'])
if sys.platform == 'darwin':
options.extend([
'--with-native-system-header-dir=/usr/include',
'--with-sysroot={0}'.format(macos_sdk_path()),
'--with-libiconv-prefix={0}'.format(spec['iconv'].prefix)
])
# enable appropriate bootstrapping flags
stage1_ldflags = str(self.rpath_args)
boot_ldflags = stage1_ldflags + ' -static-libstdc++ -static-libgcc'
options.append('--with-stage1-ldflags=' + stage1_ldflags)
options.append('--with-boot-ldflags=' + boot_ldflags)
return options
# run configure/make/make(install) for the nvptx-none target
# before running the host compiler phases
@run_before('configure')
def nvptx_install(self):
spec = self.spec
prefix = self.prefix
if not spec.satisfies('+nvptx'):
return
# config.guess returns the host triple, e.g. "x86_64-pc-linux-gnu"
guess = Executable('./config.guess')
targetguess = guess(output=str).rstrip('\n')
options = getattr(self, 'configure_flag_args', [])
options += ['--prefix={0}'.format(prefix)]
options += [
'--with-cuda-driver-include={0}'.format(
spec['cuda'].prefix.include),
'--with-cuda-driver-lib={0}'.format(
spec['cuda'].libs.directories[0]),
]
with working_dir('nvptx-tools'):
configure = Executable("./configure")
configure(*options)
make()
make('install')
pattern = join_path(self.stage.source_path, 'newlibsource', '*')
files = glob.glob(pattern)
if files:
symlink(join_path(files[0], 'newlib'), 'newlib')
# self.build_directory = 'spack-build-nvptx'
with working_dir('spack-build-nvptx', create=True):
options = ['--prefix={0}'.format(prefix),
'--enable-languages={0}'.format(
','.join(spec.variants['languages'].value)),
'--with-mpfr={0}'.format(spec['mpfr'].prefix),
'--with-gmp={0}'.format(spec['gmp'].prefix),
'--target=nvptx-none',
'--with-build-time-tools={0}'.format(
join_path(prefix,
'nvptx-none', 'bin')),
'--enable-as-accelerator-for={0}'.format(
targetguess),
'--disable-sjlj-exceptions',
'--enable-newlib-io-long-long',
]
configure = Executable("../configure")
configure(*options)
make()
make('install')
@property
def install_targets(self):
if '+strip' in self.spec:
return ['install-strip']
return ['install']
@property
def spec_dir(self):
# e.g. lib/gcc/x86_64-unknown-linux-gnu/4.9.2
spec_dir = glob.glob('{0}/gcc/*/*'.format(self.prefix.lib))
return spec_dir[0] if spec_dir else None
@run_after('install')
def write_rpath_specs(self):
"""Generate a spec file so the linker adds a rpath to the libs
the compiler used to build the executable.
.. caution::
              The custom spec file by default will *always* pass ``-Wl,-rpath
...`` to the linker, which will cause the linker to *ignore* the
value of ``LD_RUN_PATH``, which otherwise would be saved to the
binary as the default rpath. See the mitigation below for how to
temporarily disable this behavior.
Structure the specs file so that users can define a custom spec file
to suppress the spack-linked rpaths to facilitate rpath adjustment
for relocatable binaries. The custom spec file
:file:`{norpath}.spec` will have a single
           line followed by two blank lines::
*link_libgcc_rpath:
It can be passed to the GCC linker using the argument
``--specs=norpath.spec`` to disable the automatic rpath and restore
the behavior of ``LD_RUN_PATH``."""
if not self.spec_dir:
tty.warn('Could not install specs for {0}.'.format(
self.spec.format('{name}{@version}')))
return
gcc = self.spec['gcc'].command
lines = gcc('-dumpspecs', output=str).splitlines(True)
specs_file = join_path(self.spec_dir, 'specs')
# Save a backup
with open(specs_file + '.orig', 'w') as out:
out.writelines(lines)
# Find which directories have shared libraries
rpath_libdirs = []
for dir in ['lib', 'lib64']:
libdir = join_path(self.prefix, dir)
if glob.glob(join_path(libdir, "*." + dso_suffix)):
rpath_libdirs.append(libdir)
if not rpath_libdirs:
# No shared libraries
tty.warn('No dynamic libraries found in lib/lib64')
return
# Overwrite the specs file
with open(specs_file, 'w') as out:
for line in lines:
out.write(line)
if line.startswith('*link_libgcc:'):
# Insert at start of line following link_libgcc, which gets
# inserted into every call to the linker
out.write('%(link_libgcc_rpath) ')
# Add easily-overridable rpath string at the end
out.write('*link_libgcc_rpath:\n')
if 'platform=darwin' in self.spec:
# macOS linker requires separate rpath commands
out.write(' '.join('-rpath ' + lib for lib in rpath_libdirs))
else:
# linux linker uses colon-separated rpath
out.write('-rpath ' + ':'.join(rpath_libdirs))
out.write('\n')
set_install_permissions(specs_file)
tty.info('Wrote new spec file to {0}'.format(specs_file))
def setup_run_environment(self, env):
# Search prefix directory for possibly modified compiler names
from spack.compilers.gcc import Gcc as Compiler
# Get the contents of the installed binary directory
bin_path = self.spec.prefix.bin
if not os.path.isdir(bin_path):
return
bin_contents = os.listdir(bin_path)
# Find the first non-symlink compiler binary present for each language
for lang in ['cc', 'cxx', 'fc', 'f77']:
for filename, regexp in itertools.product(
bin_contents,
Compiler.search_regexps(lang)
):
if not regexp.match(filename):
continue
abspath = os.path.join(bin_path, filename)
if os.path.islink(abspath):
continue
# Set the proper environment variable
env.set(lang.upper(), abspath)
# Stop searching filename/regex combos for this language
break
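# Example spec for installing this package (a sketch only, but the version and
# variants used here are all declared above):
#
#     spack install gcc@11.2.0 +binutils +piclibs languages=c,c++,fortran
#
# The resulting compiler can then be registered with `spack compiler find`.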
|
the-stack_106_15050
|
'''
Functions for application-level replication.
Terminology:

reflect::
    Find an object with the same identifier in the target database without
    changing or creating it.

replicate::
    Find or create a new object with the same identifier in the target database
    and update it with the data of the current object. Only SQLAlchemy attributes
    found in both source and target classes are copied. For objects found via
    relationships the following rules apply: private ones are replicated and
    references to independent objects are reflected.
'''
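# A minimal usage sketch (illustrative only; `Article` / `ArticlePublished` are
# hypothetical mapped classes sharing a primary key and a common subset of
# attributes, and `session` is an open SQLAlchemy session):
#
#     exclude(ArticlePublished.comments)             # relation we don't want copied
#     published = replicate(draft_article, ArticlePublished)
#     session.commit()
#
# replicate() looks up (or creates) the row with the same identity in the target
# class, copies the common column attributes, replicates "private" (delete-orphan)
# relations and merely reflects references to independent objects.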
from weakref import WeakSet
from sqlalchemy.schema import Column
from sqlalchemy.util import duck_type_collection
from sqlalchemy.orm import object_session
from sqlalchemy.orm.util import identity_key
from sqlalchemy.orm.attributes import manager_of_class, QueryableAttribute
from sqlalchemy.orm.properties import ColumnProperty, RelationshipProperty
from sqlalchemy.orm.collections import collection_adapter
from sqlalchemy.orm.attributes import instance_state, instance_dict
from sqlalchemy.orm.interfaces import MANYTOMANY, MANYTOONE, ONETOMANY
_included = WeakSet()
_excluded = WeakSet()
def include(prop):
'''Replicate property that is normally not replicated. Right now it's
meaningful for one-to-many relations only.'''
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
#assert isinstance(prop, RelationshipProperty)
_included.add(prop)
def exclude(prop):
    '''Don't replicate a property that is normally replicated: an ordering column,
    or a many-to-one relation that is marked for replication from the other side.'''
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
_excluded.add(prop)
if isinstance(prop, RelationshipProperty):
# Also exclude columns that participate in this relationship
for local in prop.local_columns:
_excluded.add(local)
def reflect(source, model, cache=None):
'''Finds an object of class `model` with the same identifier as the
`source` object'''
if source is None:
return None
if cache and source in cache:
return cache[source]
db = object_session(source)
ident = identity_key(instance=source)[1]
assert ident is not None
return db.query(model).get(ident)
class _PrimaryKeyIsNull(BaseException):
    '''Used when setting a relationship property to None would require setting a
    non-nullable primary key column to NULL. Such objects should be skipped by
    replicate_filter.'''
def replicate_relation(source, target, attr, target_attr, cache=None):
if attr.property.cascade.delete_orphan:
process_scalar = replicate_no_merge
process_list = replicate_filter
else:
process_scalar = reflect
process_list = reflect_filter
value = getattr(source, attr.key)
target_attr_model = target_attr.property.mapper.class_
if attr.property.uselist:
adapter = collection_adapter(value)
if adapter:
            # XXX The magic passes below are adapted from logic in
            # CollectionAttributeImpl.set() method without proper
            # understanding. The `elif` branch isn't even covered by tests.
if hasattr(value, '_sa_iterator'):
value = value._sa_iterator()
elif duck_type_collection(value) is dict:
value = value.values()
reflection = process_list(value, target_attr_model, cache=cache)
impl = instance_state(target).get_impl(attr.key)
impl.set(instance_state(target), instance_dict(target), reflection,
# XXX We either have to convert reflection back to original
# collection type or use this private parameter.
_adapt=False)
else:
reflection = process_scalar(value, target_attr_model, cache=cache)
setattr(target, attr.key, reflection)
if (reflection is None and
attr.property.direction is MANYTOONE and
any(col.primary_key and not col.nullable
for col in attr.property.local_columns)):
raise _PrimaryKeyIsNull()
def is_relation_replicatable(attr):
if attr.property in _included:
return True
elif attr.property in _excluded:
return False
elif attr.property.viewonly:
return False
elif attr.property.cascade.delete_orphan:
# Private, replicate
return True
elif attr.property.direction is MANYTOMANY:
        # Many-to-many. Usually one side is a short list and the other is long or
        # absent. Reflect if not dynamic; other cases should be excluded
        # manually.
assert attr.property.lazy in (True, False, 'dynamic', 'select')
return attr.property.lazy!='dynamic'
elif attr.property.direction is MANYTOONE:
        # Many-to-one and one-to-one with the FK pointing from this side to the
        # other.
return True
else:
assert attr.property.direction is ONETOMANY
return False
def _column_property_in_registry(prop, registry):
if prop in registry:
return True
elif len(prop.columns)==1:
# Column() is translated to ColumnProperty with single column
return prop.columns[0] in registry
else:
return False
def replicate_attributes(source, target, cache=None):
'''Replicates common SQLAlchemy attributes from the `source` object to the
`target` object.'''
target_manager = manager_of_class(type(target))
column_attrs = set()
relationship_attrs = set()
relationship_columns = set()
for attr in manager_of_class(type(source)).attributes:
if attr.key not in target_manager:
# It's not common attribute
continue
target_attr = target_manager[attr.key]
if isinstance(attr.property, ColumnProperty):
assert isinstance(target_attr.property, ColumnProperty)
column_attrs.add(attr)
elif isinstance(attr.property, RelationshipProperty):
assert isinstance(target_attr.property, RelationshipProperty)
relationship_attrs.add(attr)
if attr.property.direction is MANYTOONE:
relationship_columns.update(attr.property.local_columns)
for attr in column_attrs:
if _column_property_in_registry(attr.property, _excluded):
continue
elif (not _column_property_in_registry(attr.property, _included) and
all(column in relationship_columns
for column in attr.property.columns)):
continue
setattr(target, attr.key, getattr(source, attr.key))
for attr in relationship_attrs:
target_attr_model = target_manager[attr.key].property.argument
if not is_relation_replicatable(attr):
continue
replicate_relation(source, target, attr, target_manager[attr.key],
cache=cache)
def replicate_no_merge(source, model, cache=None):
    '''Replicates the `source` object to the `model` class and returns its
    reflection without merging it into the session.'''
# `cache` is used to break circular dependency: we need to replicate
# attributes before merging target into the session, but replication of
# some attributes may require target to be in session to avoid infinite
# loop.
if source is None:
return None
if cache is None:
cache = {}
elif source in cache:
return cache[source]
db = object_session(source)
cls, ident = identity_key(instance=source)
target = db.query(model).get(ident)
if target is None:
target = model()
cache[source] = target
try:
replicate_attributes(source, target, cache=cache)
except _PrimaryKeyIsNull:
return None
else:
return target
def replicate(source, model, cache=None):
'''Replicates the `source` object to `model` class and returns its
reflection.'''
target = replicate_no_merge(source, model, cache=cache)
if target is not None:
db = object_session(source)
target = db.merge(target)
return target
def replicate_filter(sources, model, cache=None):
    '''Replicates the list of objects to another class and returns their
    reflections'''
targets = [replicate_no_merge(source, model, cache=cache)
for source in sources]
# Some objects may not be available in target DB (not published), so we
# have to exclude None from the list.
return [target for target in targets if target is not None]
def reflect_filter(sources, model, cache=None):
    '''Returns the list of reflections of objects in the `source` list to another
    class. Objects that are not found in the target table are silently discarded.
'''
targets = [reflect(source, model, cache=cache) for source in sources]
# Some objects may not be available in target DB (not published), so we
# have to exclude None from the list.
return [target for target in targets if target is not None]
|
the-stack_106_15051
|
import numpy as np
__all__ = [
"img_2x2",
"mask_2x2",
"img_3x3_rgb",
"img_3x3",
"img_3x4",
"mask_3x3",
"mask_3x4",
"img_5x5",
"mask_5x5",
"img_6x6",
"img_6x6_lc",
"img_6x6_rgb",
"mask_6x6",
"img_7x7",
"cube_3x3x3",
]
def img_2x2():
"""
Generates a 2x2 grayscale image (uint8)
Returns
-------
out : ndarray
2x2x1 uint8 image
"""
return np.array([[1, 0], [1, 1]]).reshape((2, 2, 1)).astype(np.uint8)
def mask_2x2():
"""
    Generates a 2x2 mask (does not have the 3rd dimension, unlike an image).
Returns
-------
out : ndarray
2x2 mask, uint8
"""
return np.array([[1, 0], [0, 1]]).reshape((2, 2)).astype(np.uint8)
def img_3x4():
"""
Generates a grayscale image 3x4
Returns
-------
out : ndarray
3x4x1 uint8 image
"""
img = np.array([[1, 1, 1, 0], [1, 0, 1, 1], [1, 1, 1, 1]]).reshape((3, 4, 1)).astype(np.uint8) * 255
return img
def mask_3x4():
"""
Generates a mask 3x4
Returns
-------
out : ndarray
3x4 uint8 image
"""
mask = np.array([[0, 1, 1, 1], [0, 1, 1, 0], [0, 1, 1, 0]]).reshape((3, 4)).astype(np.uint8)
return mask
def img_3x3():
"""
    Generates a grayscale image 3x3
    Returns
    -------
    out : ndarray
        3x3x1 uint8 image
"""
img = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 1]]).reshape((3, 3, 1)).astype(np.uint8)
return img
def img_3x3_rgb():
"""
    Generates an RGB image 3x3
    Returns
    -------
    out : ndarray
        3x3x3 uint8 image
"""
img = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 1]]).reshape((3, 3, 1)).astype(np.uint8)
return np.dstack((img, img, img)) * 255
def mask_3x3():
"""
    Generates a mask 3x3
    Returns
    -------
    out : ndarray
        3x3 uint8 mask
"""
mask = np.array([[1, 1, 1], [1, 1, 1], [0, 1, 1]]).reshape((3, 3)).astype(np.uint8)
return mask
def img_5x5():
"""
    Generates a grayscale image 5x5. It is all ones, besides the edges
    Returns
    -------
    out : ndarray
        5x5x1 uint8 image
"""
img = np.ones((5, 5, 1))
img[:, 0] = 0
img[:, -1] = 0
img[0, :] = 0
img[-1, :] = 0
return img.astype(np.uint8)
def cube_3x3x3():
return np.ones((3, 3, 3, 1)).astype(np.uint8)
def mask_5x5():
"""
    Generates a mask 5x5. It is all ones, besides a two-pixel border of twos
    Returns
    -------
    out : ndarray
        5x5 uint8 mask
"""
img = np.ones((5, 5))
img[:, :2] = 2
img[:, -2:] = 2
img[:2, :] = 2
img[-2, :] = 2
return img.astype(np.uint8)
def img_6x6():
"""
    Generates a grayscale image 6x6. It is all 255, besides the edges
    Returns
    -------
    out : ndarray
        6x6x1 uint8 image
"""
img = np.ones((6, 6, 1))
img[:, 0] = 0
img[:, -1] = 0
img[0, :] = 0
img[-1, :] = 0
return img.astype(np.uint8) * 255
def img_7x7():
"""
    Generates a grayscale image 7x7. It is all 255, besides the edges
    Returns
    -------
    out : ndarray
        7x7x1 uint8 image
"""
img = np.ones((7, 7, 1))
img[:, 0] = 0
img[:, -1] = 0
img[0, :] = 0
img[-1, :] = 0
return img.astype(np.uint8) * 255
def mask_6x6():
"""
    Generates a mask 6x6. It is all ones, besides the edges
    Returns
    -------
    out : ndarray
        6x6 uint8 mask
"""
img = np.ones((6, 6))
img[:, 0] = 0
img[:, -1] = 0
img[0, :] = 0
img[-1, :] = 0
return img.astype(np.uint8)
def img_6x6_rgb():
"""
    Generates an RGB image 6x6. It is all 255, besides the edges
    Returns
    -------
    out : ndarray
        6x6x3 uint8 image
"""
img = np.ones((6, 6, 1))
img[:, 0] = 0
img[:, -1] = 0
img[0, :] = 0
img[-1, :] = 0
return np.dstack((img, img, img)).astype(np.uint8) * 255
def img_6x6_lc():
"""
    Generates an RGB image 6x6. It is all 127, besides the edges (low contrast)
    Returns
    -------
    out : ndarray
        6x6x3 uint8 image
"""
img = np.ones((6, 6, 1))
img[:, 0] = 0
img[:, -1] = 0
img[0, :] = 0
img[-1, :] = 0
return np.dstack((img, img, img)).astype(np.uint8) * 127
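# A minimal usage sketch (assumes pytest; the import path and test below are
# illustrative only and depend on how this module is exposed in the test suite):
#
#     from . import img_6x6_rgb, mask_6x6
#
#     def test_fixture_shapes():
#         assert img_6x6_rgb().shape == (6, 6, 3)
#         assert mask_6x6().shape == (6, 6)
#         assert img_6x6_rgb().dtype == np.uint8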
|
the-stack_106_15054
|
"""Test various algorithmic properties of selectables."""
from sqlalchemy import alias
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import join
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import not_
from sqlalchemy import null
from sqlalchemy import or_
from sqlalchemy import outerjoin
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import union
from sqlalchemy import util
from sqlalchemy.sql import column
from sqlalchemy.sql import elements
from sqlalchemy.sql import expression
from sqlalchemy.sql import table
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql import visitors
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
Column("col3", Integer),
Column("colx", Integer),
)
table2 = Table(
"table2",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", Integer, ForeignKey("table1.col1")),
Column("col3", String(20)),
Column("coly", Integer),
)
keyed = Table(
"keyed",
metadata,
Column("x", Integer, key="colx"),
Column("y", Integer, key="coly"),
Column("z", Integer),
)
class SelectableTest(
fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL
):
__dialect__ = "default"
def test_indirect_correspondence_on_labels(self):
# this test depends upon 'distance' to
# get the right result
# same column three times
s = select(
[
table1.c.col1.label("c2"),
table1.c.col1,
table1.c.col1.label("c1"),
]
)
# this tests the same thing as
# test_direct_correspondence_on_labels below -
# that the presence of label() affects the 'distance'
assert s.corresponding_column(table1.c.col1) is s.c.col1
assert s.corresponding_column(s.c.col1) is s.c.col1
assert s.corresponding_column(s.c.c1) is s.c.c1
def test_labeled_subquery_twice(self):
scalar_select = select([table1.c.col1]).label("foo")
s1 = select([scalar_select])
s2 = select([scalar_select, scalar_select])
eq_(
s1.c.foo.proxy_set,
set([s1.c.foo, scalar_select, scalar_select.element]),
)
eq_(
s2.c.foo.proxy_set,
set([s2.c.foo, scalar_select, scalar_select.element]),
)
assert s1.corresponding_column(scalar_select) is s1.c.foo
assert s2.corresponding_column(scalar_select) is s2.c.foo
def test_label_grouped_still_corresponds(self):
label = select([table1.c.col1]).label("foo")
label2 = label.self_group()
s1 = select([label])
s2 = select([label2])
assert s1.corresponding_column(label) is s1.c.foo
assert s2.corresponding_column(label) is s2.c.foo
def test_direct_correspondence_on_labels(self):
# this test depends on labels being part
# of the proxy set to get the right result
l1, l2 = table1.c.col1.label("foo"), table1.c.col1.label("bar")
sel = select([l1, l2])
sel2 = sel.alias()
assert sel2.corresponding_column(l1) is sel2.c.foo
assert sel2.corresponding_column(l2) is sel2.c.bar
sel2 = select([table1.c.col1.label("foo"), table1.c.col2.label("bar")])
sel3 = sel.union(sel2).alias()
assert sel3.corresponding_column(l1) is sel3.c.foo
assert sel3.corresponding_column(l2) is sel3.c.bar
def test_keyed_gen(self):
s = select([keyed])
eq_(s.c.colx.key, "colx")
eq_(s.c.colx.name, "x")
assert s.corresponding_column(keyed.c.colx) is s.c.colx
assert s.corresponding_column(keyed.c.coly) is s.c.coly
assert s.corresponding_column(keyed.c.z) is s.c.z
sel2 = s.alias()
assert sel2.corresponding_column(keyed.c.colx) is sel2.c.colx
assert sel2.corresponding_column(keyed.c.coly) is sel2.c.coly
assert sel2.corresponding_column(keyed.c.z) is sel2.c.z
def test_keyed_label_gen(self):
s = select([keyed]).apply_labels()
assert s.corresponding_column(keyed.c.colx) is s.c.keyed_colx
assert s.corresponding_column(keyed.c.coly) is s.c.keyed_coly
assert s.corresponding_column(keyed.c.z) is s.c.keyed_z
sel2 = s.alias()
assert sel2.corresponding_column(keyed.c.colx) is sel2.c.keyed_colx
assert sel2.corresponding_column(keyed.c.coly) is sel2.c.keyed_coly
assert sel2.corresponding_column(keyed.c.z) is sel2.c.keyed_z
def test_keyed_c_collection_upper(self):
c = Column("foo", Integer, key="bar")
t = Table("t", MetaData(), c)
is_(t.c.bar, c)
def test_keyed_c_collection_lower(self):
c = column("foo")
c.key = "bar"
t = table("t", c)
is_(t.c.bar, c)
def test_clone_c_proxy_key_upper(self):
c = Column("foo", Integer, key="bar")
t = Table("t", MetaData(), c)
s = select([t])._clone()
assert c in s.c.bar.proxy_set
def test_clone_c_proxy_key_lower(self):
c = column("foo")
c.key = "bar"
t = table("t", c)
s = select([t])._clone()
assert c in s.c.bar.proxy_set
def test_no_error_on_unsupported_expr_key(self):
from sqlalchemy.sql.expression import BinaryExpression
def myop(x, y):
pass
t = table("t", column("x"), column("y"))
expr = BinaryExpression(t.c.x, t.c.y, myop)
s = select([t, expr])
eq_(s.c.keys(), ["x", "y", expr.anon_label])
def test_cloned_intersection(self):
t1 = table("t1", column("x"))
t2 = table("t2", column("x"))
s1 = t1.select()
s2 = t2.select()
s3 = t1.select()
s1c1 = s1._clone()
s1c2 = s1._clone()
s2c1 = s2._clone()
s3c1 = s3._clone()
eq_(
expression._cloned_intersection([s1c1, s3c1], [s2c1, s1c2]),
set([s1c1]),
)
def test_cloned_difference(self):
t1 = table("t1", column("x"))
t2 = table("t2", column("x"))
s1 = t1.select()
s2 = t2.select()
s3 = t1.select()
s1c1 = s1._clone()
s1c2 = s1._clone()
s2c1 = s2._clone()
s2c2 = s2._clone()
s3c1 = s3._clone()
eq_(
expression._cloned_difference([s1c1, s2c1, s3c1], [s2c1, s1c2]),
set([s3c1]),
)
def test_distance_on_aliases(self):
a1 = table1.alias("a1")
for s in (
select([a1, table1], use_labels=True),
select([table1, a1], use_labels=True),
):
assert s.corresponding_column(table1.c.col1) is s.c.table1_col1
assert s.corresponding_column(a1.c.col1) is s.c.a1_col1
def test_join_against_self(self):
jj = select([table1.c.col1.label("bar_col1")])
jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
# test column directly against itself
assert jjj.corresponding_column(jjj.c.table1_col1) is jjj.c.table1_col1
assert jjj.corresponding_column(jj.c.bar_col1) is jjj.c.bar_col1
# test alias of the join
j2 = jjj.alias("foo")
assert j2.corresponding_column(table1.c.col1) is j2.c.table1_col1
def test_clone_append_column(self):
sel = select([literal_column("1").label("a")])
eq_(list(sel.c.keys()), ["a"])
cloned = visitors.ReplacingCloningVisitor().traverse(sel)
cloned.append_column(literal_column("2").label("b"))
cloned.append_column(func.foo())
eq_(list(cloned.c.keys()), ["a", "b", "foo()"])
def test_append_column_after_replace_selectable(self):
basesel = select([literal_column("1").label("a")])
tojoin = select(
[literal_column("1").label("a"), literal_column("2").label("b")]
)
basefrom = basesel.alias("basefrom")
joinfrom = tojoin.alias("joinfrom")
sel = select([basefrom.c.a])
replaced = sel.replace_selectable(
basefrom, basefrom.join(joinfrom, basefrom.c.a == joinfrom.c.a)
)
self.assert_compile(
replaced,
"SELECT basefrom.a FROM (SELECT 1 AS a) AS basefrom "
"JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
"ON basefrom.a = joinfrom.a",
)
replaced.append_column(joinfrom.c.b)
self.assert_compile(
replaced,
"SELECT basefrom.a, joinfrom.b FROM (SELECT 1 AS a) AS basefrom "
"JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
"ON basefrom.a = joinfrom.a",
)
def test_against_cloned_non_table(self):
# test that corresponding column digs across
# clone boundaries with anonymous labeled elements
col = func.count().label("foo")
sel = select([col])
sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
assert sel2.corresponding_column(col) is sel2.c.foo
sel3 = visitors.ReplacingCloningVisitor().traverse(sel2)
assert sel3.corresponding_column(col) is sel3.c.foo
def test_with_only_generative(self):
s1 = table1.select().as_scalar()
self.assert_compile(
s1.with_only_columns([s1]),
"SELECT (SELECT table1.col1, table1.col2, "
"table1.col3, table1.colx FROM table1) AS anon_1",
)
def test_type_coerce_preserve_subq(self):
class MyType(TypeDecorator):
impl = Integer
stmt = select([type_coerce(column("x"), MyType).label("foo")])
stmt2 = stmt.select()
assert isinstance(stmt._raw_columns[0].type, MyType)
assert isinstance(stmt.c.foo.type, MyType)
assert isinstance(stmt2.c.foo.type, MyType)
def test_select_on_table(self):
sel = select([table1, table2], use_labels=True)
assert sel.corresponding_column(table1.c.col1) is sel.c.table1_col1
assert (
sel.corresponding_column(table1.c.col1, require_embedded=True)
is sel.c.table1_col1
)
assert table1.corresponding_column(sel.c.table1_col1) is table1.c.col1
assert (
table1.corresponding_column(
sel.c.table1_col1, require_embedded=True
)
is None
)
def test_join_against_join(self):
j = outerjoin(table1, table2, table1.c.col1 == table2.c.col2)
jj = select([table1.c.col1.label("bar_col1")], from_obj=[j]).alias(
"foo"
)
jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
assert jjj.corresponding_column(jjj.c.table1_col1) is jjj.c.table1_col1
j2 = jjj.alias("foo")
assert j2.corresponding_column(jjj.c.table1_col1) is j2.c.table1_col1
assert jjj.corresponding_column(jj.c.bar_col1) is jj.c.bar_col1
def test_table_alias(self):
a = table1.alias("a")
j = join(a, table2)
criterion = a.c.col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_alias_handles_column_context(self):
# not quite a use case yet but this is expected to become
# prominent w/ PostgreSQL's tuple functions
stmt = select([table1.c.col1, table1.c.col2])
a = stmt.alias("a")
self.assert_compile(
select([func.foo(a)]),
"SELECT foo(SELECT table1.col1, table1.col2 FROM table1) "
"AS foo_1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2 FROM table1) "
"AS a",
)
def test_union(self):
# tests that we can correspond a column in a Select statement
# with a certain Table, against a column in a Union where one of
# its underlying Selects matches to that same Table
u = select(
[
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
]
).union(
select(
[
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
]
)
)
s1 = table1.select(use_labels=True)
s2 = table2.select(use_labels=True)
assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
def test_union_precedence(self):
# conflicting column correspondence should be resolved based on
# the order of the select()s in the union
s1 = select([table1.c.col1, table1.c.col2])
s2 = select([table1.c.col2, table1.c.col1])
s3 = select([table1.c.col3, table1.c.colx])
s4 = select([table1.c.colx, table1.c.col3])
u1 = union(s1, s2)
assert u1.corresponding_column(table1.c.col1) is u1.c.col1
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
u1 = union(s1, s2, s3, s4)
assert u1.corresponding_column(table1.c.col1) is u1.c.col1
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
assert u1.corresponding_column(table1.c.colx) is u1.c.col2
assert u1.corresponding_column(table1.c.col3) is u1.c.col1
def test_singular_union(self):
u = union(
select([table1.c.col1, table1.c.col2, table1.c.col3]),
select([table1.c.col1, table1.c.col2, table1.c.col3]),
)
u = union(select([table1.c.col1, table1.c.col2, table1.c.col3]))
assert u.c.col1 is not None
assert u.c.col2 is not None
assert u.c.col3 is not None
def test_alias_union(self):
        # same as test_union, except it's an alias of the union
u = (
select(
[
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
]
)
.union(
select(
[
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
]
)
)
.alias("analias")
)
s1 = table1.select(use_labels=True)
s2 = table2.select(use_labels=True)
assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_coly) is u.c.coly
assert s2.corresponding_column(u.c.coly) is s2.c.table2_coly
def test_union_of_alias(self):
s1 = select([table1.c.col1, table1.c.col2])
s2 = select([table1.c.col1, table1.c.col2]).alias()
u1 = union(s1, s2)
assert u1.corresponding_column(s1.c.col1) is u1.c.col1
assert u1.corresponding_column(s2.c.col1) is u1.c.col1
u2 = union(s2, s1)
assert u2.corresponding_column(s1.c.col1) is u2.c.col1
assert u2.corresponding_column(s2.c.col1) is u2.c.col1
def test_union_of_text(self):
s1 = select([table1.c.col1, table1.c.col2])
s2 = text("select col1, col2 from foo").columns(
column("col1"), column("col2")
)
u1 = union(s1, s2)
assert u1.corresponding_column(s1.c.col1) is u1.c.col1
assert u1.corresponding_column(s2.c.col1) is u1.c.col1
u2 = union(s2, s1)
assert u2.corresponding_column(s1.c.col1) is u2.c.col1
assert u2.corresponding_column(s2.c.col1) is u2.c.col1
@testing.emits_warning("Column 'col1'")
def test_union_dupe_keys(self):
s1 = select([table1.c.col1, table1.c.col2, table2.c.col1])
s2 = select([table2.c.col1, table2.c.col2, table2.c.col3])
u1 = union(s1, s2)
assert (
u1.corresponding_column(s1.c._all_columns[0])
is u1.c._all_columns[0]
)
assert u1.corresponding_column(s2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(s1.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col3) is u1.c._all_columns[2]
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[2]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
@testing.emits_warning("Column 'col1'")
def test_union_alias_dupe_keys(self):
s1 = select([table1.c.col1, table1.c.col2, table2.c.col1]).alias()
s2 = select([table2.c.col1, table2.c.col2, table2.c.col3])
u1 = union(s1, s2)
assert (
u1.corresponding_column(s1.c._all_columns[0])
is u1.c._all_columns[0]
)
assert u1.corresponding_column(s2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(s1.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col3) is u1.c._all_columns[2]
        # this differs from the non-alias test because table2.c.col1 corresponds
        # more directly to s2.c.col1 than it does to s1.c.col1.
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
@testing.emits_warning("Column 'col1'")
def test_union_alias_dupe_keys_grouped(self):
s1 = (
select([table1.c.col1, table1.c.col2, table2.c.col1])
.limit(1)
.alias()
)
s2 = select([table2.c.col1, table2.c.col2, table2.c.col3]).limit(1)
u1 = union(s1, s2)
assert (
u1.corresponding_column(s1.c._all_columns[0])
is u1.c._all_columns[0]
)
assert u1.corresponding_column(s2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(s1.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col3) is u1.c._all_columns[2]
        # this differs from the non-alias test because table2.c.col1 corresponds
        # more directly to s2.c.col1 than it does to s1.c.col1.
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
def test_select_union(self):
        # like test_alias_union, but the correspondence is done against a Select of the union.
u = (
select(
[
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
]
)
.union(
select(
[
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
]
)
)
.alias("analias")
)
s = select([u])
s1 = table1.select(use_labels=True)
s2 = table2.select(use_labels=True)
assert s.corresponding_column(s1.c.table1_col2) is s.c.col2
assert s.corresponding_column(s2.c.table2_col2) is s.c.col2
def test_union_against_join(self):
        # like test_alias_union, but the union alias is corresponded against a join
u = (
select(
[
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
]
)
.union(
select(
[
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
]
)
)
.alias("analias")
)
j1 = table1.join(table2)
assert u.corresponding_column(j1.c.table1_colx) is u.c.colx
assert j1.corresponding_column(u.c.colx) is j1.c.table1_colx
def test_join(self):
a = join(table1, table2)
print(str(a.select(use_labels=True)))
b = table2.alias("b")
j = join(a, b)
print(str(j))
criterion = a.c.table1_col1 == b.c.col2
self.assert_(criterion.compare(j.onclause))
def test_select_alias(self):
a = table1.select().alias("a")
j = join(a, table2)
criterion = a.c.col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_select_labels(self):
a = table1.select(use_labels=True)
j = join(a, table2)
criterion = a.c.table1_col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_scalar_cloned_comparator(self):
sel = select([table1.c.col1]).as_scalar()
expr = sel == table1.c.col1
sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
expr2 = sel2 == table1.c.col1
is_(expr2.left, sel2)
def test_column_labels(self):
a = select(
[
table1.c.col1.label("acol1"),
table1.c.col2.label("acol2"),
table1.c.col3.label("acol3"),
]
)
j = join(a, table2)
criterion = a.c.acol1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
    def test_labeled_select_corresponding(self):
l1 = select([func.max(table1.c.col1)]).label("foo")
s = select([l1])
eq_(s.corresponding_column(l1), s.c.foo)
s = select([table1.c.col1, l1])
eq_(s.corresponding_column(l1), s.c.foo)
def test_select_alias_labels(self):
a = table2.select(use_labels=True).alias("a")
j = join(a, table1)
criterion = table1.c.col1 == a.c.table2_col2
self.assert_(criterion.compare(j.onclause))
def test_table_joined_to_select_of_table(self):
metadata = MetaData()
a = Table("a", metadata, Column("id", Integer, primary_key=True))
j2 = select([a.c.id.label("aid")]).alias("bar")
j3 = a.join(j2, j2.c.aid == a.c.id)
j4 = select([j3]).alias("foo")
assert j4.corresponding_column(j2.c.aid) is j4.c.aid
assert j4.corresponding_column(a.c.id) is j4.c.id
def test_two_metadata_join_raises(self):
m = MetaData()
m2 = MetaData()
t1 = Table("t1", m, Column("id", Integer), Column("id2", Integer))
t2 = Table("t2", m, Column("id", Integer, ForeignKey("t1.id")))
t3 = Table("t3", m2, Column("id", Integer, ForeignKey("t1.id2")))
s = select([t2, t3], use_labels=True)
assert_raises(exc.NoReferencedTableError, s.join, t1)
def test_multi_label_chain_naming_col(self):
# See [ticket:2167] for this one.
l1 = table1.c.col1.label("a")
l2 = select([l1]).label("b")
s = select([l2])
assert s.c.b is not None
self.assert_compile(
s.select(),
"SELECT b FROM "
"(SELECT (SELECT table1.col1 AS a FROM table1) AS b)",
)
s2 = select([s.label("c")])
self.assert_compile(
s2.select(),
"SELECT c FROM (SELECT (SELECT ("
"SELECT table1.col1 AS a FROM table1) AS b) AS c)",
)
def test_self_referential_select_raises(self):
t = table("t", column("x"))
s = select([t])
s.append_whereclause(s.c.x > 5)
assert_raises_message(
exc.InvalidRequestError,
r"select\(\) construct refers to itself as a FROM",
s.compile,
)
def test_unusual_column_elements_text(self):
"""test that .c excludes text()."""
s = select([table1.c.col1, text("foo")])
eq_(list(s.c), [s.c.col1])
def test_unusual_column_elements_clauselist(self):
"""Test that raw ClauseList is expanded into .c."""
from sqlalchemy.sql.expression import ClauseList
s = select([table1.c.col1, ClauseList(table1.c.col2, table1.c.col3)])
eq_(list(s.c), [s.c.col1, s.c.col2, s.c.col3])
def test_unusual_column_elements_boolean_clauselist(self):
"""test that BooleanClauseList is placed as single element in .c."""
c2 = and_(table1.c.col2 == 5, table1.c.col3 == 4)
s = select([table1.c.col1, c2])
eq_(list(s.c), [s.c.col1, s.corresponding_column(c2)])
def test_from_list_deferred_constructor(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
s = select([c1])
t = Table("t", MetaData(), c1, c2)
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
self.assert_compile(select([c1]), "SELECT t.c1 FROM t")
self.assert_compile(select([c2]), "SELECT t.c2 FROM t")
def test_from_list_deferred_whereclause(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
s = select([c1]).where(c1 == 5)
t = Table("t", MetaData(), c1, c2)
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
self.assert_compile(select([c1]), "SELECT t.c1 FROM t")
self.assert_compile(select([c2]), "SELECT t.c2 FROM t")
def test_from_list_deferred_fromlist(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer))
c1 = Column("c1", Integer)
s = select([c1]).where(c1 == 5).select_from(t1)
t2 = Table("t2", MetaData(), c1)
eq_(c1._from_objects, [t2])
self.assert_compile(select([c1]), "SELECT t2.c1 FROM t2")
def test_from_list_deferred_cloning(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
s = select([c1])
s2 = select([c2])
s3 = sql_util.ClauseAdapter(s).traverse(s2)
Table("t", MetaData(), c1, c2)
self.assert_compile(s3, "SELECT t.c2 FROM t")
def test_from_list_with_columns(self):
table1 = table("t1", column("a"))
table2 = table("t2", column("b"))
s1 = select([table1.c.a, table2.c.b])
self.assert_compile(s1, "SELECT t1.a, t2.b FROM t1, t2")
s2 = s1.with_only_columns([table2.c.b])
self.assert_compile(s2, "SELECT t2.b FROM t2")
s3 = sql_util.ClauseAdapter(table1).traverse(s1)
self.assert_compile(s3, "SELECT t1.a, t2.b FROM t1, t2")
s4 = s3.with_only_columns([table2.c.b])
self.assert_compile(s4, "SELECT t2.b FROM t2")
def test_from_list_warning_against_existing(self):
c1 = Column("c1", Integer)
s = select([c1])
# force a compile.
self.assert_compile(s, "SELECT c1")
Table("t", MetaData(), c1)
self.assert_compile(s, "SELECT t.c1 FROM t")
def test_from_list_recovers_after_warning(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
s = select([c1])
# force a compile.
eq_(str(s), "SELECT c1")
@testing.emits_warning()
def go():
return Table("t", MetaData(), c1, c2)
t = go()
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
# 's' has been baked. Can't afford
# not caching select._froms.
# hopefully the warning will clue the user
self.assert_compile(s, "SELECT t.c1 FROM t")
self.assert_compile(select([c1]), "SELECT t.c1 FROM t")
self.assert_compile(select([c2]), "SELECT t.c2 FROM t")
def test_label_gen_resets_on_table(self):
c1 = Column("c1", Integer)
eq_(c1._label, "c1")
Table("t1", MetaData(), c1)
eq_(c1._label, "t1_c1")
class RefreshForNewColTest(fixtures.TestBase):
def test_join_uninit(self):
a = table("a", column("x"))
b = table("b", column("y"))
j = a.join(b, a.c.x == b.c.y)
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_join_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
j = a.join(b, a.c.x == b.c.y)
j.c
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_join_samename_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
j = a.join(b, a.c.x == b.c.y)
j.c
q = column("x")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_x is q
def test_select_samename_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
s = select([a, b]).apply_labels()
s.c
q = column("x")
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_samename_uninit(self):
a = table("a", column("x"))
b = table("b", column("y"))
s = select([a, b]).apply_labels().alias()
q = column("x")
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_samename_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
s = select([a, b]).apply_labels().alias()
s.c
q = column("x")
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_irrelevant(self):
a = table("a", column("x"))
b = table("b", column("y"))
c = table("c", column("z"))
s = select([a, b]).apply_labels().alias()
s.c
q = column("x")
c.append_column(q)
s._refresh_for_new_column(q)
assert "c_x" not in s.c
def test_aliased_select_no_cols_clause(self):
a = table("a", column("x"))
s = select([a.c.x]).apply_labels().alias()
s.c
q = column("q")
a.append_column(q)
s._refresh_for_new_column(q)
assert "a_q" not in s.c
def test_union_uninit(self):
a = table("a", column("x"))
s1 = select([a])
s2 = select([a])
s3 = s1.union(s2)
q = column("q")
a.append_column(q)
s3._refresh_for_new_column(q)
assert a.c.q in s3.c.q.proxy_set
def test_union_init_raises(self):
a = table("a", column("x"))
s1 = select([a])
s2 = select([a])
s3 = s1.union(s2)
s3.c
q = column("q")
a.append_column(q)
assert_raises_message(
NotImplementedError,
"CompoundSelect constructs don't support addition of "
"columns to underlying selectables",
s3._refresh_for_new_column,
q,
)
def test_nested_join_uninit(self):
a = table("a", column("x"))
b = table("b", column("y"))
c = table("c", column("z"))
j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_nested_join_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
c = table("c", column("z"))
j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
j.c
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_fk_table(self):
m = MetaData()
fk = ForeignKey("x.id")
Table("x", m, Column("id", Integer))
a = Table("a", m, Column("x", Integer, fk))
a.c
q = Column("q", Integer)
a.append_column(q)
a._refresh_for_new_column(q)
eq_(a.foreign_keys, set([fk]))
fk2 = ForeignKey("g.id")
p = Column("p", Integer, fk2)
a.append_column(p)
a._refresh_for_new_column(p)
eq_(a.foreign_keys, set([fk, fk2]))
def test_fk_join(self):
m = MetaData()
fk = ForeignKey("x.id")
Table("x", m, Column("id", Integer))
a = Table("a", m, Column("x", Integer, fk))
b = Table("b", m, Column("y", Integer))
j = a.join(b, a.c.x == b.c.y)
j.c
q = Column("q", Integer)
b.append_column(q)
j._refresh_for_new_column(q)
eq_(j.foreign_keys, set([fk]))
fk2 = ForeignKey("g.id")
p = Column("p", Integer, fk2)
b.append_column(p)
j._refresh_for_new_column(p)
eq_(j.foreign_keys, set([fk, fk2]))
class AnonLabelTest(fixtures.TestBase):
"""Test behaviors fixed by [ticket:2168]."""
def test_anon_labels_named_column(self):
c1 = column("x")
assert c1.label(None) is not c1
eq_(str(select([c1.label(None)])), "SELECT x AS x_1")
def test_anon_labels_literal_column(self):
c1 = literal_column("x")
assert c1.label(None) is not c1
eq_(str(select([c1.label(None)])), "SELECT x AS x_1")
def test_anon_labels_func(self):
c1 = func.count("*")
assert c1.label(None) is not c1
eq_(str(select([c1])), "SELECT count(:count_2) AS count_1")
c2 = select([c1]).compile()
eq_(str(select([c1.label(None)])), "SELECT count(:count_2) AS count_1")
def test_named_labels_named_column(self):
c1 = column("x")
eq_(str(select([c1.label("y")])), "SELECT x AS y")
def test_named_labels_literal_column(self):
c1 = literal_column("x")
eq_(str(select([c1.label("y")])), "SELECT x AS y")
class JoinAliasingTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_flat_ok_on_non_join(self):
a = table("a", column("a"))
s = a.select()
self.assert_compile(
s.alias(flat=True).select(),
"SELECT anon_1.a FROM (SELECT a.a AS a FROM a) AS anon_1",
)
def test_join_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b).alias(),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b",
)
def test_join_standalone_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
alias(a.join(b, a.c.a == b.c.b)),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b",
)
def test_join_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b).alias(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b",
)
def test_join_standalone_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
alias(a.join(b, a.c.a == b.c.b), flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b",
)
def test_composed_join_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
d = table("d", column("d"))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
self.assert_compile(
j1.join(j2, b.c.b == c.c.c).alias(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b JOIN "
"(c AS c_1 JOIN d AS d_1 ON c_1.c = d_1.d) ON b_1.b = c_1.c",
)
def test_composed_join_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
d = table("d", column("d"))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
self.assert_compile(
select([j1.join(j2, b.c.b == c.c.c).alias()]),
"SELECT anon_1.a_a, anon_1.b_b, anon_1.c_c, anon_1.d_d "
"FROM (SELECT a.a AS a_a, b.b AS b_b, c.c AS c_c, d.d AS d_d "
"FROM a JOIN b ON a.a = b.b "
"JOIN (c JOIN d ON c.c = d.d) ON b.b = c.c) AS anon_1",
)
class JoinConditionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_join_condition(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t4 = Table(
"t4", m, Column("id", Integer), Column("t2id", ForeignKey("t2.id"))
)
t5 = Table(
"t5",
m,
Column("t1id1", ForeignKey("t1.id")),
Column("t1id2", ForeignKey("t1.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
for (left, right, a_subset, expected) in [
(t1, t2, None, t1.c.id == t2.c.t1id),
(t1t2, t3, t2, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t3, t1.c.id == t3.c.t1id),
(t2t3, t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t3, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t1, t2t3.c.t2_id == t4.c.t2id),
(t1t2, t2t3, t2, t1t2.c.t2_id == t2t3.c.t3_t2id),
]:
assert expected.compare(
sql_util.join_condition(left, right, a_subset=a_subset)
)
# these are ambiguous, or have no joins
for left, right, a_subset in [
(t1t2, t3, None),
(t2t3, t1, None),
(t1, t4, None),
(t1t2, t2t3, None),
(t5, t1, None),
(t5.select(use_labels=True), t1, None),
]:
assert_raises(
exc.ArgumentError,
sql_util.join_condition,
left,
right,
a_subset=a_subset,
)
als = t2t3.alias()
# test join's behavior, including natural
for left, right, expected in [
(t1, t2, t1.c.id == t2.c.t1id),
(t1t2, t3, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t1.c.id == t3.c.t1id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t1t2, als, t1t2.c.t2_id == als.c.t3_t2id),
]:
assert expected.compare(left.join(right).onclause)
# these are right-nested joins
j = t1t2.join(t2t3)
assert j.onclause.compare(t2.c.id == t3.c.t2id)
self.assert_compile(
j,
"t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(t2 JOIN t3 ON t2.id = t3.t2id) ON t2.id = t3.t2id",
)
st2t3 = t2t3.select(use_labels=True)
j = t1t2.join(st2t3)
assert j.onclause.compare(t2.c.id == st2t3.c.t3_t2id)
self.assert_compile(
j,
"t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(SELECT t2.id AS t2_id, t2.t1id AS t2_t1id, "
"t3.id AS t3_id, t3.t1id AS t3_t1id, t3.t2id AS t3_t2id "
"FROM t2 JOIN t3 ON t2.id = t3.t2id) ON t2.id = t3_t2id",
)
def test_join_multiple_equiv_fks(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m,
Column("t1id", Integer, ForeignKey("t1.id"), ForeignKey("t1.id")),
)
assert sql_util.join_condition(t1, t2).compare(t1.c.id == t2.c.t1id)
def test_join_cond_no_such_unrelated_table(self):
m = MetaData()
        # the "good" column is bounded by two "bad" ones here so that the
        # "continue" statements in the loop get coverage
t1 = Table(
"t1",
m,
Column("y", Integer, ForeignKey("t22.id")),
Column("x", Integer, ForeignKey("t2.id")),
Column("q", Integer, ForeignKey("t22.id")),
)
t2 = Table("t2", m, Column("id", Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_unrelated_column(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, ForeignKey("t2.id")),
Column("y", Integer, ForeignKey("t3.q")),
)
t2 = Table("t2", m, Column("id", Integer))
Table("t3", m, Column("id", Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_related_table(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table("t1", m1, Column("x", Integer, ForeignKey("t2.id")))
t2 = Table("t2", m2, Column("id", Integer))
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition,
t1,
t2,
)
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition,
t2,
t1,
)
def test_join_cond_no_such_related_column(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer, ForeignKey("t2.q")))
t2 = Table("t2", m, Column("id", Integer))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition,
t1,
t2,
)
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition,
t2,
t1,
)
class PrimaryKeyTest(fixtures.TestBase, AssertsExecutionResults):
def test_join_pk_collapse_implicit(self):
"""test that redundant columns in a join get 'collapsed' into a
minimal primary key, which is the root column along a chain of
foreign key relationships."""
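        # e.g. with the FK chain d.id -> c.id -> b.id -> a.id built below,
        # a.join(b).join(c).join(d) exposes a single primary key column: a.c.id.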
meta = MetaData()
a = Table("a", meta, Column("id", Integer, primary_key=True))
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
)
c = Table(
"c",
meta,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
)
d = Table(
"d",
meta,
Column("id", Integer, ForeignKey("c.id"), primary_key=True),
)
assert c.c.id.references(b.c.id)
assert not d.c.id.references(a.c.id)
assert list(a.join(b).primary_key) == [a.c.id]
assert list(b.join(c).primary_key) == [b.c.id]
assert list(a.join(b).join(c).primary_key) == [a.c.id]
assert list(b.join(c).join(d).primary_key) == [b.c.id]
assert list(d.join(c).join(b).primary_key) == [b.c.id]
assert list(a.join(b).join(c).join(d).primary_key) == [a.c.id]
def test_join_pk_collapse_explicit(self):
"""test that redundant columns in a join get 'collapsed' into a
minimal primary key, which is the root column along a chain of
explicit join conditions."""
meta = MetaData()
a = Table(
"a",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("x", Integer),
)
c = Table(
"c",
meta,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
Column("x", Integer),
)
d = Table(
"d",
meta,
Column("id", Integer, ForeignKey("c.id"), primary_key=True),
Column("x", Integer),
)
print(list(a.join(b, a.c.x == b.c.id).primary_key))
assert list(a.join(b, a.c.x == b.c.id).primary_key) == [a.c.id]
assert list(b.join(c, b.c.x == c.c.id).primary_key) == [b.c.id]
assert list(a.join(b).join(c, c.c.id == b.c.x).primary_key) == [a.c.id]
assert list(b.join(c, c.c.x == b.c.id).join(d).primary_key) == [b.c.id]
assert list(b.join(c, c.c.id == b.c.x).join(d).primary_key) == [b.c.id]
assert list(
d.join(b, d.c.id == b.c.id).join(c, b.c.id == c.c.x).primary_key
) == [b.c.id]
assert list(
a.join(b).join(c, c.c.id == b.c.x).join(d).primary_key
) == [a.c.id]
assert list(
a.join(b, and_(a.c.id == b.c.id, a.c.x == b.c.id)).primary_key
) == [a.c.id]
def test_init_doesnt_blowitaway(self):
meta = MetaData()
a = Table(
"a",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("x", Integer),
)
j = a.join(b)
assert list(j.primary_key) == [a.c.id]
j.foreign_keys
assert list(j.primary_key) == [a.c.id]
def test_non_column_clause(self):
meta = MetaData()
a = Table(
"a",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("x", Integer, primary_key=True),
)
j = a.join(b, and_(a.c.id == b.c.id, b.c.x == 5))
assert str(j) == "a JOIN b ON a.id = b.id AND b.x = :x_1", str(j)
assert list(j.primary_key) == [a.c.id, b.c.x]
def test_onclause_direction(self):
metadata = MetaData()
employee = Table(
"Employee",
metadata,
Column("name", String(100)),
Column("id", Integer, primary_key=True),
)
engineer = Table(
"Engineer",
metadata,
Column("id", Integer, ForeignKey("Employee.id"), primary_key=True),
)
eq_(
util.column_set(
employee.join(
engineer, employee.c.id == engineer.c.id
).primary_key
),
util.column_set([employee.c.id]),
)
eq_(
util.column_set(
employee.join(
engineer, engineer.c.id == employee.c.id
).primary_key
),
util.column_set([employee.c.id]),
)
class ReduceTest(fixtures.TestBase, AssertsExecutionResults):
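    # Editor's sketch (not part of the original module; column names here are
    # hypothetical): sql_util.reduce_columns() takes a sequence of columns and drops
    # those that are redundant with an earlier one, where "redundant" means equated
    # via a foreign key reference or via an equality in an optional WHERE clause.
    # Roughly, if t2.t1_id references t1.id:
    #
    #     reduce_columns([t1.c.id, t1.c.data, t2.c.t1_id, t2.c.data])
    #     # -> [t1.c.id, t1.c.data, t2.c.data]; t2.t1_id folds into t1.id
    #
    # The tests below exercise the real behavior against concrete schemas.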
def test_reduce(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("t1id", Integer, primary_key=True),
Column("t1data", String(30)),
)
t2 = Table(
"t2",
meta,
Column("t2id", Integer, ForeignKey("t1.t1id"), primary_key=True),
Column("t2data", String(30)),
)
t3 = Table(
"t3",
meta,
Column("t3id", Integer, ForeignKey("t2.t2id"), primary_key=True),
Column("t3data", String(30)),
)
eq_(
util.column_set(
sql_util.reduce_columns(
[
t1.c.t1id,
t1.c.t1data,
t2.c.t2id,
t2.c.t2data,
t3.c.t3id,
t3.c.t3data,
]
)
),
util.column_set(
[t1.c.t1id, t1.c.t1data, t2.c.t2data, t3.c.t3data]
),
)
def test_reduce_selectable(self):
metadata = MetaData()
engineers = Table(
"engineers",
metadata,
Column("engineer_id", Integer, primary_key=True),
Column("engineer_name", String(50)),
)
managers = Table(
"managers",
metadata,
Column("manager_id", Integer, primary_key=True),
Column("manager_name", String(50)),
)
s = select([engineers, managers]).where(
engineers.c.engineer_name == managers.c.manager_name
)
eq_(
util.column_set(sql_util.reduce_columns(list(s.c), s)),
util.column_set(
[s.c.engineer_id, s.c.engineer_name, s.c.manager_id]
),
)
def test_reduce_generation(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t2 = Table(
"t2",
m,
Column("z", Integer, ForeignKey("t1.x")),
Column("q", Integer),
)
s1 = select([t1, t2])
s2 = s1.reduce_columns(only_synonyms=False)
eq_(set(s2.inner_columns), set([t1.c.x, t1.c.y, t2.c.q]))
s2 = s1.reduce_columns()
eq_(set(s2.inner_columns), set([t1.c.x, t1.c.y, t2.c.z, t2.c.q]))
def test_reduce_only_synonym_fk(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t2 = Table(
"t2",
m,
Column("x", Integer, ForeignKey("t1.x")),
Column("q", Integer, ForeignKey("t1.y")),
)
s1 = select([t1, t2])
s1 = s1.reduce_columns(only_synonyms=True)
eq_(set(s1.c), set([s1.c.x, s1.c.y, s1.c.q]))
def test_reduce_only_synonym_lineage(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
Column("z", Integer),
)
# test that the first appearance in the columns clause
# wins - t1 is first, t1.c.x wins
s1 = select([t1])
s2 = select([t1, s1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().inner_columns),
set([t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]),
)
# reverse order, s1.c.x wins
s1 = select([t1])
s2 = select([s1, t1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().inner_columns),
set([s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]),
)
def test_reduce_aliased_join(self):
metadata = MetaData()
people = Table(
"people",
metadata,
Column(
"person_id",
Integer,
Sequence("person_id_seq", optional=True),
primary_key=True,
),
Column("name", String(50)),
Column("type", String(30)),
)
engineers = Table(
"engineers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("engineer_name", String(50)),
Column("primary_language", String(50)),
)
managers = Table(
"managers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("manager_name", String(50)),
)
pjoin = (
people.outerjoin(engineers)
.outerjoin(managers)
.select(use_labels=True)
.alias("pjoin")
)
eq_(
util.column_set(
sql_util.reduce_columns(
[
pjoin.c.people_person_id,
pjoin.c.engineers_person_id,
pjoin.c.managers_person_id,
]
)
),
util.column_set([pjoin.c.people_person_id]),
)
def test_reduce_aliased_union(self):
metadata = MetaData()
item_table = Table(
"item",
metadata,
Column(
"id", Integer, ForeignKey("base_item.id"), primary_key=True
),
Column("dummy", Integer, default=0),
)
base_item_table = Table(
"base_item",
metadata,
Column("id", Integer, primary_key=True),
Column("child_name", String(255), default=None),
)
from sqlalchemy.orm.util import polymorphic_union
item_join = polymorphic_union(
{
"BaseItem": base_item_table.select(
base_item_table.c.child_name == "BaseItem"
),
"Item": base_item_table.join(item_table),
},
None,
"item_join",
)
eq_(
util.column_set(
sql_util.reduce_columns(
[item_join.c.id, item_join.c.dummy, item_join.c.child_name]
)
),
util.column_set(
[item_join.c.id, item_join.c.dummy, item_join.c.child_name]
),
)
def test_reduce_aliased_union_2(self):
metadata = MetaData()
page_table = Table(
"page", metadata, Column("id", Integer, primary_key=True)
)
magazine_page_table = Table(
"magazine_page",
metadata,
Column(
"page_id", Integer, ForeignKey("page.id"), primary_key=True
),
)
classified_page_table = Table(
"classified_page",
metadata,
Column(
"magazine_page_id",
Integer,
ForeignKey("magazine_page.page_id"),
primary_key=True,
),
)
# this is essentially the union formed by the ORM's
# polymorphic_union function. we define two versions with
# different ordering of selects.
#
# the first selectable has the "real" column
# classified_page.magazine_page_id
pjoin = union(
select(
[
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id,
]
).select_from(
page_table.join(magazine_page_table).join(
classified_page_table
)
),
select(
[
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label("magazine_page_id"),
]
).select_from(page_table.join(magazine_page_table)),
).alias("pjoin")
eq_(
util.column_set(
sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id]
)
),
util.column_set([pjoin.c.id]),
)
# the first selectable has a CAST, which is a placeholder for
# classified_page.magazine_page_id in the second selectable.
# reduce_columns needs to take into account all foreign keys
# derived from pjoin.c.magazine_page_id. the UNION construct
# currently makes the external column look like that of the
# first selectable only.
pjoin = union(
select(
[
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label("magazine_page_id"),
]
).select_from(page_table.join(magazine_page_table)),
select(
[
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id,
]
).select_from(
page_table.join(magazine_page_table).join(
classified_page_table
)
),
).alias("pjoin")
eq_(
util.column_set(
sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id]
)
),
util.column_set([pjoin.c.id]),
)
class DerivedTest(fixtures.TestBase, AssertsExecutionResults):
def test_table(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.is_derived_from(t1)
assert not t2.is_derived_from(t1)
def test_alias(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.alias().is_derived_from(t1)
assert not t2.alias().is_derived_from(t1)
assert not t1.is_derived_from(t1.alias())
assert not t1.is_derived_from(t2.alias())
def test_select(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.select().is_derived_from(t1)
assert not t2.select().is_derived_from(t1)
assert select([t1, t2]).is_derived_from(t1)
assert t1.select().alias("foo").is_derived_from(t1)
assert select([t1, t2]).alias("foo").is_derived_from(t1)
assert not t2.select().alias("foo").is_derived_from(t1)
class AnnotationsTest(fixtures.TestBase):
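    # Editor's note (informal summary; internals described here are assumptions
    # based on observed behavior): ClauseElement._annotate({...}) returns a copy of
    # the element carrying an ._annotations dict, which the ORM uses to tag columns
    # and selectables without mutating the originals.  The tests below assert that
    # annotated copies still hash and compare as equivalent to the plain elements.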
def test_hashing(self):
t = table("t", column("x"))
a = t.alias()
s = t.select()
s2 = a.select()
for obj in [t, t.c.x, a, s, s2, t.c.x > 1, (t.c.x > 1).label(None)]:
annot = obj._annotate({})
eq_(set([obj]), set([annot]))
def test_compare(self):
t = table("t", column("x"), column("y"))
x_a = t.c.x._annotate({})
assert t.c.x.compare(x_a)
assert x_a.compare(t.c.x)
assert not x_a.compare(t.c.y)
assert not t.c.y.compare(x_a)
assert (t.c.x == 5).compare(x_a == 5)
assert not (t.c.y == 5).compare(x_a == 5)
s = select([t])
x_p = s.c.x
assert not x_a.compare(x_p)
assert not t.c.x.compare(x_p)
x_p_a = x_p._annotate({})
assert x_p_a.compare(x_p)
assert x_p.compare(x_p_a)
assert not x_p_a.compare(x_a)
def test_late_name_add(self):
from sqlalchemy.schema import Column
c1 = Column(Integer)
c1_a = c1._annotate({"foo": "bar"})
c1.name = "somename"
eq_(c1_a.name, "somename")
def test_late_table_add(self):
c1 = Column("foo", Integer)
c1_a = c1._annotate({"foo": "bar"})
t = Table("t", MetaData(), c1)
is_(c1_a.table, t)
def test_basic_attrs(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, info={"q": "p"}),
Column("y", Integer, key="q"),
)
x_a = t.c.x._annotate({})
y_a = t.c.q._annotate({})
t.c.x.info["z"] = "h"
eq_(y_a.key, "q")
is_(x_a.table, t)
eq_(x_a.info, {"q": "p", "z": "h"})
eq_(t.c.x.anon_label, x_a.anon_label)
def test_custom_constructions(self):
from sqlalchemy.schema import Column
class MyColumn(Column):
def __init__(self):
Column.__init__(self, "foo", Integer)
_constructor = Column
t1 = Table("t1", MetaData(), MyColumn())
s1 = t1.select()
assert isinstance(t1.c.foo, MyColumn)
assert isinstance(s1.c.foo, Column)
annot_1 = t1.c.foo._annotate({})
s2 = select([annot_1])
assert isinstance(s2.c.foo, Column)
annot_2 = s1._annotate({})
assert isinstance(annot_2.c.foo, Column)
def test_custom_construction_correct_anno_subclass(self):
# [ticket:2918]
from sqlalchemy.schema import Column
from sqlalchemy.sql.elements import AnnotatedColumnElement
class MyColumn(Column):
pass
assert isinstance(
MyColumn("x", Integer)._annotate({"foo": "bar"}),
AnnotatedColumnElement,
)
def test_custom_construction_correct_anno_expr(self):
# [ticket:2918]
from sqlalchemy.schema import Column
class MyColumn(Column):
pass
col = MyColumn("x", Integer)
binary_1 = col == 5
col_anno = MyColumn("x", Integer)._annotate({"foo": "bar"})
binary_2 = col_anno == 5
eq_(binary_2.left._annotations, {"foo": "bar"})
def test_annotated_corresponding_column(self):
table1 = table("table1", column("col1"))
s1 = select([table1.c.col1])
t1 = s1._annotate({})
t2 = s1
# t1 needs to share the same _make_proxy() columns as t2, even
# though it's annotated. otherwise paths will diverge once they
# are corresponded against "inner" below.
assert t1.c is t2.c
assert t1.c.col1 is t2.c.col1
inner = select([s1])
assert (
inner.corresponding_column(t2.c.col1, require_embedded=False)
is inner.corresponding_column(t2.c.col1, require_embedded=True)
is inner.c.col1
)
assert (
inner.corresponding_column(t1.c.col1, require_embedded=False)
is inner.corresponding_column(t1.c.col1, require_embedded=True)
is inner.c.col1
)
def test_annotated_visit(self):
table1 = table("table1", column("col1"), column("col2"))
bin_ = table1.c.col1 == bindparam("foo", value=None)
assert str(bin_) == "table1.col1 = :foo"
def visit_binary(b):
b.right = table1.c.col2
b2 = visitors.cloned_traverse(bin_, {}, {"binary": visit_binary})
assert str(b2) == "table1.col1 = table1.col2"
b3 = visitors.cloned_traverse(
bin_._annotate({}), {}, {"binary": visit_binary}
)
assert str(b3) == "table1.col1 = table1.col2"
def visit_binary(b):
b.left = bindparam("bar")
b4 = visitors.cloned_traverse(b2, {}, {"binary": visit_binary})
assert str(b4) == ":bar = table1.col2"
b5 = visitors.cloned_traverse(b3, {}, {"binary": visit_binary})
assert str(b5) == ":bar = table1.col2"
def test_label_accessors(self):
t1 = table("t1", column("c1"))
l1 = t1.c.c1.label(None)
is_(l1._order_by_label_element, l1)
l1a = l1._annotate({"foo": "bar"})
is_(l1a._order_by_label_element, l1a)
def test_annotate_aliased(self):
t1 = table("t1", column("c1"))
s = select([(t1.c.c1 + 3).label("bat")])
a = s.alias()
a = sql_util._deep_annotate(a, {"foo": "bar"})
eq_(a._annotations["foo"], "bar")
eq_(a.element._annotations["foo"], "bar")
def test_annotate_expressions(self):
table1 = table("table1", column("col1"), column("col2"))
for expr, expected in [
(table1.c.col1, "table1.col1"),
(table1.c.col1 == 5, "table1.col1 = :col1_1"),
(
table1.c.col1.in_([2, 3, 4]),
"table1.col1 IN (:col1_1, :col1_2, " ":col1_3)",
),
]:
eq_(str(expr), expected)
eq_(str(expr._annotate({})), expected)
eq_(str(sql_util._deep_annotate(expr, {})), expected)
eq_(
str(
sql_util._deep_annotate(expr, {}, exclude=[table1.c.col1])
),
expected,
)
def test_deannotate(self):
table1 = table("table1", column("col1"), column("col2"))
bin_ = table1.c.col1 == bindparam("foo", value=None)
b2 = sql_util._deep_annotate(bin_, {"_orm_adapt": True})
b3 = sql_util._deep_deannotate(b2)
b4 = sql_util._deep_deannotate(bin_)
for elem in (b2._annotations, b2.left._annotations):
assert "_orm_adapt" in elem
for elem in (
b3._annotations,
b3.left._annotations,
b4._annotations,
b4.left._annotations,
):
assert elem == {}
assert b2.left is not bin_.left
assert b3.left is not b2.left and b2.left is not bin_.left
assert b4.left is bin_.left # since column is immutable
# deannotate copies the element
assert (
bin_.right is not b2.right
and b2.right is not b3.right
and b3.right is not b4.right
)
def test_annotate_unique_traversal(self):
"""test that items are copied only once during
annotate, deannotate traversal
#2453 - however note this was modified by
#1401, and it's likely that re49563072578
is helping us with the str() comparison
case now, as deannotate is making
clones again in some cases.
"""
table1 = table("table1", column("x"))
table2 = table("table2", column("y"))
a1 = table1.alias()
s = select([a1.c.x]).select_from(a1.join(table2, a1.c.x == table2.c.y))
for sel in (
sql_util._deep_deannotate(s),
visitors.cloned_traverse(s, {}, {}),
visitors.replacement_traverse(s, {}, lambda x: None),
):
# the columns clause isn't changed at all
assert sel._raw_columns[0].table is a1
assert sel._froms[0] is sel._froms[1].left
eq_(str(s), str(sel))
# when we are modifying annotations sets only
# partially, each element is copied unconditionally
# when encountered.
for sel in (
sql_util._deep_deannotate(s, {"foo": "bar"}),
sql_util._deep_annotate(s, {"foo": "bar"}),
):
assert sel._froms[0] is not sel._froms[1].left
# but things still work out due to
# re49563072578
eq_(str(s), str(sel))
def test_annotate_varied_annot_same_col(self):
"""test two instances of the same column with different annotations
preserving them when deep_annotate is run on them.
"""
t1 = table("table1", column("col1"), column("col2"))
s = select([t1.c.col1._annotate({"foo": "bar"})])
s2 = select([t1.c.col1._annotate({"bat": "hoho"})])
s3 = s.union(s2)
sel = sql_util._deep_annotate(s3, {"new": "thing"})
eq_(
sel.selects[0]._raw_columns[0]._annotations,
{"foo": "bar", "new": "thing"},
)
eq_(
sel.selects[1]._raw_columns[0]._annotations,
{"bat": "hoho", "new": "thing"},
)
def test_deannotate_2(self):
table1 = table("table1", column("col1"), column("col2"))
j = table1.c.col1._annotate(
{"remote": True}
) == table1.c.col2._annotate({"local": True})
j2 = sql_util._deep_deannotate(j)
eq_(j.left._annotations, {"remote": True})
eq_(j2.left._annotations, {})
def test_deannotate_3(self):
table1 = table(
"table1",
column("col1"),
column("col2"),
column("col3"),
column("col4"),
)
j = and_(
table1.c.col1._annotate({"remote": True})
== table1.c.col2._annotate({"local": True}),
table1.c.col3._annotate({"remote": True})
== table1.c.col4._annotate({"local": True}),
)
j2 = sql_util._deep_deannotate(j)
eq_(j.clauses[0].left._annotations, {"remote": True})
eq_(j2.clauses[0].left._annotations, {})
def test_annotate_fromlist_preservation(self):
"""test the FROM list in select still works
even when multiple annotate runs have created
copies of the same selectable
#2453, continued
"""
table1 = table("table1", column("x"))
table2 = table("table2", column("y"))
a1 = table1.alias()
s = select([a1.c.x]).select_from(a1.join(table2, a1.c.x == table2.c.y))
assert_s = select([select([s])])
for fn in (
sql_util._deep_deannotate,
lambda s: sql_util._deep_annotate(s, {"foo": "bar"}),
lambda s: visitors.cloned_traverse(s, {}, {}),
lambda s: visitors.replacement_traverse(s, {}, lambda x: None),
):
sel = fn(select([fn(select([fn(s)]))]))
eq_(str(assert_s), str(sel))
def test_bind_unique_test(self):
table("t", column("a"), column("b"))
b = bindparam("bind", value="x", unique=True)
# the annotation of "b" should render the
# same. The "unique" test in compiler should
# also pass, [ticket:2425]
eq_(str(or_(b, b._annotate({"foo": "bar"}))), ":bind_1 OR :bind_1")
def test_comparators_cleaned_out_construction(self):
c = column("a")
comp1 = c.comparator
c1 = c._annotate({"foo": "bar"})
comp2 = c1.comparator
assert comp1 is not comp2
def test_comparators_cleaned_out_reannotate(self):
c = column("a")
c1 = c._annotate({"foo": "bar"})
comp1 = c1.comparator
c2 = c1._annotate({"bat": "hoho"})
comp2 = c2.comparator
assert comp1 is not comp2
def test_comparator_cleanout_integration(self):
c = column("a")
c1 = c._annotate({"foo": "bar"})
comp1 = c1.comparator
c2 = c1._annotate({"bat": "hoho"})
comp2 = c2.comparator
assert (c2 == 5).left._annotations == {"foo": "bar", "bat": "hoho"}
class ReprTest(fixtures.TestBase):
def test_ensure_repr_elements(self):
for obj in [
elements.Cast(1, 2),
elements.TypeClause(String()),
elements.ColumnClause("x"),
elements.BindParameter("q"),
elements.Null(),
elements.True_(),
elements.False_(),
elements.ClauseList(),
elements.BooleanClauseList.and_(),
elements.Tuple(),
elements.Case([]),
elements.Extract("foo", column("x")),
elements.UnaryExpression(column("x")),
elements.Grouping(column("x")),
elements.Over(func.foo()),
elements.Label("q", column("x")),
]:
repr(obj)
class WithLabelsTest(fixtures.TestBase):
def _assert_labels_warning(self, s):
assert_raises_message(
exc.SAWarning,
r"replaced by Column.*, which has the same key",
lambda: s.c,
)
def _assert_result_keys(self, s, keys):
compiled = s.compile()
eq_(set(compiled._create_result_map()), set(keys))
def _assert_subq_result_keys(self, s, keys):
compiled = s.select().compile()
eq_(set(compiled._create_result_map()), set(keys))
def _names_overlap(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer))
t2 = Table("t2", m, Column("x", Integer))
return select([t1, t2])
def test_names_overlap_nolabel(self):
sel = self._names_overlap()
self._assert_labels_warning(sel)
self._assert_result_keys(sel, ["x"])
def test_names_overlap_label(self):
sel = self._names_overlap().apply_labels()
eq_(list(sel.c.keys()), ["t1_x", "t2_x"])
self._assert_result_keys(sel, ["t1_x", "t2_x"])
def _names_overlap_keys_dont(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer, key="a"))
t2 = Table("t2", m, Column("x", Integer, key="b"))
return select([t1, t2])
def test_names_overlap_keys_dont_nolabel(self):
sel = self._names_overlap_keys_dont()
eq_(list(sel.c.keys()), ["a", "b"])
self._assert_result_keys(sel, ["x"])
def test_names_overlap_keys_dont_label(self):
sel = self._names_overlap_keys_dont().apply_labels()
eq_(list(sel.c.keys()), ["t1_a", "t2_b"])
self._assert_result_keys(sel, ["t1_x", "t2_x"])
def _labels_overlap(self):
m = MetaData()
t1 = Table("t", m, Column("x_id", Integer))
t2 = Table("t_x", m, Column("id", Integer))
return select([t1, t2])
def test_labels_overlap_nolabel(self):
sel = self._labels_overlap()
eq_(list(sel.c.keys()), ["x_id", "id"])
self._assert_result_keys(sel, ["x_id", "id"])
def test_labels_overlap_label(self):
sel = self._labels_overlap().apply_labels()
t2 = sel.froms[1]
eq_(list(sel.c.keys()), ["t_x_id", t2.c.id.anon_label])
self._assert_result_keys(sel, ["t_x_id", "id_1"])
self._assert_subq_result_keys(sel, ["t_x_id", "id_1"])
def _labels_overlap_keylabels_dont(self):
m = MetaData()
t1 = Table("t", m, Column("x_id", Integer, key="a"))
t2 = Table("t_x", m, Column("id", Integer, key="b"))
return select([t1, t2])
def test_labels_overlap_keylabels_dont_nolabel(self):
sel = self._labels_overlap_keylabels_dont()
eq_(list(sel.c.keys()), ["a", "b"])
self._assert_result_keys(sel, ["x_id", "id"])
def test_labels_overlap_keylabels_dont_label(self):
sel = self._labels_overlap_keylabels_dont().apply_labels()
eq_(list(sel.c.keys()), ["t_a", "t_x_b"])
self._assert_result_keys(sel, ["t_x_id", "id_1"])
def _keylabels_overlap_labels_dont(self):
m = MetaData()
t1 = Table("t", m, Column("a", Integer, key="x_id"))
t2 = Table("t_x", m, Column("b", Integer, key="id"))
return select([t1, t2])
def test_keylabels_overlap_labels_dont_nolabel(self):
sel = self._keylabels_overlap_labels_dont()
eq_(list(sel.c.keys()), ["x_id", "id"])
self._assert_result_keys(sel, ["a", "b"])
def test_keylabels_overlap_labels_dont_label(self):
sel = self._keylabels_overlap_labels_dont().apply_labels()
t2 = sel.froms[1]
eq_(list(sel.c.keys()), ["t_x_id", t2.c.id.anon_label])
self._assert_result_keys(sel, ["t_a", "t_x_b"])
self._assert_subq_result_keys(sel, ["t_a", "t_x_b"])
def _keylabels_overlap_labels_overlap(self):
m = MetaData()
t1 = Table("t", m, Column("x_id", Integer, key="x_a"))
t2 = Table("t_x", m, Column("id", Integer, key="a"))
return select([t1, t2])
def test_keylabels_overlap_labels_overlap_nolabel(self):
sel = self._keylabels_overlap_labels_overlap()
eq_(list(sel.c.keys()), ["x_a", "a"])
self._assert_result_keys(sel, ["x_id", "id"])
self._assert_subq_result_keys(sel, ["x_id", "id"])
def test_keylabels_overlap_labels_overlap_label(self):
sel = self._keylabels_overlap_labels_overlap().apply_labels()
t2 = sel.froms[1]
eq_(list(sel.c.keys()), ["t_x_a", t2.c.a.anon_label])
self._assert_result_keys(sel, ["t_x_id", "id_1"])
self._assert_subq_result_keys(sel, ["t_x_id", "id_1"])
def _keys_overlap_names_dont(self):
m = MetaData()
t1 = Table("t1", m, Column("a", Integer, key="x"))
t2 = Table("t2", m, Column("b", Integer, key="x"))
return select([t1, t2])
def test_keys_overlap_names_dont_nolabel(self):
sel = self._keys_overlap_names_dont()
self._assert_labels_warning(sel)
self._assert_result_keys(sel, ["a", "b"])
def test_keys_overlap_names_dont_label(self):
sel = self._keys_overlap_names_dont().apply_labels()
eq_(list(sel.c.keys()), ["t1_x", "t2_x"])
self._assert_result_keys(sel, ["t1_a", "t2_b"])
class ResultMapTest(fixtures.TestBase):
def _fixture(self):
m = MetaData()
t = Table("t", m, Column("x", Integer), Column("y", Integer))
return t
def _mapping(self, stmt):
compiled = stmt.compile()
return dict(
(elem, key)
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
)
def test_select_label_alt_name(self):
t = self._fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = select([l1, l2])
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_label_alt_name(self):
t = self._fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = select([l1, l2]).alias()
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_column(self):
t = self._fixture()
x, y = t.c.x, t.c.y
s = select([x, y]).alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_alias_column_apply_labels(self):
t = self._fixture()
x, y = t.c.x, t.c.y
s = select([x, y]).apply_labels().alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_table_alias_column(self):
t = self._fixture()
x, y = t.c.x, t.c.y
ta = t.alias()
s = select([ta.c.x, ta.c.y])
mapping = self._mapping(s)
assert x not in mapping
def test_select_label_alt_name_table_alias_column(self):
t = self._fixture()
x, y = t.c.x, t.c.y
ta = t.alias()
l1, l2 = ta.c.x.label("a"), ta.c.y.label("b")
s = select([l1, l2])
mapping = self._mapping(s)
assert x not in mapping
assert l1 in mapping
assert ta.c.x not in mapping
def test_column_subquery_exists(self):
t = self._fixture()
s = exists().where(t.c.x == 5).select()
mapping = self._mapping(s)
assert t.c.x not in mapping
eq_(
[type(entry[-1]) for entry in s.compile()._result_columns],
[Boolean],
)
def test_plain_exists(self):
expr = exists([1])
eq_(type(expr.type), Boolean)
eq_(
[
type(entry[-1])
for entry in select([expr]).compile()._result_columns
],
[Boolean],
)
def test_plain_exists_negate(self):
expr = ~exists([1])
eq_(type(expr.type), Boolean)
eq_(
[
type(entry[-1])
for entry in select([expr]).compile()._result_columns
],
[Boolean],
)
def test_plain_exists_double_negate(self):
expr = ~(~exists([1]))
eq_(type(expr.type), Boolean)
eq_(
[
type(entry[-1])
for entry in select([expr]).compile()._result_columns
],
[Boolean],
)
def test_column_subquery_plain(self):
t = self._fixture()
s1 = select([t.c.x]).where(t.c.x > 5).as_scalar()
s2 = select([s1])
mapping = self._mapping(s2)
assert t.c.x not in mapping
assert s1 in mapping
eq_(
[type(entry[-1]) for entry in s2.compile()._result_columns],
[Integer],
)
def test_unary_boolean(self):
s1 = select([not_(True)], use_labels=True)
eq_(
[type(entry[-1]) for entry in s1.compile()._result_columns],
[Boolean],
)
class ForUpdateTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic_clone(self):
t = table("t", column("c"))
s = select([t]).with_for_update(read=True, of=t.c.c)
s2 = visitors.ReplacingCloningVisitor().traverse(s)
assert s2._for_update_arg is not s._for_update_arg
eq_(s2._for_update_arg.read, True)
eq_(s2._for_update_arg.of, [t.c.c])
self.assert_compile(
s2, "SELECT t.c FROM t FOR SHARE OF t", dialect="postgresql"
)
def test_adapt(self):
t = table("t", column("c"))
s = select([t]).with_for_update(read=True, of=t.c.c)
a = t.alias()
s2 = sql_util.ClauseAdapter(a).traverse(s)
eq_(s2._for_update_arg.of, [a.c.c])
self.assert_compile(
s2,
"SELECT t_1.c FROM t AS t_1 FOR SHARE OF t_1",
dialect="postgresql",
)
|
the-stack_106_15055
|
import os
import subprocess
import time
import pytest
import parse
import logging
from cassandra.util import sortedset
from ccmlib import common
from dtest import Tester
from tools.data import rows_to_list
since = pytest.mark.since
logger = logging.getLogger(__name__)
@since('2.2', max_version='3.0.0')
class TestTokenGenerator(Tester):
"""
Basic tools/bin/token-generator test.
Token-generator was removed in CASSANDRA-5261
@jira_ticket CASSANDRA-5261
@jira_ticket CASSANDRA-9300
"""
def call_token_generator(self, install_dir, randomPart, nodes):
executable = os.path.join(install_dir, 'tools', 'bin', 'token-generator')
if common.is_win():
executable += ".bat"
args = [executable]
if randomPart is not None:
if randomPart:
args.append("--random")
else:
args.append("--murmur3")
for n in nodes:
args.append(str(n))
logger.debug('Invoking {}'.format(args))
token_gen_output = subprocess.check_output(args).decode()
lines = token_gen_output.split("\n")
dc_tokens = None
generated_tokens = []
for line in lines:
if line.startswith("DC #"):
if dc_tokens is not None:
                    assert dc_tokens.__len__() > 0, "dc_tokens is empty from token-generator {}".format(args)
generated_tokens.append(dc_tokens)
dc_tokens = []
else:
if line:
m = parse.search('Node #{node_num:d}:{:s}{node_token:d}', line)
assert m, "Line \"{}\" does not match pattern from token-generator {}".format(line is not None, args)
node_num = int(m.named['node_num'])
node_token = int(m.named['node_token'])
dc_tokens.append(node_token)
                    assert node_num == dc_tokens.__len__(), "invalid token count from token-generator {}".format(args)
assert dc_tokens is not None, "No tokens from token-generator {}".format(args)
        assert dc_tokens.__len__() > 0, "No tokens from token-generator {}".format(args)
generated_tokens.append(dc_tokens)
return generated_tokens
def prepare(self, randomPart=None, nodes=1):
cluster = self.cluster
install_dir = cluster.get_install_dir()
generated_tokens = self.call_token_generator(install_dir, randomPart, [nodes])
        if randomPart:
            cluster.set_partitioner("org.apache.cassandra.dht.RandomPartitioner")
        else:
            cluster.set_partitioner("org.apache.cassandra.dht.Murmur3Partitioner")
        # remove these from cluster options - otherwise the node's config would be overridden with cluster._config_options
cluster._config_options.__delitem__('num_tokens')
if self.dtest_config.use_vnodes:
cluster._config_options.__delitem__('initial_token')
assert not cluster.nodelist(), "nodelist() already initialized"
cluster.populate(nodes, use_vnodes=False, tokens=generated_tokens[0]).start()
time.sleep(0.2)
node = cluster.nodelist()[0]
session = self.patient_cql_connection(node)
return generated_tokens, session
def _token_gen_test(self, nodes, randomPart=None):
generated_tokens, session = self.prepare(randomPart, nodes=nodes)
dc_tokens = generated_tokens[0]
tokens = []
local_tokens = rows_to_list(session.execute("SELECT tokens FROM system.local"))[0]
        assert local_tokens.__len__() == 1, "too many tokens for peer"
for tok in local_tokens:
tokens += tok
rows = rows_to_list(session.execute("SELECT tokens FROM system.peers"))
assert rows.__len__() == nodes - 1
for row in rows:
peer_tokens = row[0]
            assert peer_tokens.__len__() == 1, "too many tokens for peer"
for tok in peer_tokens:
tokens.append(tok)
assert tokens.__len__() == dc_tokens.__len__()
for cluster_token in tokens:
tok = int(cluster_token)
            assert dc_tokens.index(tok) >= 0, "token in cluster does not match generated tokens"
def token_gen_def_test(self, nodes=3):
""" Validate token-generator with Murmur3Partitioner with default token-generator behavior """
self._token_gen_test(nodes)
def token_gen_murmur3_test(self, nodes=3):
""" Validate token-generator with Murmur3Partitioner with explicit murmur3 """
self._token_gen_test(nodes, False)
def token_gen_random_test(self, nodes=3):
""" Validate token-generator with Murmur3Partitioner with explicit random """
self._token_gen_test(nodes, True)
dc_nodes_combinations = [
[3, 5],
[3, 5, 5],
[12, 5, 7],
[50, 100, 250],
[100, 100, 100],
[250, 250, 250],
[1000, 1000, 1000],
[2500, 2500, 2500, 2500]
]
def _multi_dc_tokens(self, random=None):
t_min = 0
t_max = 1 << 127
if random is None or not random:
t_min = -1 << 63
t_max = 1 << 63
for dc_nodes in self.dc_nodes_combinations:
all_tokens = sortedset()
node_count = 0
generated_tokens = self.call_token_generator(self.cluster.get_install_dir(), random, dc_nodes)
assert dc_nodes.__len__() == generated_tokens.__len__()
for n in range(0, dc_nodes.__len__()):
nodes = dc_nodes[n]
node_count += nodes
tokens = generated_tokens[n]
assert nodes == tokens.__len__()
for tok in tokens:
                    assert t_min <= tok < t_max, "Generated token %r out of partitioner range %r..%r" % (tok, t_min, t_max - 1)
assert not all_tokens.__contains__(tok), "Duplicate token %r for nodes-counts %r" % (tok, dc_nodes)
all_tokens.add(tok)
assert all_tokens.__len__() == node_count, "Number of tokens %r and number of nodes %r does not match for %r" % (all_tokens.__len__(), node_count, dc_nodes)
def test_multi_dc_tokens_default(self):
self._multi_dc_tokens()
def test_multi_dc_tokens_murmur3(self):
self._multi_dc_tokens(False)
def test_multi_dc_tokens_random(self):
self._multi_dc_tokens(True)
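# Note (illustrative only; the file name below is an assumption): the collected tests
# above can be run individually through pytest, e.g.
#   pytest token_generator_test.py::TestTokenGenerator::test_multi_dc_tokens_murmur3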
|
the-stack_106_15058
|
#!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from .. import models
def parse_args(args):
parser = argparse.ArgumentParser(description='Script for converting a training model to an inference model.')
parser.add_argument('model_in', help='The model to convert.')
parser.add_argument('model_out', help='Path to save the converted model to.')
parser.add_argument('--backbone', help='The backbone of the model to convert.', default='resnet50')
    parser.add_argument('--no-nms', help='Disables non-maximum suppression.', dest='nms', action='store_false')
return parser.parse_args(args)
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# load and convert model
model = models.load_model(args.model_in, convert=True, backbone=args.backbone, nms=args.nms)
# save model
model.save(args.model_out)
if __name__ == '__main__':
main()
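# Example invocation (a sketch; the file paths are placeholders and --backbone defaults
# to resnet50):
#   python keras_retinanet/bin/convert_model.py /path/to/training_model.h5 /path/to/inference_model.h5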
|
the-stack_106_15060
|
import importlib
import logging
import os
import platform
import re
import socket
import sys
import warnings
from urllib.parse import urlsplit
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.validators import URLValidator
from netbox.config import PARAMS
# Monkey patch to fix Django 4.0 support for graphene-django (see
# https://github.com/graphql-python/graphene-django/issues/1284)
# TODO: Remove this when graphene-django 2.16 becomes available
import django
from django.utils.encoding import force_str
django.utils.encoding.force_text = force_str
#
# Environment setup
#
VERSION = '3.2.1-dev'
# Hostname
HOSTNAME = platform.node()
# Set the base directory two levels up
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Validate Python version
if sys.version_info < (3, 8):
raise RuntimeError(
f"NetBox requires Python 3.8 or later. (Currently installed: Python {platform.python_version()})"
)
#
# Configuration import
#
# Import configuration parameters
config_path = os.getenv('NETBOX_CONFIGURATION', 'netbox.configuration')
try:
configuration = importlib.import_module(config_path)
except ModuleNotFoundError as e:
if getattr(e, 'name') == config_path:
raise ImproperlyConfigured(
f"Specified configuration module ({config_path}) not found. Please define netbox/netbox/configuration.py "
f"per the documentation, or specify an alternate module in the NETBOX_CONFIGURATION environment variable."
)
raise
# Enforce required configuration parameters
for parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:
if not hasattr(configuration, parameter):
raise ImproperlyConfigured(f"Required parameter {parameter} is missing from configuration.")
# Set required parameters
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')
DATABASE = getattr(configuration, 'DATABASE')
REDIS = getattr(configuration, 'REDIS')
SECRET_KEY = getattr(configuration, 'SECRET_KEY')
# Set static config parameters
ADMINS = getattr(configuration, 'ADMINS', [])
AUTH_PASSWORD_VALIDATORS = getattr(configuration, 'AUTH_PASSWORD_VALIDATORS', [])
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
CORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)
CORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])
CORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])
CSRF_TRUSTED_ORIGINS = getattr(configuration, 'CSRF_TRUSTED_ORIGINS', [])
DATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')
DATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')
DEBUG = getattr(configuration, 'DEBUG', False)
DEVELOPER = getattr(configuration, 'DEVELOPER', False)
DOCS_ROOT = getattr(configuration, 'DOCS_ROOT', os.path.join(os.path.dirname(BASE_DIR), 'docs'))
EMAIL = getattr(configuration, 'EMAIL', {})
EXEMPT_VIEW_PERMISSIONS = getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])
FIELD_CHOICES = getattr(configuration, 'FIELD_CHOICES', {})
HTTP_PROXIES = getattr(configuration, 'HTTP_PROXIES', None)
INTERNAL_IPS = getattr(configuration, 'INTERNAL_IPS', ('127.0.0.1', '::1'))
LOGGING = getattr(configuration, 'LOGGING', {})
LOGIN_PERSISTENCE = getattr(configuration, 'LOGIN_PERSISTENCE', False)
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
LOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)
MEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')
METRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)
PLUGINS = getattr(configuration, 'PLUGINS', [])
PLUGINS_CONFIG = getattr(configuration, 'PLUGINS_CONFIG', {})
RELEASE_CHECK_URL = getattr(configuration, 'RELEASE_CHECK_URL', None)
REMOTE_AUTH_AUTO_CREATE_USER = getattr(configuration, 'REMOTE_AUTH_AUTO_CREATE_USER', False)
REMOTE_AUTH_BACKEND = getattr(configuration, 'REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')
REMOTE_AUTH_DEFAULT_GROUPS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_GROUPS', [])
REMOTE_AUTH_DEFAULT_PERMISSIONS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_PERMISSIONS', {})
REMOTE_AUTH_ENABLED = getattr(configuration, 'REMOTE_AUTH_ENABLED', False)
REMOTE_AUTH_HEADER = getattr(configuration, 'REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')
REMOTE_AUTH_GROUP_HEADER = getattr(configuration, 'REMOTE_AUTH_GROUP_HEADER', 'HTTP_REMOTE_USER_GROUP')
REMOTE_AUTH_GROUP_SYNC_ENABLED = getattr(configuration, 'REMOTE_AUTH_GROUP_SYNC_ENABLED', False)
REMOTE_AUTH_SUPERUSER_GROUPS = getattr(configuration, 'REMOTE_AUTH_SUPERUSER_GROUPS', [])
REMOTE_AUTH_SUPERUSERS = getattr(configuration, 'REMOTE_AUTH_SUPERUSERS', [])
REMOTE_AUTH_STAFF_GROUPS = getattr(configuration, 'REMOTE_AUTH_STAFF_GROUPS', [])
REMOTE_AUTH_STAFF_USERS = getattr(configuration, 'REMOTE_AUTH_STAFF_USERS', [])
REMOTE_AUTH_GROUP_SEPARATOR = getattr(configuration, 'REMOTE_AUTH_GROUP_SEPARATOR', '|')
REPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')
RQ_DEFAULT_TIMEOUT = getattr(configuration, 'RQ_DEFAULT_TIMEOUT', 300)
SCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')
SESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)
SESSION_COOKIE_NAME = getattr(configuration, 'SESSION_COOKIE_NAME', 'sessionid')
SHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')
SHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
SHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')
STORAGE_BACKEND = getattr(configuration, 'STORAGE_BACKEND', None)
STORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})
TIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
# Check for hard-coded dynamic config parameters
for param in PARAMS:
if hasattr(configuration, param.name):
globals()[param.name] = getattr(configuration, param.name)
# Validate update repo URL and timeout
if RELEASE_CHECK_URL:
validator = URLValidator(
message=(
"RELEASE_CHECK_URL must be a valid API URL. Example: "
"https://api.github.com/repos/netbox-community/netbox"
)
)
try:
validator(RELEASE_CHECK_URL)
except ValidationError as err:
raise ImproperlyConfigured(str(err))
#
# Database
#
# Only PostgreSQL is supported
if METRICS_ENABLED:
DATABASE.update({
'ENGINE': 'django_prometheus.db.backends.postgresql'
})
else:
DATABASE.update({
'ENGINE': 'django.db.backends.postgresql'
})
DATABASES = {
'default': DATABASE,
}
#
# Media storage
#
if STORAGE_BACKEND is not None:
DEFAULT_FILE_STORAGE = STORAGE_BACKEND
# django-storages
if STORAGE_BACKEND.startswith('storages.'):
try:
import storages.utils
except ModuleNotFoundError as e:
if getattr(e, 'name') == 'storages':
raise ImproperlyConfigured(
f"STORAGE_BACKEND is set to {STORAGE_BACKEND} but django-storages is not present. It can be "
f"installed by running 'pip install django-storages'."
)
raise e
# Monkey-patch django-storages to fetch settings from STORAGE_CONFIG
def _setting(name, default=None):
if name in STORAGE_CONFIG:
return STORAGE_CONFIG[name]
return globals().get(name, default)
storages.utils.setting = _setting
if STORAGE_CONFIG and STORAGE_BACKEND is None:
warnings.warn(
"STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. STORAGE_CONFIG will be "
"ignored."
)
#
# Redis
#
# Background task queuing
if 'tasks' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing the 'tasks' subsection."
)
TASKS_REDIS = REDIS['tasks']
TASKS_REDIS_HOST = TASKS_REDIS.get('HOST', 'localhost')
TASKS_REDIS_PORT = TASKS_REDIS.get('PORT', 6379)
TASKS_REDIS_SENTINELS = TASKS_REDIS.get('SENTINELS', [])
TASKS_REDIS_USING_SENTINEL = all([
isinstance(TASKS_REDIS_SENTINELS, (list, tuple)),
len(TASKS_REDIS_SENTINELS) > 0
])
TASKS_REDIS_SENTINEL_SERVICE = TASKS_REDIS.get('SENTINEL_SERVICE', 'default')
TASKS_REDIS_SENTINEL_TIMEOUT = TASKS_REDIS.get('SENTINEL_TIMEOUT', 10)
TASKS_REDIS_PASSWORD = TASKS_REDIS.get('PASSWORD', '')
TASKS_REDIS_DATABASE = TASKS_REDIS.get('DATABASE', 0)
TASKS_REDIS_SSL = TASKS_REDIS.get('SSL', False)
TASKS_REDIS_SKIP_TLS_VERIFY = TASKS_REDIS.get('INSECURE_SKIP_TLS_VERIFY', False)
# Caching
if 'caching' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing caching subsection."
)
CACHING_REDIS_HOST = REDIS['caching'].get('HOST', 'localhost')
CACHING_REDIS_PORT = REDIS['caching'].get('PORT', 6379)
CACHING_REDIS_DATABASE = REDIS['caching'].get('DATABASE', 0)
CACHING_REDIS_PASSWORD = REDIS['caching'].get('PASSWORD', '')
CACHING_REDIS_SENTINELS = REDIS['caching'].get('SENTINELS', [])
CACHING_REDIS_SENTINEL_SERVICE = REDIS['caching'].get('SENTINEL_SERVICE', 'default')
CACHING_REDIS_PROTO = 'rediss' if REDIS['caching'].get('SSL', False) else 'redis'
CACHING_REDIS_SKIP_TLS_VERIFY = REDIS['caching'].get('INSECURE_SKIP_TLS_VERIFY', False)
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_HOST}:{CACHING_REDIS_PORT}/{CACHING_REDIS_DATABASE}',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': CACHING_REDIS_PASSWORD,
}
}
}
if CACHING_REDIS_SENTINELS:
DJANGO_REDIS_CONNECTION_FACTORY = 'django_redis.pool.SentinelConnectionFactory'
CACHES['default']['LOCATION'] = f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_SENTINEL_SERVICE}/{CACHING_REDIS_DATABASE}'
CACHES['default']['OPTIONS']['CLIENT_CLASS'] = 'django_redis.client.SentinelClient'
CACHES['default']['OPTIONS']['SENTINELS'] = CACHING_REDIS_SENTINELS
if CACHING_REDIS_SKIP_TLS_VERIFY:
CACHES['default']['OPTIONS'].setdefault('CONNECTION_POOL_KWARGS', {})
CACHES['default']['OPTIONS']['CONNECTION_POOL_KWARGS']['ssl_cert_reqs'] = False
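# For reference, a minimal sketch of the REDIS structure this section expects in
# configuration.py (hosts, ports and passwords below are placeholders, not defaults):
#
# REDIS = {
#     'tasks':   {'HOST': 'localhost', 'PORT': 6379, 'PASSWORD': '', 'DATABASE': 0, 'SSL': False},
#     'caching': {'HOST': 'localhost', 'PORT': 6379, 'PASSWORD': '', 'DATABASE': 1, 'SSL': False},
# }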
#
# Sessions
#
if LOGIN_TIMEOUT is not None:
# Django default is 1209600 seconds (14 days)
SESSION_COOKIE_AGE = LOGIN_TIMEOUT
SESSION_SAVE_EVERY_REQUEST = bool(LOGIN_PERSISTENCE)
if SESSION_FILE_PATH is not None:
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
#
# Email
#
EMAIL_HOST = EMAIL.get('SERVER')
EMAIL_HOST_USER = EMAIL.get('USERNAME')
EMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')
EMAIL_PORT = EMAIL.get('PORT', 25)
EMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')
EMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')
EMAIL_SUBJECT_PREFIX = '[NetBox] '
EMAIL_USE_SSL = EMAIL.get('USE_SSL', False)
EMAIL_USE_TLS = EMAIL.get('USE_TLS', False)
EMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)
SERVER_EMAIL = EMAIL.get('FROM_EMAIL')
#
# Django
#
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'corsheaders',
'debug_toolbar',
'graphiql_debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'graphene_django',
'mptt',
'rest_framework',
'social_django',
'taggit',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'tenancy',
'users',
'utilities',
'virtualization',
'wireless',
'django_rq', # Must come after extras to allow overriding management commands
'drf_yasg',
]
# Middleware
MIDDLEWARE = [
'graphiql_debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'netbox.middleware.ExceptionHandlingMiddleware',
'netbox.middleware.RemoteUserMiddleware',
'netbox.middleware.LoginRequiredMiddleware',
'netbox.middleware.DynamicConfigMiddleware',
'netbox.middleware.APIVersionMiddleware',
'netbox.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'netbox.urls'
TEMPLATES_DIR = BASE_DIR + '/templates'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'builtins': [
'utilities.templatetags.builtins.filters',
'utilities.templatetags.builtins.tags',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'netbox.context_processors.settings_and_registry',
],
},
},
]
# Set up authentication backends
AUTHENTICATION_BACKENDS = [
REMOTE_AUTH_BACKEND,
'netbox.authentication.ObjectPermissionBackend',
]
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = False
USE_TZ = True
USE_DEPRECATED_PYTZ = True
# WSGI
WSGI_APPLICATION = 'netbox.wsgi.application'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
USE_X_FORWARDED_HOST = True
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static'
STATIC_URL = f'/{BASE_PATH}static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'project-static', 'dist'),
os.path.join(BASE_DIR, 'project-static', 'img'),
('docs', os.path.join(BASE_DIR, 'project-static', 'docs')), # Prefix with /docs
)
# Media
MEDIA_URL = '/{}media/'.format(BASE_PATH)
# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Authentication URLs
LOGIN_URL = f'/{BASE_PATH}login/'
LOGIN_REDIRECT_URL = f'/{BASE_PATH}'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Exclude potentially sensitive models from wildcard view exemption. These may still be exempted
# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.
EXEMPT_EXCLUDE_MODELS = (
('auth', 'group'),
('auth', 'user'),
('users', 'objectpermission'),
)
# All URLs starting with a string listed here are exempt from login enforcement
EXEMPT_PATHS = (
f'/{BASE_PATH}api/',
f'/{BASE_PATH}graphql/',
f'/{BASE_PATH}login/',
f'/{BASE_PATH}oauth/',
f'/{BASE_PATH}metrics',
)
#
# Django social auth
#
# Load all SOCIAL_AUTH_* settings from the user configuration
for param in dir(configuration):
if param.startswith('SOCIAL_AUTH_'):
globals()[param] = getattr(configuration, param)
SOCIAL_AUTH_JSONFIELD_ENABLED = True
#
# Django Prometheus
#
PROMETHEUS_EXPORT_MIGRATIONS = False
#
# Django filters
#
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = 'null'
#
# Django REST framework (API)
#
REST_FRAMEWORK_VERSION = '.'.join(VERSION.split('-')[0].split('.')[:2]) # Use major.minor as API version
REST_FRAMEWORK = {
'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],
'COERCE_DECIMAL_TO_STRING': False,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'netbox.api.authentication.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_METADATA_CLASS': 'netbox.api.metadata.BulkOperationMetadata',
'DEFAULT_PAGINATION_CLASS': 'netbox.api.pagination.OptionalLimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': (
'netbox.api.authentication.TokenPermissions',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'netbox.api.renderers.FormlessBrowsableAPIRenderer',
),
'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
# 'PAGE_SIZE': PAGINATE_COUNT,
'SCHEMA_COERCE_METHOD_NAMES': {
# Default mappings
'retrieve': 'read',
'destroy': 'delete',
# Custom operations
'bulk_destroy': 'bulk_delete',
},
'VIEW_NAME_FUNCTION': 'utilities.api.get_view_name',
}
#
# Graphene
#
GRAPHENE = {
# Avoids naming collision on models with 'type' field; see
# https://github.com/graphql-python/graphene-django/issues/185
'DJANGO_CHOICE_FIELD_ENUM_V3_NAMING': True,
}
#
# drf_yasg (OpenAPI/Swagger)
#
SWAGGER_SETTINGS = {
'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',
'DEFAULT_FIELD_INSPECTORS': [
'utilities.custom_inspectors.CustomFieldsDataFieldInspector',
'utilities.custom_inspectors.JSONFieldInspector',
'utilities.custom_inspectors.NullableBooleanFieldInspector',
'utilities.custom_inspectors.ChoiceFieldInspector',
'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',
'drf_yasg.inspectors.CamelCaseJSONFilter',
'drf_yasg.inspectors.ReferencingSerializerInspector',
'drf_yasg.inspectors.RelatedFieldInspector',
'drf_yasg.inspectors.ChoiceFieldInspector',
'drf_yasg.inspectors.FileFieldInspector',
'drf_yasg.inspectors.DictFieldInspector',
'drf_yasg.inspectors.SerializerMethodFieldInspector',
'drf_yasg.inspectors.SimpleFieldInspector',
'drf_yasg.inspectors.StringDefaultFieldInspector',
],
'DEFAULT_FILTER_INSPECTORS': [
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'DEFAULT_INFO': 'netbox.urls.openapi_info',
'DEFAULT_MODEL_DEPTH': 1,
'DEFAULT_PAGINATOR_INSPECTORS': [
'utilities.custom_inspectors.NullablePaginatorInspector',
'drf_yasg.inspectors.DjangoRestResponsePagination',
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
}
},
'VALIDATOR_URL': None,
}
#
# Django RQ (Webhooks backend)
#
if TASKS_REDIS_USING_SENTINEL:
RQ_PARAMS = {
'SENTINELS': TASKS_REDIS_SENTINELS,
'MASTER_NAME': TASKS_REDIS_SENTINEL_SERVICE,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'SOCKET_TIMEOUT': None,
'CONNECTION_KWARGS': {
'socket_connect_timeout': TASKS_REDIS_SENTINEL_TIMEOUT
},
}
else:
RQ_PARAMS = {
'HOST': TASKS_REDIS_HOST,
'PORT': TASKS_REDIS_PORT,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'SSL': TASKS_REDIS_SSL,
'SSL_CERT_REQS': None if TASKS_REDIS_SKIP_TLS_VERIFY else 'required',
'DEFAULT_TIMEOUT': RQ_DEFAULT_TIMEOUT,
}
RQ_QUEUES = {
'high': RQ_PARAMS,
'default': RQ_PARAMS,
'low': RQ_PARAMS,
}
#
# Plugins
#
for plugin_name in PLUGINS:
# Import plugin module
try:
plugin = importlib.import_module(plugin_name)
except ModuleNotFoundError as e:
if getattr(e, 'name') == plugin_name:
raise ImproperlyConfigured(
"Unable to import plugin {}: Module not found. Check that the plugin module has been installed within the "
"correct Python environment.".format(plugin_name)
)
raise e
# Determine plugin config and add to INSTALLED_APPS.
try:
plugin_config = plugin.config
INSTALLED_APPS.append("{}.{}".format(plugin_config.__module__, plugin_config.__name__))
except AttributeError:
raise ImproperlyConfigured(
"Plugin {} does not provide a 'config' variable. This should be defined in the plugin's __init__.py file "
"and point to the PluginConfig subclass.".format(plugin_name)
)
# Validate user-provided configuration settings and assign defaults
if plugin_name not in PLUGINS_CONFIG:
PLUGINS_CONFIG[plugin_name] = {}
plugin_config.validate(PLUGINS_CONFIG[plugin_name], VERSION)
# Add middleware
plugin_middleware = plugin_config.middleware
if plugin_middleware and type(plugin_middleware) in (list, tuple):
MIDDLEWARE.extend(plugin_middleware)
# Create RQ queues dedicated to the plugin
    # we use the plugin name as a prefix for queue names defined in the plugin config
# ex: mysuperplugin.mysuperqueue1
if type(plugin_config.queues) is not list:
raise ImproperlyConfigured(
"Plugin {} queues must be a list.".format(plugin_name)
)
RQ_QUEUES.update({
f"{plugin_name}.{queue}": RQ_PARAMS for queue in plugin_config.queues
})
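# For reference (plugin names and settings below are placeholders), plugins are enabled
# in configuration.py roughly as follows:
#
# PLUGINS = ['my_plugin']
# PLUGINS_CONFIG = {'my_plugin': {'some_setting': 'some_value'}}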
|
the-stack_106_15065
|
"""
@ProjectName: DXY-2019-nCov-Crawler
@FileName: crawler.py
@Author: Jiabao Lin
@Date: 2020/1/21
"""
from bs4 import BeautifulSoup
from service.nameMap import country_type_map, city_name_map, country_name_map, continent_name_map
import lxml
import os
import datetime
import re
import json
import time
import logging
import requests
from github import Github
from github import InputGitTreeElement
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
logger = logging.getLogger(__name__)
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
}
class Crawler:
def __init__(self):
self.session = requests.session()
self.session.headers.update(headers)
def run(self):
self.crawler()
def crawler(self):
while True:
try:
r = self.session.get(url='https://3g.dxy.cn/newh5/view/pneumonia')
except requests.exceptions.ChunkedEncodingError:
continue
soup = BeautifulSoup(r.content, "html.parser")
overall_information = re.search(r'\{("id".*?)\]\}', str(soup.find('script', attrs={'id': 'getStatisticsService'})))
province_information = re.search(r'\[(.*?)\]', str(soup.find('script', attrs={'id': 'getListByCountryTypeService1'})))
area_information = re.search(r'\[(.*)\]', str(soup.find('script', attrs={'id': 'getAreaStat'})))
abroad_information = re.search(r'\[(.*)\]', str(soup.find('script', attrs={'id': 'getListByCountryTypeService2'})))
if not overall_information or not province_information or not area_information:
continue
overall_information = self.overall_parser(overall_information=overall_information)
province_information = self.province_parser(province_information=province_information)
area_information = self.area_parser(area_information=area_information)
abroad_information = self.abroad_parser(abroad_information=abroad_information)
file_list=[
area_information + abroad_information,
overall_information,
province_information,
abroad_information
]
file_names=[
"area.json",
"overall.json",
"province.json",
"abroad.json"
]
commit_message="update data {}".format(datetime.datetime.now())
self.commit_file(file_list, file_names, commit_message)
break
logger.info('Successfully crawled.')
def overall_parser(self, overall_information):
overall_information = json.loads(overall_information.group(0))
overall_information.pop('id')
overall_information.pop('createTime')
overall_information.pop('modifyTime')
overall_information.pop('imgUrl')
overall_information.pop('deleted')
overall_information['countRemark'] = overall_information['countRemark'].replace(' 疑似', ',疑似').replace(' 治愈', ',治愈').replace(' 死亡', ',死亡').replace(' ', '')
return overall_information
def province_parser(self, province_information):
provinces = json.loads(province_information.group(0))
for province in provinces:
province.pop('id')
province.pop('tags')
province.pop('sort')
province['comment'] = province['comment'].replace(' ', '')
province['provinceEnglishName'] = city_name_map[province['provinceShortName']]['engName']
province['country'] = country_type_map.get(province['countryType'])
return provinces
def area_parser(self, area_information):
area_information = json.loads(area_information.group(0))
for area in area_information:
area['comment'] = area['comment'].replace(' ', '')
# Because the cities are given other attributes,
# this part should not be used when checking the identical document.
cities_backup = area.pop('cities')
# If this document is not in current database, insert this attribute back to the document.
area['cities'] = cities_backup
area['countryName'] = '中国'
area['countryEnglishName'] = 'China'
area['continentName'] = '亚洲'
area['continentEnglishName'] = 'Asia'
area['provinceEnglishName'] = city_name_map[area['provinceShortName']]['engName']
for city in area['cities']:
if city['cityName'] != '待明确地区':
try:
city['cityEnglishName'] = city_name_map[area['provinceShortName']]['cities'][city['cityName']]
except KeyError:
print(area['provinceShortName'], city['cityName'])
pass
else:
city['cityEnglishName'] = 'Area not defined'
return area_information
def abroad_parser(self, abroad_information):
countries = json.loads(abroad_information.group(0))
for country in countries:
country.pop('id')
country.pop('tags')
country.pop('countryType')
country.pop('provinceId')
country.pop('cityName')
country.pop('sort')
# The original provinceShortName are blank string
country.pop('provinceShortName')
# Rename the key continents to continentName
country['continentName'] = country.pop('continents')
country['comment'] = country['comment'].replace(' ', '')
country['countryName'] = country.get('provinceName')
country['provinceShortName'] = country.get('provinceName')
country['continentEnglishName'] = continent_name_map.get(country['continentName'])
country['countryEnglishName'] = country_name_map.get(country['countryName'])
country['provinceEnglishName'] = country_name_map.get(country['countryName'])
return countries
def commit_file(self, file_list, file_names, commit_message):
user=os.environ["GITHUB_USER"]
password=os.environ["GITHUB_TOKEN"]
g = Github(user, password)
repo = g.get_user().get_repo("covid-19.global-event-tracker.website-data")
master_ref = repo.get_git_ref('heads/master')
master_sha = master_ref.object.sha
base_tree = repo.get_git_tree(master_sha)
element_list = list()
for i, entry in enumerate(file_list):
# with open(entry) as input_file:
# data = input_file.read()
element = InputGitTreeElement(file_names[i], '100644', 'blob', json.dumps(entry))
element_list.append(element)
tree = repo.create_git_tree(element_list, base_tree)
parent = repo.get_git_commit(master_sha)
commit = repo.create_git_commit(commit_message, tree, [parent])
master_ref.edit(commit.sha)
print(commit_message)
if __name__ == '__main__':
crawler = Crawler()
crawler.run()
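# commit_file() reads GitHub credentials from the environment; a typical setup (values
# are placeholders) before running this module would be:
#   export GITHUB_USER=<github-username>
#   export GITHUB_TOKEN=<personal-access-token>
#   python crawler.py   # run from the project root so the `service` package is importable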
|
the-stack_106_15066
|
import ivy
import inspect
import importlib
import functools
import numpy as np
from types import ModuleType
from typing import Callable, Optional, List, Union
wrapped_modules_n_classes = []
NON_WRAPPED_FUNCTIONS = [
"copy_nest",
"current_backend",
"current_backend_str",
"set_backend",
"get_backend",
"unset_backend",
"get_referrers_recursive",
"set_debug_mode",
"set_breakpoint_debug_mode",
"set_exception_debug_mode",
"unset_debug_mode",
"debug_mode",
"nested_map",
"to_ivy",
"args_to_ivy",
"to_native",
"args_to_native",
"default",
"exists",
"set_min_base",
"get_min_base",
"set_min_denominator",
"get_min_denominator",
"split_func_call_across_gpus",
"cache_fn",
"split_func_call",
"compile",
"compile_graph",
"dev",
"as_ivy_dev",
"as_native_dev",
"memory_on_dev",
"gpu_is_available",
"num_gpus",
"tpu_is_available",
"dtype",
"as_ivy_dtype",
"cprint",
"to_ivy_module",
"tree_flatten",
"tree_unflatten",
"start_compiling",
"stop_compiling",
"get_compiled",
"index_nest",
"set_nest_at_index",
"map_nest_at_index",
"multi_index_nest",
"set_nest_at_indices",
"map_nest_at_indices",
"nested_indices_where",
"map",
"set_default_device",
"unset_default_device",
"closest_valid_dtype",
"set_default_dtype",
"default_dtype",
"default_device",
"as_native_dtype",
"is_ivy_array",
"is_ivy_container",
"inplace_update",
"inplace_increment",
"inplace_decrement",
"prune_nest_at_index",
"prune_nest_at_indices",
"is_array",
"is_native_array",
"nested_any",
"fn_array_spec",
"insert_into_nest_at_index",
"insert_into_nest_at_indices",
"vec_sig_fig",
"native_array",
]
FUNCTIONS_W_CONT_SUPPORT = [
"multi_head_attention",
"execute_with_gradients",
"adam_step",
"optimizer_update",
"gradient_descent_update",
"lars_update",
"adam_update",
"lamb_update",
"stable_divide",
"stable_pow",
]
ARRAYLESS_RET_FUNCTIONS = [
"to_numpy",
"to_list",
"to_scalar",
"is_native_array",
"is_ivy_array",
"is_variable",
]
NESTED_ARRAY_RET_FUNCTIONS = ["unstack", "split"]
NON_DTYPE_WRAPPED_FUNCTIONS = [
"arange",
"asarray",
"array",
"full",
"prod",
"sum",
"astype",
]
NON_DEV_WRAPPED_FUNCTIONS = [
"get_all_ivy_arrays_on_dev",
"num_ivy_arrays_on_dev",
"print_all_ivy_arrays_on_dev",
"clear_mem_on_dev",
"total_mem_on_dev",
"to_dev",
"split_factor",
"set_split_factor",
"split_func_call",
"as_native_dev",
"as_ivy_dev",
"dev_unify_iter",
"dev_unify_nest",
"dev_unify",
"dev_unify_array",
"dev_util",
"percent_used_mem_on_dev",
"used_mem_on_dev",
]
FW_FN_KEYWORDS = {
"numpy": [],
"jax": [],
"tensorflow": [],
"torch": [],
"mxnet": ["ndarray"],
}
NATIVE_KEYS_TO_SKIP = {
"numpy": [],
"jax": [],
"tensorflow": [],
"torch": [
"classes",
"torch",
"is_grad_enabled",
"get_default_dtype",
"numel",
"clone",
"cpu",
"set_",
"type",
"requires_grad_",
],
"mxnet": [],
}
# Helpers #
# --------#
# noinspection DuplicatedCode
def _get_first_array(*args, **kwargs):
# ToDo: make this more efficient, with function ivy.nested_nth_index_where
arr = None
if args:
arr_idxs = ivy.nested_indices_where(args, ivy.is_array)
if arr_idxs:
arr = ivy.index_nest(args, arr_idxs[0])
else:
arr_idxs = ivy.nested_indices_where(kwargs, ivy.is_array)
if arr_idxs:
arr = ivy.index_nest(kwargs, arr_idxs[0])
elif kwargs:
arr_idxs = ivy.nested_indices_where(kwargs, ivy.is_array)
if arr_idxs:
arr = ivy.index_nest(kwargs, arr_idxs[0])
return arr
# Array Handling #
# ---------------#
def inputs_to_native_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, **kwargs):
"""
Converts all `ivy.Array` instances in both the positional and keyword arguments
into `ivy.NativeArray` instances, and then calls the function with the updated
arguments.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with native arrays passed in the arguments.
"""
# convert all arrays in the inputs to ivy.NativeArray instances
native_args, native_kwargs = ivy.args_to_native(
*args, **kwargs, include_derived={tuple: True}
)
return fn(*native_args, **native_kwargs)
return new_fn
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, **kwargs):
"""
Converts all `ivy.NativeArray` instances in both the positional and keyword
arguments into `ivy.Array` instances, and then calls the function with the
updated arguments.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with ivy arrays passed in the arguments.
"""
# convert all arrays in the inputs to ivy.Array instances
ivy_args, ivy_kwargs = ivy.args_to_ivy(
*args, **kwargs, include_derived={tuple: True}
)
return fn(*ivy_args, **ivy_kwargs)
return new_fn
def outputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, **kwargs):
"""
Calls the function, and then converts all `ivy.NativeArray` instances in
the function return into `ivy.Array` instances.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with native arrays as ivy arrays.
"""
# call unmodified function
ret = fn(*args, **kwargs)
# convert all arrays in the return to `ivy.Array` instances
return ivy.to_ivy(ret, nested=True, include_derived={tuple: True})
return new_fn
def to_native_arrays_and_back(fn: Callable) -> Callable:
"""
Wraps `fn` so that input arrays are all converted to `ivy.NativeArray` instances
and return arrays are all converted to `ivy.Array` instances.
"""
return outputs_to_ivy_arrays(inputs_to_native_arrays(fn))
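# Illustrative sketch (the function below is hypothetical, not part of the API):
# decorating a backend-style function so that callers pass and receive ivy arrays
# while the body operates on native arrays.
#
#   @to_native_arrays_and_back
#   def _double(x):
#       return x * 2   # x arrives as an ivy.NativeArray; the caller gets an ivy.Array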
# Data Type Handling #
# -------------------#
def infer_dtype(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, dtype=None, **kwargs):
"""
Determines the correct `dtype`, and then calls the function with the `dtype`
passed explicitly.
Parameters
----------
args
The arguments to be passed to the function.
dtype
The data type for the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with `dtype` passed explicitly.
"""
# find the first array argument, if required
arr = None if ivy.exists(dtype) else _get_first_array(*args, **kwargs)
# infer the correct data type
dtype = ivy.default_dtype(dtype, item=arr, as_native=True)
# call the function with dtype provided explicitly
return fn(*args, dtype=dtype, **kwargs)
return new_fn
# Device Handling #
# ----------------#
def infer_device(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, device=None, **kwargs):
"""
Determines the correct `device`, and then calls the function with the `device`
passed explicitly.
Parameters
----------
args
The arguments to be passed to the function.
device
The device for the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with `device` passed explicitly.
"""
# find the first array argument, if required
arr = None if ivy.exists(device) else _get_first_array(*args, **kwargs)
# infer the correct device
device = ivy.default_device(device, item=arr, as_native=True)
# call the function with device provided explicitly
return fn(*args, device=device, **kwargs)
return new_fn
# Inplace Update Handling #
# ------------------------#
def handle_out_argument(fn: Callable) -> Callable:
handle_out_in_backend = "out" in inspect.signature(fn).parameters.keys()
@functools.wraps(fn)
def new_fn(*args, out=None, **kwargs):
"""
Calls `fn` with the `out` argument handled correctly for performing an inplace
update.
Parameters
----------
args
The arguments to be passed to the function.
out
The array to write the result to.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with `out` handled correctly for
inplace updates.
"""
if out is None:
return fn(*args, **kwargs)
if handle_out_in_backend:
# extract underlying native array for out
native_out = ivy.to_native(out)
# compute return, with backend inplace update handled by
# the backend function
ret = fn(*args, out=native_out, **kwargs)
out.data = ivy.to_native(ret)
return out
# compute return, and then handle the inplace update explicitly
ret = fn(*args, **kwargs)
return ivy.inplace_update(out, ret)
return new_fn
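# Usage note (illustrative): a function wrapped this way accepts an optional out= target,
# e.g. ivy.add(x, y, out=x) writes the result into x, either through the backend's native
# out= support or through ivy.inplace_update as a fallback.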
# Nestable Handling #
# ------------------#
def handle_nestable(fn: Callable) -> Callable:
fn_name = fn.__name__
cont_fn = getattr(ivy.Container, "static_" + fn_name)
@functools.wraps(fn)
def new_fn(*args, **kwargs):
"""
Calls `fn` with the *nestable* property of the function correctly handled.
This means mapping the function to the container leaves if any containers are
passed in the input.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with the nestable property handled correctly.
"""
# if any of the arguments or keyword arguments passed to the function contains
# a container, get the container's version of the function and call it using
# the passed arguments.
if ivy.nested_any(
args, ivy.is_ivy_container, check_nests=True
) or ivy.nested_any(kwargs, ivy.is_ivy_container, check_nests=True):
return cont_fn(*args, **kwargs)
        # if the passed arguments do not contain a container, call the function with
        # the passed arguments, returning an ivy array or a native array.
return fn(*args, **kwargs)
return new_fn
# Functions #
def _wrap_function(fn: Callable) -> Callable:
"""
Creates a wrapped ivy version of the function if it is not a private function and
not in the non wrapped functions list. This allows the new function to accept as
inputs an ivy array before performing the required operation and then returning
an ivy array.
Parameters
----------
fn
function to be wrapped
Returns
-------
The wrapped version of the function with all the necessary attributes updated.
"""
# do nothing if the function is private or in the non wrapped functions list
if hasattr(fn, "__name__") and (
fn.__name__[0] == "_" or fn.__name__ in NON_WRAPPED_FUNCTIONS
):
return fn
# determine whether the function has an out argument
keys = inspect.signature(fn).parameters.keys()
handle_dtype = "dtype" in keys
handle_dev = "device" in keys
# get function name
fn_name = fn.__name__
# with outputs converted to ivy arrays
if fn_name not in ARRAYLESS_RET_FUNCTIONS + NESTED_ARRAY_RET_FUNCTIONS:
fn = outputs_to_ivy_arrays(fn)
# with input converted to native arrays
fn = inputs_to_native_arrays(fn)
# with inplace updates handled
fn = handle_out_argument(fn)
# with dtypes handled
if handle_dtype and fn_name not in NON_DTYPE_WRAPPED_FUNCTIONS:
fn = infer_dtype(fn)
# with device handled
if handle_dev and fn_name not in NON_DEV_WRAPPED_FUNCTIONS:
fn = infer_device(fn)
# with nestable property handled
if hasattr(ivy.Container, fn_name) and fn_name not in FUNCTIONS_W_CONT_SUPPORT:
fn = handle_nestable(fn)
# return the wrapped function
return fn
def _unwrap_function(function_wrapped: Callable) -> Callable:
"""
Unwraps the function `function_wrapped`.
Parameters
----------
function_wrapped
The function to be unwrapped.
Returns
-------
The unwrapped version of the function which is the same as the passed function
for unwrapped functions and the inner_fn if the function is wrapped.
The newly unwrapped function accepts inputs and returns outputs as native arrays
instead of ivy arrays.
"""
if not hasattr(function_wrapped, "wrapped") or not function_wrapped.wrapped:
return function_wrapped
return function_wrapped.inner_fn
def _invalid_function(function: Callable, framework: Optional[str] = None) -> bool:
if framework is None:
framework = ivy.current_backend_str()
if isinstance(function, np.ufunc):
return False
if not hasattr(function, "__module__") or not function.__module__:
return True
fw_fn_keywords = ["ivy", framework] + FW_FN_KEYWORDS[framework]
for kw in fw_fn_keywords:
if kw in function.__module__:
return False
return True
def _wrap_or_unwrap_functions(
wrap_or_unwrap_function: Callable,
val: Optional[Union[ModuleType, Callable]] = None,
framework: Optional[str] = None,
classes_to_wrap: Optional[List] = [],
native: Optional[bool] = False,
depth: Optional[int] = 0,
) -> Union[Callable, ModuleType]:
if framework is None:
framework = ivy.current_backend_str()
if val is None:
val = importlib.import_module(ivy.current_backend_str()) if native else ivy
str_to_check = framework if native else "ivy"
is_class = inspect.isclass(val)
if isinstance(val, ModuleType) or (val in classes_to_wrap):
if val in wrapped_modules_n_classes or (
(
"__file__" not in val.__dict__
or (str_to_check not in val.__file__)
or "framework_handler" in val.__file__
)
and not is_class
):
return val
wrapped_modules_n_classes.append(val)
# if `val` is a class we recursively call `_wrap_or_unwrap_functions`
# on every member of the class
if is_class:
for k in dir(val):
if native and (k in NATIVE_KEYS_TO_SKIP[framework]):
continue
v = getattr(val, k)
if v is not None:
# noinspection PyBroadException
try:
setattr(
val,
k,
_wrap_or_unwrap_functions(
wrap_or_unwrap_function,
v,
framework,
classes_to_wrap,
native,
depth + 1,
),
)
except Exception:
pass
# or if `val` is a module, we recursively call
# `_wrap_or_unwrap_functions` on each value of its dict
else:
for k, v in val.__dict__.items():
if native and (k in NATIVE_KEYS_TO_SKIP[framework] or k[0] == "_"):
continue
if v is None:
val.__dict__[k] = v
else:
# noinspection PyBroadException
try:
val.__dict__[k] = _wrap_or_unwrap_functions(
wrap_or_unwrap_function,
v,
framework,
classes_to_wrap,
native,
depth + 1,
)
except Exception:
pass
if depth == 0:
wrapped_modules_n_classes.clear()
return val
# if `val` is a function/method we wrap it and return it (unless
# there are issues with it being an invalid function)
elif callable(val) and not is_class:
if depth == 0:
wrapped_modules_n_classes.clear()
if (
hasattr(val, "inner_fn")
and (_invalid_function(val.inner_fn) and not native)
) or (_invalid_function(val) and not native):
return val
return wrap_or_unwrap_function(val)
if depth == 0:
wrapped_modules_n_classes.clear()
return val
def _wrap_functions():
return _wrap_or_unwrap_functions(_wrap_function)
def _unwrap_functions():
return _wrap_or_unwrap_functions(_unwrap_function)
|
the-stack_106_15068
|
# Copyright (c) 2008 Princeton University
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Tushar Krishna
#
from m5.params import *
from m5.proxy import *
from m5.objects.Network import RubyNetwork
from m5.objects.BasicRouter import BasicRouter
from m5.objects.ClockedObject import ClockedObject
class GarnetNetwork(RubyNetwork):
type = 'GarnetNetwork'
cxx_header = "mem/ruby/network/garnet2.0/GarnetNetwork.hh"
num_rows = Param.Int(0, "number of rows if 2D (mesh/torus/..) topology");
ni_flit_size = Param.UInt32(16, "network interface flit size in bytes")
vcs_per_vnet = Param.UInt32(4, "virtual channels per virtual network");
buffers_per_data_vc = Param.UInt32(4, "buffers per data virtual channel");
buffers_per_ctrl_vc = Param.UInt32(1, "buffers per ctrl virtual channel");
routing_algorithm = Param.Int(0,
"0: Weight-based Table, 1: XY, 2: Custom");
enable_fault_model = Param.Bool(False, "enable network fault model");
fault_model = Param.FaultModel(NULL, "network fault model");
garnet_deadlock_threshold = Param.UInt32(50000,
"network-level deadlock threshold")
class GarnetNetworkInterface(ClockedObject):
type = 'GarnetNetworkInterface'
cxx_class = 'NetworkInterface'
cxx_header = "mem/ruby/network/garnet2.0/NetworkInterface.hh"
id = Param.UInt32("ID in relation to other network interfaces")
vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.UInt32(Parent.number_of_virtual_networks,
"number of virtual networks")
garnet_deadlock_threshold = Param.UInt32(Parent.garnet_deadlock_threshold,
"network-level deadlock threshold")
class GarnetRouter(BasicRouter):
type = 'GarnetRouter'
cxx_class = 'Router'
cxx_header = "mem/ruby/network/garnet2.0/Router.hh"
vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.UInt32(Parent.number_of_virtual_networks,
"number of virtual networks")
|
the-stack_106_15073
|
from collections import Counter
from unittest import TestCase
from django_query_profiler.query_profiler_storage import (
QueryProfiledData, QueryProfiledSummaryData, QuerySignature, QuerySignatureStatistics, SqlStatement,
StackTraceElement
)
class QuerySignatureStatisticsTest(TestCase):
""" Tests for checking if "QuerySignatureStatistics" class is additive or not, and to verify if its correct """
def test_query_signature_statistics_addition(self):
query_signature_statistics_1 = QuerySignatureStatistics(
frequency=1,
query_execution_time_in_micros=11,
db_row_count=111,)
query_signature_statistics_2 = QuerySignatureStatistics(
frequency=2,
query_execution_time_in_micros=12,
db_row_count=112, )
query_signature_combined = query_signature_statistics_1 + query_signature_statistics_2
self.assertEqual(query_signature_combined.frequency, 1 + 2)
self.assertEqual(query_signature_combined.query_execution_time_in_micros, 11 + 12)
self.assertEqual(query_signature_combined.db_row_count, 111 + 112)
class QueryProfiledDataTest(TestCase):
"""
Tests for checking if "QueryProfiledData" class has the correct code for calculating summary, and if it is additive
"""
query_without_params = "SELECT * FROM table WHERE id=%s"
params = "1"
django_stack_trace = [
StackTraceElement('django.db', 'find', None),
StackTraceElement('django.models', 'get', None),
StackTraceElement('django.core', 'wsgi', None),
]
app_stack_trace = [
StackTraceElement('mysite.food', 'find_restaurant', 14),
StackTraceElement('mysite.food', 'find_restaurant', 15),
StackTraceElement('mysite.restaurant', 'get_restaurant', 15), ]
target_db = 'master'
query_signature_1 = QuerySignature(
query_without_params=query_without_params,
app_stack_trace=tuple(app_stack_trace),
django_stack_trace=tuple(django_stack_trace),
target_db=target_db)
query_signature_statistics_1 = QuerySignatureStatistics(
frequency=1,
query_execution_time_in_micros=11,
db_row_count=111, )
query_signature_2 = QuerySignature(
query_without_params=query_without_params,
app_stack_trace=tuple(app_stack_trace[1:]),
django_stack_trace=tuple(django_stack_trace[1:]),
target_db=target_db)
query_signature_statistics_2 = QuerySignatureStatistics(
frequency=2,
query_execution_time_in_micros=12,
db_row_count=112)
def test_query_profiled_data_summary(self):
query_profiled_data = QueryProfiledData(
query_signature_to_query_signature_statistics={
self.query_signature_1: self.query_signature_statistics_1,
self.query_signature_2: self.query_signature_statistics_2},
_query_params_db_hash_counter=Counter(_Some_Hash_=3))
query_profiled_summary_data = query_profiled_data.summary
expected_query_profiled_summary_data = QueryProfiledSummaryData(
sql_statement_type_counter=Counter({SqlStatement.SELECT: 3}),
exact_query_duplicates=3,
total_query_execution_time_in_micros=11+12,
total_db_row_count=111+112,
potential_n_plus1_query_count=2)
self.assertEqual(query_profiled_summary_data, expected_query_profiled_summary_data)
self.assertIsNotNone(str(expected_query_profiled_summary_data)) # No exception thrown is the test here
self.assertIsNotNone(str(self.query_signature_1)) # No exception thrown is the test here
def test_query_profiled_data_addition_no_overlapping(self):
""" Query signatures are unique in both query profiled data """
query_profiled_data_1 = QueryProfiledData(
query_signature_to_query_signature_statistics={self.query_signature_1: self.query_signature_statistics_1},
_query_params_db_hash_counter=Counter(_Some_Hash_=1))
query_profiled_data_2 = QueryProfiledData(
query_signature_to_query_signature_statistics={self.query_signature_2: self.query_signature_statistics_2},
_query_params_db_hash_counter=Counter(_Some_Hash_=2))
combined_query_profiled_data = query_profiled_data_1 + query_profiled_data_2
expected_query_profiled_data = QueryProfiledData(
query_signature_to_query_signature_statistics={
self.query_signature_1: self.query_signature_statistics_1,
self.query_signature_2: self.query_signature_statistics_2},
_query_params_db_hash_counter=Counter(_Some_Hash_=3))
self.assertEqual(combined_query_profiled_data, expected_query_profiled_data)
self.assertEqual(sum([query_profiled_data_1, query_profiled_data_2]), expected_query_profiled_data)
def test_query_profiled_data_addition_overlapping(self):
""" Query signature is shared between both query profiled data """
query_profiled_data_1 = QueryProfiledData(
query_signature_to_query_signature_statistics={self.query_signature_1: self.query_signature_statistics_1},
_query_params_db_hash_counter=Counter(_Some_Hash_=1))
query_profiled_data_2 = QueryProfiledData(
query_signature_to_query_signature_statistics={self.query_signature_1: self.query_signature_statistics_2},
_query_params_db_hash_counter=Counter(_Some_Hash_=2))
combined_query_profiled_data = query_profiled_data_1 + query_profiled_data_2
expected_query_profiled_data = QueryProfiledData(
query_signature_to_query_signature_statistics={
self.query_signature_1: self.query_signature_statistics_1 + self.query_signature_statistics_2},
_query_params_db_hash_counter=Counter(_Some_Hash_=3))
self.assertEqual(combined_query_profiled_data, expected_query_profiled_data)
self.assertEqual(sum([query_profiled_data_1, query_profiled_data_2]), expected_query_profiled_data)
|
the-stack_106_15074
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pdb
import trimesh
import cv2
import numpy as np
import torch
from nnutils.geom_utils import rot_angle, mat2K, Kmatinv, obj_to_cam, \
pinhole_cam, lbs, gauss_mlp_skinning, evaluate_mlp
import torch.nn.functional as F
def nerf_gradient(mlp, embed, pts, use_xyz=False,code=None, sigma_only=False):
"""
gradient of mlp params wrt pts
"""
pts.requires_grad_(True)
pts_embedded = embed(pts)
if use_xyz: xyz=pts
else: xyz=None
y = evaluate_mlp(mlp, pts_embedded, chunk=pts.shape[0],
xyz=xyz,code=code,sigma_only=sigma_only)
sdf = -y
ibetas = 1/(mlp.beta.abs()+1e-9)
sigmas = (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() * ibetas))
# get gradient for each size-1 output
gradients = []
for i in range(y.shape[-1]):
y_sub = y [...,i:i+1]
d_output = torch.ones_like(y_sub, requires_grad=False, device=y.device)
gradient = torch.autograd.grad(
outputs=y_sub,
inputs=pts,
grad_outputs=d_output,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
gradients.append( gradient[...,None] )
gradients = torch.cat(gradients,-1) # ...,input-dim, output-dim
return gradients, sigmas
def eikonal_loss(mlp, embed, pts_exp, bound):
"""
pts_exp: X* backward warped points
"""
pts_exp = pts_exp.view(1,-1,3).detach()
nsample = pts_exp.shape[1]
device = next(mlp.parameters()).device
# Sample points for the eikonal loss
bound = torch.Tensor(bound)[None,None]
pts = torch.rand(1,nsample,3)*2*bound-bound
pts= pts.to(device)
pts = torch.cat([pts,pts_exp],1)
g,sigmas_unit = nerf_gradient(mlp, embed, pts, sigma_only=True)
g = g[...,0]
#sigmas_unit = sigmas_unit[...,0].detach()
sigmas_unit = ((pts.abs() < bound.to(device)).float().sum(-1)==3).float()
#need to weight by occupancy score
eikonal_loss = (g.norm(2, dim=-1) - 1) ** 2
eikonal_loss = (sigmas_unit*eikonal_loss).sum() / sigmas_unit.sum()
return eikonal_loss
def elastic_loss(mlp, embed, xyz, time_embedded):
xyz = xyz.detach().clone()
time_embedded = time_embedded.detach().clone()
g,_ = nerf_gradient(mlp, embed, xyz, use_xyz=mlp.use_xyz,code=time_embedded)
jacobian = g+torch.eye(3)[None,None].to(g.device)
sign, log_svals = jacobian.slogdet()
log_svals = log_svals.clone()
log_svals[sign<=0] = 0.
elastic_loss = log_svals**2
return elastic_loss
def bone_density_loss(mlp, embed, bones):
pts = bones[:,:3]
pts_embedded = embed(pts)
    y = evaluate_mlp(mlp, pts_embedded, pts.shape[0], sigma_only=True)
    bone_density_loss = F.relu(-y).mean()  # penalize bone centers with negative occupancy/SDF, same form as grad_update_bone below
    return bone_density_loss
def visibility_loss(mlp, embed, xyz_pos, w_pos, bound, chunk):
"""
w_pos: num_points x num_samples, visibility returns from nerf
bound: scalar, used to sample negative samples
"""
device = next(mlp.parameters()).device
xyz_pos = xyz_pos.detach().clone()
w_pos = w_pos.detach().clone()
# negative examples
nsample = w_pos.shape[0]*w_pos.shape[1]
bound = torch.Tensor(bound)[None,None]
xyz_neg = torch.rand(1,nsample,3)*2*bound-bound
xyz_neg = xyz_neg.to(device)
xyz_neg_embedded = embed(xyz_neg)
vis_neg_pred = evaluate_mlp(mlp, xyz_neg_embedded, chunk=chunk)[...,0]
vis_loss_neg = -F.logsigmoid(-vis_neg_pred).sum()*0.1/nsample
# positive examples
xyz_pos_embedded = embed(xyz_pos)
vis_pos_pred = evaluate_mlp(mlp, xyz_pos_embedded, chunk=chunk)[...,0]
vis_loss_pos = -(F.logsigmoid(vis_pos_pred) * w_pos).sum()/nsample
vis_loss = vis_loss_pos + vis_loss_neg
return vis_loss
def rtk_loss(rtk, rtk_raw, aux_out):
rot_pred = rtk[:,:3,:3]
rot_gt = rtk_raw[:,:3,:3]
rot_loss = rot_angle(rot_pred.matmul(rot_gt.permute(0,2,1))).mean()
rot_loss = 0.01*rot_loss
trn_pred = rtk[:,:3,3]
trn_gt = rtk_raw[:,:3,3]
trn_loss = (trn_pred - trn_gt).pow(2).sum(-1).mean()
total_loss = rot_loss + trn_loss
aux_out['rot_loss'] = rot_loss
aux_out['trn_loss'] = trn_loss
return total_loss
def compute_pts_exp(pts_prob, pts):
"""
pts: ..., ndepth, 3
pts_prob: ..., ndepth
"""
ndepth = pts_prob.shape[-1]
pts_prob = pts_prob.clone()
pts_prob = pts_prob.view(-1, ndepth,1)
pts_prob = pts_prob/(1e-9+pts_prob.sum(1)[:,None])
pts_exp = (pts * pts_prob).sum(1)
return pts_exp
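# Quick illustration of compute_pts_exp (values are illustrative, not from the
# training pipeline): with two depth samples on one ray and all probability
# mass on the second sample, the expected 3D point collapses onto that sample.
#   pts      = torch.tensor([[[0., 0., 0.], [1., 2., 3.]]])  # ..., ndepth, 3
#   pts_prob = torch.tensor([[0., 1.]])                       # ..., ndepth
#   compute_pts_exp(pts_prob, pts)   # ~ tensor([[1., 2., 3.]]) up to the 1e-9 eps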
def feat_match_loss(nerf_feat, embedding_xyz, feats, pts, pts_prob, bound,
is_training=True):
"""
feats: ..., num_feat
pts: ..., ndepth, 3
pts_prob: ..., ndepth
loss: ..., 1
"""
pts = pts.clone()
base_shape = feats.shape[:-1] # bs, ns
nfeat = feats.shape[-1]
ndepth = pts_prob.shape[-1]
feats= feats.view(-1, nfeat)
pts = pts.view(-1, ndepth,3)
# part1: compute expected pts
pts_exp = compute_pts_exp(pts_prob, pts)
## part2: matching
pts_pred = feat_match(nerf_feat, embedding_xyz, feats,
bound,grid_size=20,is_training=is_training)
# part3: compute loss
feat_err = (pts_pred - pts_exp).norm(2,-1) # n,ndepth
# rearrange outputs
pts_pred = pts_pred.view(base_shape+(3,))
pts_exp = pts_exp .view(base_shape+(3,))
feat_err = feat_err .view(base_shape+(1,))
return pts_pred, pts_exp, feat_err
def kp_reproj_loss(pts_pred, xys, models, embedding_xyz, rays):
"""
pts_pred, ...,3
xys, ...,2
out, ...,1 same as pts_pred
gcc loss is only used to update root/body pose and skinning weights
"""
xys = xys.view(-1,1,2)
xy_reproj = kp_reproj(pts_pred, models, embedding_xyz, rays)
proj_err = (xys - xy_reproj[...,:2]).norm(2,-1)
proj_err = proj_err.view(pts_pred.shape[:-1]+(1,))
return proj_err
def kp_reproj(pts_pred, models, embedding_xyz, rays, to_target=False):
"""
pts_pred, ...,3
out, ...,1,3 same as pts_pred
to_target whether reproject to target frame
"""
N = pts_pred.view(-1,3).shape[0]
xyz_coarse_sampled = pts_pred.view(-1,1,3)
# detach grad since reproj-loss would not benefit feature learning
# (due to ambiguity)
#xyz_coarse_sampled = xyz_coarse_sampled.detach()
# TODO wrap flowbw and lbs into the same module
# TODO include loss for flowbw
if to_target: rtk_vec = rays['rtk_vec_target']
else: rtk_vec = rays['rtk_vec']
rtk_vec = rtk_vec.view(N,-1) # bs, ns, 21
if 'bones' in models.keys():
if to_target: bone_rts_fw = rays['bone_rts_target']
else: bone_rts_fw = rays['bone_rts']
bone_rts_fw = bone_rts_fw.view(N,-1) # bs, ns,-1
if 'nerf_skin' in models.keys():
nerf_skin = models['nerf_skin']
else: nerf_skin = None
bones = models['bones_rst']
skin_aux = models['skin_aux']
rest_pose_code = models['rest_pose_code']
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(bones.device))
skin_forward = gauss_mlp_skinning(xyz_coarse_sampled, embedding_xyz, bones,
rest_pose_code, nerf_skin, skin_aux=skin_aux)
xyz_coarse_sampled,_ = lbs(bones, bone_rts_fw,
skin_forward, xyz_coarse_sampled, backward=False)
Rmat = rtk_vec[:,0:9] .view(N,1,3,3)
Tmat = rtk_vec[:,9:12] .view(N,1,3)
Kinv = rtk_vec[:,12:21].view(N,1,3,3)
K = mat2K(Kmatinv(Kinv))
xyz_coarse_sampled = obj_to_cam( xyz_coarse_sampled, Rmat, Tmat)
xyz_coarse_sampled = pinhole_cam(xyz_coarse_sampled,K)
xy_coarse_sampled = xyz_coarse_sampled[...,:2]
return xy_coarse_sampled
def feat_match(nerf_feat, embedding_xyz, feats, bound,
grid_size=20,is_training=True, init_pts=None, rt_entropy=False):
"""
feats: -1, num_feat
"""
if is_training:
chunk_pts = 8*1024
else:
chunk_pts = 1024
chunk_pix = 4096
nsample,_ = feats.shape
device = feats.device
feats = F.normalize(feats,2,-1)
# sample model on a regular 3d grid, and correlate with feature, nkxkxk
#p1d = np.linspace(-bound, bound, grid_size).astype(np.float32)
#query_yxz = np.stack(np.meshgrid(p1d, p1d, p1d), -1) # (y,x,z)
pxd = np.linspace(-bound[0], bound[0], grid_size).astype(np.float32)
pyd = np.linspace(-bound[1], bound[1], grid_size).astype(np.float32)
pzd = np.linspace(-bound[2], bound[2], grid_size).astype(np.float32)
query_yxz = np.stack(np.meshgrid(pyd, pxd, pzd), -1) # (y,x,z)
query_yxz = torch.Tensor(query_yxz).to(device).view(-1, 3)
query_xyz = torch.cat([query_yxz[:,1:2], query_yxz[:,0:1], query_yxz[:,2:3]],-1)
if init_pts is not None:
query_xyz = query_xyz[None] + init_pts[:,None]
else:
# N x Ns x 3
query_xyz = query_xyz[None]
# inject some noise at training time
if is_training and init_pts is None:
bound = torch.Tensor(bound)[None,None].to(device)
query_xyz = query_xyz + torch.randn_like(query_xyz) * bound * 0.05
cost_vol = []
for i in range(0,grid_size**3,chunk_pts):
if init_pts is None:
query_xyz_chunk = query_xyz[0,i:i+chunk_pts]
xyz_embedded = embedding_xyz(query_xyz_chunk)[:,None] # (N,1,...)
vol_feat_subchunk = evaluate_mlp(nerf_feat, xyz_embedded)[:,0] # (chunk, num_feat)
# normalize vol feat
vol_feat_subchunk = F.normalize(vol_feat_subchunk,2,-1)[None]
cost_chunk = []
for j in range(0,nsample,chunk_pix):
feats_chunk = feats[j:j+chunk_pix] # (chunk pix, num_feat)
if init_pts is not None:
# only query 3d grid according to each px when they are diff
# vol feature
query_xyz_chunk = query_xyz[j:j+chunk_pix,i:i+chunk_pts].clone()
xyz_embedded = embedding_xyz(query_xyz_chunk)
vol_feat_subchunk = evaluate_mlp(nerf_feat, xyz_embedded)
# normalize vol feat
vol_feat_subchunk = F.normalize(vol_feat_subchunk,2,-1)
# cpix, cpts
# distance metric
cost_subchunk = (vol_feat_subchunk * \
feats_chunk[:,None]).sum(-1) * (nerf_feat.beta.abs()+1e-9)
cost_chunk.append(cost_subchunk)
cost_chunk = torch.cat(cost_chunk,0) # (nsample, cpts)
cost_vol.append(cost_chunk)
cost_vol = torch.cat(cost_vol,-1) # (nsample, k**3)
prob_vol = cost_vol.softmax(-1)
# regress to the true location, n,3
if not is_training: torch.cuda.empty_cache()
# n, ns, 1 * n, ns, 3
pts_pred = (prob_vol[...,None] * query_xyz).sum(1)
if rt_entropy:
# compute normalized entropy
match_unc = (-prob_vol * prob_vol.clamp(1e-9,1-1e-9).log()).sum(1)[:,None]
match_unc = match_unc/np.log(grid_size**3)
return pts_pred, match_unc
else:
return pts_pred
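# Shape summary for feat_match above (assumed call pattern with the default
# arguments and init_pts=None): feats (nsample, num_feat) -> pts_pred (nsample, 3),
# i.e. one soft-argmax location per query feature over the grid_size**3 cost
# volume; with rt_entropy=True the normalized entropy (nsample, 1) is also returned.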
def grad_update_bone(bones,embedding_xyz, nerf_vis, learning_rate):
"""
#TODO need to update bones locally
"""
device = bones.device
bones_data = bones.data.detach()
bones_data.requires_grad_(True)
bone_xyz_embed = embedding_xyz(bones_data[:,None,:3])
sdf_at_bone = evaluate_mlp(nerf_vis, bone_xyz_embed)
bone_loc_loss = F.relu(-sdf_at_bone).mean()
# compute gradient wrt bones
d_output = torch.ones_like(bone_loc_loss, requires_grad=False, device=device)
gradient = torch.autograd.grad(
outputs=bone_loc_loss,
inputs=bones_data,
grad_outputs=d_output,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
bones.data = bones.data-gradient*learning_rate
return bone_loc_loss
def loss_filter_line(sil_err, errid, frameid, sil_loss_samp, img_size, scale_factor=10):
"""
sil_err: Tx512
errid: N
"""
sil_loss_samp = sil_loss_samp.detach().cpu().numpy().reshape(-1)
sil_err[errid] = sil_loss_samp
sil_err = sil_err.reshape(-1,img_size)
sil_err = sil_err.sum(-1) / (1e-9+(sil_err>0).astype(float).sum(-1))
sil_err_med = np.median(sil_err[sil_err>0])
invalid_frame = sil_err > sil_err_med*scale_factor
invalid_idx = invalid_frame[frameid]
sil_err[:] = 0
return invalid_idx
def loss_filter(g_floerr, flo_loss_samp, sil_at_samp_flo, scale_factor=10):
"""
g_floerr: T,
flo_loss_samp: bs,N,1
sil_at_samp_flo:bs,N,1
"""
bs = sil_at_samp_flo.shape[0]
    # find history median
g_floerr = g_floerr[g_floerr>0]
# tb updated as history value
#flo_err = []
#for i in range(bs):
# flo_err_sub =flo_loss_samp[i][sil_at_samp_flo[i]]
# if len(flo_err_sub) >0:
# #flo_err_sub = flo_err_sub.median().detach().cpu().numpy()
# flo_err_sub = flo_err_sub.mean().detach().cpu().numpy()
# else:
# flo_err_sub = 0
# flo_err.append(flo_err_sub)
#flo_err = np.stack(flo_err)
# vectorized version but uses mean to update
flo_err = (flo_loss_samp * sil_at_samp_flo).sum(1) /\
(1e-9+sil_at_samp_flo.sum(1)) # bs, N, 1
flo_err = flo_err.detach().cpu().numpy()[...,0]
# find invalid idx
invalid_idx = flo_err > np.median(g_floerr)*scale_factor
return flo_err, invalid_idx
def compute_xyz_wt_loss(gt_list, curr_list):
loss = []
for i in range(len(gt_list)):
loss.append( (gt_list[i].detach() - curr_list[i]).pow(2).mean() )
loss = torch.stack(loss).mean()
return loss
def compute_root_sm_2nd_loss(rtk_all, data_offset):
"""
2nd order loss
"""
rot_sm_loss = []
trn_sm_loss = []
for didx in range(len(data_offset)-1):
stt_idx = data_offset[didx]
end_idx = data_offset[didx+1]
stt_rtk = rtk_all[stt_idx:end_idx-2]
mid_rtk = rtk_all[stt_idx+1:end_idx-1]
end_rtk = rtk_all[stt_idx+2:end_idx]
rot_sub1 = stt_rtk[:,:3,:3].matmul(mid_rtk[:,:3,:3].permute(0,2,1))
rot_sub2 = mid_rtk[:,:3,:3].matmul(end_rtk[:,:3,:3].permute(0,2,1))
trn_sub1 = stt_rtk[:,:3,3] - mid_rtk[:,:3,3]
trn_sub2 = mid_rtk[:,:3,3] - end_rtk[:,:3,3]
rot_sm_sub = rot_sub1.matmul(rot_sub2.permute(0,2,1))
trn_sm_sub = trn_sub1 - trn_sub2
rot_sm_loss.append(rot_sm_sub)
trn_sm_loss.append(trn_sm_sub)
rot_sm_loss = torch.cat(rot_sm_loss,0)
rot_sm_loss = rot_angle(rot_sm_loss).mean()*1e-1
trn_sm_loss = torch.cat(trn_sm_loss,0)
trn_sm_loss = trn_sm_loss.norm(2,-1).mean()
root_sm_loss = rot_sm_loss + trn_sm_loss
root_sm_loss = root_sm_loss * 0.1
return root_sm_loss
def compute_root_sm_loss(rtk_all, data_offset):
rot_sm_loss = []
trans_sm_loss = []
for didx in range(len(data_offset)-1):
stt_idx = data_offset[didx]
end_idx = data_offset[didx+1]
rot_sm_sub = rtk_all[stt_idx:end_idx-1,:3,:3].matmul(
rtk_all[stt_idx+1:end_idx,:3,:3].permute(0,2,1))
trans_sm_sub = rtk_all[stt_idx:end_idx-1,:3,3] - \
rtk_all[stt_idx+1:end_idx,:3,3]
rot_sm_loss.append(rot_sm_sub)
trans_sm_loss.append(trans_sm_sub)
rot_sm_loss = torch.cat(rot_sm_loss,0)
rot_sm_loss = rot_angle(rot_sm_loss).mean()*1e-3
trans_sm_loss = torch.cat(trans_sm_loss,0)
trans_sm_loss = trans_sm_loss.norm(2,-1).mean()*0.1
root_sm_loss = rot_sm_loss + trans_sm_loss
return root_sm_loss
def shape_init_loss(pts, faces, mlp, embed, bound_factor, use_ellips=True):
# compute sdf loss wrt to a mesh
# construct mesh
mesh = trimesh.Trimesh(pts.cpu(), faces=faces.cpu())
device = next(mlp.parameters()).device
# Sample points
nsample =10000
obj_bound = pts.abs().max(0)[0][None,None]
bound = obj_bound * bound_factor
pts_samp = torch.rand(1,nsample,3).to(device)*2*bound-bound
# outside: positive
if use_ellips:
# signed distance to a ellipsoid
dis = (pts_samp/obj_bound).pow(2).sum(2).view(-1)
dis = torch.sqrt(dis)
dis = dis - 1
dis = dis * obj_bound.mean()
else:
# signed distance to a sphere
dis = (pts_samp).pow(2).sum(2).view(-1)
dis = torch.sqrt(dis)
dis = dis - obj_bound.min()
# compute sdf
pts_embedded = embed(pts_samp)
y = evaluate_mlp(mlp, pts_embedded, chunk=pts_samp.shape[0],
xyz=None,code=None,sigma_only=True)
sdf = -y.view(-1) # positive: outside
shape_loss = (sdf - dis).pow(2).mean()
return shape_loss
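# Minimal usage sketch for shape_init_loss (argument names here are assumptions
# for illustration; `verts`/`faces` would come from an initial mesh and
# `nerf_mlp`/`embedding_xyz` are the SDF MLP and positional embedding used
# elsewhere in this file):
#   loss = shape_init_loss(verts, faces, nerf_mlp, embedding_xyz,
#                          bound_factor=1.2, use_ellips=True)
# This regresses the MLP's SDF toward the signed distance of an ellipsoid (or
# sphere) fitted to the mesh bounds, giving the shape a reasonable starting point.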
|
the-stack_106_15075
|
import math
from carla.pid_controller.pid import PID
from carla.client import VehicleControl
# PID based controller, we can have different ones
class Controller(object):
    # The vehicle controller: it receives waypoints and applies PID control
    # in order to compute the action to apply.
def __init__(self, params):
# The parameters for this controller, set by the agent
self.params = params
# PID speed controller
self.pid = PID(p=params['pid_p'], i=params['pid_i'], d=params['pid_d'])
def get_control(self, wp_angle, wp_angle_speed, speed_factor, current_speed):
control = VehicleControl()
current_speed = max(current_speed, 0)
steer = self.params['steer_gain'] * wp_angle
if steer > 0:
control.steer = min(steer, 1)
else:
control.steer = max(steer, -1)
        # Don't go too fast around corners
if math.fabs(wp_angle_speed) < 0.1:
target_speed_adjusted = self.params['target_speed'] * speed_factor
elif math.fabs(wp_angle_speed) < 0.5:
target_speed_adjusted = 20 * speed_factor
else:
target_speed_adjusted = 15 * speed_factor
self.pid.target = target_speed_adjusted
pid_gain = self.pid(feedback=current_speed)
print ('Target: ', self.pid.target, 'Error: ', self.pid.error, 'Gain: ', pid_gain)
print ('Target Speed: ', target_speed_adjusted, 'Current Speed: ', current_speed, 'Speed Factor: ',
speed_factor)
throttle = min(max(self.params['default_throttle'] - 1.3 * pid_gain, 0),
self.params['throttle_max'])
if pid_gain > 0.5:
brake = min(0.35 * pid_gain * self.params['brake_strength'], 1)
else:
brake = 0
        control.throttle = max(throttle, 0)  # clamp throttle to be non-negative
control.brake = brake
print ('Throttle: ', control.throttle, 'Brake: ', control.brake, 'Steering Angle: ', control.steer)
return control
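# Illustrative usage sketch (the parameter values below are assumptions for the
# example, not the project's tuned defaults):
#   params = {'pid_p': 0.25, 'pid_i': 0.20, 'pid_d': 0.00,
#             'steer_gain': 0.7, 'target_speed': 35,
#             'default_throttle': 0.0, 'throttle_max': 0.75, 'brake_strength': 1}
#   controller = Controller(params)
#   control = controller.get_control(wp_angle, wp_angle_speed,
#                                    speed_factor, current_speed)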
|
the-stack_106_15077
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authorization support for gRPC."""
from __future__ import absolute_import
import logging
import os
import six
from google.auth import environment_vars
from google.auth import exceptions
from google.auth.transport import _mtls_helper
from google.oauth2 import service_account
try:
import grpc
except ImportError as caught_exc: # pragma: NO COVER
six.raise_from(
ImportError(
"gRPC is not installed, please install the grpcio package "
"to use the gRPC transport."
),
caught_exc,
)
_LOGGER = logging.getLogger(__name__)
class AuthMetadataPlugin(grpc.AuthMetadataPlugin):
"""A `gRPC AuthMetadataPlugin`_ that inserts the credentials into each
request.
.. _gRPC AuthMetadataPlugin:
http://www.grpc.io/grpc/python/grpc.html#grpc.AuthMetadataPlugin
Args:
credentials (google.auth.credentials.Credentials): The credentials to
add to requests.
request (google.auth.transport.Request): A HTTP transport request
object used to refresh credentials as needed.
default_host (Optional[str]): A host like "pubsub.googleapis.com".
This is used when a self-signed JWT is created from service
account credentials.
"""
def __init__(self, credentials, request, default_host=None):
# pylint: disable=no-value-for-parameter
# pylint doesn't realize that the super method takes no arguments
# because this class is the same name as the superclass.
super(AuthMetadataPlugin, self).__init__()
self._credentials = credentials
self._request = request
self._default_host = default_host
def _get_authorization_headers(self, context):
"""Gets the authorization headers for a request.
Returns:
Sequence[Tuple[str, str]]: A list of request headers (key, value)
to add to the request.
"""
headers = {}
# https://google.aip.dev/auth/4111
# Attempt to use self-signed JWTs when a service account is used.
# A default host must be explicitly provided since it cannot always
# be determined from the context.service_url.
if (
isinstance(self._credentials, service_account.Credentials)
and self._default_host
):
self._credentials._create_self_signed_jwt(
"https://{}/".format(self._default_host)
)
self._credentials.before_request(
self._request, context.method_name, context.service_url, headers
)
return list(six.iteritems(headers))
def __call__(self, context, callback):
"""Passes authorization metadata into the given callback.
Args:
context (grpc.AuthMetadataContext): The RPC context.
callback (grpc.AuthMetadataPluginCallback): The callback that will
be invoked to pass in the authorization metadata.
"""
callback(self._get_authorization_headers(context), None)
def secure_authorized_channel(
credentials,
request,
target,
ssl_credentials=None,
client_cert_callback=None,
**kwargs
):
"""Creates a secure authorized gRPC channel.
This creates a channel with SSL and :class:`AuthMetadataPlugin`. This
channel can be used to create a stub that can make authorized requests.
Users can configure client certificate or rely on device certificates to
establish a mutual TLS channel, if the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
variable is explicitly set to `true`.
Example::
import google.auth
import google.auth.transport.grpc
import google.auth.transport.requests
from google.cloud.speech.v1 import cloud_speech_pb2
# Get credentials.
credentials, _ = google.auth.default()
# Get an HTTP request function to refresh credentials.
request = google.auth.transport.requests.Request()
# Create a channel.
channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, regular_endpoint, request,
ssl_credentials=grpc.ssl_channel_credentials())
# Use the channel to create a stub.
cloud_speech.create_Speech_stub(channel)
Usage:
There are actually a couple of options to create a channel, depending on if
you want to create a regular or mutual TLS channel.
First let's list the endpoints (regular vs mutual TLS) to choose from::
regular_endpoint = 'speech.googleapis.com:443'
mtls_endpoint = 'speech.mtls.googleapis.com:443'
Option 1: create a regular (non-mutual) TLS channel by explicitly setting
the ssl_credentials::
regular_ssl_credentials = grpc.ssl_channel_credentials()
channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, regular_endpoint, request,
ssl_credentials=regular_ssl_credentials)
Option 2: create a mutual TLS channel by calling a callback which returns
the client side certificate and the key (Note that
`GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
set to `true`)::
def my_client_cert_callback():
code_to_load_client_cert_and_key()
if loaded:
return (pem_cert_bytes, pem_key_bytes)
raise MyClientCertFailureException()
try:
channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, mtls_endpoint, request,
client_cert_callback=my_client_cert_callback)
except MyClientCertFailureException:
# handle the exception
Option 3: use application default SSL credentials. It searches and uses
the command in a context aware metadata file, which is available on devices
with endpoint verification support (Note that
`GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
set to `true`).
See https://cloud.google.com/endpoint-verification/docs/overview::
try:
default_ssl_credentials = SslCredentials()
except:
# Exception can be raised if the context aware metadata is malformed.
# See :class:`SslCredentials` for the possible exceptions.
# Choose the endpoint based on the SSL credentials type.
if default_ssl_credentials.is_mtls:
endpoint_to_use = mtls_endpoint
else:
endpoint_to_use = regular_endpoint
channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, endpoint_to_use, request,
ssl_credentials=default_ssl_credentials)
Option 4: not setting ssl_credentials and client_cert_callback. For devices
without endpoint verification support or `GOOGLE_API_USE_CLIENT_CERTIFICATE`
environment variable is not `true`, a regular TLS channel is created;
otherwise, a mutual TLS channel is created, however, the call should be
wrapped in a try/except block in case of malformed context aware metadata.
    The following code uses regular_endpoint; it works the same whether the
    created channel is regular or mutual TLS. The regular endpoint ignores the
    client certificate and key::
channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, regular_endpoint, request)
    The following code uses mtls_endpoint. If the created channel is regular,
    and the API mtls_endpoint is configured to require client SSL credentials,
    API calls using this channel will be rejected::
channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, mtls_endpoint, request)
Args:
credentials (google.auth.credentials.Credentials): The credentials to
add to requests.
request (google.auth.transport.Request): A HTTP transport request
object used to refresh credentials as needed. Even though gRPC
is a separate transport, there's no way to refresh the credentials
without using a standard http transport.
target (str): The host and port of the service.
ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
credentials. This can be used to specify different certificates.
This argument is mutually exclusive with client_cert_callback;
providing both will raise an exception.
If ssl_credentials and client_cert_callback are None, application
default SSL credentials are used if `GOOGLE_API_USE_CLIENT_CERTIFICATE`
environment variable is explicitly set to `true`, otherwise one way TLS
SSL credentials are used.
client_cert_callback (Callable[[], (bytes, bytes)]): Optional
            callback function to obtain client certificate and key for mutual TLS
connection. This argument is mutually exclusive with
ssl_credentials; providing both will raise an exception.
This argument does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE`
environment variable is explicitly set to `true`.
kwargs: Additional arguments to pass to :func:`grpc.secure_channel`.
Returns:
grpc.Channel: The created gRPC channel.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
creation failed for any reason.
"""
# Create the metadata plugin for inserting the authorization header.
metadata_plugin = AuthMetadataPlugin(credentials, request)
# Create a set of grpc.CallCredentials using the metadata plugin.
google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)
if ssl_credentials and client_cert_callback:
raise ValueError(
"Received both ssl_credentials and client_cert_callback; "
"these are mutually exclusive."
)
# If SSL credentials are not explicitly set, try client_cert_callback and ADC.
if not ssl_credentials:
use_client_cert = os.getenv(
environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
)
if use_client_cert == "true" and client_cert_callback:
# Use the callback if provided.
cert, key = client_cert_callback()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
elif use_client_cert == "true":
# Use application default SSL credentials.
            adc_ssl_credentials = SslCredentials()
            ssl_credentials = adc_ssl_credentials.ssl_credentials
else:
ssl_credentials = grpc.ssl_channel_credentials()
# Combine the ssl credentials and the authorization credentials.
composite_credentials = grpc.composite_channel_credentials(
ssl_credentials, google_auth_credentials
)
return grpc.secure_channel(target, composite_credentials, **kwargs)
class SslCredentials:
"""Class for application default SSL credentials.
The behavior is controlled by `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment
variable whose default value is `false`. Client certificate will not be used
unless the environment variable is explicitly set to `true`. See
https://google.aip.dev/auth/4114
If the environment variable is `true`, then for devices with endpoint verification
support, a device certificate will be automatically loaded and mutual TLS will
be established.
See https://cloud.google.com/endpoint-verification/docs/overview.
"""
def __init__(self):
use_client_cert = os.getenv(
environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
)
if use_client_cert != "true":
self._is_mtls = False
else:
# Load client SSL credentials.
metadata_path = _mtls_helper._check_dca_metadata_path(
_mtls_helper.CONTEXT_AWARE_METADATA_PATH
)
self._is_mtls = metadata_path is not None
@property
def ssl_credentials(self):
"""Get the created SSL channel credentials.
For devices with endpoint verification support, if the device certificate
loading has any problems, corresponding exceptions will be raised. For
a device without endpoint verification support, no exceptions will be
raised.
Returns:
grpc.ChannelCredentials: The created grpc channel credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
creation failed for any reason.
"""
if self._is_mtls:
try:
_, cert, key, _ = _mtls_helper.get_client_ssl_credentials()
self._ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
except exceptions.ClientCertError as caught_exc:
new_exc = exceptions.MutualTLSChannelError(caught_exc)
six.raise_from(new_exc, caught_exc)
else:
self._ssl_credentials = grpc.ssl_channel_credentials()
return self._ssl_credentials
@property
def is_mtls(self):
"""Indicates if the created SSL channel credentials is mutual TLS."""
return self._is_mtls
|
the-stack_106_15078
|
from elasticsearch_dsl import Date, Keyword, Text
from datahub.search import fields
from datahub.search.models import BaseESModel
class ESSimpleModel(BaseESModel):
"""Elasticsearch representation of SimpleModel model."""
id = Keyword()
name = Text(
fields={
'keyword': fields.NormalizedKeyword(),
'trigram': fields.TrigramText(),
},
)
date = Date()
SEARCH_FIELDS = (
'name',
'name.trigram',
)
|
the-stack_106_15079
|
#By Ex094 for Deque's Project
class rot:
def __init__(self, alpha, rotate):
self.alpha = str(alpha)
self.rotate = str(rotate)
def encode(self, text):
enc = ''
for char in text:
if char in self.alpha:
get = self.alpha.index(char)
enc += self.rotate[get]
else: enc += char
return enc
def decode(self, text):
return self.encode(text)
rot13 = rot("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", "NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm")
rot18 = rot("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz", "RSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQnopqrstuvwxyzabcdefghijklm")
rot47 = rot("!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~", "PQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNO")
rot5 = rot("0123456789", "5678901234")
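# Small self-check of the rot13/rot5 tables above (added for illustration; the
# expected strings were derived by hand from the alphabets, and decode() simply
# re-applies encode(), which inverts rot13 and rot5 since those two tables are
# involutions).
if __name__ == "__main__":
    assert rot13.encode("Hello, World!") == "Uryyb, Jbeyq!"
    assert rot13.decode("Uryyb, Jbeyq!") == "Hello, World!"
    assert rot5.encode("2021") == "7576"
    print("rot self-check passed")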
|
the-stack_106_15081
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1", manifest={"TensorboardTimeSeries",},
)
class TensorboardTimeSeries(proto.Message):
r"""TensorboardTimeSeries maps to times series produced in
training runs
Attributes:
name (str):
Output only. Name of the
TensorboardTimeSeries.
display_name (str):
Required. User provided name of this
TensorboardTimeSeries. This value should be
unique among all TensorboardTimeSeries resources
belonging to the same TensorboardRun resource
(parent resource).
description (str):
Description of this TensorboardTimeSeries.
value_type (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.ValueType):
Required. Immutable. Type of
TensorboardTimeSeries value.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this
TensorboardTimeSeries was created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this
TensorboardTimeSeries was last updated.
etag (str):
Used to perform a consistent read-modify-
            write update. If not set, a blind "overwrite"
update happens.
plugin_name (str):
Immutable. Name of the plugin this time
            series pertains to, such as Scalar, Tensor, or Blob.
plugin_data (bytes):
Data of the current plugin, with the size
limited to 65KB.
metadata (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.Metadata):
Output only. Scalar, Tensor, or Blob metadata
for this TensorboardTimeSeries.
"""
class ValueType(proto.Enum):
r"""An enum representing the value type of a
TensorboardTimeSeries.
"""
VALUE_TYPE_UNSPECIFIED = 0
SCALAR = 1
TENSOR = 2
BLOB_SEQUENCE = 3
class Metadata(proto.Message):
r"""Describes metadata for a TensorboardTimeSeries.
Attributes:
max_step (int):
Output only. Max step index of all data
points within a TensorboardTimeSeries.
max_wall_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Max wall clock timestamp of all
data points within a TensorboardTimeSeries.
max_blob_sequence_length (int):
Output only. The largest blob sequence length (number of
blobs) of all data points in this time series, if its
ValueType is BLOB_SEQUENCE.
"""
max_step = proto.Field(proto.INT64, number=1,)
max_wall_time = proto.Field(
proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
)
max_blob_sequence_length = proto.Field(proto.INT64, number=3,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
value_type = proto.Field(proto.ENUM, number=4, enum=ValueType,)
create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
etag = proto.Field(proto.STRING, number=7,)
plugin_name = proto.Field(proto.STRING, number=8,)
plugin_data = proto.Field(proto.BYTES, number=9,)
metadata = proto.Field(proto.MESSAGE, number=10, message=Metadata,)
__all__ = tuple(sorted(__protobuf__.manifest))
|