filename (stringlengths 13–19) | text (stringlengths 134–1.04M) |
---|---|
the-stack_106_16131
|
import numpy as np
import random
import math
import os.path
from keras.models import Sequential, load_model
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D, Dropout, Dense, Flatten
from keras.constraints import max_norm
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import array_to_img, img_to_array, load_img
epochs = 1000
batch_size = 1
validation_split = 0.2
source_csv = 'depths.csv'
source_csv_delimiter = ','
# fix random seed for reproducibility
np.random.seed(7)
depths = np.genfromtxt(source_csv, delimiter=source_csv_delimiter, skip_header=1, dtype=['U16', 'float_'])
for i in range(len(depths)):
depths[i][1] /= 1000
trainX = []
trainY = []
for i in range(len(depths)):
if(os.path.isfile('train/images/' + depths[i][0] + '.png')):
image = load_img('train/images/' + depths[i][0] + '.png', color_mode='grayscale')
image_array = img_to_array(image)
image_array /= 255
mask = load_img('train/masks/' + depths[i][0] + '.png', color_mode='grayscale')
mask_array = img_to_array(mask)
mask_array /= 255
mask_array = np.array(mask_array)
mask_array = mask_array.flatten()
trainX.append(image_array)
trainY.append(mask_array)
trainX = np.array(trainX)
trainY = np.array(trainY)
print(trainX[40])
print(trainY[40])
# make it divisible by batch size
remainder = len(trainX) % batch_size
if remainder > 0:
trainX = trainX[:-remainder]
trainY = trainY[:-remainder]
print(trainX.shape)
print(trainY.shape)
np.save('dataset_train_x_no_depth', trainX)
np.save('dataset_train_y_no_depth', trainY)
'''
# create and fit model
model = Sequential()
model.add(Dense(5, input_shape=(2, 101, 101, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(10201, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
earlystopper = EarlyStopping(monitor='val_loss', verbose=1, patience=5)
checkpointer = ModelCheckpoint(filepath='test.h5', verbose=1, save_best_only=True)
model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, validation_split=validation_split, callbacks=[checkpointer, earlystopper])
'''
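# Sanity check (a minimal sketch): the arrays saved above can be reloaded in a later run
# instead of re-reading every PNG. Note that np.save appends the .npy extension itself.
reloaded_x = np.load('dataset_train_x_no_depth.npy')
reloaded_y = np.load('dataset_train_y_no_depth.npy')
assert reloaded_x.shape == trainX.shape and reloaded_y.shape == trainY.shape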
|
the-stack_106_16132
|
import numpy as np
import pytest
import astropy
import astropy.units as u
from astropy.constants import c as speed_of_light
from astropy.coordinates import (
ICRS,
Angle,
CartesianDifferential,
CartesianRepresentation,
ConvertError,
HeliocentricMeanEcliptic,
Longitude,
SkyCoord,
SphericalDifferential,
get_body_barycentric,
get_body_barycentric_posvel,
)
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose
from astropy.time import Time
from sunpy.coordinates import (
GeocentricEarthEquatorial,
GeocentricSolarEcliptic,
Heliocentric,
HeliocentricEarthEcliptic,
HeliocentricInertial,
HeliographicCarrington,
HeliographicStonyhurst,
Helioprojective,
get_earth,
sun,
)
from sunpy.coordinates.frames import _J2000
from sunpy.coordinates.transformations import transform_with_sun_center
from sunpy.sun.constants import radius as _RSUN
from sunpy.sun.constants import sidereal_rotation_rate
from sunpy.time import parse_time
def test_hcc_to_hgs():
'''
Check that a coordinate pointing to the observer in Heliocentric
coordinates maps to the latitude/longitude of the observer in
HeliographicStonyhurst coordinates.
'''
lat = 10 * u.deg
lon = 20 * u.deg
observer = HeliographicStonyhurst(lat=lat, lon=lon)
hcc_in = Heliocentric(x=0*u.km, y=0*u.km, z=1*u.km, observer=observer)
hgs_out = hcc_in.transform_to(HeliographicStonyhurst)
assert_quantity_allclose(hgs_out.lat, lat)
assert_quantity_allclose(hgs_out.lon, lon)
def test_hpc_hpc():
# Use some unphysical values for solar parameters for testing, to make it
# easier to calculate expected results.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_in = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_out = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
hpc_in = Helioprojective(0*u.arcsec, 0*u.arcsec, rsun=rsun, observer=observer_in)
hpc_out = Helioprojective(observer=observer_out, rsun=rsun)
hpc_new = hpc_in.transform_to(hpc_out)
assert hpc_new.observer == hpc_out.observer
# Calculate the distance subtended by an angle of L0 from the centre of the
# Sun.
dd = -1 * rsun * np.tan(L0)
# Calculate the angle corresponding to that distance as seen by the new
# observer.
theta = np.arctan2(dd, (D0 - rsun))
assert quantity_allclose(theta, hpc_new.Tx, rtol=1e-3)
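# Worked numbers for the geometry above (a rough illustration, not used by the test):
# with rsun = 1 m, D0 = 1 km and L0 = 1 deg,
#   dd    = -1 m * tan(1 deg)           ~ -0.01746 m
#   theta = arctan2(-0.01746 m, 999 m)  ~ -1.75e-5 rad ~ -3.6 arcsec
# so hpc_new.Tx is expected to be roughly -3.6 arcsec.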
def test_hpc_hpc_sc():
# Use some unphysical values for solar parameters for testing, to make it
# easier to calculate expected results.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_in = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_out = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
sc_in = SkyCoord(0*u.arcsec, 0*u.arcsec, rsun=rsun, observer=observer_in,
frame='helioprojective')
hpc_out = Helioprojective(observer=observer_out, rsun=rsun)
hpc_new = sc_in.transform_to(hpc_out)
assert hpc_new.observer.lat == hpc_out.observer.lat
assert hpc_new.observer.lon == hpc_out.observer.lon
assert hpc_new.observer.radius == hpc_out.observer.radius
def test_hpc_hpc_null():
hpc_in = Helioprojective(0*u.arcsec, 0*u.arcsec)
hpc_out = Helioprojective()
hpc_new = hpc_in.transform_to(hpc_out)
assert hpc_new is not hpc_in
assert quantity_allclose(hpc_new.Tx, hpc_in.Tx)
assert quantity_allclose(hpc_new.Ty, hpc_in.Ty)
assert hpc_out.observer == hpc_new.observer
def test_hcrs_hgs():
# Get the current Earth location in HCRS
adate = parse_time('2015/05/01 01:13:00')
earth_hcrs = SkyCoord(get_body_barycentric('earth', adate), frame='icrs', obstime=adate).hcrs
# Convert from HCRS to HGS
earth_hgs = earth_hcrs.transform_to(HeliographicStonyhurst)
# The HGS longitude of the Earth should be zero within numerical error
# Due to an issue with wrapping at +-360, we shift it to pass the test.
assert quantity_allclose((earth_hgs.lon+1*u.deg) % (360*u.deg), 1*u.deg, atol=1e-12*u.deg)
# The HGS latitude and radius should be within valid ranges
assert quantity_allclose(earth_hgs.lat, 0*u.deg, atol=7.3*u.deg)
assert quantity_allclose(earth_hgs.radius, 1*u.AU, atol=0.017*u.AU)
def test_hcrs_hgs_array_obstime():
# Get the Earth location in HCRS at two times
times = Time(['2017-01-01', '2017-06-01'])
earth_hcrs = SkyCoord(get_body_barycentric('earth', times), frame='icrs', obstime=times).hcrs
# Transform each time in separate calls (uses scalar obstime)
earth_hgs_0 = earth_hcrs[0].transform_to(HeliographicStonyhurst)
earth_hgs_1 = earth_hcrs[1].transform_to(HeliographicStonyhurst)
# Transform both times in one call (uses array obstime)
earth_hgs = earth_hcrs.transform_to(HeliographicStonyhurst)
# Confirm that the two approaches produce the same results
assert quantity_allclose(earth_hgs_0.lon, earth_hgs[0].lon, atol=1e-12*u.deg)
assert quantity_allclose(earth_hgs_0.lat, earth_hgs[0].lat, rtol=1e-10)
assert quantity_allclose(earth_hgs_0.radius, earth_hgs[0].radius, rtol=1e-10)
assert quantity_allclose(earth_hgs_1.lon, earth_hgs[1].lon, atol=1e-12*u.deg)
assert quantity_allclose(earth_hgs_1.lat, earth_hgs[1].lat, rtol=1e-10)
assert quantity_allclose(earth_hgs_1.radius, earth_hgs[1].radius, rtol=1e-10)
def test_hgs_hcrs():
# This test checks the HGS->HCRS transformation by transforming from HGS to
# HeliocentricMeanEcliptic (HME). It will fail if there are errors in Astropy's
# HCRS->ICRS or ICRS->HME transformations.
# Use published HGS coordinates in the Astronomical Almanac (2013), pages C6-C7
obstime = Time('2013-01-28')
earth_hgs = SkyCoord(0*u.deg, -5.73*u.deg, 0.9848139*u.AU, frame=HeliographicStonyhurst,
obstime=obstime)
# Transform to HME at observation-time equinox
earth_hme = earth_hgs.transform_to(HeliocentricMeanEcliptic(equinox=obstime))
# Validate against published values from the Astronomical Almanac (2013), page C6 per page E2
# The dominant source of inaccuracy is the limited precision of the published B0 used above
assert quantity_allclose(earth_hme.lon, Angle('308d13m30.51s') - 180*u.deg, atol=5*u.arcsec)
assert quantity_allclose(earth_hme.lat, -Angle('-0.27s'), atol=10*u.arcsec)
assert quantity_allclose(earth_hme.distance, 0.9848139*u.AU, atol=5e-7*u.AU)
def test_hgs_hgc_roundtrip():
obstime = "2011-01-01"
hgsin = HeliographicStonyhurst(lat=10*u.deg, lon=20*u.deg, obstime=obstime)
hgcout = hgsin.transform_to(HeliographicCarrington(observer='earth', obstime=obstime))
assert_quantity_allclose(hgsin.lat, hgcout.lat)
assert_quantity_allclose(hgsin.lon + sun.L0(obstime), hgcout.lon)
hgsout = hgcout.transform_to(HeliographicStonyhurst(obstime=obstime))
assert_quantity_allclose(hgsout.lat, hgsin.lat)
assert_quantity_allclose(hgsout.lon, hgsin.lon)
def test_hgs_cartesian_rep_to_hpc():
# This test checks transformation HGS->HPC when the coordinate is in a Cartesian
# representation and that it is the same as a transformation from an HGS frame with a
# spherical representation
obstime = "2011-01-01"
hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,
frame=HeliographicStonyhurst(obstime=obstime),
representation_type='cartesian')
hpc_frame = Helioprojective(observer='earth', obstime=obstime)
hgscoord_sph = hgscoord_cart.copy()
hgscoord_sph.representation_type = 'spherical'
hpccoord_cart = hgscoord_cart.transform_to(hpc_frame)
hpccoord_sph = hgscoord_sph.transform_to(hpc_frame)
assert_quantity_allclose(hpccoord_cart.Tx, hpccoord_sph.Tx)
assert_quantity_allclose(hpccoord_cart.Ty, hpccoord_sph.Ty)
assert_quantity_allclose(hpccoord_cart.distance, hpccoord_sph.distance)
def test_hgs_cartesian_rep_to_hcc():
# This test checks transformation HGS->HCC when the coordinate is in a Cartesian
# representation and that it is the same as a transformation from an HGS frame with a
# spherical representation
obstime = "2011-01-01"
hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,
frame=HeliographicStonyhurst(obstime=obstime),
representation_type='cartesian')
hcc_frame = Heliocentric(observer='earth', obstime=obstime)
hgscoord_sph = hgscoord_cart.copy()
hgscoord_sph.representation_type = 'spherical'
hcccoord_cart = hgscoord_cart.transform_to(hcc_frame)
hcccoord_sph = hgscoord_sph.transform_to(hcc_frame)
assert_quantity_allclose(hcccoord_cart.x, hcccoord_sph.x)
assert_quantity_allclose(hcccoord_cart.y, hcccoord_sph.y)
assert_quantity_allclose(hcccoord_cart.z, hcccoord_sph.z)
def test_hgs_cartesian_rep_to_hgc():
# This test checks transformation HGS->HCC when the coordinate is in a Cartesian
# representation and that it is the same as a transformation from an HGS frame with a
# spherical representation
obstime = "2011-01-01"
hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,
frame=HeliographicStonyhurst(obstime=obstime),
representation_type='cartesian')
hgscoord_sph = hgscoord_cart.copy()
hgscoord_sph.representation_type = 'spherical'
# HGC
hgcframe = HeliographicCarrington(observer='earth', obstime=obstime)
hgccoord_cart = hgscoord_cart.transform_to(hgcframe)
hgccoord_sph = hgscoord_sph.transform_to(hgcframe)
assert_quantity_allclose(hgccoord_cart.lat, hgccoord_sph.lat)
assert_quantity_allclose(hgccoord_cart.lon, hgccoord_sph.lon)
assert_quantity_allclose(hgccoord_cart.radius, hgccoord_sph.radius)
def test_hcc_to_hpc_different_observer():
# This test checks transformation HCC->HPC in the case where the HCC and HPC frames are
# defined by different observers.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_1 = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_2 = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
hcc_frame = Heliocentric(observer=observer_1)
hpc_frame = Helioprojective(observer=observer_2)
hcccoord = SkyCoord(x=rsun, y=rsun, z=rsun, frame=hcc_frame)
hpccoord_out = hcccoord.transform_to(hpc_frame)
hpccoord_expected = hcccoord.transform_to(HeliographicStonyhurst).transform_to(hpc_frame)
assert_quantity_allclose(hpccoord_out.Tx, hpccoord_expected.Tx)
assert_quantity_allclose(hpccoord_out.Ty, hpccoord_expected.Ty)
assert_quantity_allclose(hpccoord_out.distance, hpccoord_expected.distance)
def test_hpc_to_hcc_different_observer():
# This test checks transformation HPC->HCC in the case where the HCC and HPC frames are
# defined by different observers.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_1 = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_2 = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
hcc_frame = Heliocentric(observer=observer_1)
hpc_frame = Helioprojective(observer=observer_2, rsun=rsun)
hpccoord = SkyCoord(Tx=0*u.arcsec, Ty=0*u.arcsec, frame=hpc_frame)
hcccoord_out = hpccoord.transform_to(hcc_frame)
hcccoord_expected = hpccoord.transform_to(HeliographicStonyhurst).transform_to(hcc_frame)
assert_quantity_allclose(hcccoord_out.x, hcccoord_expected.x)
assert_quantity_allclose(hcccoord_out.y, hcccoord_expected.y)
assert_quantity_allclose(hcccoord_out.z, hcccoord_expected.z)
def test_hcc_to_hpc_same_observer():
# This test checks transformation HCC->HPC in the case of same observer
rsun = 1*u.m
D0 = 1*u.km
observer = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
hcc_frame = Heliocentric(observer=observer)
hpc_frame = Helioprojective(observer=observer, rsun=rsun)
hcccoord = SkyCoord(x=rsun, y=rsun, z=rsun, frame=hcc_frame)
hpccoord_out = hcccoord.transform_to(hpc_frame)
hpccoord_expected = hcccoord.transform_to(HeliographicStonyhurst).transform_to(hpc_frame)
assert_quantity_allclose(hpccoord_out.Tx, hpccoord_expected.Tx)
assert_quantity_allclose(hpccoord_out.Ty, hpccoord_expected.Ty)
assert_quantity_allclose(hpccoord_out.distance, hpccoord_expected.distance)
def test_hpc_to_hcc_same_observer():
# This test checks transformation HPC->HCC in the case of same observer
rsun = 1*u.m
D0 = 1 * u.km
observer = HeliographicStonyhurst(lat=0 * u.deg, lon=0 * u.deg, radius=D0)
hcc_frame = Heliocentric(observer=observer)
hpc_frame = Helioprojective(observer=observer, rsun=rsun)
hpccoord = SkyCoord(Tx=0 * u.arcsec, Ty=0 * u.arcsec, frame=hpc_frame)
hcccoord_out = hpccoord.transform_to(hcc_frame)
hcccoord_expected = hpccoord.transform_to(HeliographicStonyhurst).transform_to(hcc_frame)
assert_quantity_allclose(hcccoord_out.x, hcccoord_expected.x)
assert_quantity_allclose(hcccoord_out.y, hcccoord_expected.y)
assert_quantity_allclose(hcccoord_out.z, hcccoord_expected.z)
def test_hpc_hcc_different_observer_radius():
# Tests HPC->HCC with a change in observer at different distances from the Sun
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU)
hpc = Helioprojective(0*u.arcsec, 0*u.arcsec, 0.5*u.AU, observer=observer1)
observer2 = HeliographicStonyhurst(90*u.deg, 0*u.deg, 0.75*u.AU)
hcc = hpc.transform_to(Heliocentric(observer=observer2))
assert_quantity_allclose(hcc.x, -0.5*u.AU)
assert_quantity_allclose(hcc.y, 0*u.AU, atol=1e-10*u.AU)
assert_quantity_allclose(hcc.z, 0*u.AU, atol=1e-10*u.AU)
def test_hgs_hgs():
# Test HGS loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime=obstime))
new = old.transform_to(HeliographicStonyhurst(obstime=obstime + 1*u.day))
assert_quantity_allclose(new.lon, old.lon - 1*u.deg, atol=0.1*u.deg) # due to Earth motion
assert_quantity_allclose(new.lat, old.lat, atol=1e-3*u.deg)
assert_quantity_allclose(new.radius, old.radius, atol=1e-5*u.AU)
def test_hgc_hgc():
# Test HGC loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicCarrington(observer='earth',
obstime=obstime))
new = old.transform_to(HeliographicCarrington(observer='earth', obstime=obstime + 1*u.day))
assert_quantity_allclose(new.lon, 75.815607 * u.deg, atol=1e-7*u.deg) # solar rotation
# These are not equal to the old values, because the coordinates stay fixed
# in inertial space, whilst the frame (fixed to the center of the Sun)
# moves slightly.
assert_quantity_allclose(new.lat, 9.999963 * u.deg, atol=1e-7*u.deg)
assert_quantity_allclose(new.radius, 1.000009 * u.AU, atol=1e-7*u.AU)
def test_hgc_hgc_different_observers():
obstime = Time('2001-01-01')
hgc_earth = HeliographicCarrington(observer='earth', obstime=obstime)
hgc_mars = HeliographicCarrington(observer='mars', obstime=obstime)
hgc_sun = HeliographicCarrington(observer='sun', obstime=obstime)
sc = SkyCoord(10*u.deg, 20*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime=obstime))
sc_hgc_earth = sc.transform_to(hgc_earth)
sc_hgc_mars = sc_hgc_earth.transform_to(hgc_mars)
sc_hgc_sun = sc_hgc_mars.transform_to(hgc_sun)
ltt_earth = hgc_earth.observer.radius / speed_of_light
assert_quantity_allclose(sc_hgc_earth.lon - sc_hgc_sun.lon, ltt_earth * sidereal_rotation_rate)
ltt_mars = hgc_mars.observer.radius / speed_of_light
assert_quantity_allclose(sc_hgc_mars.lon - sc_hgc_sun.lon, ltt_mars * sidereal_rotation_rate)
def test_hcc_hcc():
# Test same observer and changing obstime
observer = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-02-01')
from_hcc = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer, obstime='2001-01-01')
to_hcc = from_hcc.transform_to(Heliocentric(observer=observer, obstime='2001-03-31'))
# Since the observer is the same, the coordinates should be nearly the same but not exactly
# equal due to motion of the origin (the Sun)
assert np.all(from_hcc.cartesian.xyz != to_hcc.cartesian.xyz)
assert_quantity_allclose(from_hcc.cartesian.xyz, to_hcc.cartesian.xyz, rtol=2e-3)
# Test changing observer and same obstime
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')
observer2 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-03-31')
from_hcc = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer1, obstime='2001-02-01')
to_hcc = from_hcc.transform_to(Heliocentric(observer=observer2, obstime='2001-02-01'))
# This change in observer is approximately a 90-degree rotation about the Y axis
assert_quantity_allclose(to_hcc.x, -from_hcc.z, rtol=2e-3)
assert_quantity_allclose(to_hcc.y, from_hcc.y, rtol=2e-3)
assert_quantity_allclose(to_hcc.z, from_hcc.x, rtol=2e-3)
def test_hcc_hgs_observer_mismatch():
# Test whether the transformation gives the same answer regardless of what obstime the observer
# coordinate is represented in
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')
observer2 = observer1.transform_to(HeliographicStonyhurst(obstime='2001-03-31'))
hcc1 = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer1, obstime=observer1.obstime)
hgs1 = hcc1.transform_to(HeliographicStonyhurst(obstime=hcc1.obstime))
hcc2 = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer2, obstime=observer1.obstime)
hgs2 = hcc2.transform_to(HeliographicStonyhurst(obstime=hcc2.obstime))
assert_quantity_allclose(hgs1.lon, hgs2.lon)
assert_quantity_allclose(hgs1.lat, hgs2.lat)
assert_quantity_allclose(hgs1.radius, hgs2.radius)
def test_hgs_hcc_observer_mismatch():
# Test whether the transformation gives the same answer regardless of what obstime the observer
# coordinate is represented in
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')
observer2 = observer1.transform_to(HeliographicStonyhurst(obstime='2001-03-31'))
hgs = HeliographicStonyhurst(20*u.deg, 40*u.deg, 0.5*u.AU, obstime=observer1.obstime)
hcc1 = hgs.transform_to(Heliocentric(observer=observer1, obstime=hgs.obstime))
hcc2 = hgs.transform_to(Heliocentric(observer=observer2, obstime=hgs.obstime))
assert_quantity_allclose(hcc1.cartesian.xyz, hcc2.cartesian.xyz)
def test_hgs_hcrs_sunspice():
# Compare our HGS->HCRS transformation against SunSPICE by transforming beyond it
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
# "HAE" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0
#
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HAE', /au, /degrees
# IDL> print, coord
# 1.0000000 -108.65371 10.642778
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.transform_to(HeliocentricMeanEcliptic)
assert_quantity_allclose(new.lon, Longitude(-108.65371*u.deg), atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.642778*u.deg, atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.radius)
# Transform to HAE precessed to the mean ecliptic of date instead of J2000.0
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HAE', /precess, /au, /degrees
# IDL> print, coord
# 1.0000000 -108.38240 10.640314
new = old.transform_to(HeliocentricMeanEcliptic(equinox='2019-06-01'))
assert_quantity_allclose(new.lon, Longitude(-108.38240*u.deg), atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.640314*u.deg, atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.radius)
def test_hgs_hgc_sunspice():
# Compare our HGS->HGC transformation against SunSPICE
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
# "Carrington" does not include light travel time to the observer, which our HGC includes
#
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'Carrington', /au, /degrees
# IDL> print, coord
# 1.0000000 16.688242 10.000000
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.transform_to(HeliographicCarrington(observer='earth'))
# Calculate the difference in longitude due to light travel time from the Sun to the Earth
delta_lon = sidereal_rotation_rate * (sun.earth_distance(old.obstime) - _RSUN) / speed_of_light
assert_quantity_allclose(new.lon, 16.688242*u.deg + delta_lon, atol=1e-2*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.radius, old.radius)
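# Rough magnitude of the light-travel-time correction above (an illustrative estimate):
# (earth_distance - R_sun) / c is about 8.3 minutes, and the sidereal rotation rate is
# about 14.18 deg/day, so delta_lon ~ 500 s * 14.18 deg / 86400 s ~ 0.08 deg, meaning
# new.lon should come out near 16.77 deg rather than the SunSPICE value of 16.688 deg.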
def test_hgs_hcc_sunspice():
# Compare our HGS->HCC transformation against SunSPICE
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
# "HGRTN" is equivalent to our Heliocentric, but with the axes permuted
# SunSPICE, like us, assumes an Earth observer if not explicitly specified
#
# IDL> coord = [7d5, 8d5, 9d5]
# IDL> convert_sunspice_coord, '2019-06-01', coord, 'HEQ', 'HGRTN'
# Assuming Earth observation
# IDL> print, coord
# 688539.32 800000.00 908797.89
old = SkyCoord(CartesianRepresentation([7e5, 8e5, 9e5]*u.km),
frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.transform_to(Heliocentric(observer='earth'))
assert_quantity_allclose(new.x, 800000.00*u.km, atol=1e-2*u.km)
assert_quantity_allclose(new.y, 908797.89*u.km, atol=1e-2*u.km)
assert_quantity_allclose(new.z, 688539.32*u.km, atol=1e-2*u.km)
def test_hpc_hgs_implicit_hcc():
# An HPC->HGS transformation should give the same answer whether the transformation step
# through HCC is implicit or explicit
start = SkyCoord(0*u.arcsec, 0*u.arcsec, 0.5*u.AU,
frame=Helioprojective(obstime='2019-06-01', observer='earth'))
frame = HeliographicStonyhurst(obstime='2019-12-01')
implicit = start.transform_to(frame)
explicit1 = start.transform_to(Heliocentric(obstime=start.obstime, observer='earth')).\
transform_to(frame)
explicit2 = start.transform_to(Heliocentric(obstime=frame.obstime, observer='earth')).\
transform_to(frame)
assert_quantity_allclose(implicit.separation_3d(explicit1), 0*u.AU, atol=1e-10*u.AU)
assert_quantity_allclose(implicit.separation_3d(explicit2), 0*u.AU, atol=1e-10*u.AU)
@pytest.mark.skipif(astropy.__version__ < '3.2.0', reason="Not supported by Astropy <3.2")
def test_velocity_hcrs_hgs():
# Obtain the position/velocity of Earth in ICRS
obstime = Time(['2019-01-01', '2019-04-01', '2019-07-01', '2019-10-01'])
pos, vel = get_body_barycentric_posvel('earth', obstime)
loc = pos.with_differentials(vel.represent_as(CartesianDifferential))
earth = SkyCoord(loc, frame='icrs', obstime=obstime)
# The velocity of Earth in HGS should be very close to zero. The velocity in the HGS Y
# direction is slightly further away from zero because there is true latitudinal motion.
new = earth.heliographic_stonyhurst
assert_quantity_allclose(new.velocity.d_x, 0*u.km/u.s, atol=1e-15*u.km/u.s)
assert_quantity_allclose(new.velocity.d_y, 0*u.km/u.s, atol=1e-14*u.km/u.s)
assert_quantity_allclose(new.velocity.d_x, 0*u.km/u.s, atol=1e-15*u.km/u.s)
# Test the loopback to ICRS
newer = new.icrs
assert_quantity_allclose(newer.velocity.d_x, vel.x)
assert_quantity_allclose(newer.velocity.d_y, vel.y)
assert_quantity_allclose(newer.velocity.d_z, vel.z)
def test_velocity_hgs_hgc():
# Construct a simple HGS coordinate with zero velocity
obstime = Time(['2019-01-01', '2019-04-01', '2019-07-01', '2019-10-01'])
pos = CartesianRepresentation(1, 0, 0)*u.AU
vel = CartesianDifferential(0, 0, 0)*u.km/u.s
loc = (pos.with_differentials(vel))._apply('repeat', obstime.size)
coord = SkyCoord(HeliographicStonyhurst(loc, obstime=obstime))
# The induced velocity in HGC should be entirely longitudinal, and approximately equal to one
# full rotation every mean synodic period (27.2753 days)
hgc_frame = HeliographicCarrington(observer='earth', obstime=obstime)
new = coord.transform_to(hgc_frame)
new_vel = new.data.differentials['s'].represent_as(SphericalDifferential, new.data)
assert_quantity_allclose(new_vel.d_lon, -360*u.deg / (27.2753*u.day), rtol=1e-2)
assert_quantity_allclose(new_vel.d_lat, 0*u.deg/u.s)
assert_quantity_allclose(new_vel.d_distance, 0*u.km/u.s, atol=1e-7*u.km/u.s)
def test_hme_hee_sunspice():
# Compare our HME->HEE transformation against SunSPICE
# "HAE" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0
#
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'HEE', /au, /degrees
# IDL> print, coord
# 1.0000000 110.01610 10.000300
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricMeanEcliptic(obstime='2019-06-01'))
new = old.transform_to(HeliocentricEarthEcliptic)
assert_quantity_allclose(new.lon, Longitude(110.01610*u.deg), atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.000300*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.distance)
# Transform from HAE precessed to the mean ecliptic of date instead of J2000.0
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'HEE', /au, /degrees, /precess
# IDL> print, coord
# 1.0000000 109.74535 10.000070
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricMeanEcliptic(obstime='2019-06-01',
equinox='2019-06-01'))
new = old.transform_to(HeliocentricEarthEcliptic)
assert_quantity_allclose(new.lon, Longitude(109.74535*u.deg), atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.000070*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.distance)
def test_hee_hee():
# Test HEE loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricEarthEcliptic(obstime=obstime))
new = old.transform_to(HeliocentricEarthEcliptic)
assert_quantity_allclose(new.lon, old.lon)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.distance)
new = old.transform_to(HeliocentricEarthEcliptic(obstime=obstime + 1*u.day))
assert_quantity_allclose(new.lon, old.lon - 1*u.deg, atol=0.1*u.deg) # due to Earth motion
assert_quantity_allclose(new.lat, old.lat, atol=0.5*u.arcsec)
assert_quantity_allclose(new.distance, old.distance, rtol=1e-5)
def test_hee_gse_sunspice():
# Compare our HEE->GSE transformation against SunSPICE
#
# IDL> coord = [0.7d, -20.d, 10.d]
# IDL> convert_sunspice_coord, '2019-06-01', coord, 'HEE', 'GSE', /au, /degrees
# IDL> print, coord
# 0.45215884 32.777377 15.594639
old = SkyCoord(-20*u.deg, 10*u.deg, 0.7*u.AU,
frame=HeliocentricEarthEcliptic(obstime='2019-06-01'))
new = old.geocentricsolarecliptic
assert_quantity_allclose(new.lon, 32.777377*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 15.594639*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, 0.45215884*u.AU)
def test_gse_gse():
# Test GSE loopback transformation
old = SkyCoord(90*u.deg, 10*u.deg, 0.7*u.AU,
frame=GeocentricSolarEcliptic(obstime='2001-01-01'))
new = old.transform_to(GeocentricSolarEcliptic)
assert_quantity_allclose(new.lon, old.lon)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.distance)
def test_hgs_hci_sunspice():
# Compare our HGS->HCI transformation against SunSPICE
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
#
# IDL> coord = [1.d, 120.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HCI', /au, /degrees
# IDL> print, coord
# 1.0000000 -65.736793 10.000000
old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.transform_to(HeliocentricInertial)
assert_quantity_allclose(new.lon, -65.736793*u.deg, atol=0.5*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.radius)
def test_hci_hci():
# Test HCI loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 0.7*u.AU, frame=HeliocentricInertial(obstime=obstime))
new = old.transform_to(HeliocentricInertial)
assert_quantity_allclose(new.lon, old.lon)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.distance)
new = old.transform_to(HeliocentricInertial(obstime=obstime + 1*u.day))
assert_quantity_allclose(new.lon, old.lon, atol=0.1*u.deg) # due to Earth motion
assert_quantity_allclose(new.lat, old.lat, atol=1e-3*u.deg)
assert_quantity_allclose(new.distance, old.distance, atol=1e-5*u.AU)
def test_hme_gei_sunspice():
# Compare our HME->GEI transformation against SunSPICE
# "HAE" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0
#
# IDL> coord = [1.d, 120.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'GEI', /au, /degrees
# IDL> print, coord
# 1.8197210 95.230617 28.830109
old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU,
frame=HeliocentricMeanEcliptic(obstime='2019-06-01'))
new = old.transform_to(GeocentricEarthEquatorial)
assert_quantity_allclose(new.lon, Longitude(95.230617*u.deg), atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 28.830109*u.deg, atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, 1.8197210*u.AU)
# Transform from HAE precessed to the mean ecliptic of date instead of J2000.0
# IDL> coord = [1.d, 120.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'GEI', /au, /degrees, /precess
# IDL> print, coord
# 1.8217103 95.079030 28.827750
old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU,
frame=HeliocentricMeanEcliptic(obstime='2019-06-01', equinox='2019-06-01'))
new = old.transform_to(GeocentricEarthEquatorial(equinox=_J2000))
assert_quantity_allclose(new.lon, Longitude(95.079030*u.deg), atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 28.827750*u.deg, atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, 1.8217103*u.AU)
def test_gei_gei():
# Test GEI loopback transformation using the 2017 revision to Franz & Harper 2002
t = Time('1996-08-28 16:46:00', scale='tt')
gei_j2000 = CartesianRepresentation([-5.7840451, -4.1082375, 1.9146822] * (6378.14*u.km))
gei_d = CartesianRepresentation([-5.7864918, -4.1039136, 1.9165612] * (6378.14*u.km))
old = SkyCoord(gei_j2000, frame=GeocentricEarthEquatorial(obstime=t))
new = old.transform_to(GeocentricEarthEquatorial(equinox=t, obstime=t)).cartesian
assert_quantity_allclose(new.xyz, gei_d.xyz)
def test_no_observer():
# Tests transformations to and from observer-based frames with no observer defined
frames_in = [Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None, obstime='2001-01-01'),
Helioprojective(0*u.deg, 0*u.deg, observer=None),
Helioprojective(0*u.deg, 0*u.deg, observer=None, obstime='2001-01-01')]
frames_out = frames_in + [
HeliographicStonyhurst(0*u.deg, 0*u.deg, obstime=None),
HeliographicStonyhurst(0*u.deg, 0*u.deg, obstime='2001-01-01'),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None, obstime='2012-12-12'),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer="earth", obstime=None),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer="earth", obstime='2001-01-01'),
Helioprojective(0*u.deg, 0*u.deg, observer=None, obstime='2012-12-12'),
Helioprojective(0*u.deg, 0*u.deg, observer="earth", obstime=None),
Helioprojective(0*u.deg, 0*u.deg, observer="earth", obstime='2001-01-01')]
# Self-transformations should succeed
for f in frames_in:
f.transform_to(f.replicate_without_data())
# All other transformations should error
for i, f1 in enumerate(frames_in):
for f2 in frames_out[i + 1:]:
with pytest.raises(ConvertError):
f1.transform_to(f2)
with pytest.raises(ConvertError):
f2.transform_to(f1)
def test_array_obstime():
# Validate that you can transform from an array of obstimes to no obstimes,
# or different obstimes.
a = SkyCoord([10]*2, [10]*2, unit=u.deg,
observer="earth",
obstime=["2019-01-01", "2019-01-02"],
frame="heliographic_carrington")
t = a.transform_to(Helioprojective)
assert isinstance(t.frame, Helioprojective)
t2 = a.transform_to(Helioprojective(obstime=["2019-01-03", "2019-01-04"]))
assert isinstance(t2.frame, Helioprojective)
_frames_wo_observer = [HeliographicStonyhurst, HeliocentricInertial,
HeliocentricEarthEcliptic, GeocentricSolarEcliptic,
GeocentricEarthEquatorial]
@pytest.mark.parametrize("frame_class", _frames_wo_observer)
def test_convert_error_with_no_obstime(frame_class):
# For most transformations, we do not allow `obstime` to be `None`
frame = frame_class(CartesianRepresentation(0, 0, 0)*u.km, obstime=None)
with pytest.raises(ConvertError, match=r".*obstime.*"):
ICRS(0*u.deg, 0*u.deg, 0*u.AU).transform_to(frame)
with pytest.raises(ConvertError, match=r".*obstime.*"):
frame.transform_to(ICRS)
# Convenience function to check whether a transformation succeeds if the target `obstime` is `None`
def assert_no_obstime_on_target_end(start_class, end_class):
start_obstime = Time("2001-01-01")
if hasattr(start_class, 'observer'):
coord = start_class(CartesianRepresentation(0, 0, 0)*u.km,
obstime=start_obstime, observer="earth")
else:
coord = start_class(CartesianRepresentation(0, 0, 0)*u.km, obstime=start_obstime)
result = coord.transform_to(end_class)
assert result.obstime == start_obstime
# We currently allow the target `obstime` to be `None` for the transformation subgraph
# below `HeliographicStonyhurst`, but this may change in the future
_frameset1 = [HeliographicStonyhurst, HeliocentricInertial]
_frameset2 = [HeliographicCarrington, Heliocentric, Helioprojective]
@pytest.mark.parametrize("start_class", _frameset1 + _frameset2)
@pytest.mark.parametrize("end_class", _frameset1)
def test_no_obstime_on_target_end_hgs_subgraph(start_class, end_class):
assert_no_obstime_on_target_end(start_class, end_class)
# We currently allow the target `obstime` to be `None` for the transformation subgraph
# below `HeliocentricEarthEcliptic`, but this may change in the future
_frameset3 = [HeliocentricEarthEcliptic, GeocentricSolarEcliptic]
@pytest.mark.parametrize("start_class", _frameset3)
@pytest.mark.parametrize("end_class", _frameset3)
def test_no_obstime_on_target_end_hee_subgraph(start_class, end_class):
assert_no_obstime_on_target_end(start_class, end_class)
def test_transform_with_sun_center():
sun_center = SkyCoord(0*u.deg, 0*u.deg, 0*u.AU,
frame=HeliographicStonyhurst(obstime="2001-01-01"))
with transform_with_sun_center():
result1 = sun_center.transform_to(HeliographicStonyhurst(obstime="2001-02-01"))
# The coordinate should stay pointing at Sun center
assert_quantity_allclose(result1.lon, sun_center.lon)
assert_quantity_allclose(result1.lat, sun_center.lat)
assert_quantity_allclose(result1.radius, sun_center.radius)
other = SkyCoord(10*u.deg, 20*u.deg, 1*u.AU,
frame=HeliographicStonyhurst(obstime="2001-01-01"))
with transform_with_sun_center():
result2 = other.transform_to(HeliographicCarrington(observer='earth', obstime="2001-02-01"))
# The coordinate should stay at the same latitude and the same distance from Sun center
assert_quantity_allclose(result2.lat, other.lat)
assert_quantity_allclose(result2.radius, other.radius)
def test_transform_with_sun_center_reset():
# This test sequence ensures that the context manager resets properly
sun_center = SkyCoord(0*u.deg, 0*u.deg, 0*u.AU,
frame=HeliographicStonyhurst(obstime="2001-01-01"))
end_frame = HeliocentricInertial(obstime="2001-02-01")
# Without the context manager, the coordinate should not point at Sun center
result1 = sun_center.transform_to(end_frame)
assert result1.lon != sun_center.lon
assert result1.lat != sun_center.lat
assert result1.distance != sun_center.radius
# Using the context manager, the coordinate should point at Sun center
with transform_with_sun_center():
result2 = sun_center.transform_to(end_frame)
assert_quantity_allclose(result2.lon, sun_center.lon)
assert_quantity_allclose(result2.lat, sun_center.lat)
assert_quantity_allclose(result2.distance, sun_center.radius)
# Exiting a nested context manager should not affect the outer context manager
with transform_with_sun_center():
with transform_with_sun_center():
pass
result2a = sun_center.transform_to(end_frame)
assert_quantity_allclose(result2a.lon, result2.lon)
assert_quantity_allclose(result2a.lat, result2.lat)
assert_quantity_allclose(result2a.distance, result2.distance)
# After the context manager, the coordinate should have the same result as the first transform
result3 = sun_center.transform_to(end_frame)
assert_quantity_allclose(result3.lon, result1.lon)
assert_quantity_allclose(result3.lat, result1.lat)
assert_quantity_allclose(result3.distance, result1.distance)
|
the-stack_106_16135
|
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Callable, List
from skdecide.domains import Domain, PipeParallelDomain, ShmParallelDomain
__all__ = ["ParallelSolver"]
class ParallelSolver:
"""A solver must inherit this class if it wants to call several cloned parallel domains in separate concurrent processes.
The solver is meant to be called either within a 'with' context statement, or to be cleaned up using the close() method.
"""
def __init__(
self,
domain_factory: Callable[[], Domain],
parallel: bool = False,
shared_memory_proxy=None,
):
"""Creates a parallelizable solver
# Parameters
domain_factory: A callable with no argument returning the domain to solve (factory is the domain class if None).
parallel: True if the solver is run in parallel mode.
shared_memory_proxy: Shared memory proxy to use if not None, otherwise run piped parallel domains.
"""
self._domain_factory = domain_factory
self._parallel = parallel
self._shared_memory_proxy = shared_memory_proxy
self._domain = None
self._lambdas = [] # to define in the inherited class!
self._ipc_notify = False # to define in the inherited class!
def _initialize(self):
"""Launches the parallel domains.
This method requires the solver to have previously recorded self._domain_factory (e.g. after calling _init_solve),
the set of lambda functions passed to the solver's constructor (e.g. the heuristic lambda for heuristic-based solvers),
and whether the parallel domain jobs should notify their status via the IPC protocol (required when interacting with
other programming languages such as C++).
"""
if self._parallel:
if self._shared_memory_proxy is None:
self._domain = PipeParallelDomain(
self._domain_factory,
lambdas=self._lambdas,
ipc_notify=self._ipc_notify,
)
else:
self._domain = ShmParallelDomain(
self._domain_factory,
self._shared_memory_proxy,
lambdas=self._lambdas,
ipc_notify=self._ipc_notify,
)
# Launch parallel domains before creating the algorithm object
# otherwise spawning new processes (the default on Windows)
# will fail trying to pickle the C++ underlying algorithm
self._domain._launch_processes()
else:
self._domain = self._domain_factory()
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver waiting forever for the domain processes to exit.
"""
if self._domain is not None and self._parallel:
self._domain.close()
self._domain = None
def _cleanup(self):
self.close()
def get_domain(self):
"""
Returns the domain, optionally creating a parallel domain if not already created.
"""
if self._domain is None:
self._initialize()
return self._domain
def call_domain_method(self, name, *args):
"""Calls a parallel domain's method.
This is the only way to call a domain method on a parallel domain.
"""
if self._parallel:
process_id = getattr(self._domain, name)(*args)
return self._domain.get_result(process_id)
else:
return getattr(self._domain, name)(*args)
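# Minimal usage sketch (hypothetical solver and domain; not part of scikit-decide's API).
# It only exercises the members defined above: get_domain(), call_domain_method() and close().
#
# class MySolver(ParallelSolver):
#     def __init__(self, domain_factory, parallel=False):
#         super().__init__(domain_factory, parallel=parallel)
#         self._lambdas = []        # no lambda functions to ship to the worker processes
#         self._ipc_notify = False  # no IPC status notifications needed
#
#     def solve(self):
#         try:
#             self.get_domain()                        # lazily launches the (parallel) domain
#             return self.call_domain_method('reset')  # proxied call when running in parallel
#         finally:
#             self.close()                             # joins the domain processes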
|
the-stack_106_16136
|
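# (Prompts and output below are in Portuguese.) The program reads the name, age and sex of
# four people, then reports the group's average age, the name and age of the oldest man,
# and how many women are under 20 years old.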
sidade = 0
hidade = 0
conth = 0
contm = 0
nomeh = ''
for c in range(1, 5):
nome = str(input(f'=-=-=-=-=- DADOS {c}ª PESSOA -=-=-=-=-=\n'
f'Digite o nome da pessoa: ')).lower().strip()
idade = int(input('Digite a idade da pessoa: '))
sexo = str(input('Digite o sexo da pessoa: ')).lower().strip()
if sexo == "m":
conth += 1
if conth == 1:
hidade = idade
nomeh = nome
else:
if hidade < idade:
hidade = idade
nomeh = nome
if sexo == "f" and idade < 20:
contm += 1
sidade += idade
print(f'A média de idade do grupo é {(sidade / 4) :.0f}.\n'
f'O homem mais velho do grupo é {nomeh} com {hidade} anos.\n'
f'O numero de mulheres com menos de 20 anos é {contm}.')
|
the-stack_106_16137
|
import logging
from typing import Any, Dict, List, Optional
import hummingbot.connector.exchange.ascend_ex.ascend_ex_constants as constants
from hummingbot.connector.exchange.ascend_ex.ascend_ex_order_book_message import AscendExOrderBookMessage
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage, OrderBookMessageType
from hummingbot.logger import HummingbotLogger
_logger = None
class AscendExOrderBook(OrderBook):
@classmethod
def logger(cls) -> HummingbotLogger:
global _logger
if _logger is None:
_logger = logging.getLogger(__name__)
return _logger
@classmethod
def snapshot_message_from_exchange(cls,
msg: Dict[str, Any],
timestamp: float,
metadata: Optional[Dict] = None):
"""
Convert json snapshot data into standard OrderBookMessage format
:param msg: json snapshot data from live web socket stream
:param timestamp: timestamp attached to incoming data
:param metadata: a dictionary with extra information to add to snapshot message
:return: AscendExOrderBookMessage
"""
if metadata:
msg.update(metadata)
return AscendExOrderBookMessage(
message_type=OrderBookMessageType.SNAPSHOT,
content=msg,
timestamp=timestamp
)
@classmethod
def diff_message_from_exchange(cls,
msg: Dict[str, Any],
timestamp: Optional[float] = None,
metadata: Optional[Dict] = None):
"""
Convert json diff data into standard OrderBookMessage format
:param msg: json diff data from live web socket stream
:param timestamp: timestamp attached to incoming data
:param metadata: a dictionary with extra information to add to diff message
:return: AscendExOrderBookMessage
"""
if metadata:
msg.update(metadata)
return AscendExOrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content=msg,
timestamp=timestamp
)
@classmethod
def trade_message_from_exchange(cls,
msg: Dict[str, Any],
timestamp: Optional[float] = None,
metadata: Optional[Dict] = None):
"""
Creates a trade message with the information from the trade event sent by the exchange
:param msg: the trade event details sent by the exchange
:param timestamp: timestamp attached to incoming data
:param metadata: a dictionary with extra information to add to trade message
:return: a trade message with the details of the trade as provided by the exchange
"""
if metadata:
msg.update(metadata)
msg.update({
"exchange_order_id": msg.get("seqnum"),
"trade_type": "buy" if msg.get("bm") else "sell",
"price": msg.get("p"),
"amount": msg.get("q"),
})
return AscendExOrderBookMessage(
message_type=OrderBookMessageType.TRADE,
content=msg,
timestamp=timestamp
)
@classmethod
def from_snapshot(cls, snapshot: OrderBookMessage):
raise NotImplementedError(constants.EXCHANGE_NAME + " order book needs to retain individual order data.")
@classmethod
def restore_from_snapshot_and_diffs(cls, snapshot: OrderBookMessage, diffs: List[OrderBookMessage]):
raise NotImplementedError(constants.EXCHANGE_NAME + " order book needs to retain individual order data.")
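# Minimal usage sketch: building a trade message from a hand-written payload. The payload
# is illustrative rather than a verbatim AscendEx message, and only uses the fields
# referenced above ("seqnum", "bm", "p", "q"); "trading_pair" is hypothetical metadata.
#
# sample_trade = {"seqnum": 144115188077966308, "bm": True, "p": "0.06777", "q": "0.55"}
# trade_msg = AscendExOrderBook.trade_message_from_exchange(
#     sample_trade,
#     timestamp=1573069903.254,
#     metadata={"trading_pair": "BTC-USDT"},
# )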
|
the-stack_106_16138
|
import copy
import pprint
from typing import Any, Dict, List, Tuple, Optional, Sequence, TYPE_CHECKING
import numpy as np
from ..constants import TYPE, INTENSITY
from .image import Image
from ..utils import get_subclasses
if TYPE_CHECKING:
from ..transforms import Transform, Compose
class Subject(dict):
"""Class to store information about the images corresponding to a subject.
Args:
*args: If provided, a dictionary of items.
**kwargs: Items that will be added to the subject sample.
Example:
>>> import torchio as tio
>>> # One way:
>>> subject = tio.Subject(
... one_image=tio.ScalarImage('path_to_image.nii.gz'),
... a_segmentation=tio.LabelMap('path_to_seg.nii.gz'),
... age=45,
... name='John Doe',
... hospital='Hospital Juan Negrín',
... )
>>> # If you want to create the mapping before, or have spaces in the keys:
>>> subject_dict = {
... 'one image': tio.ScalarImage('path_to_image.nii.gz'),
... 'a segmentation': tio.LabelMap('path_to_seg.nii.gz'),
... 'age': 45,
... 'name': 'John Doe',
... 'hospital': 'Hospital Juan Negrín',
... }
>>> subject = tio.Subject(subject_dict)
"""
def __init__(self, *args, **kwargs: Dict[str, Any]):
if args:
if len(args) == 1 and isinstance(args[0], dict):
kwargs.update(args[0])
else:
message = (
'Only one dictionary as positional argument is allowed')
raise ValueError(message)
super().__init__(**kwargs)
self._parse_images(self.get_images(intensity_only=False))
self.update_attributes() # this allows me to do e.g. subject.t1
self.applied_transforms = []
def __repr__(self):
num_images = len(self.get_images(intensity_only=False))
string = (
f'{self.__class__.__name__}'
f'(Keys: {tuple(self.keys())}; images: {num_images})'
)
return string
def __copy__(self):
result_dict = {}
for key, value in self.items():
if isinstance(value, Image):
value = copy.copy(value)
else:
value = copy.deepcopy(value)
result_dict[key] = value
new = Subject(result_dict)
new.applied_transforms = self.applied_transforms[:]
return new
def __len__(self):
return len(self.get_images(intensity_only=False))
@staticmethod
def _parse_images(images: List[Tuple[str, Image]]) -> None:
# Check that it's not empty
if not images:
raise ValueError('A subject without images cannot be created')
@property
def shape(self):
"""Return shape of first image in subject.
Consistency of shapes across images in the subject is checked first.
Example::
>>> import torchio as tio
>>> colin = tio.datasets.Colin27()
>>> colin.shape
(1, 181, 217, 181)
"""
self.check_consistent_attribute('shape')
return self.get_first_image().shape
@property
def spatial_shape(self):
"""Return spatial shape of first image in subject.
Consistency of spatial shapes across images in the subject is checked
first.
Example::
>>> import torchio as tio
>>> colin = tio.datasets.Colin27()
>>> colin.spatial_shape
(181, 217, 181)
"""
self.check_consistent_spatial_shape()
return self.get_first_image().spatial_shape
@property
def spacing(self):
"""Return spacing of first image in subject.
Consistency of spacings across images in the subject is checked first.
Example::
>>> import torchio as tio
>>> colin = tio.datasets.Slicer()
>>> colin.spacing
(1.0, 1.0, 1.2999954223632812)
"""
self.check_consistent_attribute('spacing')
return self.get_first_image().spacing
@property
def history(self):
# Kept for backwards compatibility
return self.get_applied_transforms()
def is_2d(self):
return all(i.is_2d() for i in self.get_images(intensity_only=False))
def get_applied_transforms(
self,
ignore_intensity: bool = False,
image_interpolation: Optional[str] = None,
) -> List['Transform']:
from ..transforms.transform import Transform
from ..transforms.intensity_transform import IntensityTransform
name_to_transform = {
cls.__name__: cls
for cls in get_subclasses(Transform)
}
transforms_list = []
for transform_name, arguments in self.applied_transforms:
transform = name_to_transform[transform_name](**arguments)
if ignore_intensity and isinstance(transform, IntensityTransform):
continue
resamples = hasattr(transform, 'image_interpolation')
if resamples and image_interpolation is not None:
parsed = transform.parse_interpolation(image_interpolation)
transform.image_interpolation = parsed
transforms_list.append(transform)
return transforms_list
def get_composed_history(
self,
ignore_intensity: bool = False,
image_interpolation: Optional[str] = None,
) -> 'Compose':
from ..transforms.augmentation.composition import Compose
transforms = self.get_applied_transforms(
ignore_intensity=ignore_intensity,
image_interpolation=image_interpolation,
)
return Compose(transforms)
def get_inverse_transform(
self,
warn: bool = True,
ignore_intensity: bool = True,
image_interpolation: Optional[str] = None,
) -> 'Compose':
"""Get a reversed list of the inverses of the applied transforms.
Args:
warn: Issue a warning if some transforms are not invertible.
ignore_intensity: If ``True``, all instances of
:class:`~torchio.transforms.intensity_transform.IntensityTransform`
will be ignored.
image_interpolation: Modify interpolation for scalar images inside
transforms that perform resampling.
"""
history_transform = self.get_composed_history(
ignore_intensity=ignore_intensity,
image_interpolation=image_interpolation,
)
inverse_transform = history_transform.inverse(warn=warn)
return inverse_transform
def apply_inverse_transform(self, **kwargs) -> 'Subject':
"""Try to apply the inverse of all applied transforms, in reverse order.
Args:
**kwargs: Keyword arguments passed on to
:meth:`~torchio.data.subject.Subject.get_inverse_transform`.
"""
inverse_transform = self.get_inverse_transform(**kwargs)
transformed = inverse_transform(self)
transformed.clear_history()
return transformed
def clear_history(self) -> None:
self.applied_transforms = []
def check_consistent_attribute(self, attribute: str) -> None:
values_dict = {}
iterable = self.get_images_dict(intensity_only=False).items()
for image_name, image in iterable:
values_dict[image_name] = getattr(image, attribute)
num_unique_values = len(set(values_dict.values()))
if num_unique_values > 1:
message = (
f'More than one {attribute} found in subject images:'
f'\n{pprint.pformat(values_dict)}'
)
raise RuntimeError(message)
def check_consistent_spatial_shape(self) -> None:
self.check_consistent_attribute('spatial_shape')
def check_consistent_orientation(self) -> None:
self.check_consistent_attribute('orientation')
def check_consistent_affine(self):
# https://github.com/fepegar/torchio/issues/354
affine = None
first_image = None
iterable = self.get_images_dict(intensity_only=False).items()
for image_name, image in iterable:
if affine is None:
affine = image.affine
first_image = image_name
elif not np.allclose(affine, image.affine, rtol=1e-6, atol=1e-6):
message = (
f'Images "{first_image}" and "{image_name}" do not occupy'
' the same physical space.'
f'\nAffine of "{first_image}":'
f'\n{pprint.pformat(affine)}'
f'\nAffine of "{image_name}":'
f'\n{pprint.pformat(image.affine)}'
)
raise RuntimeError(message)
def check_consistent_space(self):
self.check_consistent_spatial_shape()
self.check_consistent_affine()
def get_images_dict(
self,
intensity_only=True,
include: Optional[Sequence[str]] = None,
exclude: Optional[Sequence[str]] = None,
) -> Dict[str, Image]:
images = {}
for image_name, image in self.items():
if not isinstance(image, Image):
continue
if intensity_only and not image[TYPE] == INTENSITY:
continue
if include is not None and image_name not in include:
continue
if exclude is not None and image_name in exclude:
continue
images[image_name] = image
return images
def get_images(
self,
intensity_only=True,
include: Optional[Sequence[str]] = None,
exclude: Optional[Sequence[str]] = None,
) -> List[Image]:
images_dict = self.get_images_dict(
intensity_only=intensity_only,
include=include,
exclude=exclude,
)
return list(images_dict.values())
def get_first_image(self) -> Image:
return self.get_images(intensity_only=False)[0]
# flake8: noqa: F821
def add_transform(
self,
transform: 'Transform',
parameters_dict: dict,
) -> None:
self.applied_transforms.append((transform.name, parameters_dict))
def load(self) -> None:
"""Load images in subject on RAM."""
for image in self.get_images(intensity_only=False):
image.load()
def update_attributes(self) -> None:
# This allows to get images using attribute notation, e.g. subject.t1
self.__dict__.update(self)
def add_image(self, image: Image, image_name: str) -> None:
"""Add an image."""
self[image_name] = image
self.update_attributes()
def remove_image(self, image_name: str) -> None:
"""Remove an image."""
del self[image_name]
delattr(self, image_name)
def plot(self, **kwargs) -> None:
"""Plot images using matplotlib.
Args:
**kwargs: Keyword arguments that will be passed on to
:meth:`~torchio.Image.plot`.
"""
from ..visualization import plot_subject # avoid circular import
plot_subject(self, **kwargs)
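# Round-trip sketch of the transform history API defined above (paths are placeholders):
#
# >>> import torchio as tio
# >>> subject = tio.Subject(t1=tio.ScalarImage('path_to_image.nii.gz'))
# >>> transformed = tio.RandomAffine()(subject)
# >>> transformed.get_applied_transforms()   # one entry, recorded via add_transform()
# >>> restored = transformed.apply_inverse_transform(warn=False)
# >>> restored.applied_transforms            # cleared by clear_history()
# []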
|
the-stack_106_16140
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
"""This object is returned when an error occurs in the Maps API.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar code: Error code.
:vartype code: str
:ivar message: If available, a human readable description of the error.
:vartype message: str
:ivar target: If available, the component generating the error.
:vartype target: str
:ivar details: If available, a list of additional details about the error.
:vartype details: list[~azure.mgmt.maps.models.ErrorDetailsItem]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetailsItem]'},
}
def __init__(self, **kwargs):
super(Error, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
class ErrorException(HttpOperationError):
"""Server responsed with exception of type: 'Error'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
|
the-stack_106_16143
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Matilda Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import MatildaTestFramework
from test_framework.test_node import ErrorMatch
class LoggingTest(MatildaTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def relative_log_path(self, name):
return os.path.join(self.nodes[0].datadir, self.chain, name)
def run_test(self):
# test default log file name
default_log_path = self.relative_log_path("debug.log")
assert os.path.isfile(default_log_path)
# test alternative log file name in datadir
self.restart_node(0, ["-debuglogfile=foo.log"])
assert os.path.isfile(self.relative_log_path("foo.log"))
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
self.restart_node(0, ["-debuglogfile=%s" % tempname])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
invdir = self.relative_log_path("foo")
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
exp_stderr = r"Error: Could not open debug log file \S+$"
self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % (invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % invalidname], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that -nodebuglogfile disables logging
self.stop_node(0)
os.unlink(default_log_path)
assert not os.path.isfile(default_log_path)
self.start_node(0, ["-nodebuglogfile"])
assert not os.path.isfile(default_log_path)
# just sanity check no crash here
self.restart_node(0, ["-debuglogfile=%s" % os.devnull])
if __name__ == '__main__':
LoggingTest().main()
|
the-stack_106_16144
|
from collections.abc import Collection, Iterable, Iterator, Sequence
from typing import Any, Generic, TypeVar, overload
T = TypeVar('T')
S = TypeVar('S', bound=Sequence)
def _get_slice_value(o: Any, if_none: int) -> int:
if o is None:
return if_none
if isinstance(o, int):
return o
if (idx := getattr(o, '__index__', None)) and callable(idx) and isinstance(i := idx(), int):
# noinspection PyUnboundLocalVariable
return i
raise TypeError(f'slice indices must be integers or None or have an __index__ method, got {o!r}')
class CyclicSequence(Generic[T]):
__slots__ = ()
def _convert_index(self, i: int, /) -> int:
return i % len(self)
def indices_between(self, start: int, stop: int, step: int = 1, /) -> Iterator[int]:
if step == 0:
raise ValueError('step cannot be zero')
if step > 0:
compare = int.__lt__
else:
compare = int.__gt__
if compare(start, stop):
start_idx = self._convert_index(start)
yield start_idx
start += step
# yield indices until the first yielded index is met again
while compare(start, stop) and (y := self._convert_index(start)) != start_idx:
# noinspection PyUnboundLocalVariable
yield y
start += step
def _slice_indices(self, s: slice, /) -> Iterator[int]:
start = _get_slice_value(s.start, 0)
stop = _get_slice_value(s.stop, len(self))
step = _get_slice_value(s.step, 1)
return self.indices_between(start, stop, step)
def _check_emptiness(self, /):
if len(self) == 0:
raise IndexError(f'{self.__class__.__name__} is empty')
@overload
def __getitem__(self, index: int, /) -> T: ...
@overload
def __getitem__(self: S, indices: slice, /) -> S: ...
def __getitem__(self, item, /):
self._check_emptiness()
if isinstance(item, int):
return super().__getitem__(self._convert_index(item))
if isinstance(item, slice):
getitem = super().__getitem__
# noinspection PyArgumentList
return self.__class__(getitem(i) for i in self._slice_indices(item))
raise TypeError(f'{self.__class__.__name__} indices must be integers or slices, '
f'not {item.__class__.__name__}')
def index(self, value: T, start: int = 0, stop: int = None) -> int:
if stop is None:
stop = len(self)
if len(self) != 0:
getitem = super().__getitem__
for i in self.indices_between(start, stop):
v = getitem(i)
if v is value or v == value:
return i
raise ValueError(f'{value!r} is not in {self.__class__.__name__}')
class MutableCyclicSequence(CyclicSequence[T]):
__slots__ = ()
@overload
def __setitem__(self, index: int, value: T, /): ...
@overload
def __setitem__(self, indices: slice, values: Iterable[T], /): ...
def __setitem__(self, item, value, /):
self._check_emptiness()
if isinstance(item, int):
super().__setitem__(self._convert_index(item), value)
elif isinstance(item, slice):
indices = tuple(self._slice_indices(item))
setitem = super().__setitem__
if not isinstance(value, Collection):
value = tuple(value)
if len(indices) != len(value):
raise ValueError(f'slice and value have different lengths, {len(indices)} and {len(value)}')
for i, v in zip(indices, value):
setitem(i, v)
else:
raise TypeError(f'{self.__class__.__name__} indices must be integers or slices, '
f'not {item.__class__.__name__}')
@overload
def __delitem__(self, index: int, /): ...
@overload
def __delitem__(self, indices: slice, /): ...
def __delitem__(self, item, /):
self._check_emptiness()
if isinstance(item, int):
super().__delitem__(self._convert_index(item))
elif isinstance(item, slice):
indices = set(self._slice_indices(item))
delitem = super().__delitem__
for i in range(len(self) - 1, -1, -1):
                if i in indices:
delitem(i)
else:
raise TypeError(f'{self.__class__.__name__} indices must be integers or slices, '
f'not {item.__class__.__name__}')
class CyclicTuple(CyclicSequence, tuple):
__slots__ = ()
class CyclicList(MutableCyclicSequence, list):
__slots__ = ()
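# Minimal usage sketch (assumption: not part of the original module). Reads, writes
# and slices wrap their indices modulo len(self), as implemented above.
if __name__ == '__main__':
    cl = CyclicList([10, 20, 30])
    assert cl[4] == 20              # 4 % 3 == 1
    assert cl[-1] == 30             # -1 % 3 == 2
    cl[3] = 99                      # wraps to index 0
    assert cl == [99, 20, 30]
    ct = CyclicTuple('abc')
    assert ct[7] == 'b'             # 7 % 3 == 1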
|
the-stack_106_16145
|
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from unittest import TestCase
from unittest.mock import Mock
from cibyl.cli.output import OutputStyle
from cibyl.outputs.cli.ci.env.factory import CIPrinterFactory
from cibyl.utils.colors import ClearText, DefaultPalette
class TestCIPrinterFactory(TestCase):
def test_unknown_style(self):
query = Mock()
verbosity = Mock()
factory = CIPrinterFactory()
with self.assertRaises(NotImplementedError):
factory.from_style(-1, query, verbosity)
def test_returns_clear_text_printer(self):
query = Mock()
verbosity = Mock()
factory = CIPrinterFactory()
result = factory.from_style(OutputStyle.TEXT, query, verbosity)
self.assertEqual(query, result.query)
self.assertEqual(verbosity, result.verbosity)
self.assertIsInstance(result.palette, ClearText)
def test_returns_colored_text_printer(self):
query = Mock()
verbosity = Mock()
factory = CIPrinterFactory()
result = factory.from_style(OutputStyle.COLORIZED, query, verbosity)
self.assertEqual(query, result.query)
self.assertEqual(verbosity, result.verbosity)
self.assertIsInstance(result.palette, DefaultPalette)
|
the-stack_106_16149
|
"""
The command line interface. Trains a directory of data.
"""
from .configuration import Configuration
from .inputs import *
from hypergan.gan_component import ValidationException
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.process_manager import ProcessManager
from hypergan.trainable_gan import TrainableGAN
from time import sleep
import gc
import hyperchamber as hc
import hypergan as hg
import numpy as np
import os
import shutil
import sys
import tempfile
import time
class CLI:
def __init__(self, args={}, input_config=None, gan_config=None):
self.steps = 0
self.should_sample=False
self.gan_config = gan_config
self.input_config = input_config
args = hc.Config(args)
self.args = args
self.devices = args.devices
crop = self.args.crop
self.config_name = self.args.config or 'default'
self.method = args.method or 'test'
self.total_steps = args.steps or -1
self.sample_every = self.args.sample_every or 100
self.sampler_name = args.sampler
self.sampler = None
self.sample_path = "samples/%s" % self.config_name
self.loss_every = self.args.loss_every or 1
if (self.args.save_losses):
import matplotlib.pyplot as plt
self.arr = []
self.fig,self.ax = plt.subplots()
self.temp = 0
self.advSavePath = os.path.abspath("saves/"+self.config_name)+"/"
if self.args.save_file:
self.save_file = self.args.save_file + "/"
else:
default_save_path = os.path.abspath("saves/"+self.config_name)
self.save_file = default_save_path + "/"
self.create_path(self.save_file)
title = "[hypergan] " + self.config_name
self.process_manager = ProcessManager()
if self.args.method == 'train' or self.args.method == 'sample':
if self.args.server:
self.process_manager.spawn_websocket_server()
#GlobalViewer.set_options(
# enable_menu = self.args.menu,
# title = title,
# viewer_size = self.args.viewer_size,
# enabled = self.args.viewer,
# zoom = self.args.zoom)
def lazy_create(self):
if(self.sampler == None):
self.sampler = self.gan.sampler_for(self.sampler_name)(self.gan, samples_per_row=self.args.width)
if(self.sampler == None):
raise ValidationException("No sampler found by the name '"+self.sampler_name+"'")
def step(self):
self.steps+=1
self.trainable_gan.step()
if(self.steps % self.sample_every == 0):
sample_list = self.trainable_gan.sample(self.sampler, self.sample_path)
def create_path(self, filename):
return os.makedirs(os.path.expanduser(os.path.dirname(filename)), exist_ok=True)
def create_input(self, blank=False, rank=None):
klass = GANComponent.lookup_function(None, self.input_config['class'])
self.input_config["blank"]=blank
self.input_config["rank"]=rank
return klass(self.input_config)
def build(self):
return self.gan.build()
def serve(self, gan):
return gan_server(self.gan.session, config)
def sample_forever(self):
self.gan.inputs.next()
steps = 0
while not self.gan.destroy and (steps <= self.args.steps or self.args.steps == -1):
self.trainable_gan.sample(self.sampler, self.sample_path)
steps += 1
def train(self):
i=0
if(self.args.ipython):
import fcntl
fd = sys.stdin.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self.gan = hg.GAN(config=self.gan_config, inputs=self.create_input(), device=self.args.parameter_server_device)
self.gan.inputs.next()
self.lazy_create()
self.trainable_gan = hg.TrainableGAN(self.gan, save_file = self.save_file, devices = self.devices, backend_name = self.args.backend)
if self.trainable_gan.load():
print("Model loaded")
else:
print("Initializing new model")
self.trainable_gan.sample(self.sampler, self.sample_path)
while((self.steps < self.total_steps or self.total_steps == -1) and not self.gan.destroy):
self.step()
if self.should_sample:
self.should_sample = False
self.sample(False)
if (self.args.save_every != None and
self.args.save_every != -1 and
self.args.save_every > 0 and
self.steps % self.args.save_every == 0):
print(" |= Saving network")
self.trainable_gan.save()
self.create_path(self.advSavePath+'advSave.txt')
if os.path.isfile(self.advSavePath+'advSave.txt'):
with open(self.advSavePath+'advSave.txt', 'w') as the_file:
the_file.write(str(self.samples)+"\n")
if self.args.ipython:
self.check_stdin()
print("Done training model. Saving")
self.trainable_gan.save()
print("============================")
print("HyperGAN model trained")
print("============================")
def check_stdin(self):
try:
input = sys.stdin.read()
if input[0]=="y":
return
from IPython import embed
# Misc code
embed()
except:
return
def new(self):
if self.args.toml:
config_format = '.toml'
else:
config_format = '.json'
template = self.args.directory + config_format
print("[hypergan] Creating new configuration file '"+template+"' based off of '"+self.config_name+config_format)
if os.path.isfile(template):
raise ValidationException("File exists: " + template)
source_configuration = Configuration.find(self.config_name+config_format, config_format=config_format, prepackaged=True)
shutil.copyfile(source_configuration, template)
return
def run(self):
if self.method == 'train':
self.train()
elif self.method == 'build':
self.gan = hg.GAN(config=self.gan_config, inputs=self.create_input(blank=True))
if not self.gan.load(self.save_file):
raise ValidationException("Could not load model: "+ self.save_file)
self.build()
elif self.method == 'new':
self.new()
elif self.method == 'sample':
self.gan = hg.GAN(config=self.gan_config, inputs=self.create_input(blank=False))
if not self.gan.load(self.save_file):
print("Initializing new model")
self.sample_forever()
|
the-stack_106_16150
|
from flask import (Blueprint, current_app, render_template)
from ..errors import ErroInterno, UsoInvalido, TipoErro
from . import generic_handler
bp = Blueprint('docs', __name__, url_prefix='/apidocs')
bp.register_error_handler(ErroInterno, generic_handler)
bp.register_error_handler(UsoInvalido, generic_handler)
@bp.route('/', methods=('GET',))
def apidocs():
"""
    View function that returns the API documentation.
"""
try:
return render_template('/apidocs/index.html')
except UsoInvalido as e:
current_app.logger.error(e)
raise e
except ErroInterno as e:
current_app.logger.error(e)
raise e
except Exception as e:
current_app.logger.error(e)
raise ErroInterno(TipoErro.ERRO_INTERNO.name, payload="Erro ao recuperar campi.")
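# Wiring sketch (illustrative; assumes an application object created elsewhere in the
# package -- only register_blueprint and the url_prefix above are standard Flask usage):
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(bp)          # serves GET /apidocs/ via the view above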
|
the-stack_106_16151
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'scripting alerte resoudre'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmResoudre(Parametre):
"""Commande 'scripting alerte resoudre'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "résoudre", "resolve")
self.schema = "<nombre>"
self.aide_courte = "marque une alerte comme résolue"
self.aide_longue = \
"Cette commande marque une alerte comme résolue et la " \
"supprime donc."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
nombre = dic_masques["nombre"].nombre
try:
alerte = type(self).importeur.scripting.alertes[nombre]
except KeyError:
personnage << "|err|Ce numéro d'alerte est invalide.|ff|"
else:
del type(self).importeur.scripting.alertes[alerte.no]
alerte.detruire()
personnage << "L'alerte {} a bien été supprimée.".format(alerte.no)
|
the-stack_106_16153
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
class ROIBoxHead(torch.nn.Module):
"""
Generic Box Head class.
"""
def __init__(self, cfg, in_channels):
super(ROIBoxHead, self).__init__()
self.feature_extractor = make_roi_box_feature_extractor(cfg, in_channels)
self.predictor = make_roi_box_predictor(
cfg, self.feature_extractor.out_channels)
self.post_processor = make_roi_box_post_processor(cfg)
self.loss_evaluator = make_roi_box_loss_evaluator(cfg)
def forward(self, features, proposals, targets=None, is_source = True):
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
if self.training and is_source:
# Faster R-CNN subsamples during training the proposals with a fixed
# positive / negative ratio
with torch.no_grad():
proposals = self.loss_evaluator.subsample(proposals, targets)
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
box_logits = self.feature_extractor(features, proposals)
# final classifier that converts the features into predictions
class_logits, box_regression = self.predictor(box_logits)
if not self.training:
result, keep_list, keep_list2 = self.post_processor((class_logits, box_regression), proposals)
return box_logits, result, {}
if not is_source:
result, keep_list, keep_list2 = self.post_processor((class_logits, box_regression), proposals)
box_logits_tgt = box_logits[keep_list]
if keep_list2 is not None:
box_logits_tgt = box_logits_tgt[keep_list2]
return box_logits_tgt, result, {}
loss_classifier, loss_box_reg = self.loss_evaluator(
[class_logits], [box_regression]
)
return (
box_logits,
proposals,
dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg),
)
def build_roi_box_head(cfg, in_channels):
"""
Constructs a new box head.
By default, uses ROIBoxHead, but if it turns out not to be enough, just register a new class
and make it a parameter in the config
"""
return ROIBoxHead(cfg, in_channels)
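# Usage sketch (illustrative; assumes a maskrcnn-benchmark style `cfg` node and FPN
# feature maps -- the channel count below is an example, not a requirement):
# box_head = build_roi_box_head(cfg, in_channels=256)
# x, detections, losses = box_head(features, proposals, targets, is_source=True)
# `losses` carries loss_classifier/loss_box_reg while training and is {} at test time.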
|
the-stack_106_16154
|
import sys
from cleo.helpers import argument
from cleo.helpers import option
from poetry.utils.helpers import module_name
from .command import Command
class NewCommand(Command):
name = "new"
description = "Creates a new Python project at <path>."
arguments = [argument("path", "The path to create the project at.")]
options = [
option("name", None, "Set the resulting package name.", flag=False),
option("src", None, "Use the src layout for the project."),
]
def handle(self) -> None:
from pathlib import Path
from poetry.core.semver.helpers import parse_constraint
from poetry.core.vcs.git import GitConfig
from poetry.layouts import layout
from poetry.utils.env import SystemEnv
if self.option("src"):
layout_ = layout("src")
else:
layout_ = layout("standard")
path = Path.cwd() / Path(self.argument("path"))
name = self.option("name")
if not name:
name = path.name
if path.exists():
if list(path.glob("*")):
# Directory is not empty. Aborting.
raise RuntimeError(
"Destination <fg=yellow>{}</> "
"exists and is not empty".format(path)
)
readme_format = "rst"
config = GitConfig()
author = None
if config.get("user.name"):
author = config["user.name"]
author_email = config.get("user.email")
if author_email:
author += " <{}>".format(author_email)
current_env = SystemEnv(Path(sys.executable))
default_python = "^{}".format(
".".join(str(v) for v in current_env.version_info[:2])
)
dev_dependencies = {}
python_constraint = parse_constraint(default_python)
if parse_constraint("<3.5").allows_any(python_constraint):
dev_dependencies["pytest"] = "^4.6"
if parse_constraint(">=3.5").allows_all(python_constraint):
dev_dependencies["pytest"] = "^5.2"
layout_ = layout_(
name,
"0.1.0",
author=author,
readme_format=readme_format,
python=default_python,
dev_dependencies=dev_dependencies,
)
layout_.create(path)
self.line(
"Created package <info>{}</> in <fg=blue>{}</>".format(
module_name(name), path.relative_to(Path.cwd())
)
)
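# CLI sketch (hedged; project and package names below are placeholders):
#   poetry new my-project                          # standard layout
#   poetry new --src --name awesome_pkg my-project # src/ layout with explicit name
# Both map onto the "path" argument and the --name/--src options declared above.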
|
the-stack_106_16156
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
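# Illustrative results of the helpers above (the default port shown is the one used
# for the main network further down; exact byte values depend on the input):
# parse_spec('1.2.3.4', 17817)            -> (pchIPv4 + bytearray([1, 2, 3, 4]), 17817)
# parse_spec('[2001:db8::1]:8333', 17817) -> (16-byte IPv6 bytearray, 8333)
# parse_spec('<16-char-base32>.onion', 17817) -> (pchOnionCat + 10 decoded bytes, 17817)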
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef CIC_CHAINPARAMSSEEDS_H\n')
g.write('#define CIC_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the cic network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 17817)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 17717)
g.write('#endif // CIC_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
the-stack_106_16158
|
from collections import OrderedDict, abc, deque
import datetime as dt
from datetime import datetime
from decimal import Decimal
from io import StringIO
from itertools import combinations
from warnings import catch_warnings
import dateutil
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
def setup_method(self, method):
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
self.data = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": pd.Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, pd.Index):
if label == "bool":
assert obj.dtype == "object"
else:
assert obj.dtype == label
elif isinstance(obj, pd.Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self):
# to confirm test case covers intended dtypes
for typ, vals in self.data.items():
self._check_expected_dtype(pd.Index(vals), typ)
self._check_expected_dtype(pd.Series(vals), typ)
def test_concatlike_same_dtypes(self):
# GH 13660
for typ1, vals1 in self.data.items():
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = pd.Categorical(list(vals1) + list(vals2))
exp_data3 = pd.Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = pd.Index(vals1).append(pd.Index(vals2))
exp = pd.Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = pd.Index(vals1).append([pd.Index(vals2), pd.Index(vals3)])
exp = pd.Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = pd.Index(vals1, name="x")
i2 = pd.Index(vals2, name="y")
res = i1.append(i2)
exp = pd.Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = pd.Index(vals1, name="x")
i2 = pd.Index(vals2, name="x")
res = i1.append(i2)
exp = pd.Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
pd.Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
pd.Index(vals1).append([pd.Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = pd.Series(vals1).append(pd.Series(vals2), ignore_index=True)
exp = pd.Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([pd.Series(vals1), pd.Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = pd.Series(vals1).append(
[pd.Series(vals2), pd.Series(vals3)], ignore_index=True
)
exp = pd.Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[pd.Series(vals1), pd.Series(vals2), pd.Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = pd.Series(vals1, name="x")
s2 = pd.Series(vals2, name="y")
res = s1.append(s2, ignore_index=True)
exp = pd.Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = pd.Series(vals1, name="x")
s2 = pd.Series(vals2, name="x")
res = s1.append(s2, ignore_index=True)
exp = pd.Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
pd.Series(vals1).append(vals2)
with pytest.raises(TypeError, match=msg):
pd.Series(vals1).append([pd.Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([pd.Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([pd.Series(vals1), pd.Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self):
# GH 13660
for typ1, vals1 in self.data.items():
for typ2, vals2 in self.data.items():
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
continue
elif typ1 == "category" or typ2 == "category":
# TODO: suspicious
continue
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = pd.Index(vals1).append(pd.Index(vals2))
exp = pd.Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = pd.Index(vals1).append([pd.Index(vals2), pd.Index(vals3)])
exp = pd.Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series.append
res = pd.Series(vals1).append(pd.Series(vals2), ignore_index=True)
exp = pd.Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([pd.Series(vals1), pd.Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = pd.Series(vals1).append(
[pd.Series(vals2), pd.Series(vals3)], ignore_index=True
)
exp = pd.Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
res = pd.concat(
[pd.Series(vals1), pd.Series(vals2), pd.Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = pd.Series(dti)
tds = pd.Series(tdi)
res = dts.append(tds)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = pd.Series(dti1)
dts2 = pd.Series(dti2)
res = dts1.append(dts2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = pd.DataFrame(0, index=ix1, columns=["A", "B"])
df2 = pd.DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = pd.DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1.append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = pd.Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = pd.Series(dti1)
dts2 = pd.Series(dti2)
res = dts1.append(dts2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = pd.Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
# tm.assert_index_equal(res, exp)
dts1 = pd.Series(dti1)
dts3 = pd.Series(dti3)
res = dts1.append(dts3)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
ps2 = pd.Series(pi2)
res = ps1.append(ps2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = pd.Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
ps2 = pd.Series(pi2)
res = ps1.append(ps2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = pd.Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
tds = pd.Series(tdi)
res = ps1.append(tds)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = pd.Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
tds = pd.Series(tdi)
res = tds.append(ps1)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = pd.Series([1, 2, np.nan], dtype="category")
s2 = pd.Series([2, 1, 2], dtype="category")
exp = pd.Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = pd.Series([3, 2], dtype="category")
s2 = pd.Series([2, 1], dtype="category")
exp = pd.Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = pd.Series([10, 11, np.nan], dtype="category")
s2 = pd.Series([np.nan, 1, 3, 2], dtype="category")
exp = pd.Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = pd.Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = pd.Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = pd.Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = pd.Series([1, 2, np.nan], dtype="category")
s2 = pd.Series([2, 1, 2])
exp = pd.Series([1, 2, np.nan, 2, 1, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = pd.Series([2, 1, 2, 1, 2, np.nan], dtype="object")
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = pd.Series([3, 2], dtype="category")
s2 = pd.Series([2, 1])
exp = pd.Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# completely different categories => not-category
s1 = pd.Series([10, 11, np.nan], dtype="category")
s2 = pd.Series([1, 3, 2])
exp = pd.Series([10, 11, np.nan, 1, 3, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series([1, 3, 2, 10, 11, np.nan], dtype="object")
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# different dtype => not-category
s1 = pd.Series([10, 11, np.nan], dtype="category")
s2 = pd.Series(["a", "b", "c"])
exp = pd.Series([10, 11, np.nan, "a", "b", "c"])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series(["a", "b", "c", 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
s1 = pd.Series([10, 11], dtype="category")
s2 = pd.Series([np.nan, np.nan, np.nan])
exp = pd.Series([10, 11, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series([np.nan, np.nan, np.nan, 10, 11])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
def test_concat_categorical_3elem_coercion(self):
# GH 13524
# mixed dtypes => not-category
s1 = pd.Series([1, 2, np.nan], dtype="category")
s2 = pd.Series([2, 1, 2], dtype="category")
s3 = pd.Series([1, 2, 1, 2, np.nan])
exp = pd.Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = pd.Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = pd.Series([4, 5, 6], dtype="category")
s2 = pd.Series([1, 2, 3], dtype="category")
s3 = pd.Series([1, 3, 4])
exp = pd.Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = pd.Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = pd.Series([4, 5, 6], dtype="category")
s2 = pd.Series([1, 2, 3], dtype="category")
s3 = pd.Series([10, 11, 12])
exp = pd.Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = pd.Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
def test_concat_categorical_multi_coercion(self):
# GH 13524
s1 = pd.Series([1, 3], dtype="category")
s2 = pd.Series([3, 4], dtype="category")
s3 = pd.Series([2, 3])
s4 = pd.Series([2, 2], dtype="category")
s5 = pd.Series([1, np.nan])
s6 = pd.Series([1, 3, 2], dtype="category")
# mixed dtype, values are all in categories => not-category
exp = pd.Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s1.append([s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = pd.Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s6.append([s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
def test_concat_categorical_ordered(self):
# GH 13524
s1 = pd.Series(pd.Categorical([1, 2, np.nan], ordered=True))
s2 = pd.Series(pd.Categorical([2, 1, 2], ordered=True))
exp = pd.Series(pd.Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series(
pd.Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True)
)
tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s1], ignore_index=True), exp)
def test_concat_categorical_coercion_nan(self):
# GH 13524
# some edge cases
# category + not-category => not category
s1 = pd.Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
s2 = pd.Series([np.nan, 1])
exp = pd.Series([np.nan, np.nan, np.nan, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
s1 = pd.Series([1, np.nan], dtype="category")
s2 = pd.Series([np.nan, np.nan])
exp = pd.Series([1, np.nan, np.nan, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# mixed dtype, all nan-likes => not-category
s1 = pd.Series([np.nan, np.nan], dtype="category")
s2 = pd.Series([np.nan, np.nan])
exp = pd.Series([np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all category nan-likes => category
s1 = pd.Series([np.nan, np.nan], dtype="category")
s2 = pd.Series([np.nan, np.nan], dtype="category")
exp = pd.Series([np.nan, np.nan, np.nan, np.nan], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
def test_concat_categorical_empty(self):
# GH 13524
s1 = pd.Series([], dtype="category")
s2 = pd.Series([1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
s1 = pd.Series([], dtype="category")
s2 = pd.Series([], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
s1 = pd.Series([], dtype="category")
s2 = pd.Series([], dtype="object")
# different dtype => not-category
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
s1 = pd.Series([], dtype="category")
s2 = pd.Series([np.nan, np.nan])
# empty Series is ignored
exp = pd.Series([np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
class TestAppend:
def test_append(self, sort, float_frame):
mixed_frame = float_frame.copy()
mixed_frame["foo"] = "bar"
begin_index = float_frame.index[:5]
end_index = float_frame.index[5:]
begin_frame = float_frame.reindex(begin_index)
end_frame = float_frame.reindex(end_index)
appended = begin_frame.append(end_frame)
tm.assert_almost_equal(appended["A"], float_frame["A"])
del end_frame["A"]
partial_appended = begin_frame.append(end_frame, sort=sort)
assert "A" in partial_appended
partial_appended = end_frame.append(begin_frame, sort=sort)
assert "A" in partial_appended
# mixed type handling
appended = mixed_frame[:5].append(mixed_frame[5:])
tm.assert_frame_equal(appended, mixed_frame)
# what to test here
mixed_appended = mixed_frame[:5].append(float_frame[5:], sort=sort)
mixed_appended2 = float_frame[:5].append(mixed_frame[5:], sort=sort)
# all equal except 'foo' column
tm.assert_frame_equal(
mixed_appended.reindex(columns=["A", "B", "C", "D"]),
mixed_appended2.reindex(columns=["A", "B", "C", "D"]),
)
def test_append_empty(self, float_frame):
empty = DataFrame()
appended = float_frame.append(empty)
tm.assert_frame_equal(float_frame, appended)
assert appended is not float_frame
appended = empty.append(float_frame)
tm.assert_frame_equal(float_frame, appended)
assert appended is not float_frame
def test_append_overlap_raises(self, float_frame):
msg = "Indexes have overlapping values"
with pytest.raises(ValueError, match=msg):
float_frame.append(float_frame, verify_integrity=True)
def test_append_new_columns(self):
# see gh-6129: new columns
df = DataFrame({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
row = Series([5, 6, 7], index=["a", "b", "c"], name="z")
expected = DataFrame(
{
"a": {"x": 1, "y": 2, "z": 5},
"b": {"x": 3, "y": 4, "z": 6},
"c": {"z": 7},
}
)
result = df.append(row)
tm.assert_frame_equal(result, expected)
def test_append_length0_frame(self, sort):
df = DataFrame(columns=["A", "B", "C"])
df3 = DataFrame(index=[0, 1], columns=["A", "B"])
df5 = df.append(df3, sort=sort)
expected = DataFrame(index=[0, 1], columns=["A", "B", "C"])
tm.assert_frame_equal(df5, expected)
def test_append_records(self):
arr1 = np.zeros((2,), dtype=("i4,f4,a10"))
arr1[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
arr2 = np.zeros((3,), dtype=("i4,f4,a10"))
arr2[:] = [(3, 4.0, "foo"), (5, 6.0, "bar"), (7.0, 8.0, "baz")]
df1 = DataFrame(arr1)
df2 = DataFrame(arr2)
result = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate((arr1, arr2)))
tm.assert_frame_equal(result, expected)
# rewrite sort fixture, since we also want to test default of None
def test_append_sorts(self, sort):
df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = pd.DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3])
with tm.assert_produces_warning(None):
result = df1.append(df2, sort=sort)
# for None / True
expected = pd.DataFrame(
{"b": [1, 2, None, None], "a": [1, 2, 1, 2], "c": [None, None, 3, 4]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
tm.assert_frame_equal(result, expected)
def test_append_different_columns(self, sort):
df = DataFrame(
{
"bools": np.random.randn(10) > 0,
"ints": np.random.randint(0, 10, 10),
"floats": np.random.randn(10),
"strings": ["foo", "bar"] * 5,
}
)
a = df[:5].loc[:, ["bools", "ints", "floats"]]
b = df[5:].loc[:, ["strings", "ints", "floats"]]
appended = a.append(b, sort=sort)
assert isna(appended["strings"][0:4]).all()
assert isna(appended["bools"][5:]).all()
def test_append_many(self, sort, float_frame):
chunks = [
float_frame[:5],
float_frame[5:10],
float_frame[10:15],
float_frame[15:],
]
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result, float_frame)
chunks[-1] = chunks[-1].copy()
chunks[-1]["foo"] = "bar"
result = chunks[0].append(chunks[1:], sort=sort)
tm.assert_frame_equal(result.loc[:, float_frame.columns], float_frame)
assert (result["foo"][15:] == "bar").all()
assert result["foo"][:15].isna().all()
def test_append_preserve_index_name(self):
# #980
df1 = DataFrame(columns=["A", "B", "C"])
df1 = df1.set_index(["A"])
df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=["A", "B", "C"])
df2 = df2.set_index(["A"])
result = df1.append(df2)
assert result.index.name == "A"
indexes_can_append = [
pd.RangeIndex(3),
pd.Index([4, 5, 6]),
pd.Index([4.5, 5.5, 6.5]),
pd.Index(list("abc")),
pd.CategoricalIndex("A B C".split()),
pd.CategoricalIndex("D E F".split(), ordered=True),
pd.IntervalIndex.from_breaks([7, 8, 9, 10]),
pd.DatetimeIndex(
[
dt.datetime(2013, 1, 3, 0, 0),
dt.datetime(2013, 1, 3, 6, 10),
dt.datetime(2013, 1, 3, 7, 12),
]
),
]
indexes_cannot_append_with_other = [
pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()])
]
all_indexes = indexes_can_append + indexes_cannot_append_with_other
@pytest.mark.parametrize("index", all_indexes, ids=lambda x: type(x).__name__)
def test_append_same_columns_type(self, index):
# GH18359
# df wider than ser
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index)
ser_index = index[:2]
ser = pd.Series([7, 8], index=ser_index, name=2)
result = df.append(ser)
expected = pd.DataFrame(
[[1.0, 2.0, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index
)
tm.assert_frame_equal(result, expected)
# ser wider than df
ser_index = index
index = index[:2]
df = pd.DataFrame([[1, 2], [4, 5]], columns=index)
ser = pd.Series([7, 8, 9], index=ser_index, name=2)
result = df.append(ser)
expected = pd.DataFrame(
[[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]],
index=[0, 1, 2],
columns=ser_index,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"df_columns, series_index",
combinations(indexes_can_append, r=2),
ids=lambda x: type(x).__name__,
)
def test_append_different_columns_types(self, df_columns, series_index):
# GH18359
# See also test 'test_append_different_columns_types_raises' below
# for errors raised when appending
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns)
ser = pd.Series([7, 8, 9], index=series_index, name=2)
result = df.append(ser)
idx_diff = ser.index.difference(df_columns)
combined_columns = Index(df_columns.tolist()).append(idx_diff)
expected = pd.DataFrame(
[
[1.0, 2.0, 3.0, np.nan, np.nan, np.nan],
[4, 5, 6, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, 7, 8, 9],
],
index=[0, 1, 2],
columns=combined_columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index_can_append", indexes_can_append, ids=lambda x: type(x).__name__
)
@pytest.mark.parametrize(
"index_cannot_append_with_other",
indexes_cannot_append_with_other,
ids=lambda x: type(x).__name__,
)
def test_append_different_columns_types_raises(
self, index_can_append, index_cannot_append_with_other
):
# GH18359
# Dataframe.append will raise if MultiIndex appends
# or is appended to a different index type
#
# See also test 'test_append_different_columns_types' above for
# appending without raising.
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append)
ser = pd.Series([7, 8, 9], index=index_cannot_append_with_other, name=2)
msg = (
r"Expected tuple, got (int|long|float|str|"
r"pandas._libs.interval.Interval)|"
r"object of type '(int|float|Timestamp|"
r"pandas._libs.interval.Interval)' has no len\(\)|"
)
with pytest.raises(TypeError, match=msg):
df.append(ser)
df = pd.DataFrame(
[[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other
)
ser = pd.Series([7, 8, 9], index=index_can_append, name=2)
with pytest.raises(TypeError, match=msg):
df.append(ser)
def test_append_dtype_coerce(self, sort):
# GH 4993
# appending with datetime will incorrectly convert datetime64
df1 = DataFrame(
index=[1, 2],
data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)],
columns=["start_time"],
)
df2 = DataFrame(
index=[4, 5],
data=[
[dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10)],
[dt.datetime(2013, 1, 4, 0, 0), dt.datetime(2013, 1, 4, 7, 10)],
],
columns=["start_time", "end_time"],
)
expected = concat(
[
Series(
[
pd.NaT,
pd.NaT,
dt.datetime(2013, 1, 3, 6, 10),
dt.datetime(2013, 1, 4, 7, 10),
],
name="end_time",
),
Series(
[
dt.datetime(2013, 1, 1, 0, 0),
dt.datetime(2013, 1, 2, 0, 0),
dt.datetime(2013, 1, 3, 0, 0),
dt.datetime(2013, 1, 4, 0, 0),
],
name="start_time",
),
],
axis=1,
sort=sort,
)
result = df1.append(df2, ignore_index=True, sort=sort)
if sort:
expected = expected[["end_time", "start_time"]]
else:
expected = expected[["start_time", "end_time"]]
tm.assert_frame_equal(result, expected)
def test_append_missing_column_proper_upcast(self, sort):
df1 = DataFrame({"A": np.array([1, 2, 3, 4], dtype="i8")})
df2 = DataFrame({"B": np.array([True, False, True, False], dtype=bool)})
appended = df1.append(df2, ignore_index=True, sort=sort)
assert appended["A"].dtype == "f8"
assert appended["B"].dtype == "O"
def test_append_empty_frame_to_series_with_dateutil_tz(self):
# GH 23682
date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc())
s = Series({"date": date, "a": 1.0, "b": 2.0})
df = DataFrame(columns=["c", "d"])
result = df.append(s, ignore_index=True)
# n.b. it's not clear to me that expected is correct here.
# It's possible that the `date` column should have
# datetime64[ns, tz] dtype for both result and expected.
# that would be more consistent with new columns having
# their own dtype (float for a and b, datetime64ns, tz for date).
expected = DataFrame(
[[np.nan, np.nan, 1.0, 2.0, date]],
columns=["c", "d", "a", "b", "date"],
dtype=object,
)
# These columns get cast to object after append
expected["a"] = expected["a"].astype(float)
expected["b"] = expected["b"].astype(float)
tm.assert_frame_equal(result, expected)
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame(
{
"dt": [
datetime(2014, 1, 1),
datetime(2014, 1, 2),
datetime(2014, 1, 3),
],
"b": ["A", "B", "C"],
"c": [1, 2, 3],
"d": [4, 5, 6],
}
)
df["dt"] = df["dt"].apply(lambda d: Timestamp(d, tz="US/Pacific"))
df = df.set_index(["dt", "b"])
exp_idx1 = DatetimeIndex(
["2014-01-01", "2014-01-02", "2014-01-03"] * 2, tz="US/Pacific", name="dt"
)
exp_idx2 = Index(["A", "B", "C"] * 2, name="b")
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame(
{"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"]
)
result = concat([df, df])
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = pd.DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = pd.DataFrame(
{"col": list(range(5)) * 2}, index=index, dtype=np.int32
)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = pd.DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH 2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
    def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
        expected = DataFrame({"A": s, "B": s2}, index=["c", "a", "b", "d"])
        if sort:
            expected = expected.sort_index()
        tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=pd.Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_datetime64_block(self):
from pandas.core.indexes.datetimes import date_range
rng = date_range("1/1/2000", periods=10)
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = pd.DataFrame()
df_a = pd.DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = pd.DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
msg = (
"cannot concatenate object of type '{}'; "
"only Series and DataFrame objs are valid"
)
for obj in [1, dict(), [1, 2], (1, 2)]:
with pytest.raises(TypeError, match=msg.format(type(obj))):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_NaT_series(self):
# GH 11693
# test for merging NaT series with datetime series.
x = Series(
date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern")
)
y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series([x[0], x[1], pd.NaT, pd.NaT])
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# all NaT with tz
expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]")
result = pd.concat([y, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# without tz
x = pd.Series(pd.date_range("20151124 08:00", "20151124 09:00", freq="1h"))
y = pd.Series(pd.date_range("20151124 10:00", "20151124 11:00", freq="1h"))
y[:] = pd.NaT
expected = pd.Series([x[0], x[1], pd.NaT, pd.NaT])
result = pd.concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# all NaT without tz
x[:] = pd.NaT
expected = pd.Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
result = pd.concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
def test_concat_tz_frame(self):
df2 = DataFrame(
dict(
A=pd.Timestamp("20130102", tz="US/Eastern"),
B=pd.Timestamp("20130603", tz="CET"),
),
index=range(5),
)
# concat
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
tm.assert_frame_equal(df2, df3)
def test_concat_tz_series(self):
# gh-11755: tz and no tz
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
y = Series(date_range("2012-01-01", "2012-01-02"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# gh-11887: concat tz and object
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
y = Series(["a", "b"])
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# see gh-12217 and gh-12306
# Concatenating two UTC times
first = pd.DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize("UTC")
second = pd.DataFrame([[datetime(2016, 1, 2)]])
second[0] = second[0].dt.tz_localize("UTC")
result = pd.concat([first, second])
assert result[0].dtype == "datetime64[ns, UTC]"
# Concatenating two London times
first = pd.DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize("Europe/London")
second = pd.DataFrame([[datetime(2016, 1, 2)]])
second[0] = second[0].dt.tz_localize("Europe/London")
result = pd.concat([first, second])
assert result[0].dtype == "datetime64[ns, Europe/London]"
# Concatenating 2+1 London times
first = pd.DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]])
first[0] = first[0].dt.tz_localize("Europe/London")
second = pd.DataFrame([[datetime(2016, 1, 3)]])
second[0] = second[0].dt.tz_localize("Europe/London")
result = pd.concat([first, second])
assert result[0].dtype == "datetime64[ns, Europe/London]"
# Concat'ing 1+2 London times
first = pd.DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize("Europe/London")
second = pd.DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]])
second[0] = second[0].dt.tz_localize("Europe/London")
result = pd.concat([first, second])
assert result[0].dtype == "datetime64[ns, Europe/London]"
def test_concat_tz_series_with_datetimelike(self):
# see gh-12620: tz and timedelta
x = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-02-01", tz="US/Eastern"),
]
y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
tm.assert_series_equal(result, pd.Series(x + y, dtype="object"))
# tz and period
y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")]
result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
tm.assert_series_equal(result, pd.Series(x + y, dtype="object"))
def test_concat_tz_series_tzlocal(self):
# see gh-13583
x = [
pd.Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()),
pd.Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()),
]
y = [
pd.Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()),
pd.Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()),
]
result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
tm.assert_series_equal(result, pd.Series(x + y))
assert result.dtype == "datetime64[ns, tzlocal()]"
@pytest.mark.parametrize("tz1", [None, "UTC"])
@pytest.mark.parametrize("tz2", [None, "UTC"])
@pytest.mark.parametrize("s", [pd.NaT, pd.Timestamp("20150101")])
def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s):
# GH 12396
# tz-naive
first = pd.DataFrame([[pd.NaT], [pd.NaT]]).apply(
lambda x: x.dt.tz_localize(tz1)
)
second = pd.DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2))
result = pd.concat([first, second], axis=0)
expected = pd.DataFrame(pd.Series([pd.NaT, pd.NaT, s], index=[0, 1, 0]))
expected = expected.apply(lambda x: x.dt.tz_localize(tz2))
if tz1 != tz2:
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz1", [None, "UTC"])
@pytest.mark.parametrize("tz2", [None, "UTC"])
def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):
# GH 12396
first = pd.DataFrame(pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
second = pd.DataFrame(pd.Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
expected = pd.DataFrame(
{
0: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
1: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
}
)
result = pd.concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz1", [None, "UTC"])
@pytest.mark.parametrize("tz2", [None, "UTC"])
def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):
# GH 12396
# tz-naive
first = pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
second = pd.DataFrame(
[
[pd.Timestamp("2015/01/01", tz=tz2)],
[pd.Timestamp("2016/01/01", tz=tz2)],
],
index=[2, 3],
)
expected = pd.DataFrame(
[
pd.NaT,
pd.NaT,
pd.Timestamp("2015/01/01", tz=tz2),
pd.Timestamp("2016/01/01", tz=tz2),
]
)
if tz1 != tz2:
expected = expected.astype(object)
result = pd.concat([first, second])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_concat_NaT_dataframes(self, tz):
# GH 12396
first = pd.DataFrame([[pd.NaT], [pd.NaT]])
first = first.apply(lambda x: x.dt.tz_localize(tz))
second = pd.DataFrame(
[[pd.Timestamp("2015/01/01", tz=tz)], [pd.Timestamp("2016/01/01", tz=tz)]],
index=[2, 3],
)
expected = pd.DataFrame(
[
pd.NaT,
pd.NaT,
pd.Timestamp("2015/01/01", tz=tz),
pd.Timestamp("2016/01/01", tz=tz),
]
)
result = pd.concat([first, second], axis=0)
tm.assert_frame_equal(result, expected)
def test_concat_period_series(self):
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="Period[D]")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
def test_concat_period_multiple_freq_series(self):
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
def test_concat_period_other_series(self):
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
# non-period
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(pd.DatetimeIndex(["2015-11-01", "2015-12-01"]))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(["A", "B"])
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
def test_concat_empty_series(self):
# GH 11082
s1 = pd.Series([1, 2, 3], name="x")
s2 = pd.Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = pd.DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=pd.Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = pd.Series([1, 2, 3], name="x")
s2 = pd.Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = pd.Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = pd.Series([1, 2, 3], name="x")
s2 = pd.Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = pd.DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=pd.Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: pd.Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = pd.Series([1, 2, 3], name="x")
s2 = pd.Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = pd.DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = pd.DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = pd.DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = pd.DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = pd.DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = pd.DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = pd.DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = pd.Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = pd.Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = pd.Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = pd.DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [pd.DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [
pd.DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)
]
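        # With unorderable mixed-type labels (str, int, None), sort=True is expected
        # to fall back to the first frame's column order rather than raising.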
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_datetime_timezone(self):
# GH 18523
idx1 = pd.date_range("2011-01-01", periods=3, freq="H", tz="Europe/Paris")
idx2 = pd.date_range(start=idx1[0], end=idx1[-1], freq="H")
df1 = pd.DataFrame({"a": [1, 2, 3]}, index=idx1)
df2 = pd.DataFrame({"b": [1, 2, 3]}, index=idx2)
result = pd.concat([df1, df2], axis=1)
exp_idx = (
DatetimeIndex(
[
"2011-01-01 00:00:00+01:00",
"2011-01-01 01:00:00+01:00",
"2011-01-01 02:00:00+01:00",
],
freq="H",
)
.tz_convert("UTC")
.tz_convert("Europe/Paris")
)
expected = pd.DataFrame(
[[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"]
)
tm.assert_frame_equal(result, expected)
idx3 = pd.date_range("2011-01-01", periods=3, freq="H", tz="Asia/Tokyo")
df3 = pd.DataFrame({"b": [1, 2, 3]}, index=idx3)
result = pd.concat([df1, df3], axis=1)
exp_idx = DatetimeIndex(
[
"2010-12-31 15:00:00+00:00",
"2010-12-31 16:00:00+00:00",
"2010-12-31 17:00:00+00:00",
"2010-12-31 23:00:00+00:00",
"2011-01-01 00:00:00+00:00",
"2011-01-01 01:00:00+00:00",
]
)
expected = pd.DataFrame(
[
[np.nan, 1],
[np.nan, 2],
[np.nan, 3],
[1, np.nan],
[2, np.nan],
[3, np.nan],
],
index=exp_idx,
columns=["a", "b"],
)
tm.assert_frame_equal(result, expected)
# GH 13783: Concat after resample
result = pd.concat(
[df1.resample("H").mean(), df2.resample("H").mean()], sort=True
)
expected = pd.DataFrame(
{"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]},
index=idx1.append(idx1),
)
tm.assert_frame_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = pd.Series(pd.core.arrays.integer_array([1, 2]))
b = pd.Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = pd.Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
    def test_concat_ordered_dict(self):
# GH 21510
expected = pd.concat(
[pd.Series(range(3)), pd.Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
OrderedDict(
[("First", pd.Series(range(3))), ("Another", pd.Series(range(4)))]
)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("pdt", [pd.Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
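    # ndmin matches the container's dimensionality: 1 for Series, 2 for DataFrame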
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = pd.DataFrame({"foo": [1]})
df2 = pd.DataFrame({"foo": []})
expected = pd.DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = pd.Series([1])
s2 = pd.Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = pd.DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = pd.DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = pd.DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = pd.DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = pd.DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"])
df2 = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = pd.DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort():
# GH-4588
df = pd.DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = pd.DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat([df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise():
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = pd.DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = pd.DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
s1 = pd.Series({"a": 1, "b": 2}, name=s1name)
s2 = pd.Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
expected = pd.Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
a = pd.Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = pd.Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = pd.Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
"a",
"b",
]
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_unchanged():
# GH-12007
# test fix for when concat on categorical and float
# coerces dtype categorical -> float
df = pd.DataFrame(pd.Series(["a", "b", "c"], dtype="category", name="A"))
ser = pd.Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)
expected = pd.DataFrame(
{
"A": pd.Series(["a", "b", "c", np.nan], dtype="category"),
"B": pd.Series([0, 1, np.nan, 2], dtype="float"),
}
)
tm.assert_equal(result, expected)
def test_concat_datetimeindex_freq():
# GH 3232
# Monotonic index result
dr = pd.date_range("01-Jan-2013", periods=100, freq="50L", tz="UTC")
data = list(range(100))
expected = pd.DataFrame(data, index=dr)
result = pd.concat([expected[:50], expected[50:]])
tm.assert_frame_equal(result, expected)
# Non-monotonic index result
result = pd.concat([expected[50:], expected[:50]])
expected = pd.DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50]))
expected.index._data.freq = None
tm.assert_frame_equal(result, expected)
def test_concat_empty_df_object_dtype():
# GH 9149
df_1 = pd.DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = pd.DataFrame(columns=df_1.columns)
result = pd.concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_sparse():
# GH 23557
a = pd.Series(SparseArray([0, 1, 2]))
expected = pd.DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.int64, 0)
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
def test_concat_copy_index(test_series, axis):
# GH 29879
if test_series:
ser = Series([1, 2])
comb = concat([ser, ser], axis=axis, copy=True)
assert comb.index is not ser.index
else:
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
comb = concat([df, df], axis=axis, copy=True)
assert comb.index is not df.index
assert comb.columns is not df.columns
def test_concat_multiindex_datetime_object_index():
# https://github.com/pandas-dev/pandas/issues/11058
s = Series(
["a", "b"],
index=MultiIndex.from_arrays(
[[1, 2], Index([dt.date(2013, 1, 1), dt.date(2014, 1, 1)], dtype="object")],
names=["first", "second"],
),
)
s2 = Series(
["a", "b"],
index=MultiIndex.from_arrays(
[[1, 2], Index([dt.date(2013, 1, 1), dt.date(2015, 1, 1)], dtype="object")],
names=["first", "second"],
),
)
expected = DataFrame(
[["a", "a"], ["b", np.nan], [np.nan, "b"]],
index=MultiIndex.from_arrays(
[
[1, 2, 2],
DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01"],
dtype="datetime64[ns]",
freq=None,
),
],
names=["first", "second"],
),
)
result = concat([s, s2], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]])
def test_duplicate_keys(keys):
# GH 33654
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
s1 = Series([7, 8, 9], name="c")
s2 = Series([10, 11, 12], name="d")
result = concat([df, s1, s2], axis=1, keys=keys)
expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
expected_columns = pd.MultiIndex.from_tuples(
[(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")]
)
expected = DataFrame(expected_values, columns=expected_columns)
tm.assert_frame_equal(result, expected)
the-stack_106_16163
#!/usr/bin/env python
from nipype.pipeline import engine as pe
from nipype.interfaces import fsl, ants, utility as niu
from ...interfaces.fmap import Phases2Fieldmap
def init_phase_wf(bet_mag):
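    # Convert the phase images (passed via the 'phasediff' field) and their metadata
    # into a single phase-difference map, then feed the phase-difference workflow below.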
wf = pe.Workflow(name='phase_prep_wf')
inputnode = pe.Node(
niu.IdentityInterface(
fields=['magnitude1', 'phasediff', 'b0_stripped', 'phases_meta']),
name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(fields=['out_fmap', 'out_mag']),
name='outputnode')
phases2fmap = pe.Node(Phases2Fieldmap(), name='phases2fmap')
phdiff_wf = init_phdiff_wf(bet_mag)
wf.connect([
(inputnode, phases2fmap, [('phases_meta', 'metadatas')]),
(inputnode, phases2fmap, [('phasediff', 'phase_files')]),
(inputnode, phdiff_wf, [('magnitude1', 'inputnode.magnitude1'),
('phases_meta', 'inputnode.phases_meta')]),
(phases2fmap, phdiff_wf, [('out_file', 'inputnode.phasediff')]),
(phdiff_wf, outputnode, [('outputnode.out_fmap', 'out_fmap'),
('outputnode.out_mag', 'out_mag')])
])
return wf
def init_phdiff_wf(bet_mag):
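    # N4 bias-correct the magnitude image, skull-strip it with BET, compute the
    # echo-time difference from the metadata, run FSL PrepareFieldmap, and keep
    # only the first volume of the resulting fieldmap.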
wf = pe.Workflow(name='phdiff_prep_wf')
inputnode = pe.Node(
niu.IdentityInterface(
fields=['magnitude1', 'phasediff', 'phases_meta']),
name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(fields=['out_fmap', 'out_mag']),
name='outputnode')
n4_correct = pe.Node(
ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
name='n4_correct')
mag_bet = pe.Node(
fsl.BET(frac=bet_mag, robust=True, mask=True), name='mag_bet')
prep_fmap = pe.Node(
fsl.PrepareFieldmap(scanner='SIEMENS'), name='prep_fmap')
fslroi = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name='fslroi_phase')
delta = pe.Node(
niu.Function(
input_names=['in_values'],
output_names=['out_value'],
function=delta_te),
name='delta')
wf.connect([
(inputnode, n4_correct, [('magnitude1', 'input_image')]),
(n4_correct, mag_bet, [('output_image', 'in_file')]),
(inputnode, delta, [('phases_meta', 'in_values')]),
(mag_bet, prep_fmap, [('out_file', 'in_magnitude')]),
(inputnode, prep_fmap, [('phasediff', 'in_phase')]),
(delta, prep_fmap, [('out_value', 'delta_TE')]),
(prep_fmap, fslroi, [('out_fieldmap', 'in_file')]),
(fslroi, outputnode, [('roi_file', 'out_fmap')]),
(mag_bet, outputnode, [('out_file', 'out_mag')])
])
return wf
def delta_te(in_values, te1=None, te2=None):
"""
Read :math:`\Delta_\text{TE}` from BIDS metadata dict
"""
if isinstance(in_values, float):
te2 = in_values
te1 = 0.0
if isinstance(in_values, dict):
te1 = in_values.get('EchoTime1')
te2 = in_values.get('EchoTime2')
if not all((te1, te2)):
te2 = in_values.get('EchoTimeDifference')
te1 = 0
if isinstance(in_values, list):
te2, te1 = in_values
if isinstance(te1, list):
te1 = te1[1]
if isinstance(te2, list):
te2 = te2[1]
    # For convenience, if both are missing we should give one error about them
if te1 is None and te2 is None:
raise RuntimeError(
'EchoTime1 and EchoTime2 metadata fields not found. '
'Please consult the BIDS specification.'
)
if te1 is None:
raise RuntimeError(
'EchoTime1 metadata field not found. Please consult the BIDS specification.'
)
if te2 is None:
raise RuntimeError(
'EchoTime2 metadata field not found. Please consult the BIDS specification.'
)
return 1000 * abs(float(te2) - float(te1))
the-stack_106_16165
from push_relabel import *
def isbipartite(g: Graph) -> bool:
"""A bipartite graph (or bigraph) is a graph whose vertices can be divided into two disjoint
and independent sets U and V such that every edge connects a vertex in U to one in V.
Vertex sets U and V are usually called the parts of the graph.
Equivalently, a bipartite graph is a graph that does not contain any odd-length cycles.
Let G be a graph. Then G is 2-colorable if and only if G is bipartite.
source: https://cp-algorithms.com/graph/bipartite-check.html
"""
color = defaultdict(lambda: -1)
Q = deque()
is_bipartite = True
for source in nodes(g):
if color[source] == -1:
Q.appendleft(source)
color[source] = 0
while Q:
v = Q.pop()
for u in adjacency(g, v):
if color[u] == -1:
color[u] = color[v] ^ 1
Q.appendleft(u)
else:
is_bipartite &= (color[u] != color[v])
return is_bipartite
def match(U, V, E, maxcap):
"""Takes at input the left and right edges
Given a bipartite graph G = (A ∪ B, E), find an S ⊆ A × B that is
a matching and is as large as possible."""
g = defaultdict(dict)
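    # Edges given as bare (u, v) pairs are padded with a default capacity of 1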
if len(E[0]) == 2:
E = tuple(map(lambda arc: arc + (1,), E))
insert_edges_from_iterable(g, E)
if not isbipartite(g):
raise ValueError("The graph must be bipartite for maximum bipartite matching.")
# add supersink and supersource
supersource, supersink = '#', '@'
for source in U:
insert_edge(g, (supersource, source, maxcap)) # flow is 0
for sink in V:
insert_edge(g, (sink, supersink, maxcap))
maxflow = fifo_push_relabel(g, supersource, supersink)
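    # With unit capacities on the matching arcs, the max-flow value equals the size
    # of the maximum matching; positive flow on (u, v) marks a matched pair.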
S = [(u, v, g[u][v].flow) for (u, v, _) in E]
return S, maxflow
def test_is_bipartite():
g = defaultdict(dict)
insert_edges_from_iterable(g, [(1, 3, 0), (1, 2, 0), (2, 4, 0)])
print(isbipartite(g))
def test_bipartite_matching():
people = ['p1', 'p2', 'p3', 'p4', 'p5']
books = ['b1', 'b2', 'b3', 'b4', 'b5']
edges = [('p1', 'b2'), ('p1', 'b3'), ('p2', 'b2'), ('p2', 'b3'), ('p2', 'b4'), ('p3', 'b1'), ('p3', 'b2'),
('p3', 'b3'), ('p3', 'b5'), ('p4', 'b3'), ('p5', 'b3'), ('p5', 'b4'), ('p5', 'b5')]
print("using fifo push-relabel... ")
g = defaultdict(dict)
insert_edges_from_iterable(g, map(lambda arc: arc + (1, ), edges))
S, maxflow = match(people, books, edges, 1)
pprint(S)
pprint(maxflow)
if __name__ == '__main__':
test_bipartite_matching()
the-stack_106_16166
import csv
import os
import os.path as op
import threading
import time
import timeit
from collections import OrderedDict
from .Profiler import Profiler
from functools import reduce
class ConfigError(Exception):
pass
class AndroidPlugin(Profiler):
def __init__(self, config, paths):
super(AndroidPlugin, self).__init__(config, paths)
self.output_dir = ''
self.paths = paths
self.profile = False
available_data_points = ['cpu', 'mem']
self.interval = float(self.is_integer(config.get('sample_interval', 0))) / 1000
self.data_points = config['data_points']
invalid_data_points = [dp for dp in config['data_points'] if dp not in set(available_data_points)]
if invalid_data_points:
self.logger.warning('Invalid data points in config: {}'.format(invalid_data_points))
self.data_points = [dp for dp in config['data_points'] if dp in set(available_data_points)]
self.data = [['datetime'] + self.data_points]
@staticmethod
def get_cpu_usage(device):
"""Get CPU usage in percentage"""
# return device.shell('dumpsys cpuinfo | grep TOTAL | cut -d" " -f1').strip()[:-1]
shell_result = device.shell('dumpsys cpuinfo | grep TOTAL')
shell_splitted = shell_result.split('%')[0]
if '.-' in shell_splitted:
shell_splitted = shell_splitted.replace('.-', '.')
return shell_splitted
# return device.shell('dumpsys cpuinfo | grep TOTAL').split('%')[0]
@staticmethod
def get_mem_usage(device, app):
"""Get memory usage in KB for app, if app is None system usage is used"""
if not app:
# return device.shell('dumpsys meminfo | grep Used | cut -d" " -f5').strip()[1:-1]
# return device.shell('dumpsys meminfo | grep Used').split()[2].strip()[1:-1].replace(",", ".")
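            # Strip '(', 'k', 'B', ',' and 'K' characters so the 'Used' figure parses cleanly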
return device.shell('dumpsys meminfo | grep Used').translate(str.maketrans('','', '(kB,K')).split()[2]
else:
result = device.shell('dumpsys meminfo {} | grep TOTAL'.format(app))
if result == '':
result = device.shell('dumpsys meminfo {}'.format(app))
if 'No process found' in result:
raise Exception('Android Profiler: {}'.format(result))
return ' '.join(result.strip().split()).split()[1]
def start_profiling(self, device, **kwargs):
self.profile = True
app = kwargs.get('app', None)
self.get_data(device, app)
def get_data(self, device, app):
"""Runs the profiling methods every self.interval seconds in a separate thread"""
start = timeit.default_timer()
device_time = device.shell('date -u')
row = [device_time]
if 'cpu' in self.data_points:
row.append(self.get_cpu_usage(device))
if 'mem' in self.data_points:
row.append(self.get_mem_usage(device, app))
self.data.append(row)
end = timeit.default_timer()
# timer results could be negative
interval = max(float(0), self.interval - max(0, int(end - start)))
if self.profile:
threading.Timer(interval, self.get_data, args=(device, app)).start()
def stop_profiling(self, device, **kwargs):
self.profile = False
def collect_results(self, device):
filename = '{}_{}.csv'.format(device.id, time.strftime('%Y.%m.%d_%H%M%S'))
with open(op.join(self.output_dir, filename), 'w+') as f:
writer = csv.writer(f)
for row in self.data:
writer.writerow(row)
def set_output(self, output_dir):
self.output_dir = output_dir
def dependencies(self):
return []
def load(self, device):
return
def unload(self, device):
return
def aggregate_subject(self):
filename = os.path.join(self.output_dir, 'Aggregated.csv')
subject_rows = list()
subject_rows.append(self.aggregate_android_subject(self.output_dir))
self.write_to_file(filename, subject_rows)
def aggregate_end(self, data_dir, output_file):
rows = self.aggregate_final(data_dir)
self.write_to_file(output_file, rows)
@staticmethod
def aggregate_android_subject(logs_dir):
def add_row(accum, new):
row = {k: v + float(new[k]) for k, v in list(accum.items()) if k not in ['Component', 'count']}
count = accum['count'] + 1
return dict(row, **{'count': count})
runs = []
for run_file in [f for f in os.listdir(logs_dir) if os.path.isfile(os.path.join(logs_dir, f))]:
with open(os.path.join(logs_dir, run_file), 'r') as run:
reader = csv.DictReader(run)
init = dict({fn: 0 for fn in reader.fieldnames if fn != 'datetime'}, **{'count': 0})
run_total = reduce(add_row, reader, init)
runs.append({k: v / run_total['count'] for k, v in list(run_total.items()) if k != 'count'})
runs_total = reduce(lambda x, y: {k: v + y[k] for k, v in list(x.items())}, runs)
return OrderedDict(
sorted(list({'android_' + k: v / len(runs) for k, v in list(runs_total.items())}.items()), key=lambda x: x[0]))
def aggregate_final(self, data_dir):
rows = []
for device in self.list_subdir(data_dir):
row = OrderedDict({'device': device})
device_dir = os.path.join(data_dir, device)
for subject in self.list_subdir(device_dir):
row.update({'subject': subject})
subject_dir = os.path.join(device_dir, subject)
if os.path.isdir(os.path.join(subject_dir, 'AndroidPlugin')):
row.update(self.aggregate_android_final(os.path.join(subject_dir, 'AndroidPlugin')))
rows.append(row.copy())
else:
for browser in self.list_subdir(subject_dir):
row.update({'browser': browser})
browser_dir = os.path.join(subject_dir, browser)
if os.path.isdir(os.path.join(browser_dir, 'AndroidPlugin')):
row.update(self.aggregate_android_final(os.path.join(browser_dir, 'AndroidPlugin')))
rows.append(row.copy())
return rows
@staticmethod
def aggregate_android_final(logs_dir):
for aggregated_file in [f for f in os.listdir(logs_dir) if os.path.isfile(os.path.join(logs_dir, f))]:
if aggregated_file == "Aggregated.csv":
with open(os.path.join(logs_dir, aggregated_file), 'r') as aggregated:
reader = csv.DictReader(aggregated)
row_dict = OrderedDict()
for row in reader:
for f in reader.fieldnames:
row_dict.update({f: row[f]})
return OrderedDict(row_dict)
@staticmethod
def list_subdir(a_dir):
"""List immediate subdirectories of a_dir"""
# https://stackoverflow.com/a/800201
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
@staticmethod
def write_to_file(filename, rows):
with open(filename, 'w') as f:
writer = csv.DictWriter(f, list(rows[0].keys()))
writer.writeheader()
writer.writerows(rows)
@staticmethod
def is_integer(number, minimum=0):
if not isinstance(number, int):
raise ConfigError('%s is not an integer' % number)
if number < minimum:
raise ConfigError('%s should be equal or larger than %i' % (number, minimum))
return number
|
the-stack_106_16168
|
#! /usr/bin/env python
import rospy
import time
import actionlib
from ardrone_as.msg import ArdroneAction, ArdroneGoal, ArdroneResult, ArdroneFeedback
# We create some constants with the corresponding values from the SimpleGoalState class
PENDING = 0
ACTIVE = 1
DONE = 2
WARN = 3
ERROR = 4
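# Clarifying note (added, not in the original script): the polling loop further
# down keeps waiting while client.get_state() returns a value below DONE, i.e.
# while the goal is still PENDING or ACTIVE; WARN and ERROR mirror the terminal
# states described in the comments near the bottom of the script.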
nImage = 1
# definition of the feedback callback. This will be called when feedback
# is received from the action server
# it just prints a message indicating a new message has been received
def feedback_callback(feedback):
"""
Error that might jump
self._feedback.lastImage =
AttributeError: 'ArdroneAS' obj
"""
global nImage
print('[Feedback] image n.%d received'%nImage)
nImage += 1
# initializes the action client node
rospy.init_node('example_no_waitforresult_action_client_node')
action_server_name = '/ardrone_action_server'
client = actionlib.SimpleActionClient(action_server_name, ArdroneAction)
# waits until the action server is up and running
rospy.loginfo('Waiting for action Server '+action_server_name)
client.wait_for_server()
rospy.loginfo('Action Server Found...'+action_server_name)
# creates a goal to send to the action server
goal = ArdroneGoal()
goal.nseconds = 10 # indicates, take pictures along 10 seconds
client.send_goal(goal, feedback_cb=feedback_callback)
# You can access the SimpleActionClient variable "simple_state", which will be 1 if active and 2 when finished.
# It's a variable, though; better to use a function like get_state().
#state = client.simple_state
# state_result will give the FINAL STATE. It will be 1 when ACTIVE, 2 if DONE with NO ERROR, 3 if there is any WARNING, and 4 if ERROR
state_result = client.get_state()
rate = rospy.Rate(1)
rospy.loginfo("state_result: "+str(state_result))
counter = 0
while state_result < DONE:
rospy.loginfo("Doing Stuff while waiting for the Server to give a result....")
counter += 1
rate.sleep()
state_result = client.get_state()
rospy.loginfo("state_result: "+str(state_result)+", counter ="+str(counter))
if counter == 2:
rospy.logwarn("Canceling Goal...")
client.cancel_goal()
rospy.logwarn("Goal Canceled")
state_result = client.get_state()
rospy.loginfo("Update state_result after Cancel : "+str(state_result)+", counter ="+str(counter))
|
the-stack_106_16169
|
"""SCons.Tool.swig
Tool-specific initialization for swig.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Action
import SCons.Defaults
import SCons.Tool
import SCons.Util
from SCons.Scanner import Scanner
import os
import re
SwigAction = SCons.Action.Action('$SWIGCOM', '$SWIGCOMSTR')
def swigSuffixEmitter(env, source):
if '-c++' in SCons.Util.CLVar(env.subst("$SWIGFLAGS")):
return '$SWIGCXXFILESUFFIX'
else:
return '$SWIGCFILESUFFIX'
_reInclude = re.compile(r'%include\s+(\S+)')
_reModule = re.compile(r'%module\s+(.+)')
def recurse(path, searchPath):
global _reInclude
f = open(path)
try: contents = f.read()
finally: f.close()
found = []
# Better code for when we drop Python 1.5.2.
#for m in _reInclude.finditer(contents):
# fname = m.group(1)
for fname in _reInclude.findall(contents):
for dpath in searchPath:
absPath = os.path.join(dpath, fname)
if os.path.isfile(absPath):
found.append(absPath)
break
# Equivalent code for when we drop Python 1.5.2.
#for f in [f for f in found if os.path.splitext(f)[1] == ".i"]:
# found += recurse(f, searchPath)
for f in filter(lambda f: os.path.splitext(f)[1] == ".i", found):
found = found + recurse(f, searchPath)
return found
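# Illustrative example (hypothetical file names, added for clarity): if foo.i
# contains the line
#   %include bar.i
# and bar.i lives in one of the directories in searchPath, then
#   recurse('foo.i', searchPath)
# returns ['<searchPath dir>/bar.i', ...] plus anything bar.i itself %includes.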
def _scanSwig(node, env, path):
r = recurse(str(node), [os.path.abspath(os.path.dirname(str(node))), os.path.abspath(os.path.join("include", "swig"))])
return r
def _swigEmitter(target, source, env):
for src in source:
src = str(src)
mname = None
flags = SCons.Util.CLVar(env.subst("$SWIGFLAGS"))
if "-python" in flags and "-noproxy" not in flags:
f = open(src)
try:
for l in f.readlines():
m = _reModule.match(l)
if m:
mname = m.group(1)
finally:
f.close()
if mname is not None:
target.append(mname + ".py")
return (target, source)
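# Example of the emitter's effect (sketch, module name hypothetical): building
# example.i with SWIGFLAGS='-python' (and without '-noproxy') when the source
# contains a '%module example' line adds 'example.py' to the target list, so
# SCons tracks the generated proxy module alongside the wrapper C/C++ file.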
def generate(env):
"""Add Builders and construction variables for swig to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
c_file.suffix['.i'] = swigSuffixEmitter
cxx_file.suffix['.i'] = swigSuffixEmitter
c_file.add_action('.i', SwigAction)
c_file.add_emitter('.i', _swigEmitter)
cxx_file.add_action('.i', SwigAction)
cxx_file.add_emitter('.i', _swigEmitter)
env['SWIG'] = 'swig'
env['SWIGFLAGS'] = SCons.Util.CLVar('')
env['SWIGCFILESUFFIX'] = '_wrap$CFILESUFFIX'
env['SWIGCXXFILESUFFIX'] = '_wrap$CXXFILESUFFIX'
env['SWIGCOM'] = '$SWIG $SWIGFLAGS -o $TARGET $SOURCES'
env.Append(SCANNERS=Scanner(function=_scanSwig, skeys=[".i"]))
def exists(env):
return env.Detect(['swig'])
|
the-stack_106_16170
|
from datetime import date, datetime, timedelta
import functools
import inspect
import re
from typing import Any, List
import warnings
import numpy as np
from pandas._libs import NaT, Timestamp, lib, tslib, writers
import pandas._libs.internals as libinternals
from pandas._libs.tslibs import Timedelta, conversion
from pandas._libs.tslibs.timezones import tz_compare
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
astype_nansafe,
find_common_type,
infer_dtype_from,
infer_dtype_from_scalar,
maybe_downcast_numeric,
maybe_downcast_to_dtype,
maybe_infer_dtype_type,
maybe_promote,
maybe_upcast,
soft_convert_objects,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
_TD_DTYPE,
ensure_platform_int,
is_bool_dtype,
is_categorical,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_extension_type,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_re,
is_re_compilable,
is_sparse,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.concat import concat_categorical, concat_datetime
from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import (
_isna_compat,
array_equivalent,
is_valid_nat_for_dtype,
isna,
notna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.indexers import (
check_setitem_lengths,
is_empty_indexer,
is_scalar_indexer,
)
import pandas.core.missing as missing
from pandas.core.nanops import nanpercentile
from pandas.io.formats.printing import pprint_thing
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ["_mgr_locs", "values", "ndim"]
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_extension = False
_can_hold_na = False
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = "dense"
_concatenator = staticmethod(np.concatenate)
def __init__(self, values, placement, ndim=None):
self.ndim = self._check_ndim(values, ndim)
self.mgr_locs = placement
self.values = values
if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):
raise ValueError(
"Wrong number of items passed {val}, placement implies "
"{mgr}".format(val=len(self.values), mgr=len(self.mgr_locs))
)
def _check_ndim(self, values, ndim):
"""
ndim inference and validation.
Infers ndim from 'values' if not provided to __init__.
Validates that values.ndim and ndim are consistent if and only if
the class variable '_validate_ndim' is True.
Parameters
----------
values : array-like
ndim : int or None
Returns
-------
ndim : int
Raises
------
        ValueError : the number of dimensions does not match
"""
if ndim is None:
ndim = values.ndim
if self._validate_ndim and values.ndim != ndim:
msg = "Wrong number of dimensions. values.ndim != ndim [{} != {}]"
raise ValueError(msg.format(values.ndim, ndim))
return ndim
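    # Worked example of the validation above (added note, not pandas source):
    # with values.ndim == 2, passing ndim=None infers ndim=2; passing ndim=1
    # while _validate_ndim is True raises
    #   ValueError: Wrong number of dimensions. values.ndim != ndim [2 != 1]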
@property
def _holder(self):
"""The array-like that can hold the underlying values.
None for 'Block', overridden by subclasses that don't
use an ndarray.
"""
return None
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
        validate that the requested dtype is astype-able to categorical;
        returns a boolean indicating whether the target dtype is categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
elif is_categorical_dtype(dtype):
return True
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
this is often overridden to handle to_dense like operations
"""
if is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def get_block_values(self, dtype=None):
"""
This is used in the JSON C code
"""
return self.get_values(dtype=dtype)
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, libinternals.BlockPlacement):
new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None):
"""
Create a new block, with type inference propagate any values that are
not specified
"""
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, ndim=self.ndim)
def make_block_same_class(self, values, placement=None, ndim=None, dtype=None):
""" Wrap given values in a block of same type as self. """
if dtype is not None:
# issue 19431 fastparquet is passing this
warnings.warn(
"dtype argument is deprecated, will be removed in a future release.",
FutureWarning,
)
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(
values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype
)
def __repr__(self):
# don't want to print out all of the items here
name = pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = "{name}: {len} dtype: {dtype}".format(
name=name, len=len(self), dtype=self.dtype
)
else:
shape = " x ".join(pprint_thing(s) for s in self.shape)
result = "{name}: {index}, {shape}, dtype: {dtype}".format(
name=name,
index=pprint_thing(self.mgr_locs.indexer),
shape=shape,
dtype=self.dtype,
)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = libinternals.BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
if getattr(self.values, "_pandas_ftype", False):
dtype = self.dtype.subtype
else:
dtype = self.dtype
return "{dtype}:{ftype}".format(dtype=dtype, ftype=self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
return self.make_block_same_class(
values, placement=placement or slice(0, len(values), 1)
)
def iget(self, i):
return self.values[i]
def set(self, locs, values):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
with np.errstate(all="ignore"):
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result, ndim=self.ndim))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
inplace = validate_bool_kwarg(inplace, "inplace")
mask = isna(self.values)
if limit is not None:
if not is_integer(limit):
raise ValueError("Limit must be an integer")
if limit < 1:
raise ValueError("Limit must be greater than 0")
mask[mask.cumsum(self.ndim - 1) > limit] = False
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
if self._can_hold_element(value):
# equivalent: _try_coerce_args(value) would not raise
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# operate column-by-column
def f(mask, val, idx):
block = self.coerce_to_target_dtype(value)
# slice out our block
if idx is not None:
# i.e. self.ndim == 2
block = block.getitem_block(slice(idx, idx + 1))
return block.fillna(value, limit=limit, inplace=inplace, downcast=None)
return self.split_and_operate(None, f, inplace)
def split_and_operate(self, mask, f, inplace: bool):
"""
split the block per-column, and apply the callable f
per-column, return a new block for each. Handle
masking which will not change a block unless needed.
Parameters
----------
mask : 2-d boolean mask
f : callable accepting (1d-mask, 1d values, indexer)
inplace : boolean
Returns
-------
list of blocks
"""
if mask is None:
mask = np.broadcast_to(True, shape=self.shape)
new_values = self.values
def make_a_block(nv, ref_loc):
if isinstance(nv, list):
assert len(nv) == 1, nv
assert isinstance(nv[0], Block)
block = nv[0]
else:
# Put back the dimension that was taken from it and make
# a block out of the result.
nv = _block_shape(nv, ndim=self.ndim)
block = self.make_block(values=nv, placement=ref_loc)
return block
# ndim == 1
if self.ndim == 1:
if mask.any():
nv = f(mask, new_values, None)
else:
nv = new_values if inplace else new_values.copy()
block = make_a_block(nv, self.mgr_locs)
return [block]
# ndim > 1
new_blocks = []
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
nv = f(m, v, i)
else:
nv = v if inplace else v.copy()
block = make_a_block(nv, [ref_loc])
new_blocks.append(block)
return new_blocks
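    # Added note (interpretation of the code above): the callable f receives
    # (1-d mask, 1-d column values, column index or None). For a 2-D block with
    # hypothetical mgr_locs [3, 7], f is invoked once per column and each result
    # is re-wrapped into its own single-column block placed at [3] and [7];
    # columns whose mask is all-False are passed through (copied unless inplace).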
def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
# no need to downcast our float
# unless indicated
if downcast is None and (
self.is_float or self.is_timedelta or self.is_datetime
):
return blocks
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = "infer"
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == "infer" or isinstance(dtypes, dict)):
raise ValueError(
"downcast must have a dictionary or 'infer' as its argument"
)
elif dtypes != "infer":
raise AssertionError("dtypes as dict is not supported yet")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(mask, val, idx):
val = maybe_downcast_to_dtype(val, dtype="infer")
return val
return self.split_and_operate(None, f, False)
def astype(self, dtype, copy=False, errors="raise", **kwargs):
return self._astype(dtype, copy=copy, errors=errors, **kwargs)
def _astype(self, dtype, copy=False, errors="raise", **kwargs):
"""Coerce to the new type
Parameters
----------
dtype : str, dtype convertible
copy : boolean, default False
copy if indicated
errors : str, {'raise', 'ignore'}, default 'ignore'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Returns
-------
Block
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(list(errors_legal_values), errors)
)
raise ValueError(invalid_arg)
if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
msg = (
"Expected an instance of {}, but got the class instead. "
"Try instantiating 'dtype'.".format(dtype.__name__)
)
raise TypeError(msg)
# may need to convert to categorical
if self.is_categorical_astype(dtype):
# deprecated 17636
for deprecated_arg in ("categories", "ordered"):
if deprecated_arg in kwargs:
raise ValueError(
"Got an unexpected argument: {}".format(deprecated_arg)
)
categories = kwargs.get("categories", None)
ordered = kwargs.get("ordered", None)
if com.any_not_none(categories, ordered):
dtype = CategoricalDtype(categories, ordered)
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
return self.make_block(Categorical(self.values, dtype=dtype))
dtype = pandas_dtype(dtype)
# astype processing
if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
# force the copy here
if self.is_extension:
# TODO: Should we try/except this astype?
values = self.values.astype(dtype)
else:
if issubclass(dtype.type, str):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.get_values()
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
vals1d = values.ravel()
try:
values = astype_nansafe(vals1d, dtype, copy=True, **kwargs)
except (ValueError, TypeError):
# e.g. astype_nansafe can fail on object-dtype of strings
# trying to convert to float
if errors == "raise":
raise
newb = self.copy() if copy else self
return newb
# TODO(extension)
# should we make this attribute?
if isinstance(values, np.ndarray):
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
"cannot set astype for copy = [{copy}] for dtype "
"({dtype} [{shape}]) to different shape "
"({newb_dtype} [{newb_shape}])".format(
copy=copy,
dtype=self.dtype.name,
shape=self.shape,
newb_dtype=newb.dtype.name,
newb_shape=newb.shape,
)
)
return newb
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, element: Any) -> bool:
""" require the same dtype as ourselves """
dtype = self.values.dtype.type
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, dtype)
return isinstance(element, dtype)
def _try_coerce_args(self, other):
""" provide coercion to our input arguments """
if np.any(notna(other)) and not self._can_hold_element(other):
# coercion issues
# let higher levels handle
raise TypeError(
"cannot convert {} to an {}".format(
type(other).__name__,
type(self).__name__.lower().replace("Block", ""),
)
)
if np.any(isna(other)) and not self._can_hold_na:
raise TypeError(
"cannot convert {} to an {}".format(
type(other).__name__,
type(self).__name__.lower().replace("Block", ""),
)
)
return other
def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.get_values()
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
if not self.is_object and not quoting:
itemsize = writers.word_len(na_rep)
values = values.astype("<U{size}".format(size=itemsize))
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
return values
# block actions #
def copy(self, deep=True):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values, ndim=self.ndim)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API compatibility.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
original_to_replace = to_replace
# If we cannot replace with own dtype, convert to ObjectBlock and
# retry
if not self._can_hold_element(to_replace):
if not isinstance(to_replace, list):
if inplace:
return [self]
return [self.copy()]
to_replace = [x for x in to_replace if self._can_hold_element(x)]
if not len(to_replace):
# GH#28084 avoid costly checks since we can infer
# that there is nothing to replace in this block
if inplace:
return [self]
return [self.copy()]
if len(to_replace) == 1:
# _can_hold_element checks have reduced this back to the
# scalar case and we can avoid a costly object cast
return self.replace(
to_replace[0],
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise AssertionError
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
values = self.values
to_replace = self._try_coerce_args(to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
try:
blocks = self.putmask(mask, value, inplace=inplace)
# Note: it is _not_ the case that self._can_hold_element(value)
# is always true at this point. In particular, that can fail
# for:
# "2u" with bool-dtype, float-dtype
# 0.5 with int64-dtype
# np.nan with int64-dtype
except (TypeError, ValueError):
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise
assert not self._can_hold_element(value), value
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=original_to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
if convert:
blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks]
return blocks
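    # Added summary of the fallback flow above (not pandas source): if the block
    # cannot hold `to_replace` (or the putmask call raises), the block is cast to
    # object dtype and replace is retried on the object block; ObjectBlock itself
    # re-raises instead of recursing, which avoids the infinite loop noted for
    # GH 22083.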
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs["inplace"] else self.copy()
def setitem(self, indexer, value):
"""Set the value inplace, returning a a maybe different typed block.
Parameters
----------
indexer : tuple, list-like, array-like, slice
The subset of self.values to set
value : object
The value being set
Returns
-------
Block
Notes
-----
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
transpose = self.ndim == 2
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
if self._can_hold_element(value):
value = self._try_coerce_args(value)
else:
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, "dtype"):
dtype = value.dtype
find_dtype = True
elif lib.is_scalar(value) and not isna(value):
dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
find_dtype = True
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value)
# value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
if transpose:
values = values.T
# length checking
check_setitem_lengths(indexer, value, values)
if is_empty_indexer(indexer, arr_value):
# GH#8669 empty indexers
pass
elif is_scalar_indexer(indexer, arr_value):
# setting a single element for each dim and with a rhs that could
# be e.g. a list; see GH#6043
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (
len(arr_value.shape)
and arr_value.shape[0] == values.shape[0]
and arr_value.size == values.size
):
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
values[indexer] = value
if transpose:
values = values.T
block = self.make_block(values)
return block
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new = getattr(new, "values", new)
mask = getattr(mask, "values", mask)
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
# FIXME: make sure we have compatible NA
new = self.fill_value
if self._can_hold_element(new):
new = self._try_coerce_args(new)
if transpose:
new_values = new_values.T
# If the default repeat behavior in np.putmask would go in the
# wrong direction, then explicitly repeat and reshape new instead
if getattr(new, "ndim", 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
# we require exact matches between the len of the
# values we are setting (or is compat). np.putmask
# doesn't check this and will simply truncate / pad
# the output, but we want sane error messages
#
# TODO: this prob needs some better checking
# for 2D cases
if (
is_list_like(new)
and np.any(mask[mask])
and getattr(new, "ndim", 1) == 1
):
if not (
mask.shape[-1] == len(new)
or mask[mask].shape[-1] == len(new)
or len(new) == 1
):
raise ValueError("cannot assign mismatch length to masked array")
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, "ndim", 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# operate column-by-column
def f(mask, val, idx):
if idx is None:
# ndim==1 case.
n = new
else:
if isinstance(new, np.ndarray):
n = np.squeeze(new[idx % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(val, mask, n)
return nv
new_blocks = self.split_and_operate(mask, f, inplace)
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values)]
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif (self.is_float or self.is_complex) and (
is_integer_dtype(dtype) or is_float_dtype(dtype)
):
# don't coerce float/complex to int
return self
elif (
self.is_datetime
or is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
):
# not a datetime
if not (
(is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype))
and self.is_datetime
):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, "tz", None)
othertz = getattr(dtype, "tz", None)
if not tz_compare(mytz, othertz):
return self.astype(object)
raise AssertionError(
"possible recursion in "
"coerce_to_target_dtype: {} {}".format(self, other)
)
elif self.is_timedelta or is_timedelta64_dtype(dtype):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError(
"possible recursion in "
"coerce_to_target_dtype: {} {}".format(self, other)
)
try:
return self.astype(dtype)
except (ValueError, TypeError, OverflowError):
return self.astype(object)
def interpolate(
self,
method="pad",
axis=0,
index=None,
values=None,
inplace=False,
limit=None,
limit_direction="forward",
limit_area=None,
fill_value=None,
coerce=False,
downcast=None,
**kwargs
):
inplace = validate_bool_kwarg(inplace, "inplace")
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except ValueError:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(
method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast,
)
# validate the interp method
m = missing.clean_interp_method(method, **kwargs)
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(
method=m,
index=index,
values=values,
axis=axis,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs
)
def _interpolate_with_fill(
self,
method="pad",
axis=0,
inplace=False,
limit=None,
fill_value=None,
coerce=False,
downcast=None,
):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, "inplace")
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
fill_value = self._try_coerce_args(fill_value)
values = missing.interpolate_2d(
values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype,
)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(
self,
method=None,
index=None,
values=None,
fill_value=None,
axis=0,
limit=None,
limit_direction="forward",
limit_area=None,
inplace=False,
downcast=None,
**kwargs
):
""" interpolate using scipy wrappers """
inplace = validate_bool_kwarg(inplace, "inplace")
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ("krogh", "piecewise_polynomial", "pchip"):
if not index.is_monotonic:
raise ValueError(
"{0} interpolation requires that the "
"index be monotonic.".format(method)
)
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(
index,
x,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
bounds_error=False,
**kwargs
)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block_same_class(interp_values)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if fill_tuple is None:
fill_value = self.fill_value
allow_fill = False
else:
fill_value = fill_tuple[0]
allow_fill = True
new_values = algos.take_nd(
values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value
)
# Called from three places in managers, all of which satisfy
# this assertion
assert not (axis == 0 and new_mgr_locs is None)
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n: int, axis: int = 1) -> List["Block"]:
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values)]
def shift(self, periods, axis=0, fill_value=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(self.values, fill_value)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values)]
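    # Illustrative sketch (values hypothetical, added note): shifting an int64
    # block holding [1, 2, 3] by periods=1 first upcasts to float64 via
    # maybe_upcast (so the fill value can be NaN), rolls to [3, 1, 2], and then
    # overwrites the leading slot with the fill value, yielding [nan, 1.0, 2.0].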
def where(
self,
other,
cond,
align=True,
errors="raise",
try_cast: bool = False,
axis: int = 0,
) -> List["Block"]:
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
axis : int
Returns
-------
a new block(s), the result of the func
"""
import pandas.core.computation.expressions as expressions
assert errors in ["raise", "ignore"]
transpose = self.ndim == 2
values = self.values
orig_other = other
if transpose:
values = values.T
other = getattr(other, "_values", getattr(other, "values", other))
cond = getattr(cond, "values", cond)
# If the default broadcasting would go in the wrong direction, then
# explicitly reshape other instead
if getattr(other, "ndim", 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1,)))
elif transpose and values.ndim == self.ndim - 1:
cond = cond.T
if not hasattr(cond, "shape"):
raise ValueError("where must have a condition that is ndarray like")
# our where function
def func(cond, values, other):
if not (
(self.is_integer or self.is_bool)
and lib.is_float(other)
and np.isnan(other)
):
# np.where will cast integer array to floats in this case
other = self._try_coerce_args(other)
fastres = expressions.where(cond, values, other)
return fastres
if cond.ravel().all():
result = values
else:
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
try:
result = func(cond, values, other)
except TypeError:
# we cannot coerce, return a compat dtype
# we are explicitly ignoring errors
block = self.coerce_to_target_dtype(other)
blocks = block.where(
orig_other,
cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=axis,
)
return self._maybe_downcast(blocks, "infer")
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
return [self.make_block(result)]
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
taken = result.take(m.nonzero()[0], axis=axis)
r = maybe_downcast_numeric(taken, self.dtype)
nb = self.make_block(r.T, placement=self.mgr_locs[m])
result_blocks.append(nb)
return result_blocks
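    # Added note (interpretation of the code above, not pandas documentation):
    # for blocks that cannot hold NA, the final loop partitions columns by
    # whether their condition was entirely True, emitting separate result blocks
    # so the two groups can end up with different (possibly downcast) dtypes.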
def equals(self, other) -> bool:
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
n_rows : int
Only used in ExtensionBlock._unstack
fill_value : int
Only used in ExtensionBlock._unstack
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
unstacker = unstacker_func(self.values.T)
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [make_block(new_values, placement=new_placement)]
return blocks, mask
def quantile(self, qs, interpolation="linear", axis=0):
"""
        compute the quantiles of the block's values
Parameters
----------
qs: a scalar or list of the quantiles to be computed
interpolation: type of interpolation, default 'linear'
axis: axis to compute, default 0
Returns
-------
Block
"""
        # We should always have ndim == 2 because Series dispatches to DataFrame
assert self.ndim == 2
values = self.get_values()
is_empty = values.shape[axis] == 0
orig_scalar = not is_list_like(qs)
if orig_scalar:
# make list-like, unpack later
qs = [qs]
if is_empty:
# create the array of na_values
# 2d len(values) * len(qs)
result = np.repeat(
np.array([self.fill_value] * len(qs)), len(values)
).reshape(len(values), len(qs))
else:
# asarray needed for Sparse, see GH#24600
mask = np.asarray(isna(values))
result = nanpercentile(
values,
np.array(qs) * 100,
axis=axis,
na_value=self.fill_value,
mask=mask,
ndim=values.ndim,
interpolation=interpolation,
)
result = np.array(result, copy=False)
result = result.T
if orig_scalar and not lib.is_scalar(result):
# result could be scalar in case with is_empty and self.ndim == 1
assert result.shape[-1] == 1, result.shape
result = result[..., 0]
result = lib.item_from_zerodim(result)
ndim = np.ndim(result)
return make_block(result, placement=np.arange(len(result)), ndim=ndim)
def _replace_coerce(
self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
):
"""
Replace value corresponding to the given boolean array with another
value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicate corresponding element is ignored.
Returns
-------
A new block if there is anything to replace or the original block.
"""
if mask.any():
if not regex:
self = self.coerce_to_target_dtype(value)
return self.putmask(mask, value, inplace=inplace)
else:
return self._replace_single(
to_replace,
value,
inplace=inplace,
regex=regex,
convert=convert,
mask=mask,
)
return self
class NonConsolidatableMixIn:
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
def __init__(self, values, placement, ndim=None):
"""Initialize a non-consolidatable block.
'ndim' may be inferred from 'placement'.
        This will continue to call __init__ for the other base
        classes mixed in with this Mixin.
"""
# Placement must be converted to BlockPlacement so that we can check
# its length
if not isinstance(placement, libinternals.BlockPlacement):
placement = libinternals.BlockPlacement(placement)
# Maybe infer ndim from placement
if ndim is None:
if len(placement) != 1:
ndim = 1
else:
ndim = 2
super().__init__(values, placement, ndim=ndim)
@property
def shape(self):
if self.ndim == 1:
return ((len(self.values)),)
return (len(self.mgr_locs), len(self.values))
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not com.is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
elif isinstance(col, slice):
if col != slice(None):
raise NotImplementedError(col)
return self.values[[loc]]
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block, the result of the putmask
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
new = self._try_coerce_args(new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
mask = _safe_reshape(mask, new_values.shape)
new_values[mask] = new
return [self.make_block(values=new_values)]
def _get_unstack_items(self, unstacker, new_columns):
"""
Get the placement, values, and mask for a Block unstack.
This is shared between ObjectBlock and ExtensionBlock. They
differ in that ObjectBlock passes the values, while ExtensionBlock
passes the dummy ndarray of positions to be used by a take
later.
Parameters
----------
unstacker : pandas.core.reshape.reshape._Unstacker
new_columns : Index
All columns of the unstacked BlockManager.
Returns
-------
new_placement : ndarray[int]
The placement of the new columns in `new_columns`.
new_values : Union[ndarray, ExtensionArray]
The first return value from _Unstacker.get_new_values.
mask : ndarray[bool]
The second return value from _Unstacker.get_new_values.
"""
# shared with ExtensionBlock
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
return new_placement, new_values, mask
class ExtensionBlock(NonConsolidatableMixIn, Block):
"""Block for holding extension types.
Notes
-----
This holds all 3rd-party extension array types. It's also the immediate
parent class for our internal extension types' blocks, CategoricalBlock.
ExtensionArrays are limited to 1-D.
"""
is_extension = True
def __init__(self, values, placement, ndim=None):
values = self._maybe_coerce_values(values)
super().__init__(values, placement, ndim)
def _maybe_coerce_values(self, values):
"""
Unbox to an extension array.
This will unbox an ExtensionArray stored in an Index or Series.
ExtensionArrays pass through. No dtype coercion is done.
Parameters
----------
values : Index, Series, ExtensionArray
Returns
-------
ExtensionArray
"""
return extract_array(values)
@property
def _holder(self):
# For extension blocks, the holder is values-dependent.
return type(self.values)
@property
def fill_value(self):
# Used in reindex_indexer
return self.values.dtype.na_value
@property
def _can_hold_na(self):
# The default ExtensionArray._can_hold_na is True
return self._holder._can_hold_na
@property
def is_view(self):
"""Extension arrays are never treated as views."""
return False
@property
def is_numeric(self):
return self.values.dtype._is_numeric
def setitem(self, indexer, value):
"""Set the value inplace, returning a same-typed block.
This differs from Block.setitem by not allowing setitem to change
the dtype of the Block.
Parameters
----------
indexer : tuple, list-like, array-like, slice
The subset of self.values to set
value : object
The value being set
Returns
-------
Block
Notes
-----
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
if isinstance(indexer, tuple):
# we are always 1-D
indexer = indexer[0]
check_setitem_lengths(indexer, value, self.values)
self.values[indexer] = value
return self
def get_values(self, dtype=None):
# ExtensionArrays must be iterable, so this works.
values = np.asarray(self.values)
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def to_dense(self):
return np.asarray(self.values)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True)
# Called from three places in managers, all of which satisfy
# this assertion
assert not (self.ndim == 1 and new_mgr_locs is None)
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _can_hold_element(self, element: Any) -> bool:
# XXX: We may need to think about pushing this onto the array.
# We're doing the same as CategoricalBlock here.
return True
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
slicer = slicer[1]
return self.values[slicer]
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._holder._concat_same_type([blk.values for blk in to_concat])
placement = placement or slice(0, len(values), 1)
return self.make_block_same_class(values, ndim=self.ndim, placement=placement)
def fillna(self, value, limit=None, inplace=False, downcast=None):
values = self.values if inplace else self.values.copy()
values = values.fillna(value=value, limit=limit)
return [
self.make_block_same_class(
values=values, placement=self.mgr_locs, ndim=self.ndim
)
]
def interpolate(
self, method="pad", axis=0, inplace=False, limit=None, fill_value=None, **kwargs
):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(
values=values.fillna(value=fill_value, method=method, limit=limit),
placement=self.mgr_locs,
)
def shift(
self,
periods: int,
axis: libinternals.BlockPlacement = 0,
fill_value: Any = None,
) -> List["ExtensionBlock"]:
"""
Shift the block by `periods`.
Dispatches to underlying ExtensionArray and re-boxes in an
ExtensionBlock.
"""
return [
self.make_block_same_class(
self.values.shift(periods=periods, fill_value=fill_value),
placement=self.mgr_locs,
ndim=self.ndim,
)
]
def where(
self,
other,
cond,
align=True,
errors="raise",
try_cast: bool = False,
axis: int = 0,
) -> List["Block"]:
if isinstance(other, ABCDataFrame):
# ExtensionArrays are 1-D, so if we get here then
# `other` should be a DataFrame with a single column.
assert other.shape[1] == 1
other = other.iloc[:, 0]
other = extract_array(other, extract_numpy=True)
if isinstance(cond, ABCDataFrame):
assert cond.shape[1] == 1
cond = cond.iloc[:, 0]
cond = extract_array(cond, extract_numpy=True)
if lib.is_scalar(other) and isna(other):
# The default `other` for Series / Frame is np.nan
# we want to replace that with the correct NA value
# for the type
other = self.dtype.na_value
if is_sparse(self.values):
# TODO(SparseArray.__setitem__): remove this if condition
# We need to re-infer the type of the data after doing the
# where, for cases where the subtypes don't match
dtype = None
else:
dtype = self.dtype
result = self.values.copy()
icond = ~cond
if lib.is_scalar(other):
set_other = other
else:
set_other = other[icond]
try:
result[icond] = set_other
except (NotImplementedError, TypeError):
# NotImplementedError for class not implementing `__setitem__`
# TypeError for SparseArray, which implements just to raise
# a TypeError
result = self._holder._from_sequence(
np.where(cond, self.values, other), dtype=dtype
)
return [self.make_block_same_class(result, placement=self.mgr_locs)]
@property
def _ftype(self):
return getattr(self.values, "_pandas_ftype", Block._ftype)
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
# ExtensionArray-safe unstack.
# We override ObjectBlock._unstack, which unstacks directly on the
# values of the array. For EA-backed blocks, this would require
# converting to a 2-D ndarray of objects.
# Instead, we unstack an ndarray of integer positions, followed by
# a `take` on the actual values.
dummy_arr = np.arange(n_rows)
dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)
unstacker = dummy_unstacker(dummy_arr)
new_placement, new_values, mask = self._get_unstack_items(
unstacker, new_columns
)
blocks = [
self.make_block_same_class(
self.values.take(indices, allow_fill=True, fill_value=fill_value),
[place],
)
for indices, place in zip(new_values.T, new_placement)
]
return blocks, mask
class ObjectValuesExtensionBlock(ExtensionBlock):
"""
Block providing backwards-compatibility for `.values`.
Used by PeriodArray and IntervalArray to ensure that
Series[T].values is an ndarray of objects.
"""
def external_values(self, dtype=None):
return self.values.astype(object)
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other) -> bool:
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(
tipo.type, (np.datetime64, np.timedelta64)
)
return isinstance(
element, (float, int, np.floating, np.int_)
) and not isinstance(
element,
(bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64),
)
def to_native_types(
self,
slicer=None,
na_rep="",
float_format=None,
decimal=".",
quoting=None,
**kwargs
):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
# see gh-13418: no special formatting is desired at the
# output (important for appropriate 'quoting' behaviour),
# so do not pass it through the FloatArrayFormatter
if float_format is None and decimal == ".":
mask = isna(values)
if not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
return values
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(
values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
return formatter.get_result_as_array()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))
return isinstance(
element, (float, int, complex, np.float_, np.int_)
) and not isinstance(element, (bool, np.bool_))
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return (
issubclass(tipo.type, np.integer)
and not issubclass(tipo.type, (np.datetime64, np.timedelta64))
and self.dtype.itemsize >= tipo.itemsize
)
return is_integer(element)
def should_store(self, value):
return is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin:
"""Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock."""
@property
def _holder(self):
return DatetimeArray
@property
def fill_value(self):
return np.datetime64("NaT", "ns")
def get_values(self, dtype=None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
if is_object_dtype(dtype):
values = self.values.ravel()
result = self._holder(values).astype(object)
return result.reshape(self.values.shape)
return self.values
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
def __init__(self, values, placement, ndim=None):
values = self._maybe_coerce_values(values)
super().__init__(values, placement=placement, ndim=ndim)
@property
def _can_hold_na(self):
return True
def _maybe_coerce_values(self, values):
"""
Input validation for values passed to __init__. Ensure that
we have datetime64ns, coercing if necessary.
Parameters
----------
values : array-like
Must be convertible to datetime64
Returns
-------
values : ndarray[datetime64ns]
Overridden by DatetimeTZBlock.
"""
if values.dtype != _NS_DTYPE:
values = conversion.ensure_datetime64ns(values)
if isinstance(values, DatetimeArray):
values = values._data
assert isinstance(values, np.ndarray), type(values)
return values
def _astype(self, dtype, **kwargs):
"""
these automatically copy, so copy=True has no effect;
raise an exception if raise == True
"""
dtype = pandas_dtype(dtype)
# if we are passed a datetime64[ns, tz]
if is_datetime64tz_dtype(dtype):
values = self.values
if getattr(values, "tz", None) is None:
values = DatetimeArray(values).tz_localize("UTC")
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super()._astype(dtype=dtype, **kwargs)
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
if self.is_datetimetz:
# require exact match, since non-nano does not exist
return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(
element, self.dtype
)
# GH#27419 if we get a non-nano datetime64 object
return is_datetime64_dtype(tipo)
elif element is NaT:
return True
elif isinstance(element, datetime):
if self.is_datetimetz:
return tz_compare(element.tzinfo, self.dtype.tz)
return element.tzinfo is None
return is_valid_nat_for_dtype(element, self.dtype)
def _try_coerce_args(self, other):
"""
Coerce other to datetime64[ns]. Null values (NaN, None, NaT)
convert to datetime64("NaT", "ns"). values is always ndarray-like,
other may not be
Parameters
----------
other : ndarray-like or scalar
Returns
-------
base-type other
"""
if is_valid_nat_for_dtype(other, self.dtype):
other = np.datetime64("NaT", "ns")
elif isinstance(other, (datetime, np.datetime64, date)):
other = Timestamp(other)
if other.tz is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a naive Block")
other = other.asm8
elif hasattr(other, "dtype") and is_datetime64_dtype(other):
# TODO: can we get here with non-nano?
pass
else:
# coercion issues
# let higher levels handle
raise TypeError(other)
return other
def to_native_types(
self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs
):
""" convert to our native types format, slicing if desired """
values = self.values
i8values = self.values.view("i8")
if slicer is not None:
values = values[..., slicer]
i8values = i8values[..., slicer]
from pandas.io.formats.format import _get_format_datetime64_from_values
fmt = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(
i8values.ravel(),
tz=getattr(self.values, "tz", None),
format=fmt,
na_rep=na_rep,
).reshape(i8values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return (
issubclass(value.dtype.type, np.datetime64)
and not is_datetime64tz_dtype(value)
and not is_extension_array_dtype(value)
)
def set(self, locs, values):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
values = conversion.ensure_datetime64ns(values, copy=False)
self.values[locs] = values
def external_values(self):
return np.asarray(self.values.astype("datetime64[ns]", copy=False))
class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
is_datetimetz = True
is_extension = True
_can_hold_element = DatetimeBlock._can_hold_element
fill_value = np.datetime64("NaT", "ns")
@property
def _holder(self):
return DatetimeArray
def _maybe_coerce_values(self, values):
"""Input validation for values passed to __init__. Ensure that
we have datetime64TZ, coercing if necessary.
Parameters
----------
values : array-like
Must be convertible to datetime64
Returns
-------
values : DatetimeArray
"""
if not isinstance(values, self._holder):
values = self._holder(values)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
return values
@property
def is_view(self):
""" return a boolean if I am possibly a view """
# check the ndarray values of the DatetimeIndex values
return self.values._data.base is not None
def get_values(self, dtype=None):
"""
Returns an ndarray of values.
Parameters
----------
dtype : np.dtype
Only `object`-like dtypes are respected here (not sure
why).
Returns
-------
values : ndarray
When ``dtype=object``, then an object-dtype ndarray of
boxed values is returned. Otherwise, an M8[ns] ndarray
is returned.
DatetimeArray is always 1-d. ``get_values`` will reshape
the return value to be the same dimensionality as the
block.
"""
values = self.values
if is_object_dtype(dtype):
values = values.astype(object)
values = np.asarray(values)
if self.ndim == 2:
# Ensure that our shape is correct for DataFrame.
# ExtensionArrays are always 1-D, even in a DataFrame when
# the analogous NumPy-backed column would be a 2-D ndarray.
values = values.reshape(1, -1)
return values
def to_dense(self):
# we request M8[ns] dtype here, even though it discards tzinfo,
# as lots of code (e.g. anything using values_from_object)
# expects that behavior.
return np.asarray(self.values, dtype=_NS_DTYPE)
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not com.is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
def _try_coerce_args(self, other):
# DatetimeArray handles this for us
return other
def diff(self, n: int, axis: int = 0) -> List["Block"]:
"""
1st discrete difference.
Parameters
----------
n : int
Number of periods to diff.
axis : int, default 0
Axis to diff upon.
Returns
-------
A list with a new TimeDeltaBlock.
Notes
-----
The arguments here are mimicking shift so they are called correctly
by apply.
"""
if axis == 0:
# Cannot currently calculate diff across multiple blocks since this
# function is invoked via apply
raise NotImplementedError
new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8
# Reshape the new_values like how algos.diff does for timedelta data
new_values = new_values.reshape(1, len(new_values))
new_values = new_values.astype("timedelta64[ns]")
return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
def concat_same_type(self, to_concat, placement=None):
# need to handle concat([tz1, tz2]) here, since DatetimeArray
# only handles cases where all the tzs are the same.
# Instead of placing the condition here, it could also go into the
# is_uniform_join_units check, but I'm not sure what is better.
if len({x.dtype for x in to_concat}) > 1:
values = concat_datetime([x.values for x in to_concat])
placement = placement or slice(0, len(values), 1)
if self.ndim > 1:
values = np.atleast_2d(values)
return ObjectBlock(values, ndim=self.ndim, placement=placement)
return super().concat_same_type(to_concat, placement)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# We support filling a DatetimeTZ with a `value` whose timezone
# is different by coercing to object.
if self._can_hold_element(value):
return super().fillna(value, limit, inplace, downcast)
# different timezones, or a non-tz
return self.astype(object).fillna(
value, limit=limit, inplace=inplace, downcast=downcast
)
def setitem(self, indexer, value):
# https://github.com/pandas-dev/pandas/issues/24020
# Need a dedicated setitem until #24020 (type promotion in setitem
# for extension arrays) is designed and implemented.
if self._can_hold_element(value) or (
isinstance(indexer, np.ndarray) and indexer.size == 0
):
return super().setitem(indexer, value)
obj_vals = self.values.astype(object)
newb = make_block(
obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim
)
return newb.setitem(indexer, value)
def equals(self, other) -> bool:
# override for significant performance improvement
if self.dtype != other.dtype or self.shape != other.shape:
return False
return (self.values.view("i8") == other.values.view("i8")).all()
def quantile(self, qs, interpolation="linear", axis=0):
naive = self.values.view("M8[ns]")
# kludge for 2D block with 1D values
naive = naive.reshape(self.shape)
blk = self.make_block(naive)
res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)
# ravel is kludge for 2D block with 1D values, assumes column-like
aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)
return self.make_block_same_class(aware, ndim=res_blk.ndim)
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
fill_value = np.timedelta64("NaT", "ns")
def __init__(self, values, placement, ndim=None):
if values.dtype != _TD_DTYPE:
values = conversion.ensure_timedelta64ns(values)
if isinstance(values, TimedeltaArray):
values = values._data
assert isinstance(values, np.ndarray), type(values)
super().__init__(values, placement=placement, ndim=ndim)
@property
def _holder(self):
return TimedeltaArray
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.timedelta64)
elif element is NaT:
return True
elif isinstance(element, (timedelta, np.timedelta64)):
return True
return is_valid_nat_for_dtype(element, self.dtype)
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as nanoseconds
if is_integer(value):
# Deprecation GH#24694, GH#19233
warnings.warn(
"Passing integers to fillna is deprecated, will "
"raise a TypeError in a future version. To retain "
"the old behavior, pass pd.Timedelta(seconds=n) "
"instead.",
FutureWarning,
stacklevel=6,
)
value = Timedelta(value, unit="s")
return super().fillna(value, **kwargs)
def _try_coerce_args(self, other):
"""
Coerce values and other to timedelta64[ns], with null values
converted to timedelta64("NaT", "ns").
Parameters
----------
other : ndarray-like or scalar
Returns
-------
base-type other
"""
if is_valid_nat_for_dtype(other, self.dtype):
other = np.timedelta64("NaT", "ns")
elif isinstance(other, (timedelta, np.timedelta64)):
other = Timedelta(other).to_timedelta64()
elif hasattr(other, "dtype") and is_timedelta64_dtype(other):
# TODO: can we get here with non-nano dtype?
pass
else:
# coercion issues
# let higher levels handle
raise TypeError(other)
return other
def should_store(self, value):
return issubclass(
value.dtype.type, np.timedelta64
) and not is_extension_array_dtype(value)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = "NaT"
rvalues[mask] = na_rep
imask = (~mask).ravel()
# FIXME:
# should use the formats.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array(
[Timedelta(val)._repr_base(format="all") for val in values.ravel()[imask]],
dtype=object,
)
return rvalues
def external_values(self, dtype=None):
return np.asarray(self.values.astype("timedelta64[ns]", copy=False))
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.bool_)
return isinstance(element, (bool, np.bool_))
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_) and not is_extension_array_dtype(
value
)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
inplace = validate_bool_kwarg(inplace, "inplace")
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super().replace(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, placement=None, ndim=2):
if issubclass(values.dtype.type, str):
values = np.array(values, dtype=object)
super().__init__(values, ndim=ndim, placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# operate column-by-column
def f(mask, val, idx):
shape = val.shape
values = soft_convert_objects(
val.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
if isinstance(values, np.ndarray):
# TODO: allow EA once reshape is supported
values = values.reshape(shape)
values = _block_shape(values, ndim=self.ndim)
return values
if self.ndim == 2:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)]
return blocks
def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])
def _can_hold_element(self, element: Any) -> bool:
return True
def _try_coerce_args(self, other):
""" provide coercion to our input arguments """
if isinstance(other, ABCDatetimeIndex):
# May get a DatetimeIndex here. Unbox it.
other = other.array
if isinstance(other, DatetimeArray):
# hit in pandas/tests/indexing/test_coercion.py
# ::TestWhereCoercion::test_where_series_datetime64[datetime64tz]
# when falling back to ObjectBlock.where
other = other.astype(object)
return other
def should_store(self, value):
return not (
issubclass(
value.dtype.type,
(np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_),
)
or
# TODO(ExtensionArray): remove is_extension_type
# when all extension arrays have been ported.
is_extension_type(value)
or is_extension_array_dtype(value)
)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and is_re(to_replace):
return self._replace_single(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=True,
convert=convert,
)
elif not (either_list or regex):
return super().replace(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(
to_rep,
v,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(
to_rep,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(
to_replace,
value,
inplace=inplace,
filter=filter,
convert=convert,
regex=regex,
)
def _replace_single(
self,
to_replace,
value,
inplace=False,
filter=None,
regex=False,
convert=True,
mask=None,
):
"""
Replace elements by the given value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
filter : list, optional
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicates the corresponding element is ignored.
Returns
-------
a new block, the result after replacing
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# to_replace is regex compilable
to_rep_re = regex and is_re_compilable(to_replace)
# regex is regex compilable
regex_re = is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError(
"only one of to_replace and regex can be regex compilable"
)
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
if is_re(to_replace):
pattern = to_replace.pattern
else:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super().replace(
to_replace, value, inplace=inplace, filter=filter, regex=regex
)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, str):
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return value if rx.search(s) is not None else s
else:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null; if it's null it gets returned
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return rx.sub(value, s)
else:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
if mask is None:
new_values[filt] = f(new_values[filt])
else:
new_values[filt][mask] = f(new_values[filt][mask])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(numeric=False)
return block
def _replace_coerce(
self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
):
"""
Replace value corresponding to the given boolean array with another
value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default True
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default False
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicates the corresponding element is ignored.
Returns
-------
A new block if there is anything to replace or the original block.
"""
if mask.any():
block = super()._replace_coerce(
to_replace=to_replace,
value=value,
inplace=inplace,
regex=regex,
convert=convert,
mask=mask,
)
if convert:
block = [b.convert(numeric=False, copy=True) for b in block]
return block
return self
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_concatenator = staticmethod(concat_categorical)
def __init__(self, values, placement, ndim=None):
# coerce to categorical if we can
values = extract_array(values)
assert isinstance(values, Categorical), type(values)
super().__init__(values, placement=placement, ndim=ndim)
@property
def _holder(self):
return Categorical
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def to_dense(self):
# Categorical.get_values returns a DatetimeIndex for datetime
# categories, so we can't simply use `np.asarray(self.values)` like
# other types.
return self.values._internal_get_values()
def to_native_types(self, slicer=None, na_rep="", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isna(values)
values = np.array(values, dtype="object")
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
Note that this CategoricalBlock._concat_same_type *may* not
return a CategoricalBlock. When the categories in `to_concat`
differ, this will return an object ndarray.
If / when we decide we don't like that behavior:
1. Change Categorical._concat_same_type to use union_categoricals
2. Delete this method.
"""
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
# not using self.make_block_same_class as values can be object dtype
return make_block(
values, placement=placement or slice(0, len(values), 1), ndim=self.ndim
)
def where(
self,
other,
cond,
align=True,
errors="raise",
try_cast: bool = False,
axis: int = 0,
) -> List["Block"]:
# TODO(CategoricalBlock.where):
# This can all be deleted in favor of ExtensionBlock.where once
# we enforce the deprecation.
object_msg = (
"Implicitly converting categorical to object-dtype ndarray. "
"One or more of the values in 'other' are not present in this "
"categorical's categories. A future version of pandas will raise "
"a ValueError when 'other' contains different categories.\n\n"
"To preserve the current behavior, add the new categories to "
"the categorical before calling 'where', or convert the "
"categorical to a different dtype."
)
try:
# Attempt to do preserve categorical dtype.
result = super().where(other, cond, align, errors, try_cast, axis)
except (TypeError, ValueError):
warnings.warn(object_msg, FutureWarning, stacklevel=6)
result = self.astype(object).where(
other, cond, align=align, errors=errors, try_cast=try_cast, axis=axis
)
return result
# -----------------------------------------------------------------
# Constructor Helpers
def get_block_type(values, dtype=None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
"""
dtype = dtype or values.dtype
vtype = dtype.type
if is_sparse(dtype):
# Need this first(ish) so that Sparse[datetime] is sparse
cls = ExtensionBlock
elif is_categorical(values):
cls = CategoricalBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetime64tz_dtype(values)
cls = DatetimeBlock
elif is_datetime64tz_dtype(values):
cls = DatetimeTZBlock
elif is_interval_dtype(dtype) or is_period_dtype(dtype):
cls = ObjectValuesExtensionBlock
elif is_extension_array_dtype(values):
cls = ExtensionBlock
elif issubclass(vtype, np.floating):
cls = FloatBlock
elif issubclass(vtype, np.timedelta64):
assert issubclass(vtype, np.integer)
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
elif issubclass(vtype, np.integer):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
else:
cls = ObjectBlock
return cls
def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=None):
# Ensure that we don't allow PandasArray / PandasDtype in internals.
# For now, blocks should be backed by ndarrays when possible.
if isinstance(values, ABCPandasArray):
values = values.to_numpy()
if ndim and ndim > 1:
values = np.atleast_2d(values)
if isinstance(dtype, PandasDtype):
dtype = dtype.numpy_dtype
if fastpath is not None:
# GH#19265 pyarrow is passing this
warnings.warn(
"fastpath argument is deprecated, will be removed in a future release.",
FutureWarning,
)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values):
# TODO: This is no longer hit internally; does it need to be retained
# for e.g. pyarrow?
values = DatetimeArray._simple_new(values, dtype=dtype)
return klass(values, ndim=ndim, placement=placement)
# -----------------------------------------------------------------
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
from pandas.core.internals import BlockManager
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim < ndim:
if shape is None:
shape = values.shape
if not is_extension_array_dtype(values):
# TODO: https://github.com/pandas-dev/pandas/issues/23023
# block.shape is incorrect for "2D" ExtensionArrays
# We can't, and don't need to, reshape.
values = values.reshape(tuple((1,) + shape))
return values
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len({b.dtype for b in blocks}) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = np.vstack([b.values for b in blocks])
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, placement=new_mgr_locs)
# no merge
return blocks
def _safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
1) If `arr` is a ExtensionArray or Index, `arr` will be
returned as is.
2) If `arr` is a Series, the `_values` attribute will
be reshaped and returned.
Parameters
----------
arr : array-like, object to be reshaped
new_shape : int or tuple of ints, the new shape
"""
if isinstance(arr, ABCSeries):
arr = arr._values
if not isinstance(arr, ABCExtensionArray):
arr = arr.reshape(new_shape)
return arr
def _putmask_smart(v, mask, n):
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
mask : np.ndarray
Applies to both sides (array like).
n : `new values` either scalar or an array like aligned with `values`
Returns
-------
values : ndarray with updated values
this *may* be a copy of the original
See Also
--------
ndarray.putmask
"""
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.repeat(n, len(mask))
# see if we are only masking values that, if put in place,
# will work in the current dtype
try:
nn = n[mask]
except TypeError:
# TypeError: only integer scalar arrays can be converted to a scalar index
pass
else:
# make sure that we have a nullable type
# if we have nulls
if not _isna_compat(v, nn[0]):
pass
elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
# only compare integers/floats
pass
elif not (is_float_dtype(v.dtype) or is_integer_dtype(v.dtype)):
# only compare integers/floats
pass
else:
# we ignore ComplexWarning here
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", np.ComplexWarning)
nn_at = nn.astype(v.dtype)
comp = nn == nn_at
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[mask] = nn_at
return nv
n = np.asarray(n)
def _putmask_preserve(nv, n):
try:
nv[mask] = n[mask]
except (IndexError, ValueError):
nv[mask] = n
return nv
# preserves dtype if possible
if v.dtype.kind == n.dtype.kind:
return _putmask_preserve(v, n)
# change the dtype if needed
dtype, _ = maybe_promote(n.dtype)
if is_extension_type(v.dtype) and is_object_dtype(dtype):
v = v._internal_get_values(dtype)
else:
v = v.astype(dtype)
return _putmask_preserve(v, n)
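# Illustrative sketch (not part of pandas itself) of how the constructor helpers
# above dispatch on dtype; the array below is a hypothetical example:
#   >>> arr = np.array([[1.0, 2.0, 3.0]])
#   >>> get_block_type(arr)                          # -> FloatBlock
#   >>> blk = make_block(arr, placement=[0], ndim=2)
#   >>> type(blk).__name__                           # -> 'FloatBlock'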
|
the-stack_106_16175
|
from unittest import TestCase, mock
from flask import Response
from api import init_app
from config import config
from application.resource.resources import Person
from api.controller import create_person, get_persons, get_person, Service
from uuid import uuid4
from json import loads
class TestController(TestCase):
def setUp(self):
self.app = init_app(config.get('test'))
self.app.test_request_context().push()
self.service = mock.Mock(spec_set=Service)
def test_get_persons(self):
person_id = uuid4()
person_mock = {
'id': str(person_id),
'name': 'jane',
'birth_date': None
}
self.service.configure_mock(**{
'fetch_all_persons_service.return_value': ({'persons': [person_mock]}, 200)
})
response = get_persons(self.service)
self.assertIsInstance(cls=Response, obj=response)
self.assertEqual(200, response.status_code)
self.assertEqual({
'persons': [{'id': str(person_id), 'name': 'jane', 'birth_date': None}]},
loads(response.data)
)
def test_get_person(self):
person_id = uuid4()
person_mock = {
'id': str(person_id),
'name': 'jane',
'birth_date': None
}
self.service.configure_mock(**{
'get_person_service.return_value': (person_mock, 200)
})
response = get_person(self.service, str(person_id))
self.assertIsInstance(cls=Response, obj=response)
self.assertEqual(200, response.status_code)
self.assertEqual({'id': str(person_id), 'name': 'jane', 'birth_date': None}, loads(response.data))
def test_create_person(self):
person_id = uuid4()
person_mock = {
'id': str(person_id),
'name': 'jane',
'birth_date': None
}
self.service.configure_mock(**{
'create_person_service.return_value': (person_mock, 201)
})
with mock.patch('api.controller.request') as request_mock:
request_mock.return_value = request = mock.Mock()
request.get_json.return_value = {}
response = create_person(self.service)
self.assertIsInstance(cls=Response, obj=response)
self.assertEqual(201, response.status_code)
self.assertEqual({'id': str(person_id), 'name': 'jane', 'birth_date': None}, loads(response.data))
|
the-stack_106_16176
|
import sima
import sima.motion
import numpy as np
import os
import pickle
import h5py
import sys
from sima import sequence
import time
import bidi_offset_correction
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import tifffile as tiff
import utils
# important for text to be detecting when importing saved figures into illustrator
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
def unpack(args):
print(args)
return sima_motion_correction(*args)
# function that takes in mean image and plots
def subplot_mean_img(axs, data_name, mean_img, clims, zoom_window=None):
im = axs.imshow(mean_img, cmap='gray')
axs.set_title(data_name, fontsize=20)
im.set_clim(vmin=clims[0], vmax=clims[1])
if zoom_window is not None:
axs.set_title(data_name + ' Zoom', fontsize=20)
axs.axis(zoom_window)
axs.invert_yaxis()
axs.axis('off')
def save_mean_imgs(save_dir, data_raw, data_mc):
# make image save directory if it doesn't exist
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# compute mean images
raw_mean = np.mean(np.squeeze(data_raw), axis=0)
mc_mean = np.mean(np.squeeze(data_mc), axis=0)
# calculate min and max array values across datasets to make color limits consistent
clims = [np.min([np.min(raw_mean), np.min(mc_mean)]),
np.max([np.max(raw_mean), np.max(mc_mean)])]
print(list(clims))
# make plot and save
fig, axs = plt.subplots(1, 2, figsize=(18, 8))
subplot_mean_img(axs[0], 'Raw', raw_mean, clims)
subplot_mean_img(axs[1], "Motion-Corrected", mc_mean, clims)
plt.savefig(os.path.join(save_dir, 'raw_mc_imgs.png'))
plt.savefig(os.path.join(save_dir, 'raw_mc_imgs.pdf'))
def save_projections(save_dir, data_in):
if not os.path.exists(save_dir):
os.mkdir(save_dir)
max_img = utils.uint8_arr(np.max(data_in, axis=0))
mean_img = utils.uint8_arr(np.mean(data_in, axis=0))
std_img = utils.uint8_arr(np.std(data_in, axis=0))
tiff.imwrite(os.path.join(save_dir, 'mean_img.tif'), mean_img)
tiff.imwrite(os.path.join(save_dir, 'max_img.tif'), max_img)
tiff.imwrite(os.path.join(save_dir, 'std_img.tif'), std_img)
def full_process(fpath, max_disp, flag_bidi_corr=True, save_displacement=False):
"""
important note: sima saves a folder (.sima) that contains a sequences pickle file. This file contains the offsets
calculated from the motion correction algorithm. Sima by itself does not save a new video/tiff dataset that is motion
corrected.
:param fpath:
:param max_disp:
:param save_displacement:
:return:
"""
print('Performing SIMA motion correction')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
fdir = os.path.split(fpath)[0]
fname = os.path.splitext(os.path.split(fpath)[1])[0]
fext = os.path.splitext(os.path.split(fpath)[1])[1]
save_dir = os.path.join(fdir, fname + '_output_images')
if fext == '.tif' or fext == '.tiff':
# sequence: object that contains record of whole dataset; data not stored into memory all at once
sequences = [sima.Sequence.create('TIFF', fpath)]
elif fext == '.h5':
sequences = [sima.Sequence.create('HDF5', fpath, 'tyx')]
else:
raise Exception('Inappropriate file extension')
if not os.path.exists(os.path.join(fdir, fname + '_mc.sima')):
start_time = time.time()
# define motion correction method
# n_processes can only handle =1! Bug in their code where >1 runs into an error
# max_displacement: The maximum allowed displacement magnitudes in pixels in [y,x]
mc_approach = sima.motion.HiddenMarkov2D(granularity='row', max_displacement=max_disp, n_processes=1, verbose=True)
# apply motion correction to data
dataset = mc_approach.correct(sequences, os.path.join(fdir, fname + '_mc.sima'),
channel_names=['GCaMP'])
# dataset dimensions are frame, plane, row(y), column (x), channel
# use sima's fill_gaps function to interpolate missing data from motion correction
# dtype can be changed to int16 since none of values are floats
data_mc = np.empty(dataset[0]._sequences[0].shape, dtype='int16')
filled_data = sequence._fill_gaps(iter(dataset[0]._sequences[0]), iter(dataset[0]._sequences[0]))
for f_idx, frame in enumerate(filled_data):
data_mc[f_idx, ...] = frame
filled_data = None # clear filled_data intermediate variable
data_mc = np.squeeze(data_mc)
if save_displacement is True:
# show motion displacements after motion correction
mcDisp_approach = sima.motion.HiddenMarkov2D(granularity='row', max_displacement=max_disp, n_processes=1,
verbose=True)
displacements = mcDisp_approach.estimate(dataset)
# save the resulting displacement file
# only useful if you want to see the values of displacement calculated by SIMA to perform the motion correction
displacement_file = open(os.path.join(fdir, fname + '_mc.sima/displacement.pkl'), "wb")
pickle.dump(displacements, displacement_file)
displacement_file.close()
# process and save np array of composite displacement
data_dims = displacements[0].shape
disp_np = np.squeeze(np.array(displacements[0]))
disp_meanpix = np.mean(disp_np, axis=1) # avg across lines (y axis)
sima_disp = np.sqrt(
np.square(disp_meanpix[:, 0]) + np.square(disp_meanpix[:, 1])) # calculate composite x + y offsets
np.save(os.path.join(fdir, 'displacements\\displacements_sima.npy'), sima_disp)
end_time = time.time()
print("Motion correction execution time: {} seconds".format(end_time - start_time))
if flag_bidi_corr:
start_time = time.time()
# perform bidirection offset correction
my_bidi_corr_obj = bidi_offset_correction.bidi_offset_correction(data_mc) # initialize data to object
my_bidi_corr_obj.compute_mean_image() # compute mean image across time
my_bidi_corr_obj.determine_bidi_offset() # calculated bidirectional offset via fft cross-correlation
data_out, bidi_offset = my_bidi_corr_obj.correct_bidi_frames() # apply bidi offset to data
end_time = time.time()
print("Bidi offset correction execution time: {} seconds".format(end_time - start_time))
else:
data_out = data_mc
bidi_offset = 0  # no bidi correction applied, so the displacement update below is a no-op
data_mc = None # clear data_mc variable
# save motion-corrected, bidi offset corrected dataset
start_time = time.time()
# sima_mc_bidi_outpath = os.path.join(fdir, fname + '_sima_mc.h5')
# h5_write_bidi_corr = h5py.File(sima_mc_bidi_outpath, 'w')
# h5_write_bidi_corr.create_dataset('imaging', data=data_out)
# h5_write_bidi_corr.close()
# save raw and MC mean images as figure
# save_mean_imgs(save_dir, np.array(sequences), data_out)
# calculate and save projection images and save as tiffs
save_projections(save_dir, data_out)
# sima by itself doesn't perform bidi corrections on the offset info, so do so here:
sequence_file = os.path.join(fdir, fname + '_mc.sima/sequences.pkl')
sequence_data = pickle.load(open(sequence_file, "rb")) # load the saved sequences pickle file
sequence_data[0]['base']['displacements'][:, 0, 1::2, 1] += bidi_offset # add bidi shift to existing offset values
with open(sequence_file, 'wb') as handle:
pickle.dump(sequence_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
end_time = time.time()
print("Data save execution time: {} seconds".format(end_time - start_time))
|
the-stack_106_16178
|
""" @ukinti_bot
Available Commands:
.unbanall
.ikuck options
Available Options: d, y, m, w, o, q, r, b """
from telethon import events
from datetime import datetime, timedelta
from telethon.tl.types import UserStatusEmpty, UserStatusLastMonth, UserStatusLastWeek, UserStatusOffline, UserStatusOnline, UserStatusRecently, ChannelParticipantsKicked, ChatBannedRights
from telethon.tl import functions, types
from time import sleep
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="unbanall ?(.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
if input_str:
logger.info("TODO: Not yet Implemented")
else:
if event.is_private:
return False
await event.edit("Searching Participant Lists.")
p = 0
async for i in borg.iter_participants(event.chat_id, filter=ChannelParticipantsKicked, aggressive=True):
rights = ChatBannedRights(
until_date=0,
view_messages=False
)
try:
await borg(functions.channels.EditBannedRequest(event.chat_id, i, rights))
except FloodWaitError as ex:
logger.warn("sleeping for {} seconds".format(ex.seconds))
sleep(ex.seconds)
except Exception as ex:
await event.edit(str(ex))
else:
p += 1
await event.edit("{}: {} unbanned".format(event.chat_id, p))
@borg.on(admin_cmd(pattern="ikuck ?(.*)"))
async def _(event):
if event.fwd_from:
return
if event.is_private:
return False
input_str = event.pattern_match.group(1)
if input_str:
chat = await event.get_chat()
if not (chat.admin_rights or chat.creator):
await event.edit("`You aren't an admin here!`")
return False
p = 0
b = 0
c = 0
d = 0
e = []
m = 0
n = 0
y = 0
w = 0
o = 0
q = 0
r = 0
await event.edit("Searching Participant Lists.")
async for i in borg.iter_participants(event.chat_id):
p = p + 1
#
# Note that it's "reversed". You must set to ``True`` the permissions
# you want to REMOVE, and leave as ``None`` those you want to KEEP.
rights = ChatBannedRights(
until_date=None,
view_messages=True
)
if isinstance(i.status, UserStatusEmpty):
y = y + 1
if "y" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
break
else:
c = c + 1
if isinstance(i.status, UserStatusLastMonth):
m = m + 1
if "m" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
break
else:
c = c + 1
if isinstance(i.status, UserStatusLastWeek):
w = w + 1
if "w" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
break
else:
c = c + 1
if isinstance(i.status, UserStatusOffline):
o = o + 1
if "o" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
break
else:
c = c + 1
if isinstance(i.status, UserStatusOnline):
q = q + 1
if "q" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
break
else:
c = c + 1
if isinstance(i.status, UserStatusRecently):
r = r + 1
if "r" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
break
else:
c = c + 1
if i.bot:
b = b + 1
if "b" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
break
else:
c = c + 1
elif i.deleted:
d = d + 1
if "d" in input_str:
status, err = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit("I need admin privileges to perform this action!")
e.append(str(err))
else:
c = c + 1
elif i.status is None:
n = n + 1
if input_str:
required_string = """Kicked {} / {} users
Deleted Accounts: {}
UserStatusEmpty: {}
UserStatusLastMonth: {}
UserStatusLastWeek: {}
UserStatusOffline: {}
UserStatusOnline: {}
UserStatusRecently: {}
Bots: {}
None: {}"""
await event.edit(required_string.format(c, p, d, y, m, w, o, q, r, b, n))
await asyncio.sleep(5)
await event.edit("""Total: {} users
Deleted Accounts: {}
UserStatusEmpty: {}
UserStatusLastMonth: {}
UserStatusLastWeek: {}
UserStatusOffline: {}
UserStatusOnline: {}
UserStatusRecently: {}
Bots: {}
None: {}""".format(p, d, y, m, w, o, q, r, b, n))
async def ban_user(chat_id, i, rights):
try:
await borg(functions.channels.EditBannedRequest(chat_id, i, rights))
return True, None
except Exception as exc:
return False, str(exc)
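# Illustrative usage (not part of the plugin code): sending ".ikuck d b" in a group
# would kick deleted accounts and bots, while ".unbanall" lifts all existing bans.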
|
the-stack_106_16179
|
import numpy as np
import matplotlib.pyplot as plt
# Make a plot of cosine
thetas = np.linspace(0, 8, 32)
cosines = []
for theta in thetas:
cosines.append(np.cos(theta))
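# Note: np.cos is vectorized, so the loop above is equivalent to cosines = np.cos(thetas)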
# Plot the data
fig, ax = plt.subplots()
ax.plot(thetas, cosines, 'r.', label="Cosine")
ax.set_title("Cosine")
plt.show()
|
the-stack_106_16180
|
from functools import cache
from itertools import count
from aoc_utils import Vec, dirs4
from aocd import get_data
@cache
def find(n):
return next(p for p, c in mapp.items() if c == n)
@cache
def distances_from_point(startpos):
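# BFS flood fill from startpos over open cells (anything not '#'), recording for
# each labelled waypoint the step count at which it is first reached: {label: distance}.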
boundary = {startpos}
visited = boundary.copy()
res = {}
for i in count(1):
nb = set()
for b in boundary:
for d in dirs4:
if (b + d) not in visited and mapp.get(b + d, '#') != '#':
nb.add(b + d)
visited.add(b + d)
if mapp[b + d] != '.':
res[mapp[b + d]] = i
boundary = nb
if not boundary:
return res
@cache
def shortest_distance(pos, collected):
try:
return min(d + shortest_distance(p, collected.union({p}))
for p, d in distances_from_point(find(pos)).items() if p not in collected and p != '0')
except ValueError:
return 0
def part1():
return shortest_distance('0', frozenset())
@cache
def shortest_distance_return(pos, collected):
try:
return min(d + shortest_distance_return(p, collected.union({p}))
for p, d in distances_from_point(find(pos)).items() if p not in collected and p != '0')
except ValueError:
return distances_from_point(find(pos))['0']
def part2():
return shortest_distance_return('0', frozenset())
if __name__ == '__main__':
data = get_data(day=24, year=2016)
mapp = {Vec(x, y): c for y, l in enumerate(data.splitlines()) for x, c in enumerate(l)}
print(part1())
print(part2())
|
the-stack_106_16182
|
import numbers
import numpy as np
import pytest
import ubermagutil.typesystem as ts
@ts.typesystem(
t1=ts.Typed(expected_type=int),
t2=ts.Typed(expected_type=numbers.Real),
t3=ts.Typed(expected_type=str, allow_none=True),
t4c=ts.Typed(expected_type=list, const=True),
s1=ts.Scalar(),
s2=ts.Scalar(expected_type=float),
s3=ts.Scalar(positive=True),
s4=ts.Scalar(unsigned=True, otherwise=str),
s5=ts.Scalar(expected_type=int, positive=True),
s6=ts.Scalar(expected_type=numbers.Real, positive=False),
s7c=ts.Scalar(expected_type=float, unsigned=True, const=True),
v1=ts.Vector(),
v2=ts.Vector(size=5),
v3=ts.Vector(unsigned=True),
v4=ts.Vector(positive=True, otherwise=int),
v5=ts.Vector(size=1, positive=False),
v6=ts.Vector(component_type=int),
v7=ts.Vector(size=3, component_type=float),
v8c=ts.Vector(size=2, component_type=int, const=True),
n1=ts.Name(),
n2=ts.Name(allowed_char=":"),
n3c=ts.Name(const=True),
d1=ts.Dictionary(
key_descriptor=ts.Name(), value_descriptor=ts.Scalar(), allow_empty=True
),
d2=ts.Dictionary(
key_descriptor=ts.Scalar(),
value_descriptor=ts.Vector(size=3),
otherwise=str,
allow_empty=False,
),
d3=ts.Dictionary(key_descriptor=ts.Name(), value_descriptor=ts.Scalar()),
d4c=ts.Dictionary(
key_descriptor=ts.Name(),
value_descriptor=ts.Typed(expected_type=str),
const=True,
),
ss1=ts.Subset(sample_set=set([1, 2, "5"]), unpack=False),
ss2=ts.Subset(sample_set=set([-1, 5]), unpack=False),
ss3=ts.Subset(sample_set="xyz", unpack=True, otherwise=float),
ss4c=ts.Subset(sample_set="abc", unpack=True, const=True),
p1=ts.Parameter(),
p2=ts.Parameter(descriptor=ts.Scalar(expected_type=int)),
p3=ts.Parameter(descriptor=ts.Scalar(positive=True)),
p4=ts.Parameter(descriptor=ts.Vector(size=3), otherwise=float),
p5c=ts.Parameter(descriptor=ts.Scalar(), const=True),
)
class DecoratedClass:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
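# Illustrative usage sketch (not part of the test suite; hypothetical REPL session):
#   >>> dc = DecoratedClass(s3=1.5)    # s3 is Scalar(positive=True)
#   >>> dc.s3 = -1                     # raises ValueError
#   >>> dc.v7 = (1.0, 2.0, 3.0)        # ok: size-3 vector of floats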
def test_typed():
dc = DecoratedClass()
# Valid sets
dc.t1 = -999
dc.t2 = 3.1e6
dc.t3 = ""
dc.t3 = None
dc.t4c = []
# Exceptions
with pytest.raises(TypeError):
dc.t1 = 0.1
with pytest.raises(TypeError):
dc.t2 = {}
with pytest.raises(TypeError):
dc.t3 = 5
with pytest.raises(AttributeError):
dc.t4c = [1, 2] # const attribute
with pytest.raises(AttributeError):
del dc.t1 # delete attribute
# Is value affected?
assert dc.t3 is None
def test_scalar():
dc = DecoratedClass(s2=3.3)
# Valid sets
dc.s1 = -1
dc.s2 = -5.2
dc.s3 = 1e-11
dc.s4 = 101
dc.s4 = "ubermag"
dc.s5 = 20
dc.s6 = -500
dc.s7c = 3.14
# Exceptions
with pytest.raises(TypeError):
dc.s1 = []
with pytest.raises(TypeError):
dc.s2 = 5000
with pytest.raises(ValueError):
dc.s3 = 0
with pytest.raises(ValueError):
dc.s4 = -1.2
with pytest.raises(TypeError):
dc.s5 = -0.1 # Caught when checking type
with pytest.raises(TypeError):
dc.s6 = []
with pytest.raises(AttributeError):
dc.s7c = 1.2 # const attribute
with pytest.raises(AttributeError):
del dc.s2 # delete attribute
# Is value affected?
assert dc.s4 == "ubermag"
assert dc.s5 == 20
def test_vector():
dc = DecoratedClass()
# Valid sets
dc.v1 = (1, 5e-9)
dc.v2 = np.array([1, 2, 3, 0.1, -1e-9])
dc.v3 = [1, 0]
dc.v4 = np.array([5, 1e5])
dc.v4 = 15 # otherwise int
dc.v5 = (-5,)
dc.v6 = [5, 9]
dc.v7 = (11.1, np.pi, 0.0)
dc.v8c = [1, 9]
# Exceptions
with pytest.raises(TypeError):
dc.v1 = {}
with pytest.raises(ValueError):
dc.v2 = (20, 11, -13)
with pytest.raises(ValueError):
dc.v3 = [-1, 5]
with pytest.raises(ValueError):
dc.v4 = (9, -3)
with pytest.raises(ValueError):
dc.v5 = []
with pytest.raises(TypeError):
dc.v6 = ["a", 1, 3]
with pytest.raises(TypeError):
dc.v7 = [1.1, 2, np.pi]
with pytest.raises(AttributeError):
dc.v8c = [1, 55] # const attribute
with pytest.raises(AttributeError):
del dc.v2 # delete attribute
# Is value affected?
assert dc.v5 == (-5,)
def test_name():
dc = DecoratedClass(n1="var_name")
# Valid sets
dc.n1 = "a1"
dc.n1 = "some_name"
dc.n1 = "a1a"
dc.n2 = "r1:r2"
dc.n2 = "r1r2"
dc.n3c = "var_name123_2"
# Exceptions
with pytest.raises(TypeError):
dc.n1 = 5
with pytest.raises(ValueError):
dc.n1 = "1a"
with pytest.raises(ValueError):
dc.n1 = "-a"
with pytest.raises(ValueError):
dc.n1 = "var name"
with pytest.raises(ValueError):
dc.n1 = "var-name"
with pytest.raises(ValueError):
dc.n2 = "r1-r2"
with pytest.raises(AttributeError):
dc.n3c = "ubermag" # const attribute
with pytest.raises(AttributeError):
del dc.n3c # delete attribute
# Is value affected?
assert dc.n1 == "a1a"
def test_dictionary():
dc = DecoratedClass()
# Valid sets
dc.d1 = {"a": 15, "b": -51} # Valid set
dc.d1 = {}
dc.d2 = {1: (1, 2, -3), -11: (0, 0, 0)}
dc.d2 = "ubermag"
dc.d3 = {"a": -1e-9, "b": 1e6}
dc.d4c = {"r1": "Southampton", "r2": "Hamburg"}
# Exceptions
with pytest.raises(TypeError):
dc.d1 = "a"
with pytest.raises(TypeError):
dc.d1 = {"a": 15, "b": [1, 2, 3]}
with pytest.raises(ValueError):
dc.d2 = {} # empty dictionary
with pytest.raises(ValueError):
dc.d3 = {} # empty dictionary
with pytest.raises(AttributeError):
dc.d4c = {"r1": "Hamburg", "r2": "London"} # const attribute
with pytest.raises(AttributeError):
del dc.d2 # delete attribute
# Is value affected?
assert dc.d2 == "ubermag"
def test_parameter():
dc = DecoratedClass()
# Valid sets
dc.p2 = 1
dc.p2 = {"r1": 2, "r2": -15}
dc.p3 = np.pi
dc.p4 = np.pi # otherwise float
dc.p4 = (1, 2, 3)
dc.p5c = {"a": 1.1, "b": 2e-3, "a:b": 3e-6}
# Exceptions
with pytest.raises(AttributeError):
dc.p1 = -1.2 # descriptor not passed
with pytest.raises(ValueError):
dc.p2 = {}
with pytest.raises(TypeError):
dc.p3 = {1: 1, "b": 5}
with pytest.raises(ValueError):
dc.p4 = {"string with spaces": (1, 2, 3)}
with pytest.raises(AttributeError):
dc.p5c = {"a": 1.2, "b": 2.2e-3} # const attribute
with pytest.raises(AttributeError):
del dc.p4
# Is value affected?
assert dc.p2 == {"r1": 2, "r2": -15}
assert dc.p4 == (1, 2, 3)
def test_subset():
dc = DecoratedClass()
# Valid sets
dc.ss1 = "5"
dc.ss2 = -1
dc.ss3 = "zzxy"
dc.ss3 = 3.14
dc.ss4c = "cbacbaaab"
# Exceptions
with pytest.raises(ValueError):
dc.ss1 = -1
with pytest.raises(ValueError):
dc.ss2 = 6
with pytest.raises(ValueError):
dc.ss3 = "k"
with pytest.raises(AttributeError):
dc.ss4c = "a" # const attribute
with pytest.raises(AttributeError):
del dc.ss3 # delete attribute
# Is value affected?
assert dc.ss1 == "5"
assert dc.ss2 == -1
assert dc.ss3 == 3.14
assert dc.ss4c == set("abc")
|
the-stack_106_16183
|
from tests import ScraperTest
from recipe_scrapers.thewoksoflife import Thewoksoflife
class TestThewoksoflifeScraper(ScraperTest):
scraper_class = Thewoksoflife
def test_host(self):
self.assertEqual(
'thewoksoflife.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'The Perfect Whole Wheat Mantou Recipe'
)
def test_yields(self):
self.assertEqual("12 serving(s)", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
'https://thewoksoflife.com/wp-content/uploads/2018/01/whole-wheat-mantou-9-1.jpg',
self.harvester_class.image()
)
def test_ingredients(self):
self.assertCountEqual([
"1 \u2154 cups warm milk ((400 ml))",
"1 teaspoon active dry yeast ((3 grams))",
"1 tablespoon sugar ((12 grams))",
"2 \u00be cups all-purpose flour ((400 grams))",
"1\u00bc to 1\u00bd cups whole wheat flour ((about 170-200 grams; how much you\u2019ll need is dependent on the humidity in your kitchen))"
], self.harvester_class.ingredients()
)
def test_instructions(self):
self.assertTrue(self.harvester_class.instructions().startswith(
'Heat the milk until warm to the touch (not hot). Then '))
self.assertEqual(len(self.harvester_class.instructions()), 1786)
|
the-stack_106_16184
|
# -*- coding: utf-8 -*-
import os, re
from configurations import Configuration, importer, values
from froide.settings import ThemeBase, Base # noqa
# importer.install(check_options=True)
class OpenGovHK(ThemeBase, Base):
FROIDE_THEME = 'opengovhk.theme'
LANGUAGES = (
('en', 'English'),
('zh-hk', '繁體中文'),
('zh-cn', '简体中文'),
)
ROOT_URLCONF = 'froide.urls'
SITE_NAME = "OpenGov.HK"
SITE_EMAIL = "[email protected]"
SITE_URL = 'http://localhost:8000'
SECRET_URLS = {
"admin": "admin",
}
@property
def LOCALE_PATHS(self):
return list(super(OpenGovHK, self).LOCALE_PATHS.default) + [
os.path.abspath(
os.path.join(os.path.dirname(__file__), "locale")
)
]
@property
def INSTALLED_APPS(self):
installed = super(OpenGovHK, self).INSTALLED_APPS
# installed += [
# # 'foiidea',
# # 'celery_haystack',
# # 'djcelery_email',
# # 'djangosecure',
# # 'django.contrib.redirects',
# # 'django.contrib.flatpages'
# ]
return installed
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
}
}
class Dev(OpenGovHK):
rec = re.compile
FROIDE_CONFIG = dict(
create_new_publicbody=True,
publicbody_empty=True,
user_can_hide_web=True,
public_body_officials_public=True,
public_body_officials_email_public=False,
request_public_after_due_days=14,
payment_possible=True,
currency="Euro",
default_law=1,
search_engine_query="http://www.google.de/search?as_q=%(query)s&as_epq=&as_oq=&as_eq=&hl=en&lr=&cr=&as_ft=i&as_filetype=&as_qdr=all&as_occt=any&as_dt=i&as_sitesearch=%(domain)s&as_rights=&safe=images",
greetings=[rec(u"Dear (?:Mr\.?|Ms\.? .*?)")],
closings=[rec(u"Sincerely yours,?")],
public_body_boosts={},
dryrun=True,
dryrun_domain="opengov.hk",
allow_pseudonym=False,
doc_conversion_binary=None, # replace with libreoffice instance
doc_conversion_call_func=None # see settings_test for use
)
class Production(OpenGovHK):
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['opengov.hk']
ALLOWED_HOSTS = ['*'] # temp flexibility
# PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# STATICFILES_DIRS = (
# os.path.join(PROJECT_PATH, 'static'),
# )
# STATIC_URL = '/static/'
# PROJECT_ROOT = os.path.abspath(os.path.dirname(__name__))
# STATIC_ROOT = os.path.abspath(os.path.join(PROJECT_ROOT, 'static'))
CELERY_ALWAYS_EAGER = False
COMPRESS_ENABLED = False
COMPRESS_OFFLINE = False
rec = re.compile
FROIDE_CONFIG = dict(
create_new_publicbody=True,
publicbody_empty=True,
user_can_hide_web=True,
public_body_officials_public=True,
public_body_officials_email_public=False,
request_public_after_due_days=14,
payment_possible=True,
currency="Euro",
default_law=1,
search_engine_query="http://www.google.de/search?as_q=%(query)s&as_epq=&as_oq=&as_eq=&hl=en&lr=&cr=&as_ft=i&as_filetype=&as_qdr=all&as_occt=any&as_dt=i&as_sitesearch=%(domain)s&as_rights=&safe=images",
greetings=[rec(u"Dear (?:Mr\.?|Ms\.? .*?)")],
closings=[rec(u"Sincerely yours,?")],
public_body_boosts={},
dryrun=True,
dryrun_domain="opengov.hk",
allow_pseudonym=False,
doc_conversion_binary=None, # replace with libreoffice instance
doc_conversion_call_func=None # see settings_test for use
)
try:
from .local_settings import * # noqa
except ImportError:
pass
|
the-stack_106_16189
|
import time
import os
import MySQLdb
from flask import Flask, request, g, Response
from utils import (
Json, build_image_info, build_range_query, build_keyword_query,
build_search_query_from_dic, set_params
)
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
app = Flask(__name__)
def connect_db():
kwargs = dict(
user=os.environ['DB_USER'],
passwd=os.environ['DB_PASSWD'],
host=os.environ['DB_HOST'],
port=int(os.environ['DB_PORT']),
db=os.environ['DB_NAME'],
use_unicode=True,
charset='utf8mb4',
)
if app.testing:
kwargs['db'] = os.environ['TEST_DB_NAME']
return MySQLdb.connect(**kwargs)
def db():
if not hasattr(g, 'db_conn'):
g.db_conn = connect_db()
return g.db_conn.cursor(MySQLdb.cursors.DictCursor)
@app.route('/ping')
def ping():
return Response('pong', mimetype='text/plain')
@app.route('/image/<int:image_id>')
def get_image(image_id):
image_id = int(image_id)
query = '''
SELECT
i.id AS id
, i.filename AS filename
, i.created_at AS created_at
, ii.id AS image_info_id
, ii.comment AS comment
, ii.source AS source
FROM images i
LEFT JOIN image_info ii
ON i.id = ii.image_id
WHERE i.id = %s
'''
app.logger.debug('Query: %s (image_id=%s)', query, image_id)
c = db()
c.execute(query, (image_id,))
result = c.fetchone()
if result is None:
return Json({'ok': False, 'message': 'image_not_found'}, 404)
return Json({'ok': True, 'data': build_image_info(result)})
@app.route('/images')
@set_params
def get_images(count, max_id, since_id):
_reversed = request.args.get('reversed', '0') == '1'
range_query = build_range_query(max_id, since_id)
query = f'''
SELECT
i.id AS id
, i.filename AS filename
, i.created_at AS created_at
, ii.id AS image_info_id
, ii.comment AS comment
, ii.source AS source
FROM images i
LEFT JOIN image_info ii
ON i.id = ii.image_id
{('WHERE ' + range_query) if range_query else ''}
ORDER BY id {'ASC' if _reversed else 'DESC'} LIMIT %s
'''
app.logger.debug('Query: %s (count=%s)', query, count)
t_s = time.time()
c = db()
c.execute(query, (count,))
result = c.fetchall()
if result is None:
return Json({'ok': False, 'message': 'invalid parameters'}, 400)
query = f'''
SELECT
COUNT(*) AS cnt
FROM images
'''
app.logger.debug(f'Query: {query}')
c.execute(query)
count = c.fetchone()['cnt']
t_e = time.time()
return Json({
'ok': True,
'elapsed_time': t_e - t_s,
'whole_count': count,
'data': [build_image_info(info) for info in result]
})
@app.route('/images/search')
@set_params
def search_images(count, max_id, since_id):
_reversed = request.args.get('reversed', '0') == '1'
keyword = request.args.get("keyword", "").strip()
and_keyword = request.args.get("all", "").strip()
or_keyword = request.args.get("any", "").strip()
not_keyword = request.args.get("ex", "").strip()
if keyword:
keyword_query = build_keyword_query(keyword)
elif and_keyword or or_keyword or not_keyword:
query_dic = {
"and": and_keyword.split(),
"or": or_keyword.split(),
"ex": not_keyword.split(),
}
keyword_query = build_search_query_from_dic(query_dic)
else:
return Json({
'ok': False,
'message': 'you must specify a keyword'
}, 400)
range_query = build_range_query(max_id, since_id)
query = f'''
SELECT
i.id AS id
, i.filename AS filename
, i.created_at AS created_at
, ii.id AS image_info_id
, ii.comment AS comment
, ii.source AS source
FROM images i
LEFT JOIN image_info ii
ON i.id = ii.image_id
WHERE
{keyword_query}
{('AND ' + range_query) if range_query else ''}
ORDER BY id {'ASC' if _reversed else 'DESC'} LIMIT %s
'''
app.logger.debug(f'Query: {query}')
c = db()
t_s = time.time()
c.execute(query, (count,))
result = c.fetchall()
if result is None:
return Json({'ok': False, 'message': 'invalid parameters'}, 400)
query = f'''
SELECT
COUNT(*) AS cnt
FROM images i
LEFT JOIN image_info ii
ON
i.id = ii.image_id
WHERE
{keyword_query}
'''
app.logger.debug(f'Query: {query}')
c.execute(query)
count = c.fetchone()['cnt']
t_e = time.time()
return Json({
'ok': True,
'elapsed_time': t_e - t_s,
'whole_count': count,
'data': [build_image_info(info) for info in result]
})
if __name__ == '__main__':
app.run()
|
the-stack_106_16191
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import distutils.command.build
import distutils.util
import fnmatch
import glob
import io
import os
import sys
from pathlib import Path
import setuptools
from setuptools.command.build_py import build_py as build_py_orig
from setuptools.dist import Distribution
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
"--package-dir",
help="Source directory of package files.",
default="bazel-bin/package.runfiles/CompilerGym",
)
argparser.add_argument(
"--get-wheel-filename",
action="store_true",
help="Print only output filename without building it.",
)
argparser.add_argument(
"--build-dir",
help="Path to build dir. This is where this script copies files from the source before making the wheel package.",
default="build",
)
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
sys.path.insert(0, str((Path(args.package_dir) / "compiler_gym").absolute()))
import config # noqa: E402
with open("VERSION") as f:
version = f.read().strip()
with open("README.md") as f:
# Force UTF-8 file encoding to support non-ascii characters in the readme.
with io.open("README.md", encoding="utf-8") as f:
long_description = f.read()
with open("compiler_gym/requirements.txt") as f:
requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
# When building a bdist_wheel we need to set the appropriate tags: this package
# includes compiled binaries, and does not include compiled python extensions.
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
class bdist_wheel(_bdist_wheel):
def finalize_options(self):
_bdist_wheel.finalize_options(self)
self.root_is_pure = False
def get_tag(self):
python, abi, plat = _bdist_wheel.get_tag(self)
python, abi = "py3", "none"
return python, abi, plat
except ImportError:
bdist_wheel = None
class build(distutils.command.build.build):
def initialize_options(self):
distutils.command.build.build.initialize_options(self)
self.build_base = args.build_dir
# Add files that should be excluded from the package.
# The argument exclude_package_data of setuptools.setup(...)
# does not work with py files. They have to be excluded here.
excluded = [
str(Path(args.package_dir) / "compiler_gym/envs/llvm/make_specs.py"),
str(Path(args.package_dir) / "compiler_gym/bin/random_eval.py"),
]
class build_py(build_py_orig):
def find_package_modules(self, package, package_dir):
modules = super().find_package_modules(package, package_dir)
res = [
(pkg, mod, file)
for (pkg, mod, file) in modules
if not any(fnmatch.fnmatchcase(file, pat=pattern) for pattern in excluded)
]
return res
def wheel_filename(**kwargs):
# create a fake distribution from arguments
dist = Distribution(attrs=kwargs)
# finalize bdist_wheel command
bdist_wheel_cmd = dist.get_command_obj("bdist_wheel")
bdist_wheel_cmd.ensure_finalized()
# assemble wheel file name
distname = bdist_wheel_cmd.wheel_dist_name
tag = "-".join(bdist_wheel_cmd.get_tag())
return f"{distname}-{tag}.whl"
setup_kwargs = {
"name": "compiler_gym",
"version": version,
"description": "Reinforcement learning environments for compiler research",
"author": "Facebook AI Research",
"long_description": long_description,
"long_description_content_type": "text/markdown",
"url": "https://github.com/facebookresearch/CompilerGym",
"license": "MIT",
"packages": [
"compiler_gym.bin",
"compiler_gym.datasets",
"compiler_gym.envs.gcc.datasets",
"compiler_gym.envs.gcc.service",
"compiler_gym.envs.gcc",
"compiler_gym.envs.loop_tool",
"compiler_gym.envs.loop_tool.service",
"compiler_gym.envs",
"compiler_gym.envs",
"compiler_gym.errors",
"compiler_gym.leaderboard",
"compiler_gym.service.proto",
"compiler_gym.service.runtime",
"compiler_gym.service",
"compiler_gym.spaces",
"compiler_gym.third_party.autophase",
"compiler_gym.third_party.gccinvocation",
"compiler_gym.third_party.inst2vec",
"compiler_gym.third_party",
"compiler_gym.util.flags",
"compiler_gym.util",
"compiler_gym.views",
"compiler_gym.wrappers",
"compiler_gym",
],
"package_dir": {
"": args.package_dir,
},
"package_data": {
"compiler_gym": [
"envs/gcc/service/compiler_gym-gcc-service",
"envs/loop_tool/service/compiler_gym-loop_tool-service",
"third_party/csmith/csmith/bin/csmith",
"third_party/csmith/csmith/include/csmith-2.3.0/*.h",
"third_party/inst2vec/*.pickle",
]
},
"install_requires": requirements,
"include_package_data": True,
"python_requires": ">=3.6",
"classifiers": [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Compilers",
],
"cmdclass": {"bdist_wheel": bdist_wheel, "build": build, "build_py": build_py},
"platforms": [distutils.util.get_platform()],
"zip_safe": False,
}
if config.enable_llvm_env:
setup_kwargs["packages"].extend(
[
"compiler_gym.envs.llvm.datasets",
"compiler_gym.envs.llvm.service",
"compiler_gym.envs.llvm",
"compiler_gym.third_party.llvm",
"compiler_gym.third_party.autophase",
]
)
setup_kwargs["package_data"]["compiler_gym"].extend(
[
"envs/llvm/service/compiler_gym-llvm-service",
"envs/llvm/service/compute_observation",
"envs/llvm/service/libLLVMPolly.so",
"third_party/cbench/benchmarks.txt",
"third_party/cbench/cbench-v*/crc32.bc",
]
)
if config.enable_mlir_env:
setup_kwargs["packages"].extend(
[
"compiler_gym.envs.mlir.datasets",
"compiler_gym.envs.mlir.service",
"compiler_gym.envs.mlir",
]
)
setup_kwargs["package_data"]["compiler_gym"].extend(
["envs/mlir/service/compiler_gym-mlir-service"]
)
original_cwd = os.getcwd()
try:
os.chdir(os.path.join(args.package_dir, "compiler_gym"))
setup_kwargs["package_data"]["compiler_gym"].extend(
glob.glob("envs/mlir/service/llvm/**", recursive=True)
)
setup_kwargs["package_data"]["compiler_gym"].extend(
glob.glob("envs/mlir/service/google_benchmark/**", recursive=True)
)
finally:
os.chdir(original_cwd)
if args.get_wheel_filename:
# Instead of generating the wheel file,
# print its filename.
file_name = wheel_filename(**setup_kwargs)
sys.stdout.write(file_name)
else:
setuptools.setup(**setup_kwargs)
|
the-stack_106_16193
|
from typing import List, Dict, Optional, Tuple
import math
MIN_RATED_PCT = 50
# Based on the given judgements score the ordered list of document IDs in document_ids
def ndcg(judgements: Dict[str, float], document_ids: List[str], at_n=10) -> Optional[float]:
judgements_sorted: List[Tuple[str, float]] = sorted(judgements.items(), key=lambda item: item[1], reverse=True)
# For each document on the list, get its judgment (which may be None if the document has not been judged so far)
scored_docs: List[Tuple[str, Optional[float]]] = [(d_id, judgements.get(d_id)) for d_id in document_ids][:at_n]
if len(scored_docs) == 0:
return None
# only calculate the ndcg for lists that have at least MIN_RATED_PCT percent of documents rated
documents_with_score = [d for d in scored_docs if d[1] is not None]
rated_pct = 100 * len(documents_with_score) / len(scored_docs)
if rated_pct >= MIN_RATED_PCT:
rating_average = sum([d[1] for d in documents_with_score]) / len(documents_with_score)
# fill the unrated documents with the average, this likely over-rates them but that's fine for now
scored_docs_adjusted = [d if d[1] is not None else (d[0], rating_average) for d in scored_docs]
return _ndcg(judgements_sorted, scored_docs_adjusted, at_n)
else:
return None
def _ndcg(judged_items_sorted: List[Tuple[str, float]], list_sorted: List[Tuple[str, float]], at_n=10) -> float:
judged_scores: List[float] = [item_and_score[1] for item_and_score in judged_items_sorted]
list_scores: List[float] = [item_and_score[1] for item_and_score in list_sorted]
dcg = 0
idcg = 0
for i in range(min(len(judged_scores), len(list_scores), at_n)):
denom = math.log(i + 2, 2)
dcg += list_scores[i] / denom
idcg += judged_scores[i] / denom
if idcg > 0:
ndcg = min(dcg, idcg) / idcg
else:
ndcg = None
return ndcg
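# Hypothetical usage sketch (not part of the original module). The judgement
# values and document ids below are made up purely for illustration: 'doc_e'
# is unrated, so it is filled with the average judgement before scoring.
if __name__ == '__main__':
    example_judgements = {'doc_a': 3.0, 'doc_b': 2.0, 'doc_d': 1.0, 'doc_c': 0.0}
    example_ranking = ['doc_b', 'doc_a', 'doc_e', 'doc_d', 'doc_c']
    print(ndcg(example_judgements, example_ranking, at_n=5))  # a score in [0, 1]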
|
the-stack_106_16195
|
"""Test init of Brother integration."""
from homeassistant.components.brother.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_HOST, CONF_TYPE, STATE_UNAVAILABLE
from tests.async_mock import patch
from tests.common import MockConfigEntry
from tests.components.brother import init_integration
async def test_async_setup_entry(hass):
"""Test a successful setup entry."""
await init_integration(hass)
state = hass.states.get("sensor.hl_l2340dw_status")
assert state is not None
assert state.state != STATE_UNAVAILABLE
assert state.state == "waiting"
async def test_config_not_ready(hass):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(
domain=DOMAIN,
title="HL-L2340DW 0123456789",
unique_id="0123456789",
data={CONF_HOST: "localhost", CONF_TYPE: "laser"},
)
with patch("brother.Brother._get_data", side_effect=ConnectionError()):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass):
"""Test successful unload of entry."""
entry = await init_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
|
the-stack_106_16196
|
import os, json, argparse, sys, datetime, time, csv
import urllib.request as ureq
from curses import wrapper
"""
bzcat latest-all.json.bz2 |wikibase-dump-filter --simplify --claim 'P356' |jq '[.id,.claims.P356]' -c >DOI.ndjson
or use wdumper
"""
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--item', action='store', required=True)
parser.add_argument('-d', '--doi', action='store', required=True)
# Read arguments from the command line
args = parser.parse_args()
# Extract the required arguments
DOI = args.doi
item = args.item
script = os.path.basename(sys.argv[0])[:-3]
crossref = 'https://api.crossref.org/works/{}'.format(DOI)
print('contacting CrossRef', file=sys.stderr)
cref = None
with ureq.urlopen(crossref) as f:
cref = f.read().decode('utf-8')
if cref is None:
    raise RuntimeError('empty response from CrossRef for DOI {}'.format(DOI))
with open('{}.json'.format(script), 'w') as file:
    file.write(cref)
jol = json.loads(cref)
reflist = jol.get('message').get('reference')
print('references received: {}'.format(len(reflist)), file=sys.stderr)
dois = {}
if False:
print('reading dump data...', file=sys.stderr)
file = open('DOI.ndjson')
for line in file.readlines():
arr = json.loads(line.strip())
qit = arr[0]
doilist = arr[1]
if len(doilist) == 0:
continue
for doi in doilist:
doi = doi.upper()
d = dois.get(doi)
if d is not None:
#print('duplicate DOI {}'.format(doi), file=sys.stderr)
continue
dois[doi] = qit
P2860claims = []
nodoi = 0
missing = set()
for ref in reflist:
doi = ref.get('DOI')
if doi is None:
nodoi = nodoi + 1
continue
doi = doi.upper()
if doi.endswith('.'):
    doi = doi[:-1]
d = dois.get(doi)
if d is None:
missing.add(doi)
continue
c = { "value": d,
"references": { "P248": "Q5188229", "P854": crossref,
"P813": datetime.date.today().isoformat() } }
P2860claims.append(c)
if nodoi > 0:
print('{} references without DOI received'.format(nodoi), file=sys.stderr)
inp = ''
while len(missing) > 0 and inp != 'y':
print('querying {} missing DOIs'.format(len(missing)), file=sys.stderr)
query="""
SELECT DISTINCT ?item ?doi
WHERE
{{
VALUES ?art {{ wd:Q580922 wd:Q13442814 }}
VALUES ?doi {{ '{}' }}
?item wdt:P31 ?art.
?item wdt:P356 ?doi.
}}
""".format("' '".join(missing))
f = open('{}-1.rq'.format(script), 'w')
f.write(query)
f.close()
print('performing query... ', file=sys.stderr)
ret = os.popen('wd sparql {}-1.rq >{}-1.json'.format(script, script))
time.sleep(5)
f = open('{}-1.json'.format(script))
s = ''
s = f.read()
f.close()
jol = []
try:
jol = json.loads(s)
except json.JSONDecodeError:
pass
for d in jol:
c = { "value": d.get('item'),
"references": { "P248": "Q5188229", "P854": crossref,
"P813": datetime.date.today().isoformat() } }
P2860claims.append(c)
missing.remove(d.get('doi'))
for doi in missing:
print('{}'.format(doi))
inp = input("Press y to continue...")
j = {"id": item,
"claims": { "P2860": P2860claims } }
f = open('{}.out'.format(script), 'w')
f.write(json.dumps(j))
f.close()
|
the-stack_106_16197
|
import sys
import dlib
detector = dlib.simple_object_detector("detector.svm")
win = dlib.image_window()
for f in sys.argv[1:]:
img = dlib.load_rgb_image(f)
dets = detector(img)
win.clear_overlay()
win.set_image(img)
win.add_overlay(dets)
input("hit enter to continue")
|
the-stack_106_16198
|
from __future__ import division, print_function
__all__ = ["Signal", "LikelihoodError"]
from .global_imports import *
from . import global_imports
from .Data import Data
from .Instrument import Instrument, ChannelError
from .Background import Background
from .Interstellar import Interstellar
from .tools.energy_integrator import energy_integrator
from .tools.energy_interpolator import energy_interpolator
from .tools.phase_integrator import phase_integrator
from abc import abstractmethod
from .Parameter import Parameter
from .ParameterSubspace import ParameterSubspace
class LikelihoodError(xpsiError):
""" Raised if there is a problem with the value of the log-likelihood. """
class Signal(ParameterSubspace):
"""
A signal is constituted by some X-ray dataset, a model instrument
with which that data was acquired, a model background, and an object for
modelling interstellar processes.
The methods in this class must transform incident specific flux signals
into a structure congruent to that of the data for the purpose of
evaluation of the custom likelihood implemented via subclassing.
:param obj data:
An instance of :class:`~.Data.Data`.
:param obj instrument:
An instance of :class:`~.Instrument.Instrument`.
:param obj background:
If not ``None``, an instance of :class:`~.Background.Background`.
It is assumed if one constructs a model using instances of
:class:`~.Background.Background` that the background needs to be
registered by a model instrument. If ``None``, it is still possible
for one to define and use background parameters in a custom subclass
of :class:`~.Signal`. In particular, background parameters for some
model which directly specifies background contribution in units of
count/s per *output* channels. These background parameters can even
*be* the counts/s in output channels.
:param obj interstellar:
If not ``None``, an instance of :class:`~.Interstellar.Interstellar`.
To be applied to the incident signal as a callable that modifies the
signal in place.
:param str photosphere_prefix:
The ``str`` prefix of the photosphere object with which this signal
object is associated.
:param bool cache:
Cache intermediary signals during likelihood evaluation? When performing
post-processing, this needs to be activated for full functionality of
the :mod:`~.xpsi.PostProcessing` module. For likelihood function
evaluation during sampling, caching should be deactivated because it is
not used. It might be useful to activate caching also when preparing a
model for a sampling application, to check the likelihood function
works as intended.
:param bool store:
Deprecated. You can use this or ``cache``, which has the same effect.
"""
def __init__(self,
data,
instrument,
background = None,
interstellar = None,
photosphere_prefix = None,
cache = False,
bounds = None,
values = None,
*args,
**kwargs):
if not isinstance(data, Data):
raise TypeError('Invalid type for a data object.')
else:
self._data = data
if not isinstance(instrument, Instrument):
raise TypeError('Invalid type for an instrument object.')
else:
self._instrument = instrument
a, b = data.index_range
if (data.channels != instrument.channels[a:b]).any():
raise ChannelError('Channel array declared for event data does not '
'match channel array declared for the loaded '
'instrument response (sub)matrix.')
self._identify_waveband()
if background is not None:
if not isinstance(background, Background):
raise TypeError('Invalid type for a background object.')
else:
self._background = background
else:
self._background = None
if interstellar is not None:
if not isinstance(interstellar, Interstellar):
raise TypeError('Invalid type for an interstellar object.')
else:
self._interstellar = interstellar
else:
self._interstellar = None
if photosphere_prefix is not None:
self._photosphere = photosphere_prefix
cache = kwargs.get('store', cache)
if not isinstance(cache, bool):
raise TypeError('Activate or deactivate caching with a boolean.')
self._cache = cache
if bounds is None: bounds = {}
if values is None: values = {}
doc = """
The phase shift for the signal, a periodic parameter [cycles].
"""
phase_bounds = bounds.get('phase_shift', None)
phase_value = values.get('phase_shift', 0.0 if phase_bounds is None else None)
if phase_value is None:
if not phase_bounds or None in phase_bounds:
raise ValueError('Phase-shift bounds must be specified.')
elif _np.array([not _np.isfinite(b) for b in phase_bounds]).any():
raise ValueError('Phase-shift bounds must be finite.')
elif not (0.0 <= (phase_bounds[1] - phase_bounds[0]) <= 1.0):
raise ValueError('Phase bounds must be separated by '
'a maximum of one cycle.')
phase_shift = Parameter('phase_shift',
strict_bounds = (-_np.infty, _np.infty),
bounds = phase_bounds,
doc = doc,
symbol = r'$\phi$',
value = phase_value)
# merge the subspaces; order unimportant
super(Signal, self).__init__(phase_shift,
                             self._instrument,
                             self._background,
                             self._interstellar,
                             *args, **kwargs)
@property
def background(self):
""" Get the instance of :class:`~.Background.Background`."""
return self._background
@property
def interstellar(self):
""" Get the instance of :class:`~.Interstellar.Interstellar`."""
return self._interstellar
@property
def instrument(self):
""" Get the instance of :class:`~.Instrument.Instrument`."""
return self._instrument
@property
def photosphere(self):
return self._photosphere
def _identify_waveband(self):
""" Bound the waveband for signal integration.
Constructs an array of energy edges for instrument operation.
This method thus automatically constructs energy bounds for a
particular instrument. At energies between these bounds, signals
are calculated. This requires details about the contiguous
subset of output channels the photon data spans (in an instance of
the :class:`~.Data.Data` class) and the redistribution matrix of the
model instrument (in an instance of the
:class:`~.Instrument.Instrument` class).
:raises IndexError:
If the channel range of the data object is not consistent with
the instrument object.
"""
a, b = self._data.index_range
def search(i, j, k):
while self._instrument.matrix[i,j] == 0.0:
j += k
return j
a = search(a, 0, 1)
b = self._instrument.matrix.shape[1] + search(b-1, -1, -1) + 1
self._input_interval_range = (a, b)
self._energy_edges = self._instrument.energy_edges[a:b + 1]
self._energy_mids = (self._energy_edges[:-1] + self._energy_edges[1:])/2.0
@property
def fast_energies(self):
""" Get coarse array of energies for fast-mode likelihood evals. """
return self._fast_energies
@fast_energies.setter
def fast_energies(self, energies):
""" Set energies for fast mode."""
self._fast_energies = energies
def create_energy_array(self, rel_num_energies=10.0):
""" Get a (finer) array of energies spanning instrument waveband.
Useful for getting an appropriately bounded and spaced set of energies
for signal interpolation.
:param float rel_num_energies:
The number of energies desired as a fraction of the number of
energies implemented for incident signal integration.
"""
L = self.energy_edges[0]
R = self.energy_edges[-1]
energies = _np.logspace(_np.log10(L), _np.log10(R),
int(rel_num_energies * len(self.energies)),
base=10.0)
return energies
@property
def energy_edges(self):
""" Get a :class:`numpy.ndarray` of energy edges. """
return self._energy_edges
def register(self, signals, fast_mode=False, threads=1):
""" Register an incident signal by operating with the response matrix.
A :class:`numpy.ndarray` is stored as an instance attribute containing
source signal for each *output* channel in units of counts cm^2/s
(assuming instrument effective area units are cm^2).
"""
if fast_mode:
try:
del self.fast_total_counts
except AttributeError:
pass
for hotRegion in signals:
fast_total_counts = []
for component, phases in zip(hotRegion, self.fast_phases):
if component is None:
fast_total_counts.append(None)
else:
integrated = energy_integrator(threads,
component,
_np.log10(self.fast_energies),
_np.log10(self._energy_edges))
# move interstellar to star?
if self._interstellar is not None:
self._interstellar(self._energy_mids, integrated)
temp = self._instrument(integrated,
self._input_interval_range,
self._data.index_range)
fast_total_counts.append(_np.sum(temp))
self.fast_total_counts = tuple(fast_total_counts)
else:
try:
del self.signals
except AttributeError:
pass
if self.cache:
try:
del self.incident_specific_flux_signals
except AttributeError:
pass
for hotRegion in signals: # iterate over hot regions
signal = None
for component in hotRegion: # add other components
try:
signal += component
except TypeError:
signal = component
# cache total hot region signal
self.incident_specific_flux_signals = signal
try:
del self.incident_flux_signals
except AttributeError:
pass
try:
self.execute_custom_cache_instructions()
except NotImplementedError:
pass # no custom caching targets
for hotRegion in signals:
integrated = None
for component in hotRegion:
temp = energy_integrator(threads,
component,
_np.log10(self._energies),
_np.log10(self._energy_edges))
try:
integrated += temp
except TypeError:
integrated = temp
if self.cache:
self.incident_flux_signals = integrated.copy()
if self._interstellar is not None:
self._interstellar(self._energy_mids, integrated)
self.signals = self._instrument(integrated,
self._input_interval_range,
self._data.index_range)
if self._background is not None:
try:
self._background(self._energy_edges,
self._data.phases)
except TypeError:
print('Error when evaluating the incident background.')
raise
self._background.registered_background = \
self._instrument(self._background.incident_background,
self._input_interval_range,
self._data.index_range)
@property
def num_components(self):
return len(self._signals)
@property
def phases(self):
return [phases.copy() for phases in self._phases]
@phases.setter
def phases(self, obj):
if not isinstance(obj, list):
obj = [obj]
self._phases = obj
@property
def fast_phases(self):
return [phases.copy() for phases in self._fast_phases]
@fast_phases.setter
def fast_phases(self, obj):
if not isinstance(obj, list):
obj = [obj]
self._fast_phases = obj
@property
def energies(self):
return self._energies
@energies.setter
def energies(self, obj):
self._energies = obj
@energies.deleter
def energies(self):
del self._energies
@property
def fast_total_counts(self):
return tuple(self._fast_total_counts)
@fast_total_counts.setter
def fast_total_counts(self, obj):
try:
self._fast_total_counts.append(obj)
except AttributeError:
self._fast_total_counts = [obj]
@fast_total_counts.deleter
def fast_total_counts(self):
del self._fast_total_counts
@property
def store(self):
return self._cache
@store.setter
def store(self, value):
if isinstance(value, bool):
self._cache = value
else:
raise ValueError('Signal storage requires boolean activation.')
@property
def cache(self):
return self._cache
@cache.setter
def cache(self, value):
if isinstance(value, bool):
self._cache = value
else:
raise ValueError('Signal storage requires boolean activation.')
@property
def data(self):
""" Get the stored data object. """
return self._data
@data.setter
def data(self, data):
""" Set the data object. """
if isinstance(data, Data):
self._data = data
else:
raise TypeError('The data object is of an invalid type.')
@property
def signals(self):
""" Get the stored channel-by-channel signal components. """
return tuple(signal.copy() for signal in self._signals)
@signals.setter
def signals(self, obj):
try:
self._signals.append(obj)
except AttributeError:
self._signals = [obj]
@signals.deleter
def signals(self):
del self._signals
@property
def incident_specific_flux_signals(self):
""" Get the incident signal components. """
return tuple(s.copy() for s in self._incident_specific_flux_signals)
@incident_specific_flux_signals.setter
def incident_specific_flux_signals(self, obj):
try:
self._incident_specific_flux_signals.append(obj)
except AttributeError:
self._incident_specific_flux_signals = [obj]
@incident_specific_flux_signals.deleter
def incident_specific_flux_signals(self):
del self._incident_specific_flux_signals
@property
def incident_flux_signals(self):
""" Get the incident flux signal components.
These signals are integrated over a set of energy intervals spanning
the instrument waveband.
"""
return tuple(s.copy() for s in self._incident_flux_signals)
@incident_flux_signals.setter
def incident_flux_signals(self, obj):
try:
self._incident_flux_signals.append(obj)
except AttributeError:
self._incident_flux_signals = [obj]
@incident_flux_signals.deleter
def incident_flux_signals(self):
del self._incident_flux_signals
@property
def expected_counts(self):
return self._expected_counts
@expected_counts.setter
def expected_counts(self, obj):
self._expected_counts = obj
@expected_counts.deleter
def expected_counts(self):
del self._expected_counts
@property
def shifts(self):
""" Returns the hot region phase plus the instrument phase-shift."""
return self._shifts + self['phase_shift']
@shifts.setter
def shifts(self, obj):
if isinstance(obj, _np.ndarray) and len(obj) == len(self._phases):
self._shifts = obj
else:
raise TypeError('Store phase shift parameters as a 1D ndarray.')
@shifts.deleter
def shifts(self):
del self._shifts
@property
def background_signal(self):
""" Get stored background. """
return self._background_signal
@background_signal.setter
def background_signal(self, obj):
if isinstance(obj, _np.ndarray):
self._background_signal = obj
@background_signal.deleter
def background_signal(self):
del self._background_signal
@property
def caching_target_names(self):
""" Just return the names of the caching targets. """
return self._caching_targets
@property
def caching_targets(self):
""" Get a dictionary of model objects for caching.
Called by the post-processing module.
:raises AttributeError:
If a property is not set in methods of a subclass, or if the
``self.store`` property is not ``True``.
"""
try:
self._caching_targets
except AttributeError:
print('Caching targets not declared.')
raise
return {target: getattr(self, target) for target in self._caching_targets}
@caching_targets.setter
def caching_targets(self, obj):
if isinstance(obj, list):
if all(isinstance(o, _six.string_types) for o in obj):
if all(hasattr(self, o) for o in obj):
self._caching_targets = obj
return None
raise ValueError('Invalid caching targets.')
def execute_custom_cache_instructions(self):
""" Subclass and overwrite to specify custom cache objects.
The default cached objects, when ``cache`` mode is activated, are
handled in the :meth:`~.Signal.register` method.
"""
raise NotImplementedError('Cache method not implemented.')
@property
def loglikelihood(self):
""" Return the logarithm of the likelihood.
:raises AttributeError: If property not set in methods of a subclass.
"""
return self._loglikelihood
@loglikelihood.setter
def loglikelihood(self, ll):
""" Check and store the logarithm of the likelihood. """
if _np.isnan(ll):
raise LikelihoodError('Log-likelihood is ``NaN``.')
if not _np.isfinite(ll):
self._loglikelihood = -_np.inf
else:
self._loglikelihood = ll
@abstractmethod
def __call__(self, **kwargs):
""" Compute the logarithm of the likelihood and store it as a property.
The keyword arguments currently communicated by an
:class:`~.Likelihood.Likelihood` instance are as follows.
:param int threads:
Number of ``OpenMP`` threads to use for likelihood evaluation.
This argument can be ignored if not required.
:param float llzero:
The minimum log-likelihood setting for MultiNest. Points whose
log-likelihood is lower than this value are ignored.
"""
def synthesise(self, phase_shifts, directory, **kwargs):
""" Synthesise signal data according to the generative model.
:param iterable phase_shifts:
Container of phase shift :class:`~.Parameter.Parameter` instances,
one per hot region, communicated by the likelihood object from
the star object. The order is equal to the order of the hot
region objects stored in ``photosphere.hot.objects``.
:param str directory: Path to directory in which to write synthetic
data. It is recommended that the ``prefix`` of
the signal appears in the filename.
:param int threads:
Number of ``OpenMP`` threads to use for likelihood evaluation.
This argument can be ignored if not required.
"""
raise NotImplementedError('Cannot synthesise data.')
def construct_energy_array(num_energies, signals, max_energy=None):
""" Construct an array of photon energies for integration.
:param int num_energies:
Number of energies, distributed over union of wavebands covered
by instruments that registered the data signals.
:param list signals:
An unordered list of :class:`~.Signal` instances.
"""
ordered = [] # in waveband upper-limit, highest to lowest
coverage_gaps = [] # highest to lowest
# locate coverage gaps if any
for _ in range(len(signals)):
# find upper limit in energy from those remaining
for signal in signals:
try:
MAX
except NameError:
MAX = signal.energy_edges[-1]
s = signal
E = signal.energy_edges[-1]
if E > MAX:
MAX = E
s = signal
ordered.append(s)
signals.remove(s)
if len(ordered) > 1:
for signal in ordered[:-1]:
try:
MIN
except NameError:
MIN = signal.energy_edges[0]
E = signal.energy_edges[0]
if E < MIN:
MIN = E
if MAX < MIN: # MAX from above
coverage_gaps.append((MAX, MIN))
del MAX
# find global limits
_signal_max = ordered[0].energy_edges[-1]
if max_energy is not None and max_energy < _signal_max:
MAX = max_energy
# respect maximum energy setting
_coverage_gaps = []
for _coverage_gap in coverage_gaps:
if _coverage_gap[0] < MAX <= _coverage_gap[1]:
MAX = _coverage_gap[0]
if MAX > _coverage_gap[1]:
_coverage_gaps.append(_coverage_gap)
coverage_gaps = _coverage_gaps
else:
MAX = _signal_max
for signal in ordered:
try:
MIN
except NameError:
MIN = signal.energy_edges[0]
E = signal.energy_edges[0]
if E < MIN:
MIN = E
interval = _np.log10(MAX) - _np.log10(MIN)
# account for gaps to conserve total number of energies requested
for _coverage_gap in coverage_gaps:
interval -= ( _coverage_gap[1] - _coverage_gap[0] )
energies = _np.array([])
# distribute energies over waveband intervals
for i in range(len(coverage_gaps) + 1):
if i == 0:
U = MAX
else:
U = coverage_gaps[i-1][0]
if i == len(coverage_gaps):
L = MIN
else:
L = coverage_gaps[i][1]
frac = ( _np.log10(U) - _np.log10(L) ) / interval
num = int( _m.ceil(frac * num_energies) )
energies = _np.append(energies,
_np.logspace(_np.log10(L), _np.log10(U),
int(num),
base=10.0)[::-1])
return _np.ascontiguousarray(energies[::-1])
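# Hypothetical usage sketch (not in the original module). construct_energy_array
# only touches each signal's energy_edges (and empties the input list), so simple
# stand-ins are enough to illustrate the idea:
# from types import SimpleNamespace
# import numpy as np
# soft = SimpleNamespace(energy_edges=np.array([0.2, 0.5, 1.0]))   # ~0.2-1 keV band
# hard = SimpleNamespace(energy_edges=np.array([2.0, 5.0, 10.0]))  # ~2-10 keV band
# energies = construct_energy_array(128, [soft, hard])
# # -> roughly 128 energies spanning 0.2-10 keV, skipping the uncovered 1-2 keV gap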
|
the-stack_106_16199
|
import glfw
from OpenGL.GL import *
from OpenGL.GL.shaders import compileProgram, compileShader
import pyrr
from pyrr import Vector3, vector, vector3, matrix44
##Self-defined modules
from TextureLoader import load_texture
from ObjLoader import ObjLoader
from camera import Camera
import ShaderLoader
cam=Camera()
WIDTH,HEIGHT=1280,720
lastX,lastY=WIDTH/2, HEIGHT/2
first_mouse=True
left, right, forward, backward, Rotate, quake=False,False,False,False, False, False
##the keyboard input callback
def key_input_clb(window,key,scancode,action,mode):
global left, right, forward, backward, Rotate, quake
if key==glfw.KEY_ESCAPE and action==glfw.PRESS:
glfw.set_window_should_close(window,True)
if key==glfw.KEY_W and action==glfw.PRESS:
forward=True
elif key==glfw.KEY_W and action==glfw.RELEASE:
forward=False
if key==glfw.KEY_S and action==glfw.PRESS:
backward=True
elif key==glfw.KEY_S and action==glfw.RELEASE:
backward=False
if key==glfw.KEY_A and action==glfw.PRESS:
left=True
elif key==glfw.KEY_A and action==glfw.RELEASE:
left=False
if key==glfw.KEY_D and action==glfw.PRESS:
right=True
elif key==glfw.KEY_D and action==glfw.RELEASE:
right=False
if key==glfw.KEY_R and action==glfw.PRESS:
Rotate= not Rotate
if key==glfw.KEY_E and action==glfw.PRESS:
quake= not quake
##do the movement; call this function in the main loop
def do_movement():
##called in every loop
steps=0.05
if left:
cam.process_keyboard("LEFT",steps)
if right:
cam.process_keyboard("RIGHT",steps)
if forward:
cam.process_keyboard("FORWARD",steps)
if backward:
cam.process_keyboard("BACKWARD",steps)
if Rotate:
##print("Rotation called")
pass
if quake:
##cam.process_keyboard("QUAKE",steps)
pass
##the mouse position callback function
def mouse_look_clb(window, xpos, ypos):
global first_mouse, lastX, lastY
if first_mouse:
lastX=xpos
lastY=ypos
first_mouse=False
xoffset=xpos-lastX
yoffset=lastY-ypos ##because Y counting starts from bottom
lastX=xpos
lastY=ypos
cam.process_mouse_movement(xoffset, yoffset)
##the scroll callback function
def scroll_clb(windows, xoff, yoff):
cam.process_scroll(yoff)
##glfw callback functions
def window_resize(window, width, height):
glViewport(0,0,width, height)
##*********change*******##
projection=pyrr.matrix44.create_perspective_projection_matrix(45, width/height,
0.1,100)
glUniformMatrix4fv(proj_loc,1,GL_FALSE, projection)
##initialising glfw library
if not glfw.init():
raise Exception("GLFW CANNOT BE INITIALISED")
##Creating the window
w_height=800
w_width=600
window=glfw.create_window(w_height,w_width,"Dharara",None,None)
# check if window was created
if not window:
glfw.terminate()
raise Exception("glfw window can not be created!")
# set window's position
glfw.set_window_pos(window, 10, 20)
##CALLBACK FUNCTIONS
##set the callback func for window resize
glfw.set_window_size_callback(window, window_resize)
##set the mouse position callback
glfw.set_cursor_pos_callback(window,mouse_look_clb)
##set the keyboard input callback
glfw.set_key_callback(window,key_input_clb)
##capture the mouse cursor
glfw.set_input_mode(window,glfw.CURSOR, glfw.CURSOR_DISABLED)
##call back for mouse scroll
glfw.set_scroll_callback(window, scroll_clb)
##make the context current
glfw.make_context_current(window)
##load here the 3d meshes
dh_indices, dh_buffer= ObjLoader.load_model("meshes/final.obj")
##dh_indices, dh_buffer=ObjLoader.load_model("meshes/cube.obj")
##shader = ShaderLoader.compile_shader("shaders/shader_vert.vs", "shaders/shader_frag.fs")
shader = ShaderLoader.compile_shader("shaders/shadow_mapping_depth.vs", "shaders/shadow_mapping_depth.fs")
##VAO AND VBO
VAO=glGenVertexArrays(1)
VBO=glGenBuffers(1)
##VAO
glBindVertexArray(VAO)
##Vertex Buffer Object
glBindBuffer(GL_ARRAY_BUFFER,VBO)
glBufferData(GL_ARRAY_BUFFER, dh_buffer.nbytes, dh_buffer, GL_STATIC_DRAW)
##Vertices
glEnableVertexAttribArray(0)
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,dh_buffer.itemsize*8,ctypes.c_void_p(0))
##textures
glEnableVertexAttribArray(1)
glVertexAttribPointer(1,2,GL_FLOAT,GL_FALSE,dh_buffer.itemsize*8,ctypes.c_void_p(12))
##normals
glVertexAttribPointer(2,3,GL_FLOAT,GL_FALSE,dh_buffer.itemsize*8,ctypes.c_void_p(20))
glEnableVertexAttribArray(2)
textures=glGenTextures(1)
load_texture("meshes/final.png",textures)
##load_texture("meshes/metal.jpg",textures)
glEnable(GL_TEXTURE_2D)
glUseProgram(shader)
glClearColor(0,0.1,0.1,1)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
view = pyrr.matrix44.create_from_translation(pyrr.Vector3([0.0, -7.0, -6.0]))
projection = pyrr.matrix44.create_perspective_projection_matrix(45.0, w_height / w_width, 0.1, 100.0)
model = pyrr.matrix44.create_from_translation(pyrr.Vector3([0.0, 0.0, 0.0]))
##(x,y,z) coordinates
##dh_pos=pyrr.matrix44.create_from_translation(pyrr.Vector3([0,-26,-12]))
##dh_pos=pyrr.matrix44.create_from_translation(pyrr.Vector3([0,0,0]))
view_loc = glGetUniformLocation(shader, "view")
proj_loc = glGetUniformLocation(shader, "projection")
model_loc = glGetUniformLocation(shader, "model")
transform_loc = glGetUniformLocation(shader, "transform")
light_loc = glGetUniformLocation(shader, "light")
glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
##glUniformMatrix4fv(light_loc, 1, GL_FALSE, light)
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection)
glUniformMatrix4fv(model_loc, 1, GL_FALSE, model)
DEG=0.01 ##FOR TOGGLING OF ROTATION
##the main applcn loop
while not glfw.window_should_close(window):
glfw.poll_events()
do_movement()
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
##changing the view point
view=cam.get_view_matrix()
glUniformMatrix4fv(view_loc,1 ,GL_FALSE,view)
if Rotate:
DEG+=.005
rot_y=pyrr.Matrix44.from_y_rotation(0.8*DEG)
##rot_y=pyrr.Matrix44.from_y_rotation(0.8*glfw.get_time())
##rot_y=pyrr.Matrix44.from_y_rotation(0.8)
## if quake:
## PTS=Vector3([15.0,15.0,15.0])
## rot_y=pyrr.matrix44.create_from_translation(PTS)
##
##model = pyrr.matrix44.multiply(rot_y, dh_pos)
##draw the Dharara
##glBindVertexArray(VAO)
##glBindTexture(GL_TEXTURE_2D,textures)
glUniformMatrix4fv(transform_loc, 1, GL_FALSE, rot_y)
glUniformMatrix4fv(light_loc,1,GL_FALSE,rot_y)
glDrawArrays(GL_TRIANGLES,0, len(dh_indices))
glfw.swap_buffers(window)
##terminate glfw,free up allocated resources
glfw.terminate()
|
the-stack_106_16200
|
import copy
import json
import math
import numbers
import os
import random
import time
from enum import Enum
from queue import Full
from os.path import join
import numpy as np
from algorithms.appo.appo_utils import TaskType, iterate_recursively
from algorithms.utils.algo_utils import EPS
from utils.utils import log, experiment_dir
def perturb_float(x, perturb_amount=1.2):
# mutation direction
new_value = x / perturb_amount if random.random() < 0.5 else x * perturb_amount
return new_value
def perturb_vtrace(x, cfg):
return perturb_float(x, perturb_amount=1.005)
def perturb_exponential_decay(x, cfg):
perturbed = perturb_float(1.0 - x)
new_value = 1.0 - perturbed
new_value = max(EPS, new_value)
return new_value
def perturb_batch_size(x, cfg):
new_value = perturb_float(x, perturb_amount=1.2)
initial_batch_size = cfg.batch_size
max_batch_size = initial_batch_size * 1.5
min_batch_size = cfg.rollout
new_value = min(new_value, max_batch_size)
# round down to whole number of rollouts
new_value = (int(new_value) // cfg.rollout) * cfg.rollout
new_value = max(new_value, min_batch_size)
return new_value
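# Illustrative sketch (not part of the original file): what the perturbation
# helpers do; the concrete numbers are assumptions, not values PBT itself uses.
# perturb_float(3e-4)                  -> 3e-4 / 1.2 or 3e-4 * 1.2, chosen at random
# perturb_exponential_decay(0.99, cfg) -> perturbs (1 - 0.99), so the result stays
#                                         close to 1.0 (roughly 0.988 or 0.992)
# perturb_batch_size(2048, cfg)        -> multiplied or divided by 1.2, clamped to
#                                         [cfg.rollout, 1.5 * cfg.batch_size] and
#                                         rounded down to a whole number of rollouts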
class PbtTask(Enum):
SAVE_MODEL, LOAD_MODEL, UPDATE_CFG, UPDATE_REWARD_SCHEME = range(4)
HYPERPARAMS_TO_TUNE = {
'learning_rate', 'exploration_loss_coeff', 'value_loss_coeff', 'max_grad_norm', 'ppo_clip_ratio', 'ppo_clip_value',
}
# if not specified then tune all rewards
REWARD_CATEGORIES_TO_TUNE = {
'doom_': ['delta', 'selected_weapon'],
'quadrotor_': ['quad_rewards'],
}
# HYPERPARAMS_TO_TUNE_EXTENDED = {
# 'learning_rate', 'exploration_loss_coeff', 'value_loss_coeff', 'adam_beta1', 'max_grad_norm',
# 'ppo_clip_ratio', 'ppo_clip_value', 'vtrace_rho', 'vtrace_c',
# }
SPECIAL_PERTURBATION = dict(
gamma=perturb_exponential_decay,
adam_beta1=perturb_exponential_decay,
vtrace_rho=perturb_vtrace,
vtrace_c=perturb_vtrace,
batch_size=perturb_batch_size,
)
def policy_cfg_file(cfg, policy_id):
return join(experiment_dir(cfg=cfg), f'policy_{policy_id:02d}_cfg.json')
def policy_reward_shaping_file(cfg, policy_id):
return join(experiment_dir(cfg=cfg), f'policy_{policy_id:02d}_reward_shaping.json')
class PopulationBasedTraining:
def __init__(self, cfg, default_reward_shaping, summary_writers):
self.cfg = cfg
if cfg.pbt_optimize_batch_size and 'batch_size' not in HYPERPARAMS_TO_TUNE:
HYPERPARAMS_TO_TUNE.add('batch_size')
self.last_update = [0] * self.cfg.num_policies
self.policy_cfg = [dict() for _ in range(self.cfg.num_policies)]
self.policy_reward_shaping = [dict() for _ in range(self.cfg.num_policies)]
self.default_reward_shaping = default_reward_shaping
self.summary_writers = summary_writers
self.last_pbt_summaries = 0
self.learner_workers = self.actor_workers = None
self.reward_categories_to_tune = []
for env_prefix, categories in REWARD_CATEGORIES_TO_TUNE.items():
if cfg.env.startswith(env_prefix):
self.reward_categories_to_tune = categories
def init(self, learner_workers, actor_workers):
self.learner_workers = learner_workers
self.actor_workers = actor_workers
for policy_id in range(self.cfg.num_policies):
# save the policy-specific configs if they don't exist, or else load them from files
policy_cfg_filename = policy_cfg_file(self.cfg, policy_id)
if os.path.exists(policy_cfg_filename):
with open(policy_cfg_filename, 'r') as json_file:
log.debug('Loading initial policy %d configuration from file %s', policy_id, policy_cfg_filename)
json_params = json.load(json_file)
self.policy_cfg[policy_id] = json_params
else:
self.policy_cfg[policy_id] = dict()
for param_name in HYPERPARAMS_TO_TUNE:
self.policy_cfg[policy_id][param_name] = self.cfg[param_name]
if policy_id > 0: # keep one policy with default settings in the beginning
log.debug('Initial cfg mutation for policy %d', policy_id)
self.policy_cfg[policy_id] = self._perturb_cfg(self.policy_cfg[policy_id])
for policy_id in range(self.cfg.num_policies):
# save the policy-specific reward shaping if it doesn't exist, or else load from file
policy_reward_shaping_filename = policy_reward_shaping_file(self.cfg, policy_id)
if os.path.exists(policy_reward_shaping_filename):
with open(policy_reward_shaping_filename, 'r') as json_file:
log.debug(
'Loading policy %d reward shaping from file %s', policy_id, policy_reward_shaping_filename,
)
json_params = json.load(json_file)
self.policy_reward_shaping[policy_id] = json_params
else:
self.policy_reward_shaping[policy_id] = copy.deepcopy(self.default_reward_shaping)
if policy_id > 0: # keep one policy with default settings in the beginning
log.debug('Initial rewards mutation for policy %d', policy_id)
self.policy_reward_shaping[policy_id] = self._perturb_reward(self.policy_reward_shaping[policy_id])
# send initial configuration to the system components
for policy_id in range(self.cfg.num_policies):
self._save_cfg(policy_id)
self._save_reward_shaping(policy_id)
self._learner_update_cfg(policy_id)
self._actors_update_shaping_scheme(policy_id)
def _save_cfg(self, policy_id):
policy_cfg_filename = policy_cfg_file(self.cfg, policy_id)
with open(policy_cfg_filename, 'w') as json_file:
log.debug('Saving policy-specific configuration %d to file %s', policy_id, policy_cfg_filename)
json.dump(self.policy_cfg[policy_id], json_file)
def _save_reward_shaping(self, policy_id):
policy_reward_shaping_filename = policy_reward_shaping_file(self.cfg, policy_id)
with open(policy_reward_shaping_filename, 'w') as json_file:
log.debug('Saving policy-specific reward shaping %d to file %s', policy_id, policy_reward_shaping_filename)
json.dump(self.policy_reward_shaping[policy_id], json_file)
def _perturb_param(self, param, param_name, default_param):
# toss a coin whether we perturb the parameter at all
if random.random() > self.cfg.pbt_mutation_rate:
return param
if param != default_param and random.random() < 0.05:
# small chance to replace parameter with a default value
log.debug('%s changed to default value %r', param_name, default_param)
return default_param
if param_name in SPECIAL_PERTURBATION:
new_value = SPECIAL_PERTURBATION[param_name](param, self.cfg)
elif type(param) is bool:
new_value = not param
elif isinstance(param, numbers.Number):
perturb_amount = random.uniform(1.01, 1.5)
new_value = perturb_float(float(param), perturb_amount=perturb_amount)
else:
raise RuntimeError('Unsupported parameter type')
log.debug('Param %s changed from %.6f to %.6f', param_name, param, new_value)
return new_value
def _perturb(self, old_params, default_params):
"""Params assumed to be a flat dict."""
params = copy.deepcopy(old_params)
for key, value in params.items():
if isinstance(value, (tuple, list)):
# this is the case for reward shaping delta params
params[key] = tuple(
self._perturb_param(p, f'{key}_{i}', default_params[key][i])
for i, p in enumerate(value)
)
else:
params[key] = self._perturb_param(value, key, default_params[key])
return params
def _perturb_cfg(self, original_cfg):
replacement_cfg = copy.deepcopy(original_cfg)
return self._perturb(replacement_cfg, default_params=self.cfg)
def _perturb_reward(self, original_reward_shaping):
if original_reward_shaping is None:
return None
replacement_shaping = copy.deepcopy(original_reward_shaping)
if len(self.reward_categories_to_tune) > 0:
for category in self.reward_categories_to_tune:
if category in replacement_shaping:
replacement_shaping[category] = self._perturb(
replacement_shaping[category], default_params=self.default_reward_shaping[category],
)
else:
replacement_shaping = self._perturb(replacement_shaping, default_params=self.default_reward_shaping)
return replacement_shaping
def _force_learner_to_save_model(self, policy_id):
learner_worker = self.learner_workers[policy_id]
learner_worker.save_model()
def _learner_load_model(self, policy_id, replacement_policy):
log.debug('Asking learner %d to load model from %d', policy_id, replacement_policy)
load_task = (PbtTask.LOAD_MODEL, (policy_id, replacement_policy))
learner_worker = self.learner_workers[policy_id]
learner_worker.task_queue.put((TaskType.PBT, load_task))
def _learner_update_cfg(self, policy_id):
learner_worker = self.learner_workers[policy_id]
log.debug('Sending learning configuration to learner %d...', policy_id)
cfg_task = (PbtTask.UPDATE_CFG, (policy_id, self.policy_cfg[policy_id]))
learner_worker.task_queue.put((TaskType.PBT, cfg_task))
def _actors_update_shaping_scheme(self, policy_id):
log.debug('Sending latest reward scheme to actors for policy %d...', policy_id)
for actor_worker in self.actor_workers:
reward_scheme_task = (PbtTask.UPDATE_REWARD_SCHEME, (policy_id, self.policy_reward_shaping[policy_id]))
task = (TaskType.PBT, reward_scheme_task)
try:
actor_worker.task_queue.put(task, timeout=0.1)
except Full:
log.warning('Could not add task %r to queue, it is likely that worker died', task)
@staticmethod
def _write_dict_summaries(dictionary, writer, name, env_steps):
for d, key, value in iterate_recursively(dictionary):
if isinstance(value, bool):
value = int(value)
if isinstance(value, (int, float)):
writer.add_scalar(f'zz_pbt/{name}_{key}', value, env_steps)
elif isinstance(value, (tuple, list)):
for i, tuple_value in enumerate(value):
writer.add_scalar(f'zz_pbt/{name}_{key}_{i}', tuple_value, env_steps)
else:
log.error('Unsupported type in pbt summaries %r', type(value))
def _write_pbt_summaries(self, policy_id, env_steps):
writer = self.summary_writers[policy_id]
self._write_dict_summaries(self.policy_cfg[policy_id], writer, 'cfg', env_steps)
if self.policy_reward_shaping[policy_id] is not None:
self._write_dict_summaries(self.policy_reward_shaping[policy_id], writer, 'rew', env_steps)
def _update_policy(self, policy_id, policy_stats):
if self.cfg.pbt_target_objective not in policy_stats:
return
target_objectives = policy_stats[self.cfg.pbt_target_objective]
# not enough data to perform PBT yet
for objectives in target_objectives:
if len(objectives) <= 0:
return
target_objectives = [np.mean(o) for o in target_objectives]
policies = list(range(self.cfg.num_policies))
policies_sorted = sorted(zip(target_objectives, policies), reverse=True)
policies_sorted = [p for objective, p in policies_sorted]
replace_fraction = self.cfg.pbt_replace_fraction
replace_number = math.ceil(replace_fraction * self.cfg.num_policies)
best_policies = policies_sorted[:replace_number]
worst_policies = policies_sorted[-replace_number:]
if policy_id in best_policies:
# don't touch the policies that are doing well
return
log.debug('PBT best policies: %r, worst policies %r', best_policies, worst_policies)
# to make the code below uniform, this means keep our own parameters and cfg
# we only take parameters and cfg from another policy if certain conditions are met (see below)
replacement_policy = policy_id
if policy_id in worst_policies:
log.debug('Current policy %d is among the worst policies %r', policy_id, worst_policies)
replacement_policy_candidate = random.choice(best_policies)
reward_delta = target_objectives[replacement_policy_candidate] - target_objectives[policy_id]
reward_delta_relative = abs(reward_delta / (target_objectives[replacement_policy_candidate] + EPS)) # TODO: this might not work correctly with negative rewards
if abs(reward_delta) > self.cfg.pbt_replace_reward_gap_absolute and reward_delta_relative > self.cfg.pbt_replace_reward_gap:
replacement_policy = replacement_policy_candidate
log.debug(
'Difference in reward is %.4f (%.4f), policy %d weights to be replaced by %d',
reward_delta, reward_delta_relative, policy_id, replacement_policy,
)
else:
log.debug('Difference in reward is not enough %.3f %.3f', abs(reward_delta), reward_delta_relative)
if policy_id == 0:
# Do not ever mutate the 1st policy, leave it for the reference
# Still we allow replacements in case it's really bad
self.policy_cfg[policy_id] = self.policy_cfg[replacement_policy]
self.policy_reward_shaping[policy_id] = self.policy_reward_shaping[replacement_policy]
else:
self.policy_cfg[policy_id] = self._perturb_cfg(self.policy_cfg[replacement_policy])
self.policy_reward_shaping[policy_id] = self._perturb_reward(self.policy_reward_shaping[replacement_policy])
if replacement_policy != policy_id:
# force replacement policy learner to save the model and wait until it's done
self._force_learner_to_save_model(replacement_policy)
# now that the latest "replacement" model is saved to disk, we ask the learner to load the replacement policy
self._learner_load_model(policy_id, replacement_policy)
self._save_cfg(policy_id)
self._save_reward_shaping(policy_id)
self._learner_update_cfg(policy_id)
self._actors_update_shaping_scheme(policy_id)
def update(self, env_steps, policy_stats):
if not self.cfg.with_pbt or self.cfg.num_policies <= 1:
return
for policy_id in range(self.cfg.num_policies):
if policy_id not in env_steps:
continue
if env_steps[policy_id] < self.cfg.pbt_start_mutation:
continue
steps_since_last_update = env_steps[policy_id] - self.last_update[policy_id]
if steps_since_last_update > self.cfg.pbt_period_env_steps:
self._update_policy(policy_id, policy_stats)
self._write_pbt_summaries(policy_id, env_steps[policy_id])
self.last_update[policy_id] = env_steps[policy_id]
# also periodically dump a pbt summary even if we didn't change anything
now = time.time()
if now - self.last_pbt_summaries > 5 * 60:
for policy_id in range(self.cfg.num_policies):
if policy_id in env_steps:
self._write_pbt_summaries(policy_id, env_steps[policy_id])
self.last_pbt_summaries = now
|
the-stack_106_16202
|
from tftk.image.dataset import Mnist
from tftk.image.dataset import Food101
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.model.classification import SimpleClassificationModel
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder
from tftk import Context
from tftk.image.model.representation import SimpleRepresentationModel, add_projection_layers
from tftk.train.image import ImageTrain
from tftk import ENABLE_SUSPEND_RESUME_TRAINING, ResumeExecutor
import tensorflow as tf
class MovingAverageCallback(tf.keras.callbacks.Callback):
def __init__(self, model):
    super().__init__()
    self.model = model
def on_train_begin(self, logs=None):
print("Starting training")
def on_train_end(self, logs=None):
print("Stop training")
def on_epoch_begin(self, epoch, logs=None):
print("\nStart epoch")
def on_epoch_end(self, epoch, logs=None):
print("\nOn epoch end, updating moving average")
w1 = self.model.get_weights()
w2 = []
for a in w1:
print(type(a))
w2.append( a*0.8 )
self.model.set_weights(w2)
def get_moving_average_callback(model):
m = model
def moving_average(loss, acc):
print("on epoch end")
w1 = m.get_weights()
w2 = []
for a in w1:
print(type(a))
w2.append( a*0.8 )
m.set_weights(w2)
return moving_average
def custom_loss(y_pred, y_true):
y_1, y_2 = y_pred
diff = y_1 - y_2
loss = tf.keras.backend.abs(diff)
return loss
def reinforcement(data):
img = data["image"]
label = data["label"]
return ([img,img],[img,img])
# supervised
def supervised_dataset(dataset:tf.data.Dataset, max_label:int)->tf.data.Dataset:
filtered = dataset.filter(lambda data:data['label'] < max_label)
def supervised_transform(data):
image = data['image']
image = tf.cast(image, tf.float32)
image = image / 255.0
label = data['label']
label = tf.one_hot(label, max_label)
return image, label
return filtered.map(supervised_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def pretext_dataset(dataset:tf.data.Dataset, start_label:int)->tf.data.Dataset:
filtered = dataset.filter(lambda data:data['label'] >= start_label)
def supervised_transform(data):
image = data['image']
image = tf.cast(image, tf.float32)
image = image / 255.0
def random_transform(image):
pass
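

# Hedged sketch of how pretext_dataset might be completed (illustrative only; the
# original leaves random_transform and the return value unimplemented). The two-view
# augmentation below is an assumption, not the author's intended transform.
def pretext_dataset_sketch(dataset: tf.data.Dataset, start_label: int) -> tf.data.Dataset:
    filtered = dataset.filter(lambda data: data['label'] >= start_label)

    def two_views(data):
        image = tf.cast(data['image'], tf.float32) / 255.0
        # two independently augmented "views" of the same image
        view_1 = tf.image.random_flip_left_right(image)
        view_2 = tf.image.random_flip_left_right(image)
        return view_1, view_2

    return filtered.map(two_views, num_parallel_calls=tf.data.experimental.AUTOTUNE)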
if __name__ == '__main__':
context = Context.init_context(TRAINING_NAME='')
# ENABLE_SUSPEND_RESUME_TRAINING()
BATCH_SIZE = 500
CLASS_NUM = 10
IMAGE_SIZE = 28
EPOCHS = 2
SHUFFLE_SIZE = 1000
# if IS_SUSPEND_RESUME_TRAIN() == True and IS_ON_COLABOLATORY_WITH_GOOGLE_DRIVE()== True:
# train, train_len = Mnist.get_train_dataset()
# validation, validation_len = Mnist.get_test_dataset()
# train = train.map(ImageDatasetUtil.image_reguralization()).map(ImageDatasetUtil.one_hot(CLASS_NUM))
# validation = validation.map(ImageDatasetUtil.image_reguralization()).map(ImageDatasetUtil.one_hot(CLASS_NUM))
# train = train.map(reinforcement)
# online_model = SimpleRepresentationModel.get_representation_model(input_shape=(28,28,1))
# target_model = SimpleRepresentationModel.get_representation_model(input_shape=(28,28,1))
# print(online_model.layers)
# online_projection_model = add_projection_layers(online_model)
# target_projection_model = add_projection_layers(target_model)
# input_online = online_model.layers[0].input
# input_target = target_model.layers[0].input
# output_online = online_model.layers[-1].output
# output_target = target_model.layers[-1].output
# mearged_model = tf.keras.Model(inputs=[input_online,input_target], outputs=[output_online,output_target])
# mearged_model.summary()
# optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
# callbacks = CallbackBuilder.get_callbacks(tensorboard=False, reduce_lr_on_plateau=True,reduce_patience=5,reduce_factor=0.25,early_stopping_patience=16)
# mearged_model.compile(optimizer=optimizer, loss=custom_loss)
# train = train.take(10)
# y = mearged_model.predict(train)
# print(y)
# optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs)
# online_projection = add_projection_layers(online_model)
# target_projection = add_projection_layers(target_model)
# inputs = [online_projection.input, target_projection.input]
# outputs = [online_projection.output, target_projection.output]
# total_model = tf.keras.Model(inputs=inputs, outputs=outputs)
# optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
# model = SimpleClassificationModel.get_model(input_shape=(IMAGE_SIZE,IMAGE_SIZE,1),classes=CLASS_NUM)
# callbacks = CallbackBuilder.get_callbacks(tensorboard=False, reduce_lr_on_plateau=True,reduce_patience=5,reduce_factor=0.25,early_stopping_patience=16)
# callbacks.append(MovingAverageCallback(model))
# ImageTrain.train_image_classification(train_data=train,train_size=train_len,batch_size=BATCH_SIZE,validation_data=validation,validation_size=validation_len,shuffle_size=SHUFFLE_SIZE,model=model,callbacks=callbacks,optimizer=optimizer,loss="categorical_crossentropy",max_epoch=EPOCHS)
# w1 = model.get_weights()
# # print(type(w1))
# w2 = []
# for a in w1:
# print(type(a))
# w2.append( a*0.8 )
# model.set_weights(w2)
|
the-stack_106_16203
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -maxmempooltx limit-number-of-transactions-in-mempool
# code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import shutil
import time
class MempoolCoinbaseTest(BitcoinTestFramework):
def setup_network(self):
# Three nodes
args = ["-maxmempooltx=11", "-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
self.nodes.append(start_node(2, self.options.tmpdir, args))
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount, node=0):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[node].createrawtransaction(inputs, outputs)
signresult = self.nodes[node].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# 20 transactions
b = [ self.nodes[0].getblockhash(n) for n in range(1, 21) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
# -maxmempooltx doesn't evict transactions submitted via sendrawtransaction:
assert_equal(len(self.nodes[0].getrawmempool()), 20)
# ... and doesn't evict sendtoaddress() transactions:
spends1_id.append(self.nodes[0].sendtoaddress(node0_address, 50))
assert_equal(len(self.nodes[0].getrawmempool()), 21)
time.sleep(1) # wait just a bit for node0 to send transactions to node1
# ... node1's mempool should be limited
assert(len(self.nodes[1].getrawmempool()) <= 11)
# have other node create five transactions...
node1_txids = []
for i in range(5):
node1_txids.append(self.nodes[1].sendtoaddress(node1_address, 50))
        # its mempool should be limited, but should contain those txids:
node1_txs = set(self.nodes[1].getrawmempool())
assert(node1_txs.issuperset(node1_txids))
        # The first send-to-self is guaranteed to evict another of node0's transactions.
# The second (and subsequent) might try (and fail) to evict the first
# send-to-self, in which case the mempool size can be bigger than -maxmempooltx
assert(len(self.nodes[1].getrawmempool()) <= 11+4)
time.sleep(1)
# node0's mempool should still have all its transactions:
node0_txs = set(self.nodes[0].getrawmempool())
assert(node0_txs.issuperset(spends1_id))
# Have each node mine a block, should empty mempools:
blocks = []
blocks.extend(self.nodes[0].generate(1))
sync_blocks(self.nodes)
blocks.extend(self.nodes[1].generate(1))
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Second test:
# eviction of long chains of dependent transactions
parent_ids = [spends1_id[0], node1_txids[0]]
send_addresses = [node0_address, node1_address]
chained_ids = [[],[]]
send_amount = 50
for i in range(5):
send_amount = send_amount-0.001 # send with sufficient fee
for node in range(2):
raw = self.create_tx(parent_ids[node], send_addresses[node], send_amount, node)
parent_ids[node] = self.nodes[node].sendrawtransaction(raw)
chained_ids[node].append(parent_ids[node])
sync_mempools(self.nodes) # Wait for all ten txns in all mempools
assert_equal(len(self.nodes[2].getrawmempool()), 10)
# Have both nodes generate high-priority transactions to exercise chained-transaction-eviction
# code
tx_ids = [[],[]]
for i in range(3):
for node in range(2):
tx_ids[node].append(self.nodes[node].sendtoaddress(send_addresses[node], 25))
        # Give a little time for transactions to make their way to node2; its mempool should
# remain under limit
time.sleep(1)
assert(len(self.nodes[2].getrawmempool()) <= 11)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
the-stack_106_16205
|
from math import hypot
import numpy as np
import json
import sys
def givens_rotation(A):
"""Perform QR decomposition of matrix A using Givens rotation."""
(num_rows, num_cols) = np.shape(A)
# Initialize orthogonal matrix Q and upper triangular matrix R.
Q = np.identity(num_rows)
R = np.copy(A)
# Iterate over lower triangular matrix.
(rows, cols) = np.tril_indices(num_rows, -1, num_cols)
for (row, col) in zip(rows, cols):
# Compute Givens rotation matrix and
# zero-out lower triangular matrix entries.
if R[row, col] != 0:
(c, s) = _givens_rotation_matrix_entries(R[col, col], R[row, col])
G = np.identity(num_rows)
G[[col, row], [col, row]] = c
G[row, col] = s
G[col, row] = -s
R = np.dot(G, R)
Q = np.dot(Q, G.T)
return (Q, R)
def _givens_rotation_matrix_entries(a, b):
"""Compute matrix entries for Givens rotation."""
r = hypot(a, b)
c = a/r
s = -b/r
return (c, s)
n = int(sys.argv[1])
A = np.random.rand(n, n);
Q,R = givens_rotation(A)
result = {
"A": A.tolist(),
"R": R.tolist(),
"Q": Q.tolist()
}
# print(result)
print(json.dumps(result))
sys.stdout.flush()
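
# Hedged verification sketch (added for illustration, not part of the original script):
# Q from a Givens-rotation QR decomposition should be orthogonal, and Q @ R should
# reconstruct A up to floating-point error.
assert np.allclose(np.dot(Q, R), A), "Q @ R should reconstruct A"
assert np.allclose(np.dot(Q.T, Q), np.identity(n)), "Q should be orthogonal"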
|
the-stack_106_16206
|
"""Test the main class DataExporter and functions in the dataio module, ExportData."""
import pathlib
import shutil
import re
from collections import OrderedDict
import logging
import json
import yaml
import pytest
import xtgeo
import fmu.dataio
# pylint: disable=protected-access
CFG = OrderedDict()
CFG["model"] = {"name": "Test", "revision": "21.0.0"}
CFG["masterdata"] = {
"smda": {
"country": [
{"identifier": "Norway", "uuid": "ad214d85-8a1d-19da-e053-c918a4889309"}
],
"discovery": [{"short_identifier": "abdcef", "uuid": "ghijk"}],
}
}
CFG["stratigraphy"] = {"TopVolantis": {}}
CFG["access"] = {"someaccess": "jail"}
CFG["model"] = {"revision": "0.99.0"}
RUN = "tests/data/drogon/ertrun1/realization-0/iter-0/rms"
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def test_instantate_class_no_keys():
"""Test function _get_meta_master."""
# it should be possible to parse without any key options
case = fmu.dataio.ExportData()
for attr, value in case.__dict__.items():
print(attr, value)
assert case._verbosity == "CRITICAL"
assert case._is_prediction is True
def test_get_meta_dollars():
"""The private routine that provides special <names> (earlier with $ in front)."""
case = fmu.dataio.ExportData()
case._config = CFG
logger.info(case._meta_dollars)
assert "$schema" in case._meta_dollars
assert "fmu" in case._meta_dollars["source"]
def test_get_meta_masterdata():
"""The private routine that provides masterdata."""
case = fmu.dataio.ExportData()
case._config = CFG
case._get_meta_masterdata()
assert case._meta_masterdata["smda"]["country"][0]["identifier"] == "Norway"
def test_get_meta_access():
"""The private routine that provides access."""
case = fmu.dataio.ExportData()
case._config = CFG
case._get_meta_access()
assert case._meta_access["someaccess"] == "jail"
def test_get_meta_tracklog():
"""The private routine that provides tracklog."""
# placeholder
def test_process_fmu_model():
"""The (second order) private routine that provides fmu:model"""
case = fmu.dataio.ExportData()
case._config = CFG
fmumodel = case._process_meta_fmu_model()
assert fmumodel["revision"] == "0.99.0"
def test_process_fmu_realisation():
"""The (second order) private routine that provides realization and iteration."""
case = fmu.dataio.ExportData()
case._config = CFG
case._pwd = pathlib.Path(RUN)
c_meta, i_meta, r_meta = case._process_meta_fmu_realization_iteration()
logger.info("========== CASE")
logger.info("%s", json.dumps(c_meta, indent=2, default=str))
logger.info("========== ITER")
logger.info("%s", json.dumps(i_meta, indent=2, default=str))
logger.info("========== REAL")
logger.info("%s", json.dumps(r_meta, indent=2, default=str))
assert r_meta["parameters"]["KVKH_CREVASSE"] == 0.3
assert r_meta["parameters"]["GLOBVAR"]["VOLON_FLOODPLAIN_VOLFRAC"] == 0.256355
assert c_meta["uuid"] == "a40b05e8-e47f-47b1-8fee-f52a5116bd37"
def test_raise_userwarning_missing_content(tmp_path):
"""Example on generating a GridProperty without content spesified."""
gpr = xtgeo.GridProperty(ncol=10, nrow=11, nlay=12)
gpr.name = "testgp"
fmu.dataio.ExportData.export_root = tmp_path.resolve()
fmu.dataio.ExportData.grid_fformat = "roff"
with pytest.warns(UserWarning, match="is not provided which defaults"):
exp = fmu.dataio.ExportData(parent="unset")
exp._pwd = tmp_path
exp.to_file(gpr)
assert (tmp_path / "grids" / ".unset--testgp.roff.yml").is_file() is True
def test_exported_filenames(tmp_path):
"""Test that exported filenames are as expected"""
fmu.dataio.ExportData.export_root = tmp_path.resolve()
surf = xtgeo.RegularSurface(
ncol=20, nrow=30, xinc=20, yinc=20, values=0, name="test"
)
# test case 1, vanilla
exp = fmu.dataio.ExportData(
name="myname",
content="depth",
)
exp._pwd = tmp_path
exp.to_file(surf)
assert (tmp_path / "maps" / "myname.gri").is_file() is True
assert (tmp_path / "maps" / ".myname.gri.yml").is_file() is True
# test case 2, dots in name
exp = fmu.dataio.ExportData(
name="myname.with.dots", content="depth", verbosity="DEBUG"
)
exp._pwd = tmp_path
# for a surface...
exp.to_file(surf)
assert (tmp_path / "maps" / "myname_with_dots.gri").is_file() is True
assert (tmp_path / "maps" / ".myname_with_dots.gri.yml").is_file() is True
# ...for a polygon...
poly = xtgeo.Polygons()
poly.from_list([(1.0, 2.0, 3.0, 0), (1.0, 2.0, 3.0, 0)])
exp.to_file(poly)
assert (tmp_path / "polygons" / "myname_with_dots.csv").is_file() is True
assert (tmp_path / "polygons" / ".myname_with_dots.csv.yml").is_file() is True
# ...and for a table.
table = poly.dataframe
exp.to_file(table)
assert (tmp_path / "tables" / "myname_with_dots.csv").is_file() is True
assert (tmp_path / "tables" / ".myname_with_dots.csv.yml").is_file() is True
# ...for a grid property...
exp = fmu.dataio.ExportData(
name="myname",
content="depth",
parent="unset",
)
exp._pwd = tmp_path
gpr = xtgeo.GridProperty(ncol=10, nrow=11, nlay=12)
gpr.name = "testgp"
exp.to_file(gpr)
assert (tmp_path / "grids" / "unset--myname.roff").is_file() is True
assert (tmp_path / "grids" / ".unset--myname.roff.yml").is_file() is True
def test_file_block(tmp_path):
"""Test the content of the file metadata block"""
# make it look like an ERT run
current = tmp_path / "scratch" / "fields" / "user"
current.mkdir(parents=True, exist_ok=True)
shutil.copytree("tests/data/drogon/ertrun1", current / "mycase")
fmu.dataio.ExportData.export_root = "../../share/results"
fmu.dataio.ExportData.surface_fformat = "irap_binary"
runfolder = current / "mycase" / "realization-0" / "iter-0" / "rms" / "model"
runfolder.mkdir(parents=True, exist_ok=True)
out = current / "mycase" / "realization-0" / "iter-0" / "share" / "results" / "maps"
exp = fmu.dataio.ExportData(
config=CFG,
content="depth",
unit="m",
vertical_domain={"depth": "msl"},
timedata=None,
is_prediction=True,
is_observation=False,
tagname="what Descr",
verbosity="INFO",
runfolder=runfolder.resolve(),
workflow="my current workflow",
)
# make a fake RegularSurface
srf = xtgeo.RegularSurface(
ncol=20,
nrow=30,
xinc=20,
yinc=20,
values=0,
name="TopVolantis",
)
exp.to_file(srf, verbosity="INFO")
metadataout = out / ".topvolantis--what_descr.gri.yml"
assert metadataout.is_file() is True
# now read the metadata file and test some key entries:
with open(metadataout, "r") as stream:
meta = yaml.safe_load(stream)
rel_path = meta["file"]["relative_path"]
assert (
rel_path
== "realization-0/iter-0/share/results/maps/topvolantis--what_descr.gri"
)
abs_path = meta["file"]["absolute_path"]
assert len(abs_path) > len(rel_path)
assert abs_path.endswith(rel_path)
# does not test validity, just that it looks right
size_bytes = meta["file"]["size_bytes"]
assert isinstance(size_bytes, int)
# does not test validity, just that it looks right
checksum_md5 = meta["file"]["checksum_md5"]
assert re.match("^[a-z0-9]{32}", checksum_md5)
|
the-stack_106_16210
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering
"""
import logging
import dask.array as da
import numpy as np
import six
import sklearn.cluster
from dask import delayed
from scipy.linalg import pinv, svd
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_random_state
from ..metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
from ..utils import _format_bytes, _log_array, check_array
from .k_means import KMeans
logger = logging.getLogger(__name__)
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply parallel Spectral Clustering
This implementation avoids the expensive computation of the N x N
affinity matrix. Instead, the Nyström Method is used as an
approximation.
Parameters
----------
n_clusters : integer, optional
The dimension of the projection subspace.
eigen_solver : None
ignored
random_state : int, RandomState instance or None, optional, default: None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
n_init : int, optional, default: 10
ignored
gamma : float, default=1.0
Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
Ignored for ``affinity='nearest_neighbors'``.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
Callables should expect arguments similar to
`sklearn.metrics.pairwise_kernels`: a required ``X``, an optional
``Y``, and ``gamma``, ``degree``, ``coef0``, and any keywords passed
in ``kernel_params``.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : 'kmeans' or Estimator, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. By default creates an instance of
:class:`dask_ml.cluster.KMeans` and sets `n_clusters` to 2. For
further control over the hyperparameters of the final label
assignment, pass an instance of a ``KMeans`` estimator (either
scikit-learn or dask-ml).
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
n_components : int, default 100
Number of rows from ``X`` to use for the Nyström approximation.
Larger ``n_components`` will improve the accuracy of the
approximation, at the cost of a longer training time.
persist_embedding : bool
Whether to persist the intermediate n_samples x n_components
array used for clustering.
kmeans_params : dictionary of string to any, optional
Keyword arguments for the KMeans clustering used for the final
clustering.
Attributes
----------
assign_labels_ : Estimator
The instance of the KMeans estimator used to assign labels
labels_ : dask.array.Array, size (n_samples,)
The cluster labels assigned
eigenvalues_ : numpy.ndarray
The eigenvalues from the SVD of the sampled points
Notes
-----
Using ``persist_embedding=True`` can be an important optimization to
avoid some redundant computations. This persists the array being fed to
the clustering algorithm in (distributed) memory. The array is shape
``n_samples x n_components``.
References
----------
- Parallel Spectral Clustering in Distributed Systems, 2010
Chen, Song, Bai, Lin, and Chang
IEEE Transactions on Pattern Analysis and Machine Intelligence
http://ieeexplore.ieee.org/document/5444877/
- Spectral Grouping Using the Nystrom Method (2004)
Fowlkes, Belongie, Chung, Malik
IEEE Transactions on Pattern Analysis and Machine Intelligence
https://people.cs.umass.edu/~mahadeva/cs791bb/reading/fowlkes-nystrom.pdf
"""
def __init__(
self,
n_clusters=8,
eigen_solver=None,
random_state=None,
n_init=10,
gamma=1.,
affinity="rbf",
n_neighbors=10,
eigen_tol=0.0,
assign_labels="kmeans",
degree=3,
coef0=1,
kernel_params=None,
n_jobs=1,
n_components=100,
persist_embedding=False,
kmeans_params=None,
):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
self.n_components = n_components
self.persist_embedding = persist_embedding
self.kmeans_params = kmeans_params
def _check_array(self, X):
logger.info("Starting check array")
result = check_array(X, accept_dask_dataframe=False).astype(float)
logger.info("Finished check array")
return result
def fit(self, X, y=None):
X = self._check_array(X)
n_components = self.n_components
metric = self.affinity
rng = check_random_state(self.random_state)
n_clusters = self.n_clusters
# kmeans for final clustering
if isinstance(self.assign_labels, six.string_types):
if self.assign_labels == "kmeans":
km = KMeans(
n_clusters=n_clusters, random_state=rng.randint(2 ** 32 - 1)
)
elif self.assign_labels == "sklearn-kmeans":
km = sklearn.cluster.KMeans(n_clusters=n_clusters, random_state=rng)
else:
msg = "Unknown 'assign_labels' {!r}".format(self.assign_labels)
raise ValueError(msg)
elif isinstance(self.assign_labels, BaseEstimator):
km = self.assign_labels
else:
raise TypeError(
"Invalid type {} for 'assign_labels'".format(type(self.assign_labels))
)
if self.kmeans_params:
km.set_params(**self.kmeans_params)
n = len(X)
if n <= n_components:
msg = (
"'n_components' must be smaller than the number of samples."
" Got {} components and {} samples".format(n_components, n)
)
raise ValueError(msg)
params = self.kernel_params or {}
params["gamma"] = self.gamma
params["degree"] = self.degree
params["coef0"] = self.coef0
# indices for our exact / approximate blocks
inds = np.arange(n)
keep = rng.choice(inds, n_components, replace=False)
keep.sort()
rest = ~np.isin(inds, keep)
# compute the exact blocks
# these are done in parallel for dask arrays
if isinstance(X, da.Array):
X_keep = X[keep].rechunk(X.shape).persist()
else:
X_keep = X[keep]
X_rest = X[rest]
A, B = embed(X_keep, X_rest, n_components, metric, params)
_log_array(logger, A, "A")
_log_array(logger, B, "B")
# now the approximation of C
a = A.sum(0) # (l,)
b1 = B.sum(1) # (l,)
b2 = B.sum(0) # (m,)
# TODO: I think we have some unnecessary delayed wrapping of A here.
A_inv = da.from_delayed(delayed(pinv)(A), A.shape, A.dtype)
inner = A_inv.dot(b1)
d1_si = 1 / da.sqrt(a + b1)
d2_si = 1 / da.sqrt(b2 + B.T.dot(inner)) # (m,), dask array
# d1, d2 are diagonal, so we can avoid large matrix multiplies
# Equivalent to diag(d1_si) @ A @ diag(d1_si)
A2 = d1_si.reshape(-1, 1) * A * d1_si.reshape(1, -1) # (n, n)
_log_array(logger, A2, "A2")
# A2 = A2.rechunk(A2.shape)
# Equivalent to diag(d1_si) @ B @ diag(d2_si)
B2 = da.multiply(da.multiply(d1_si.reshape(-1, 1), B), d2_si.reshape(1, -1))
_log_array(logger, B2, "B2")
U_A, S_A, V_A = delayed(svd, pure=True, nout=3)(A2)
U_A = da.from_delayed(U_A, (n_components, n_components), A2.dtype)
S_A = da.from_delayed(S_A, (n_components,), A2.dtype)
V_A = da.from_delayed(V_A, (n_components, n_components), A2.dtype)
# Eq 16. This is OK when V2 is orthogonal
V2 = da.sqrt(float(n_components) / n) * da.vstack([A2, B2.T]).dot(
U_A[:, :n_clusters]
).dot(
da.diag(1.0 / da.sqrt(S_A[:n_clusters]))
) # (n, k)
_log_array(logger, V2, "V2.1")
if isinstance(B2, da.Array):
V2 = V2.rechunk((B2.chunks[1][0], n_clusters))
_log_array(logger, V2, "V2.2")
# normalize (Eq. 4)
U2 = (V2.T / da.sqrt((V2 ** 2).sum(1))).T # (n, k)
_log_array(logger, U2, "U2.2")
# Recover original indices
U2 = _slice_mostly_sorted(U2, keep, rest, inds) # (n, k)
_log_array(logger, U2, "U2.3")
if self.persist_embedding and isinstance(U2, da.Array):
logger.info("Persisting array for k-means")
U2 = U2.persist()
elif isinstance(U2, da.Array):
logger.info(
"Consider persist_embedding. This will require %s",
_format_bytes(U2.nbytes),
)
pass
logger.info("k-means for assign_labels[starting]")
km.fit(U2)
logger.info("k-means for assign_labels[finished]")
# Now... what to keep?
self.assign_labels_ = km
self.labels_ = km.labels_
self.eigenvalues_ = S_A[:n_clusters] # TODO: better name
return self
def embed(X_keep, X_rest, n_components, metric, kernel_params):
if isinstance(metric, six.string_types):
if metric not in PAIRWISE_KERNEL_FUNCTIONS:
msg = "Unknown affinity metric name '{}'. Expected one " "of '{}'".format(
metric, PAIRWISE_KERNEL_FUNCTIONS.keys()
)
raise ValueError(msg)
A = pairwise_kernels(X_keep, metric=metric, filter_params=True, **kernel_params)
B = pairwise_kernels(
X_keep, X_rest, metric=metric, filter_params=True, **kernel_params
)
elif callable(metric):
A = metric(X_keep, **kernel_params)
B = metric(X_keep, X_rest, **kernel_params)
else:
msg = (
"Unexpected type for 'affinity' '{}'. Must be string "
"kernel name, array, or callable"
)
raise TypeError(msg)
if isinstance(A, da.Array):
A = A.rechunk((n_components, n_components))
B = B.rechunk((B.shape[0], B.chunks[1]))
return A, B
def _slice_mostly_sorted(array, keep, rest, ind=None):
"""Slice dask array `array` that is almost entirely sorted already.
We perform approximately `2 * len(keep)` slices on `array`.
This is OK, since `keep` is small. Individually, each of these slices
is entirely sorted.
Parameters
----------
array : dask.array.Array
keep : ndarray[Int]
This must be sorted.
rest : ndarray[Bool]
ind : ndarray[Int], optional
Returns
-------
sliced : dask.array.Array
"""
if ind is None:
ind = np.arange(len(array))
idx = np.argsort(np.concatenate([keep, ind[rest]]))
slices = []
if keep[0] > 0: # avoid creating empty slices
slices.append(slice(None, keep[0]))
slices.append([keep[0]])
windows = zip(keep[:-1], keep[1:])
for l, r in windows:
if r > l + 1: # avoid creating empty slices
slices.append(slice(l + 1, r))
slices.append([r])
if keep[-1] < len(array) - 1: # avoid creating empty slices
slices.append(slice(keep[-1] + 1, None))
result = da.concatenate([array[idx[slice_]] for slice_ in slices])
return result
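

# Hedged usage sketch (illustrative only; the array sizes and parameters below are
# assumptions, not part of this module). Running the module directly fits the
# Nystrom-approximated spectral clustering on a small random dask array.
if __name__ == "__main__":
    X_demo = da.random.random((1000, 4), chunks=200)
    demo = SpectralClustering(n_clusters=3, n_components=100, random_state=0)
    demo.fit(X_demo)
    print(demo.labels_)        # dask array of cluster assignments
    print(demo.eigenvalues_)   # eigenvalues of the sampled affinity block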
|
the-stack_106_16211
|
import pandas as pd
from bokeh.palettes import Spectral4
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=800, plot_height=250, x_axis_type="datetime")
p.title.text = 'Click on legend entries to mute the corresponding lines'
for name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):
df = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014" % name,
parse_dates=['Date']
)
p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,
muted_color=color, muted_alpha=0.2, legend=name)
p.legend.location = "top_left"
p.legend.click_policy="mute"
output_file("interactive_legend.html", title="interactive_legend.py example")
show(p)
|
the-stack_106_16212
|
import numpy as np
def OR(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.2
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
if __name__ == '__main__':
for xs in [(0, 0), (1, 0), (0, 1), (1, 1)]:
y = OR(xs[0], xs[1])
print(str(xs) + " -> " + str(y))
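
# Hedged sketch (illustrative): the same weighted-sum-plus-bias pattern implements the
# other basic gates just by changing the weights and bias, e.g.
#
#   AND:  w = np.array([0.5, 0.5]),   b = -0.7  (fires only when both inputs are 1)
#   NAND: w = np.array([-0.5, -0.5]), b = 0.7   (the negation of AND)
#
# XOR is not linearly separable, so no single unit of this form can express it.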
|
the-stack_106_16213
|
# -*- coding: utf-8 -*-
"""
Logging adapter
---------------
"""
import logging
from flask_login import current_user # NOQA
import enum
log = logging.getLogger(__name__) # pylint: disable=invalid-name
class Logging(object):
"""
This is a helper extension, which adjusts logging configuration for the
application.
"""
# somewhere between Error and Critical to guarantee that it appears in the logs but is not interpreted as a
# real error by the reader
AUDIT = 45
class AuditType(str, enum.Enum):
UserCreate = 'User Create'
SystemCreate = 'System Create'
Delete = 'Delete'
Update = 'Update' # Generic Update
FrontEndFault = 'Front End Fault' # Bad message received on API
BackEndFault = 'Back End Fault' # Faulty message received from ACM/EDM etc
HoustonFault = 'Houston Fault' # Internal Error within Houston
Other = 'Access' # None of the above
def __init__(self, app=None):
if app:
self.init_app(app)
def init_app(self, app):
"""
Common Flask interface to initialize the logging according to the
application configuration.
"""
# We don't need the default Flask's loggers when using our invoke tasks
# since we set up beautiful colorful loggers globally.
for handler in list(app.logger.handlers):
app.logger.removeHandler(handler)
app.logger.propagate = True
if app.debug:
logging.getLogger('flask_oauthlib').setLevel(logging.DEBUG)
app.logger.setLevel(logging.DEBUG)
# We don't need the default SQLAlchemy loggers when using our invoke
# tasks since we set up beautiful colorful loggers globally.
# NOTE: This particular workaround is for the SQLALCHEMY_ECHO mode,
# when all SQL commands get printed (without these lines, they will get
# printed twice).
sqla_logger = logging.getLogger('sqlalchemy.engine.base.Engine')
for hdlr in list(sqla_logger.handlers):
sqla_logger.removeHandler(hdlr)
sqla_logger.addHandler(logging.NullHandler())
logging.addLevelName(self.AUDIT, 'AUDIT')
@classmethod
def _log_message(cls, logger, msg, audit_type, *args, **kwargs):
if audit_type == cls.AuditType.SystemCreate:
# Just leave message as is for system create, no user created this
pass
elif current_user and not current_user.is_anonymous:
msg = f'{msg} executed by {current_user.email}({current_user.guid})'
else:
msg += ' executed by anonymous user'
log_kwargs = kwargs
if 'duration' in kwargs:
msg += f" in {kwargs['duration']} seconds"
log_kwargs.pop('duration')
if logger:
logger.log(cls.AUDIT, msg, *args, **log_kwargs)
else:
log.log(cls.AUDIT, msg, *args, **log_kwargs)
    # the calling file's logger is passed as a parameter so that file and line numbers are correct in the logs
@classmethod
def audit_log(cls, logger, msg, audit_type=AuditType.Other, *args, **kwargs):
assert object
cls._log_message(logger, msg, audit_type, *args, **kwargs)
from app.modules.audit_logs.models import AuditLog
AuditLog.create(msg, audit_type, *args, **kwargs)
@classmethod
def audit_log_object(
cls, logger, obj, msg='', audit_type=AuditType.Other, *args, **kwargs
):
assert obj
assert hasattr(obj, 'guid')
assert isinstance(audit_type, cls.AuditType)
module_name = obj.__class__.__name__
log_msg = f'{module_name} {audit_type} {obj.guid} {msg}'
cls._log_message(logger, log_msg, audit_type, *args, **kwargs)
from app.modules.audit_logs.models import AuditLog
AuditLog.create(msg, audit_type, module_name, obj.guid, *args, **kwargs)
@classmethod
def user_create_object(cls, logger, obj, msg='', *args, **kwargs):
cls.audit_log_object(logger, obj, msg, cls.AuditType.UserCreate, *args, **kwargs)
@classmethod
def system_create_object(cls, logger, obj, msg='', *args, **kwargs):
cls.audit_log_object(
logger, obj, msg, cls.AuditType.SystemCreate, *args, **kwargs
)
@classmethod
def backend_fault(cls, logger, msg='', obj=None, *args, **kwargs):
if obj:
cls.audit_log_object(
logger, obj, msg, cls.AuditType.BackEndFault, *args, **kwargs
)
else:
cls.audit_log(logger, msg, cls.AuditType.BackEndFault, *args, **kwargs)
@classmethod
def frontend_fault(cls, logger, msg='', obj=None, *args, **kwargs):
if obj:
cls.audit_log_object(
logger, obj, msg, cls.AuditType.FrontEndFault, *args, **kwargs
)
else:
cls.audit_log(logger, msg, cls.AuditType.FrontEndFault, *args, **kwargs)
@classmethod
def houston_fault(cls, logger, msg='', obj=None, *args, **kwargs):
if obj:
cls.audit_log_object(
logger, obj, msg, cls.AuditType.HoustonFault, *args, **kwargs
)
else:
cls.audit_log(logger, msg, cls.AuditType.HoustonFault, *args, **kwargs)
@classmethod
def delete_object(cls, logger, obj, msg='', *args, **kwargs):
cls.audit_log_object(logger, obj, msg, cls.AuditType.Delete, *args, **kwargs)
@classmethod
def patch_object(cls, logger, obj, patch_args, *args, **kwargs):
msg = 'Patch:'
for patch in patch_args:
msg += f" {patch['op']} {patch['field_name']}"
if 'value' in patch:
msg += f", {patch['value']} "
else:
msg += ' '
cls.audit_log_object(logger, obj, msg, cls.AuditType.Update, *args, **kwargs)
|
the-stack_106_16214
|
import os
import csv
import torch
import torch.optim as optim
import itertools
import sys
sys.path.append('../')
#import dataPreperation.Fact2_Only_F1_H_exact_tokens as data
import dataPreperation.Original_Fact2 as data
# from dataPreperation.Fact2_Only_F1_H_exact_tokens import dataPreparation
from copynet_seq2seq_dataset_reader import CopyNetDatasetReader
# from obqa_datasetreader import Seq2SeqDatasetReader
from allennlp.data.tokenizers.word_tokenizer import WordTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper, StackedSelfAttentionEncoder
from copynet_seq2seq import CopyNetSeq2Seq
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.attention import LinearAttention, BilinearAttention, DotProductAttention
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer
from allennlp.predictors import SimpleSeq2SeqPredictor
# Size of output
ENC_EMBEDDING_DIM = 50
TGT_EMBEDDING_DIM = 50
HIDDEN_DIM = 50
CUDA_DEVICE = -1
numEpochs = 1
beamSize = 8
def findExtraVocab(data):
allExtraVocab = []
for i in range(len(data)):
srcData = list(data[i]['source_tokens'])
srcData = [str(i) for i in srcData]
tgtData = list(data[i]['target_tokens'])
tgtData = [str(i) for i in tgtData]
print(srcData,tgtData)
input("findExtraVocab")
extra = set(tgtData) - set(srcData)
for j in extra:
allExtraVocab.append(j)
# print(allExtraVocab)
# print (len(allExtraVocab))
# print (len(set(allExtraVocab)))
return allExtraVocab
def main():
trainFile = "../srcData/trainData.csv"
validFile = "../srcData/devData.csv"
testFile = "../srcData/testData.csv"
trainSeq2SeqFile = data.dataPreparation(trainFile)
validSeq2SeqFile = data.dataPreparation(validFile)
testSeq2SeqFile = data.dataPreparation(testFile)
print (testSeq2SeqFile)
    # A TokenIndexer determines how string tokens get represented as arrays of indexes in a model
    # SingleIdTokenIndexer = tokens are single integers
    # TokenCharactersIndexer = tokens as a list of integers
    # Read a TSV file with paired instances (source, target)
reader = CopyNetDatasetReader(
source_tokenizer=WordTokenizer(),
target_tokenizer=WordTokenizer(), # Defaults to source_tokenizer
source_token_indexers={'tokens': SingleIdTokenIndexer()},
        target_namespace='tokens'  # share the vocabulary namespace between source and target
)
    # Each dataset is a list of instances, each with (source_tokens, target_tokens) fields
train_dataset = reader.read(trainSeq2SeqFile)
validation_dataset = reader.read(validSeq2SeqFile)
test_dataset = reader.read(testSeq2SeqFile)
"""
# Finding extra fact2 vocab
trainExtraVocab = findExtraVocab(train_dataset)
validExtraVocab = findExtraVocab(validation_dataset)
testExtraVocab = findExtraVocab(test_dataset)
finalExtraVocab = list(set(trainExtraVocab + validExtraVocab + testExtraVocab))
print("length:", len(finalExtraVocab))
# input()
"""
# vocab = Vocabulary.from_instances(train_dataset + validation_dataset, min_count={'tokens': 3, 'target_tokens': 3})
vocab = Vocabulary.from_instances(train_dataset + validation_dataset + test_dataset)
# Train + Valid = 9703
# Train + Valid + Test = 10099
print ("Vocab SIze :", vocab.get_vocab_size('tokens'))
encEmbedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),
embedding_dim=ENC_EMBEDDING_DIM)
    # Embedding for the 'tokens' namespace, matching the indexer name used when the dataset was created
source_embedder = BasicTextFieldEmbedder({"tokens": encEmbedding})
encoder = PytorchSeq2SeqWrapper(torch.nn.LSTM(ENC_EMBEDDING_DIM, HIDDEN_DIM, batch_first=True, dropout=0.2))
Attention = DotProductAttention()
print (Attention)
max_decoding_steps = 4 # TODO: make this variable
model = CopyNetSeq2Seq(vocab, source_embedder, encoder, max_decoding_steps=max_decoding_steps,
target_embedding_dim=TGT_EMBEDDING_DIM,
# target_namespace = 'target_tokens',
beam_size=beamSize,
attention = Attention)
# Can also specify lr=0.001
optimizer = optim.Adam(model.parameters())
# Data Iterator that specify how to batch our dataset
# Takes data shuffles it and creates fixed sized batches
# iterator = BasicIterator(batch_size=2)
# iterator.index_with(vocab)
    # Pads each batch to its max input length and sorts the dataset by the given field names and padding keys for efficient computation
iterator = BucketIterator(batch_size=50, sorting_keys=[("source_tokens", "num_tokens")])
iterator.index_with(vocab)
trainer = Trainer(model=model,
optimizer=optimizer,
iterator=iterator,
train_dataset=train_dataset,
validation_dataset=validation_dataset,
# patience = 3,
num_epochs=numEpochs,
cuda_device=CUDA_DEVICE)
trainer.train()
"""
predictor = SimpleSeq2SeqPredictor(model, reader)
'''for i in range(2):
print ("Epoch: {}".format(i))
trainer.train()
predictor = SimpleSeq2SeqPredictor(model, reader)
for instance in itertools.islice(validation_dataset, 10):
print('SOURCE:', instance.fields['source_tokens'].tokens)
print('GOLD:', instance.fields['target_tokens'].tokens)
print('PRED:', predictor.predict_instance(instance)['predicted_tokens'])
'{'predictions': [[1, 4, 5, 92, 8, 6, 1, 8, 6, 26, 3]],
'loss': 5.9835076332092285,
'class_log_probabilities': [-20.10894012451172],
'predicted_tokens': ['@@UNKNOWN@@', 'is', 'a', 'type', 'of', 'the', '@@UNKNOWN@@', 'of', 'the', 'sun']}
print (predictor.predict_instance(instance))
'''
outFile = open("output_" + str(HIDDEN_DIM) + "_" + str(numEpochs) + "_" + str(beamSize) + ".csv", "w")
writer = csv.writer(outFile, delimiter="\t")
for instance in itertools.islice(test_dataset, 500):
src = instance.fields['source_tokens'].tokens
gold = instance.fields['target_tokens'].tokens
pred = predictor.predict_instance(instance)['predicted_tokens']
writer.writerow([src, gold, pred])
outFile.close()
"""
if __name__ == "__main__":
main()
|
the-stack_106_16215
|
"""
**********************************************************************
**********************************************************************
** author: ZSAIm
** email: [email protected]
** github: https://github.com/ZSAIm/CaptchaReconition-CNN
**
** programming by python 3.5
**
** 9.9-2018
**********************************************************************
**********************************************************************
"""
import urllib.request
import img_process
import matplotlib.pyplot as plt
from io import BytesIO
from PIL import Image
import numpy as np
import tensorflow as tf
import inference
from constant import *
import os
ZFsoft_CAPTCHA_URL = 'http://jwgldx.gdut.edu.cn/CheckCode.aspx'
MODEL_INDEX = 40000
MODEL_NAME_FORMAT = MODEL_NAME + '-%s'
class CaptchaReconiton():
def __init__(self):
with tf.Graph().as_default() as g:
with tf.device('/cpu:0'):
with tf.name_scope('input'):
self.input = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
x_reshape = tf.reshape(self.input, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='input-reshape')
self.output = inference.inference(x_reshape, self.keep_prob)
self.output_reshape = tf.reshape(self.output, [-1, NUM_CAPTCHA, LABEL_LEN])
# correct_prediction = tf.equal(tf.argmax(self.output_reshape, 2), tf.argmax(y_, 2))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.saver = tf.train.Saver()
# with tf.Session() as sess:
self.sess = tf.Session()
self.saver.restore(sess=self.sess, save_path=os.path.join(MODEL_SAVE_PATH, MODEL_NAME_FORMAT % MODEL_INDEX))
def run(self, img_array):
predict = self.sess.run(self.output_reshape, feed_dict={self.input: img_array, self.keep_prob: 1})
ret = []
for i in predict:
ret.append(argmax_onehots(i))
return ret
def batch_run(self):
pass
def get_image():
res = urllib.request.urlopen(ZFsoft_CAPTCHA_URL)
fp = BytesIO()
fp.write(res.read())
return fp
def argmax_onehots(onehots):
chars = ''
onehots = np.reshape(onehots, [NUM_CAPTCHA, LABEL_LEN])
for i in np.argmax(onehots, axis=1):
chars += CHAR_SET[i]
return chars
def main():
img_fp = get_image()
imgsrc = Image.open(img_fp)
img = imgsrc.convert('L')
imgpx = img.load()
img_process.binary(img, imgpx, 125)
img_process.clear_noise(img, imgpx)
img_array = np.array(img)
img_array_reshape = np.reshape(img_array, [1, IMAGE_HEIGHT * IMAGE_WIDTH])
print(exm.run(img_array_reshape))
plt.imshow(img_array)
exm = CaptchaReconiton()
if __name__ == '__main__':
main()
# import batching
# for i, j in batching.batch_op():
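# Hedged decoding sketch (illustrative; CHAR_SET, NUM_CAPTCHA and LABEL_LEN come from
# constant.py, so their concrete values are an assumption here):
#   fake = np.eye(LABEL_LEN)[np.random.randint(0, LABEL_LEN, NUM_CAPTCHA)]
#   print(argmax_onehots(fake))   # prints NUM_CAPTCHA characters drawn from CHAR_SET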
|
the-stack_106_16216
|
from pygame import *
from check_files import check_files
# initialize fonts and mixer
font.init()
mixer.init()
#score racket1
score1 = 0
#score racket2
score2 = 0
FPS = 60
speed_x = 3
speed_y = 3
game = True
finish = False
back = (200,255,255)
win_width = 600
win_height = 500
required_files = ['images//ball.png','images//racket.png','sound_effects//racket_hit.wav','sound_effects//wall.wav']
#audio
racket_hit = mixer.Sound('sound_effects//racket_hit.wav')
wall = mixer.Sound('sound_effects//wall.wav')
#fonts
font_score = font.Font(None,35)
check_files(required_files)
class GameSprite(sprite.Sprite):
def __init__(self, player_image, player_x, player_y, player_speed, size_x, size_y):
sprite.Sprite.__init__(self)
self.image = transform.scale(image.load(player_image), (size_x, size_y))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
def to_spawn(self,x,y):
window.blit(self.image,(x,y))
self.rect.x = x
self.rect.y = y
class Player(GameSprite):
def update_l(self):
keys = key.get_pressed()
if keys[K_w] and self.rect.y > 5:
self.rect.y -= self.speed
if keys[K_s] and self.rect.y < win_width - 80:
self.rect.y += self.speed
def update_r(self):
keys = key.get_pressed()
if keys[K_UP] and self.rect.y > 5:
self.rect.y -= self.speed
if keys[K_DOWN] and self.rect.y < win_width - 80:
self.rect.y += self.speed
class Bot(GameSprite):
def bot_on(self,target):
if self.rect.y > target.rect.y:
self.rect.y -= self.speed
if self.rect.y < target.rect.y:
self.rect.y += self.speed
# objects
racket1 = Player('images//racket.png', 30, 200, 4, 50, 150)
racket2 = Player('images//racket.png', 520, 200, 4, 50, 150)
ball = GameSprite('images//ball.png', 200, 200, 4, 50, 50)
# initialize the window
display.set_caption('Pong')
window = display.set_mode((win_width,win_height))
window.fill(back)
clock = time.Clock()
while game:
for e in event.get():
if e.type == QUIT:
game = False
if finish != True:
window.fill(back)
ball.rect.x += speed_x
ball.rect.y += speed_y
if ball.rect.y > win_height-50 or ball.rect.y < 0:
speed_y *= -1
wall.play()
if sprite.collide_rect(racket1,ball) or sprite.collide_rect(racket2,ball):
speed_x *= -1
racket_hit.play()
if ball.rect.x < 0:
score2 += 1
ball.to_spawn(200,200)
racket1.to_spawn(30,200)
racket2.to_spawn(520,200)
if ball.rect.x > 550:
score1 += 1
ball.to_spawn(200,200)
racket1.to_spawn(30,200)
racket2.to_spawn(520,200)
score_disp = font_score.render('{}:{}'.format(str(score1),str(score2)),True,(0,0,0))
window.blit(score_disp,(280,0))
racket1.update_l()
racket2.update_r()
ball.reset()
racket1.reset()
racket2.reset()
display.update()
clock.tick(FPS)
|
the-stack_106_16218
|
import os
import shutil
import itertools
import glob
import textwrap
import configparser
from conans import ConanFile, tools, RunEnvironment
from conans.errors import ConanInvalidConfiguration
from conans.model import Generator
class qt(Generator):
@property
def filename(self):
return "qt.conf"
@property
def content(self):
return """[Paths]
Prefix = %s
ArchData = bin/archdatadir
HostData = bin/archdatadir
Data = bin/datadir
Sysconf = bin/sysconfdir
LibraryExecutables = bin/archdatadir/bin
Plugins = bin/archdatadir/plugins
Imports = bin/archdatadir/imports
Qml2Imports = bin/archdatadir/qml
Translations = bin/datadir/translations
Documentation = bin/datadir/doc
Examples = bin/datadir/examples""" % self.conanfile.deps_cpp_info["qt"].rootpath.replace("\\", "/")
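

# Hedged usage sketch (illustrative; the consumer recipe and the Qt reference below are
# assumptions, not part of this file): a downstream conanfile can request this custom
# generator by its class name to get a qt.conf whose Prefix points at the Qt package:
#
#   class ConsumerConan(ConanFile):
#       requires = "qt/<version>@"   # placeholder reference
#       generators = "qt"
#
# `conan install` then writes the [Paths] file produced by `content` above next to the
# consumer's build files.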
class QtConan(ConanFile):
_submodules = ["qtsvg", "qtdeclarative", "qtactiveqt", "qtscript", "qtmultimedia", "qttools", "qtxmlpatterns",
"qttranslations", "qtdoc", "qtlocation", "qtsensors", "qtconnectivity", "qtwayland",
"qt3d", "qtimageformats", "qtgraphicaleffects", "qtquickcontrols", "qtserialbus", "qtserialport", "qtx11extras",
"qtmacextras", "qtwinextras", "qtandroidextras", "qtwebsockets", "qtwebchannel", "qtwebengine", "qtwebview",
"qtquickcontrols2", "qtpurchasing", "qtcharts", "qtdatavis3d", "qtvirtualkeyboard", "qtgamepad", "qtscxml",
"qtspeech", "qtnetworkauth", "qtremoteobjects", "qtwebglplugin", "qtlottie", "qtquicktimeline", "qtquick3d"]
generators = "pkg_config"
name = "qt"
description = "Qt is a cross-platform framework for graphical user interfaces."
topics = ("conan", "qt", "ui")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.qt.io"
license = "LGPL-3.0"
exports = ["patches/*.diff"]
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"commercial": [True, False],
"opengl": ["no", "es2", "desktop", "dynamic"],
"with_vulkan": [True, False],
"openssl": [True, False],
"with_pcre2": [True, False],
"with_glib": [True, False],
# "with_libiconv": [True, False], # QTBUG-84708 Qt tests failure "invalid conversion from const char** to char**"
"with_doubleconversion": [True, False],
"with_freetype": [True, False],
"with_fontconfig": [True, False],
"with_icu": [True, False],
"with_harfbuzz": [True, False],
"with_libjpeg": ["libjpeg", "libjpeg-turbo", False],
"with_libpng": [True, False],
"with_sqlite3": [True, False],
"with_mysql": [True, False],
"with_pq": [True, False],
"with_odbc": [True, False],
"with_libalsa": [True, False],
"with_openal": [True, False],
"with_zstd": [True, False],
"gui": [True, False],
"widgets": [True, False],
"device": "ANY",
"cross_compile": "ANY",
"sysroot": "ANY",
"config": "ANY",
"multiconfiguration": [True, False]
}
options.update({module: [True, False] for module in _submodules})
no_copy_source = True
default_options = {
"shared": False,
"commercial": False,
"opengl": "desktop",
"with_vulkan": False,
"openssl": True,
"with_pcre2": True,
"with_glib": False,
# "with_libiconv": True, # QTBUG-84708
"with_doubleconversion": True,
"with_freetype": True,
"with_fontconfig": True,
"with_icu": True,
"with_harfbuzz": False,
"with_libjpeg": "libjpeg",
"with_libpng": True,
"with_sqlite3": True,
"with_mysql": True,
"with_pq": True,
"with_odbc": True,
"with_libalsa": False,
"with_openal": True,
"with_zstd": True,
"gui": True,
"widgets": True,
"device": None,
"cross_compile": None,
"sysroot": None,
"config": None,
"multiconfiguration": False
}
default_options.update({module: False for module in _submodules})
short_paths = True
def export(self):
self.copy("qtmodules%s.conf" % self.version)
def build_requirements(self):
if tools.os_info.is_windows and self.settings.compiler == "Visual Studio":
self.build_requires("jom/1.1.3")
if self.options.qtwebengine:
self.build_requires("ninja/1.10.2")
# gperf, bison, flex, python >= 2.7.5 & < 3
if self.settings.os != "Windows":
self.build_requires("bison/3.7.1")
self.build_requires("gperf/3.1")
self.build_requires("flex/2.6.4")
            # Check that a valid python2 is available in PATH or the build will fail
# Start by checking if python2 can be found
python_exe = tools.which("python2")
if not python_exe:
# Fall back on regular python
python_exe = tools.which("python")
if not python_exe:
msg = ("Python2 must be available in PATH "
"in order to build Qt WebEngine")
raise ConanInvalidConfiguration(msg)
# In any case, check its actual version for compatibility
from six import StringIO # Python 2 and 3 compatible
mybuf = StringIO()
cmd_v = "\"{}\" --version".format(python_exe)
self.run(cmd_v, output=mybuf)
verstr = mybuf.getvalue().strip().split("Python ")[1]
if verstr.endswith("+"):
verstr = verstr[:-1]
version = tools.Version(verstr)
# >= 2.7.5 & < 3
v_min = "2.7.5"
v_max = "3.0.0"
if (version >= v_min) and (version < v_max):
msg = ("Found valid Python 2 required for QtWebengine:"
" version={}, path={}".format(mybuf.getvalue(), python_exe))
self.output.success(msg)
else:
msg = ("Found Python 2 in path, but with invalid version {}"
" (QtWebEngine requires >= {} & < "
"{})\nIf you have both Python 2 and 3 installed, copy the python 2 executable to"
"python2(.exe)".format(verstr, v_min, v_max))
raise ConanInvalidConfiguration(msg)
def config_options(self):
if self.settings.os != "Linux":
del self.options.with_icu
del self.options.with_fontconfig
del self.options.with_libalsa
if self.settings.compiler == "apple-clang":
if tools.Version(self.settings.compiler.version) < "10.0":
raise ConanInvalidConfiguration("Old versions of apple sdk are not supported by Qt (QTBUG-76777)")
if self.settings.compiler in ["gcc", "clang"]:
if tools.Version(self.settings.compiler.version) < "5.0":
raise ConanInvalidConfiguration("qt 5.15.X does not support GCC or clang before 5.0")
if self.settings.compiler in ["gcc", "clang"] and tools.Version(self.settings.compiler.version) < "5.3":
del self.options.with_mysql
if self.settings.os == "Windows":
self.options.with_mysql = False
self.options.opengl = "dynamic"
def configure(self):
#if self.settings.os != "Linux":
# self.options.with_libiconv = False # QTBUG-84708
if not self.options.gui:
del self.options.opengl
del self.options.with_vulkan
del self.options.with_freetype
del self.options.with_fontconfig
del self.options.with_harfbuzz
del self.options.with_libjpeg
del self.options.with_libpng
if not self.options.qtmultimedia:
del self.options.with_libalsa
del self.options.with_openal
if tools.os_info.is_linux:
if self.options.qtwebengine:
self.options.with_fontconfig = True
if self.options.multiconfiguration:
del self.settings.build_type
config = configparser.ConfigParser()
config.read(os.path.join(self.recipe_folder, "qtmodules%s.conf" % self.version))
submodules_tree = {}
assert config.sections()
for s in config.sections():
section = str(s)
assert section.startswith("submodule ")
assert section.count('"') == 2
modulename = section[section.find('"') + 1: section.rfind('"')]
status = str(config.get(section, "status"))
if status != "obsolete" and status != "ignore":
submodules_tree[modulename] = {"status": status,
"path": str(config.get(section, "path")), "depends": []}
if config.has_option(section, "depends"):
submodules_tree[modulename]["depends"] = [str(i) for i in config.get(section, "depends").split()]
for m in submodules_tree:
assert m in ["qtbase", "qtqa", "qtrepotools"] or m in self._submodules, "module %s is not present in recipe options : (%s)" % (m, ",".join(self._submodules))
for m in self._submodules:
assert m in submodules_tree, "module %s is not present in qtmodules%s.conf : (%s)" % (m, self.version, ",".join(submodules_tree))
def _enablemodule(mod):
if mod != "qtbase":
setattr(self.options, mod, True)
for req in submodules_tree[mod]["depends"]:
_enablemodule(req)
for module in self._submodules:
if self.options.get_safe(module):
_enablemodule(module)
def validate(self):
if self.options.widgets and not self.options.gui:
raise ConanInvalidConfiguration("using option qt:widgets without option qt:gui is not possible. "
"You can either disable qt:widgets or enable qt:gui")
if self.options.qtwebengine:
if not self.options.shared:
raise ConanInvalidConfiguration("Static builds of Qt WebEngine are not supported")
if not (self.options.gui and self.options.qtdeclarative and self.options.qtlocation and self.options.qtwebchannel):
raise ConanInvalidConfiguration("option qt:qtwebengine requires also qt:gui, qt:qtdeclarative, qt:qtlocation and qt:qtwebchannel")
if tools.cross_building(self.settings, skip_x64_x86=True):
raise ConanInvalidConfiguration("Cross compiling Qt WebEngine is not supported")
if self.settings.compiler == "gcc" and tools.Version(self.settings.compiler.version) < "5":
raise ConanInvalidConfiguration("Compiling Qt WebEngine with gcc < 5 is not supported")
if self.settings.os == "Android" and self.options.get_safe("opengl", "no") == "desktop":
raise ConanInvalidConfiguration("OpenGL desktop is not supported on Android. Consider using OpenGL es2")
if self.settings.os != "Windows" and self.options.get_safe("opengl", "no") == "dynamic":
raise ConanInvalidConfiguration("Dynamic OpenGL is supported only on Windows.")
if self.options.get_safe("with_fontconfig", False) and not self.options.get_safe("with_freetype", False):
raise ConanInvalidConfiguration("with_fontconfig cannot be enabled if with_freetype is disabled.")
if not self.options.with_doubleconversion and str(self.settings.compiler.libcxx) != "libc++":
raise ConanInvalidConfiguration("Qt without libc++ needs qt:with_doubleconversion. "
"Either enable qt:with_doubleconversion or switch to libc++")
if "MT" in self.settings.get_safe("compiler.runtime", default="") and self.options.shared:
raise ConanInvalidConfiguration("Qt cannot be built as shared library with static runtime")
if self.settings.compiler == "apple-clang":
if tools.Version(self.settings.compiler.version) < "10.0":
raise ConanInvalidConfiguration("Old versions of apple sdk are not supported by Qt (QTBUG-76777)")
if self.settings.compiler in ["gcc", "clang"]:
if tools.Version(self.settings.compiler.version) < "5.0":
raise ConanInvalidConfiguration("qt 5.15.X does not support GCC or clang before 5.0")
def requirements(self):
self.requires("zlib/1.2.11")
if self.options.openssl:
self.requires("openssl/1.1.1k")
if self.options.with_pcre2:
self.requires("pcre2/10.37")
if self.options.with_glib:
self.requires("glib/2.68.3")
# if self.options.with_libiconv: # QTBUG-84708
# self.requires("libiconv/1.16")# QTBUG-84708
if self.options.with_doubleconversion and not self.options.multiconfiguration:
self.requires("double-conversion/3.1.5")
if self.options.get_safe("with_freetype", False) and not self.options.multiconfiguration:
self.requires("freetype/2.10.4")
if self.options.get_safe("with_fontconfig", False):
self.requires("fontconfig/2.13.93")
if self.options.get_safe("with_icu", False):
self.requires("icu/69.1")
if self.options.get_safe("with_harfbuzz", False) and not self.options.multiconfiguration:
self.requires("harfbuzz/2.8.1")
if self.options.get_safe("with_libjpeg", False) and not self.options.multiconfiguration:
if self.options.with_libjpeg == "libjpeg-turbo":
self.requires("libjpeg-turbo/2.1.0")
else:
self.requires("libjpeg/9d")
if self.options.get_safe("with_libpng", False) and not self.options.multiconfiguration:
self.requires("libpng/1.6.37")
if self.options.with_sqlite3 and not self.options.multiconfiguration:
self.requires("sqlite3/3.35.5")
self.options["sqlite3"].enable_column_metadata = True
if self.options.get_safe("with_mysql", False):
self.requires("libmysqlclient/8.0.25")
if self.options.with_pq:
self.requires("libpq/13.2")
if self.options.with_odbc:
if self.settings.os != "Windows":
self.requires("odbc/2.3.9")
if self.options.get_safe("with_openal", False):
self.requires("openal/1.21.0")
if self.options.get_safe("with_libalsa", False):
self.requires("libalsa/1.2.4")
if self.options.gui and self.settings.os == "Linux":
self.requires("xorg/system")
if not tools.cross_building(self, skip_x64_x86=True):
self.requires("xkbcommon/1.3.0")
if self.options.get_safe("opengl", "no") != "no":
self.requires("opengl/system")
if self.options.with_zstd:
self.requires("zstd/1.5.0")
if self.options.qtwebengine and self.settings.os == "Linux":
self.requires("expat/2.4.1")
self.requires("opus/1.3.1")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
shutil.move("qt-everywhere-src-%s" % self.version, "qt5")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
for f in ["renderer", os.path.join("renderer", "core"), os.path.join("renderer", "platform")]:
tools.replace_in_file(os.path.join(self.source_folder, "qt5", "qtwebengine", "src", "3rdparty", "chromium", "third_party", "blink", f, "BUILD.gn"),
" if (enable_precompiled_headers) {\n if (is_win) {",
" if (enable_precompiled_headers) {\n if (false) {"
)
def _make_program(self):
if self.settings.compiler == "Visual Studio":
return "jom"
elif tools.os_info.is_windows:
return "mingw32-make"
else:
return "make"
def _xplatform(self):
if self.settings.os == "Linux":
if self.settings.compiler == "gcc":
return {"x86": "linux-g++-32",
"armv6": "linux-arm-gnueabi-g++",
"armv7": "linux-arm-gnueabi-g++",
"armv7hf": "linux-arm-gnueabi-g++",
"armv8": "linux-aarch64-gnu-g++"}.get(str(self.settings.arch), "linux-g++")
elif self.settings.compiler == "clang":
if self.settings.arch == "x86":
return "linux-clang-libc++-32" if self.settings.compiler.libcxx == "libc++" else "linux-clang-32"
elif self.settings.arch == "x86_64":
return "linux-clang-libc++" if self.settings.compiler.libcxx == "libc++" else "linux-clang"
elif self.settings.os == "Macos":
return {"clang": "macx-clang",
"apple-clang": "macx-clang",
"gcc": "macx-g++"}.get(str(self.settings.compiler))
elif self.settings.os == "iOS":
if self.settings.compiler == "apple-clang":
return "macx-ios-clang"
elif self.settings.os == "watchOS":
if self.settings.compiler == "apple-clang":
return "macx-watchos-clang"
elif self.settings.os == "tvOS":
if self.settings.compiler == "apple-clang":
return "macx-tvos-clang"
elif self.settings.os == "Android":
if self.settings.compiler == "clang":
return "android-clang"
elif self.settings.os == "Windows":
return {"Visual Studio": "win32-msvc",
"gcc": "win32-g++",
"clang": "win32-clang-g++"}.get(str(self.settings.compiler))
elif self.settings.os == "WindowsStore":
if self.settings.compiler == "Visual Studio":
return {"14": {"armv7": "winrt-arm-msvc2015",
"x86": "winrt-x86-msvc2015",
"x86_64": "winrt-x64-msvc2015"},
"15": {"armv7": "winrt-arm-msvc2017",
"x86": "winrt-x86-msvc2017",
"x86_64": "winrt-x64-msvc2017"},
"16": {"armv7": "winrt-arm-msvc2019",
"x86": "winrt-x86-msvc2019",
"x86_64": "winrt-x64-msvc2019"}
}.get(str(self.settings.compiler.version)).get(str(self.settings.arch))
elif self.settings.os == "FreeBSD":
return {"clang": "freebsd-clang",
"gcc": "freebsd-g++"}.get(str(self.settings.compiler))
elif self.settings.os == "SunOS":
if self.settings.compiler == "sun-cc":
if self.settings.arch == "sparc":
return "solaris-cc-stlport" if self.settings.compiler.libcxx == "libstlport" else "solaris-cc"
elif self.settings.arch == "sparcv9":
return "solaris-cc64-stlport" if self.settings.compiler.libcxx == "libstlport" else "solaris-cc64"
elif self.settings.compiler == "gcc":
return {"sparc": "solaris-g++",
"sparcv9": "solaris-g++-64"}.get(str(self.settings.arch))
elif self.settings.os == "Neutrino" and self.settings.compiler == "qcc":
return {"armv8": "qnx-aarch64le-qcc",
"armv8.3": "qnx-aarch64le-qcc",
"armv7": "qnx-armle-v7-qcc",
"armv7hf": "qnx-armle-v7-qcc",
"armv7s": "qnx-armle-v7-qcc",
"armv7k": "qnx-armle-v7-qcc",
"x86": "qnx-x86-qcc",
"x86_64": "qnx-x86-64-qcc"}.get(str(self.settings.arch))
elif self.settings.os == "Emscripten" and self.settings.arch == "wasm":
return "wasm-emscripten"
return None
def build(self):
args = ["-confirm-license", "-silent", "-nomake examples", "-nomake tests",
"-prefix %s" % self.package_folder]
args.append("-v")
args.append("-archdatadir %s" % os.path.join(self.package_folder, "bin", "archdatadir"))
args.append("-datadir %s" % os.path.join(self.package_folder, "bin", "datadir"))
args.append("-sysconfdir %s" % os.path.join(self.package_folder, "bin", "sysconfdir"))
if self.options.commercial:
args.append("-commercial")
else:
args.append("-opensource")
if not self.options.gui:
args.append("-no-gui")
if not self.options.widgets:
args.append("-no-widgets")
if not self.options.shared:
args.insert(0, "-static")
if self.settings.compiler == "Visual Studio":
if self.settings.compiler.runtime == "MT" or self.settings.compiler.runtime == "MTd":
args.append("-static-runtime")
else:
args.insert(0, "-shared")
if self.options.multiconfiguration:
args.append("-debug-and-release")
elif self.settings.build_type == "Debug":
args.append("-debug")
elif self.settings.build_type == "Release":
args.append("-release")
elif self.settings.build_type == "RelWithDebInfo":
args.append("-release")
args.append("-force-debug-info")
elif self.settings.build_type == "MinSizeRel":
args.append("-release")
args.append("-optimize-size")
for module in self._submodules:
if not self.options.get_safe(module):
args.append("-skip " + module)
args.append("--zlib=system")
# openGL
opengl = self.options.get_safe("opengl", "no")
if opengl == "no":
args += ["-no-opengl"]
elif opengl == "es2":
args += ["-opengl es2"]
elif opengl == "desktop":
args += ["-opengl desktop"]
elif opengl == "dynamic":
args += ["-opengl dynamic"]
if self.options.get_safe("with_vulkan", False):
args.append("-vulkan")
else:
args.append("-no-vulkan")
# openSSL
if not self.options.openssl:
args += ["-no-openssl"]
else:
if self.options["openssl"].shared:
args += ["-openssl-runtime"]
else:
args += ["-openssl-linked"]
# args.append("--iconv=" + ("gnu" if self.options.with_libiconv else "no"))# QTBUG-84708
args.append("--glib=" + ("yes" if self.options.with_glib else "no"))
args.append("--pcre=" + ("system" if self.options.with_pcre2 else "qt"))
args.append("--fontconfig=" + ("yes" if self.options.get_safe("with_fontconfig", False) else "no"))
args.append("--icu=" + ("yes" if self.options.get_safe("with_icu", False) else "no"))
args.append("--sql-mysql=" + ("yes" if self.options.get_safe("with_mysql", False) else "no"))
args.append("--sql-psql=" + ("yes" if self.options.with_pq else "no"))
args.append("--sql-odbc=" + ("yes" if self.options.with_odbc else "no"))
args.append("--zstd=" + ("yes" if self.options.with_zstd else "no"))
if self.options.qtmultimedia:
args.append("--alsa=" + ("yes" if self.options.get_safe("with_libalsa", False) else "no"))
for opt, conf_arg in [
("with_doubleconversion", "doubleconversion"),
("with_freetype", "freetype"),
("with_harfbuzz", "harfbuzz"),
("with_libjpeg", "libjpeg"),
("with_libpng", "libpng"),
("with_sqlite3", "sqlite")]:
if self.options.get_safe(opt, False):
if self.options.multiconfiguration:
args += ["-qt-" + conf_arg]
else:
args += ["-system-" + conf_arg]
else:
args += ["-no-" + conf_arg]
libmap = [("zlib", "ZLIB"),
("openssl", "OPENSSL"),
("pcre2", "PCRE2"),
("glib", "GLIB"),
# ("libiconv", "ICONV"),# QTBUG-84708
("double-conversion", "DOUBLECONVERSION"),
("freetype", "FREETYPE"),
("fontconfig", "FONTCONFIG"),
("icu", "ICU"),
("harfbuzz", "HARFBUZZ"),
("libjpeg", "LIBJPEG"),
("libjpeg-turbo", "LIBJPEG"),
("libpng", "LIBPNG"),
("sqlite3", "SQLITE"),
("libmysqlclient", "MYSQL"),
("libpq", "PSQL"),
("odbc", "ODBC"),
("sdl2", "SDL2"),
("openal", "OPENAL"),
("zstd", "ZSTD"),
("libalsa", "ALSA"),
("xkbcommon", "XKBCOMMON")]
for package, var in libmap:
if package in self.deps_cpp_info.deps:
if package == "freetype":
args.append("\"%s_INCDIR=%s\"" % (var, self.deps_cpp_info[package].include_paths[-1]))
args.append("\"%s_LIBS=%s\"" % (var, " ".join(self._gather_libs(package))))
for package in self.deps_cpp_info.deps:
args += ["-I \"%s\"" % s for s in self.deps_cpp_info[package].include_paths]
args += ["-D %s" % s for s in self.deps_cpp_info[package].defines]
lib_arg = "/LIBPATH:" if self.settings.compiler == "Visual Studio" else "-L"
args.append("QMAKE_LFLAGS+=\"%s\"" % " ".join("%s%s" % (lib_arg, l) for package in self.deps_cpp_info.deps for l in self.deps_cpp_info[package].lib_paths))
if "libmysqlclient" in self.deps_cpp_info.deps:
args.append("-mysql_config \"%s\"" % os.path.join(self.deps_cpp_info["libmysqlclient"].rootpath, "bin", "mysql_config"))
if "libpq" in self.deps_cpp_info.deps:
args.append("-psql_config \"%s\"" % os.path.join(self.deps_cpp_info["libpq"].rootpath, "bin", "pg_config"))
if self.settings.os == "Macos":
args += ["-no-framework"]
elif self.settings.os == "Android":
args += ["-android-ndk-platform android-%s" % self.settings.os.api_level]
args += ["-android-abis %s" % {"armv7": "armeabi-v7a",
"armv8": "arm64-v8a",
"x86": "x86",
"x86_64": "x86_64"}.get(str(self.settings.arch))]
if self.settings.get_safe("compiler.libcxx") == "libstdc++":
args += ["-D_GLIBCXX_USE_CXX11_ABI=0"]
elif self.settings.get_safe("compiler.libcxx") == "libstdc++11":
args += ["-D_GLIBCXX_USE_CXX11_ABI=1"]
if self.options.sysroot:
args += ["-sysroot %s" % self.options.sysroot]
if self.options.device:
args += ["-device %s" % self.options.device]
else:
xplatform_val = self._xplatform()
if xplatform_val:
if not tools.cross_building(self.settings, skip_x64_x86=True):
args += ["-platform %s" % xplatform_val]
else:
args += ["-xplatform %s" % xplatform_val]
else:
self.output.warn("host not supported: %s %s %s %s" %
(self.settings.os, self.settings.compiler,
self.settings.compiler.version, self.settings.arch))
if self.options.cross_compile:
args += ["-device-option CROSS_COMPILE=%s" % self.options.cross_compile]
def _getenvpath(var):
val = os.getenv(var)
if val and tools.os_info.is_windows:
val = val.replace("\\", "/")
os.environ[var] = val
return val
value = _getenvpath("CC")
if value:
args += ['QMAKE_CC="' + value + '"',
'QMAKE_LINK_C="' + value + '"',
'QMAKE_LINK_C_SHLIB="' + value + '"']
value = _getenvpath('CXX')
if value:
args += ['QMAKE_CXX="' + value + '"',
'QMAKE_LINK="' + value + '"',
'QMAKE_LINK_SHLIB="' + value + '"']
if tools.os_info.is_linux and self.settings.compiler == "clang":
args += ['QMAKE_CXXFLAGS+="-ftemplate-depth=1024"']
if self.options.qtwebengine and self.settings.os == "Linux":
args += ["-qt-webengine-ffmpeg",
"-system-webengine-opus"]
if self.options.config:
args.append(str(self.options.config))
os.mkdir("build_folder")
with tools.chdir("build_folder"):
with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op():
build_env = {"MAKEFLAGS": "j%d" % tools.cpu_count(), "PKG_CONFIG_PATH": [self.build_folder]}
if self.settings.os == "Windows":
build_env["PATH"] = [os.path.join(self.source_folder, "qt5", "gnuwin32", "bin")]
with tools.environment_append(build_env):
if tools.os_info.is_macos:
open(".qmake.stash" , "w").close()
open(".qmake.super" , "w").close()
self.run("%s/qt5/configure %s" % (self.source_folder, " ".join(args)), run_environment=True)
if tools.os_info.is_macos:
with open("bash_env", "w") as f:
f.write('export DYLD_LIBRARY_PATH="%s"' % ":".join(RunEnvironment(self).vars["DYLD_LIBRARY_PATH"]))
with tools.environment_append({
"BASH_ENV": os.path.abspath("bash_env")
}) if tools.os_info.is_macos else tools.no_op():
self.run(self._make_program(), run_environment=True)
@property
def _cmake_executables_file(self):
return os.path.join("lib", "cmake", "Qt5Core", "conan_qt_executables_variables.cmake")
def package(self):
with tools.chdir("build_folder"):
self.run("%s install" % self._make_program())
with open(os.path.join(self.package_folder, "bin", "qt.conf"), "w") as f:
f.write("""[Paths]
Prefix = ..
ArchData = bin/archdatadir
HostData = bin/archdatadir
Data = bin/datadir
Sysconf = bin/sysconfdir
LibraryExecutables = bin/archdatadir/bin
Plugins = bin/archdatadir/plugins
Imports = bin/archdatadir/imports
Qml2Imports = bin/archdatadir/qml
Translations = bin/datadir/translations
Documentation = bin/datadir/doc
Examples = bin/datadir/examples""")
self.copy("*LICENSE*", src="qt5/", dst="licenses")
for module in self._submodules:
if not self.options.get_safe(module):
tools.rmdir(os.path.join(self.package_folder, "licenses", module))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
for mask in ["Find*.cmake", "*Config.cmake", "*-config.cmake"]:
tools.remove_files_by_mask(self.package_folder, mask)
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.la*")
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.pdb*")
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*.pdb")
# "Qt5Bootstrap" is internal Qt library - removing it to avoid linking error, since it contains
# symbols that are also in "Qt5Core.lib". It looks like there is no "Qt5Bootstrap.dll".
for fl in glob.glob(os.path.join(self.package_folder, "lib", "*Qt5Bootstrap*")):
os.remove(fl)
for m in os.listdir(os.path.join(self.package_folder, "lib", "cmake")):
module = os.path.join(self.package_folder, "lib", "cmake", m, "%sMacros.cmake" % m)
if not os.path.isfile(module):
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake", m))
extension = ""
if self.settings.os == "Windows":
extension = ".exe"
v = tools.Version(self.version)
filecontents = textwrap.dedent("""\
set(QT_CMAKE_EXPORT_NAMESPACE Qt5)
set(QT_VERSION_MAJOR {major})
set(QT_VERSION_MINOR {minor})
set(QT_VERSION_PATCH {patch})
""".format(major=v.major, minor=v.minor, patch=v.patch))
targets = {}
targets["Core"] = ["moc", "rcc", "qmake"]
targets["DBus"] = ["qdbuscpp2xml", "qdbusxml2cpp"]
if self.options.widgets:
targets["Widgets"] = ["uic"]
if self.options.qttools:
targets["Tools"] = ["qhelpgenerator", "qcollectiongenerator", "qdoc", "qtattributionsscanner"]
targets[""] = ["lconvert", "lrelease", "lupdate"]
if self.options.qtremoteobjects:
targets["RemoteObjects"] = ["repc"]
if self.options.qtscxml:
targets["Scxml"] = ["qscxmlc"]
for namespace, targets in targets.items():
for target in targets:
filecontents += textwrap.dedent("""\
if(NOT TARGET ${{QT_CMAKE_EXPORT_NAMESPACE}}::{target})
add_executable(${{QT_CMAKE_EXPORT_NAMESPACE}}::{target} IMPORTED)
set_target_properties(${{QT_CMAKE_EXPORT_NAMESPACE}}::{target} PROPERTIES IMPORTED_LOCATION ${{CMAKE_CURRENT_LIST_DIR}}/../../../bin/{target}{ext})
set(Qt5{namespace}_{uppercase_target}_EXECUTABLE ${{QT_CMAKE_EXPORT_NAMESPACE}}::{target})
endif()
""".format(target=target, ext=extension, namespace=namespace, uppercase_target=target.upper()))
tools.save(os.path.join(self.package_folder, self._cmake_executables_file), filecontents)
def package_id(self):
del self.info.options.cross_compile
del self.info.options.sysroot
if self.options.multiconfiguration and self.settings.compiler == "Visual Studio":
if "MD" in self.settings.compiler.runtime:
self.info.settings.compiler.runtime = "MD/MDd"
else:
self.info.settings.compiler.runtime = "MT/MTd"
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "Qt5"
self.cpp_info.names["cmake_find_package_multi"] = "Qt5"
libsuffix = ""
if self.settings.build_type == "Debug":
if self.settings.os == "Windows":
libsuffix = "d"
elif tools.is_apple_os(self.settings.os):
libsuffix = "_debug"
def _get_corrected_reqs(requires):
reqs = []
for r in requires:
reqs.append(r if "::" in r else "qt%s" % r)
return reqs
def _create_module(module, requires=[]):
componentname = "qt%s" % module
assert componentname not in self.cpp_info.components, "Module %s already present in self.cpp_info.components" % module
self.cpp_info.components[componentname].names["cmake_find_package"] = module
self.cpp_info.components[componentname].names["cmake_find_package_multi"] = module
self.cpp_info.components[componentname].libs = ["Qt5%s%s" % (module, libsuffix)]
self.cpp_info.components[componentname].includedirs = ["include", os.path.join("include", "Qt%s" % module)]
self.cpp_info.components[componentname].defines = ["QT_%s_LIB" % module.upper()]
if module != "Core" and "Core" not in requires:
requires.append("Core")
self.cpp_info.components[componentname].requires = _get_corrected_reqs(requires)
def _create_plugin(pluginname, libname, type, requires):
componentname = "qt%s" % pluginname
assert componentname not in self.cpp_info.components, "Plugin %s already present in self.cpp_info.components" % pluginname
self.cpp_info.components[componentname].names["cmake_find_package"] = pluginname
self.cpp_info.components[componentname].names["cmake_find_package_multi"] = pluginname
if not self.options.shared:
self.cpp_info.components[componentname].libs = [libname + libsuffix]
self.cpp_info.components[componentname].libdirs = [os.path.join("bin", "archdatadir", "plugins", type)]
self.cpp_info.components[componentname].includedirs = []
if "Core" not in requires:
requires.append("Core")
self.cpp_info.components[componentname].requires = _get_corrected_reqs(requires)
core_reqs = ["zlib::zlib"]
if self.options.with_pcre2:
core_reqs.append("pcre2::pcre2")
if self.options.with_doubleconversion:
core_reqs.append("double-conversion::double-conversion")
if self.options.get_safe("with_icu", False):
core_reqs.append("icu::icu")
if self.options.with_zstd:
core_reqs.append("zstd::zstd")
if self.options.with_glib:
core_reqs.append("glib::glib-2.0")
_create_module("Core", core_reqs)
if self.options.gui:
gui_reqs = ["DBus"]
if self.options.with_freetype:
gui_reqs.append("freetype::freetype")
if self.options.with_libpng:
gui_reqs.append("libpng::libpng")
if self.options.get_safe("with_fontconfig", False):
gui_reqs.append("fontconfig::fontconfig")
if self.settings.os in ["Linux", "FreeBSD"]:
gui_reqs.append("xorg::xorg")
if not tools.cross_building(self, skip_x64_x86=True):
gui_reqs.append("xkbcommon::xkbcommon")
if self.options.get_safe("opengl", "no") != "no":
gui_reqs.append("opengl::opengl")
if self.options.with_harfbuzz:
gui_reqs.append("harfbuzz::harfbuzz")
if self.options.with_libjpeg == "libjpeg-turbo":
gui_reqs.append("libjpeg-turbo::libjpeg-turbo")
if self.options.with_libjpeg == "libjpeg":
gui_reqs.append("libjpeg::libjpeg")
_create_module("Gui", gui_reqs)
if self.options.with_sqlite3:
_create_plugin("QSQLiteDriverPlugin", "qsqlite", "sqldrivers", ["sqlite3::sqlite3"])
if self.options.with_pq:
_create_plugin("QPSQLDriverPlugin", "qsqlpsql", "sqldrivers", ["libpq::libpq"])
if self.options.get_safe("with_mysql", False):
_create_plugin("QMySQLDriverPlugin", "qsqlmysql", "sqldrivers", ["libmysqlclient::libmysqlclient"])
if self.options.with_odbc:
if self.settings.os != "Windows":
_create_plugin("QODBCDriverPlugin", "qsqlodbc", "sqldrivers", ["odbc::odbc"])
networkReqs = []
if self.options.openssl:
networkReqs.append("openssl::openssl")
_create_module("Network", networkReqs)
_create_module("Sql")
_create_module("Test")
if self.options.widgets:
_create_module("Widgets", ["Gui"])
if self.options.gui and self.options.widgets:
_create_module("PrintSupport", ["Gui", "Widgets"])
if self.options.get_safe("opengl", "no") != "no" and self.options.gui:
_create_module("OpenGL", ["Gui"])
if self.options.widgets and self.options.get_safe("opengl", "no") != "no":
_create_module("OpenGLExtensions", ["Gui"])
_create_module("DBus")
_create_module("Concurrent")
_create_module("Xml")
if self.options.qtdeclarative:
_create_module("Qml", ["Network"])
_create_module("QmlModels", ["Qml"])
self.cpp_info.components["qtQmlImportScanner"].names["cmake_find_package"] = "QmlImportScanner" # this is an alias for Qml and there to integrate with existing consumers
self.cpp_info.components["qtQmlImportScanner"].names["cmake_find_package_multi"] = "QmlImportScanner"
self.cpp_info.components["qtQmlImportScanner"].requires = _get_corrected_reqs(["Qml"])
if self.options.gui:
_create_module("Quick", ["Gui", "Qml", "QmlModels"])
if self.options.widgets:
_create_module("QuickWidgets", ["Gui", "Qml", "Quick", "Widgets"])
_create_module("QuickShapes", ["Gui", "Qml", "Quick"])
_create_module("QmlWorkerScript", ["Qml"])
_create_module("QuickTest", ["Test"])
if self.options.qttools and self.options.gui and self.options.widgets:
_create_module("UiPlugin", ["Gui", "Widgets"])
self.cpp_info.components["qtUiPlugin"].libs = [] # this is a collection of abstract classes, so this is header-only
self.cpp_info.components["qtUiPlugin"].libdirs = []
_create_module("UiTools", ["UiPlugin", "Gui", "Widgets"])
_create_module("Designer", ["Gui", "UiPlugin", "Widgets", "Xml"])
_create_module("Help", ["Gui", "Sql", "Widgets"])
if self.options.qtquick3d and self.options.gui:
_create_module("Quick3DUtils", ["Gui"])
_create_module("Quick3DAssetImport", ["Gui", "Qml", "Quick3DUtils"])
_create_module("Quick3DRuntimeRender", ["Gui", "Quick", "Quick3DAssetImport", "Quick3DUtils", "ShaderTools"])
_create_module("Quick3D", ["Gui", "Qml", "Quick", "Quick3DRuntimeRender"])
if self.options.qtquickcontrols2 and self.options.gui:
_create_module("QuickControls2", ["Gui", "Quick"])
_create_module("QuickTemplates2", ["Gui", "Quick"])
if self.options.qtsvg and self.options.gui:
_create_module("Svg", ["Gui"])
if self.options.widgets:
_create_module("SvgWidgets", ["Gui", "Svg", "Widgets"])
if self.options.qtwayland and self.options.gui:
_create_module("WaylandClient", ["Gui", "wayland::wayland-client"])
_create_module("WaylandCompositor", ["Gui", "wayland::wayland-server"])
if self.options.qtlocation:
_create_module("Positioning")
_create_module("Location", ["Gui", "Quick"])
_create_plugin("QGeoServiceProviderFactoryMapbox", "qtgeoservices_mapbox", "geoservices", [])
_create_plugin("QGeoServiceProviderFactoryMapboxGL", "qtgeoservices_mapboxgl", "geoservices", [])
_create_plugin("GeoServiceProviderFactoryEsri", "qtgeoservices_esri", "geoservices", [])
_create_plugin("QGeoServiceProviderFactoryItemsOverlay", "qtgeoservices_itemsoverlay", "geoservices", [])
_create_plugin("QGeoServiceProviderFactoryNokia", "qtgeoservices_nokia", "geoservices", [])
_create_plugin("QGeoServiceProviderFactoryOsm", "qtgeoservices_osm", "geoservices", [])
_create_plugin("QGeoPositionInfoSourceFactoryGeoclue", "qtposition_geoclue", "position", [])
_create_plugin("QGeoPositionInfoSourceFactoryGeoclue2", "qtposition_geoclue2", "position", [])
_create_plugin("QGeoPositionInfoSourceFactoryPoll", "qtposition_positionpoll", "position", [])
_create_plugin("QGeoPositionInfoSourceFactorySerialNmea", "qtposition_serialnmea", "position", [])
if self.options.qtwebchannel:
_create_module("WebChannel", ["Qml"])
if self.options.qtwebengine:
_create_module("WebEngineCore", ["Gui", "Quick", "WebChannel", "Positioning", "expat::expat", "opus::libopus"])
_create_module("WebEngine", ["WebEngineCore"])
_create_module("WebEngineWidgets", ["WebEngineCore", "Quick", "PrintSupport", "Widgets", "Gui", "Network"])
if self.options.qtserialport:
_create_module("SerialPort")
if self.options.qtserialbus:
_create_module("SerialBus", ["SerialPort"])
_create_plugin("PassThruCanBusPlugin", "qtpassthrucanbus", "canbus", [])
_create_plugin("PeakCanBusPlugin", "qtpeakcanbus", "canbus", [])
_create_plugin("SocketCanBusPlugin", "qtsocketcanbus", "canbus", [])
_create_plugin("TinyCanBusPlugin", "qttinycanbus", "canbus", [])
_create_plugin("VirtualCanBusPlugin", "qtvirtualcanbus", "canbus", [])
if self.options.qtsensors:
_create_module("Sensors")
_create_plugin("genericSensorPlugin", "qtsensors_generic", "sensors", [])
_create_plugin("IIOSensorProxySensorPlugin", "qtsensors_iio-sensor-proxy", "sensors", [])
if self.settings.os == "Linux":
_create_plugin("LinuxSensorPlugin", "qtsensors_linuxsys", "sensors", [])
_create_plugin("QtSensorGesturePlugin", "qtsensorgestures_plugin", "sensorgestures", [])
_create_plugin("QShakeSensorGesturePlugin", "qtsensorgestures_shakeplugin", "sensorgestures", [])
if self.options.qtscxml:
_create_module("Scxml", ["Qml"])
if self.options.qtpurchasing:
_create_module("Purchasing")
if self.options.qtcharts:
_create_module("Charts", ["Gui", "Widgets"])
if self.options.qt3d:
_create_module("3DCore", ["Gui", "Network"])
_create_module("3DRender", ["3DCore"])
_create_plugin("DefaultGeometryLoaderPlugin", "defaultgeometryloader", "geometryloaders", [])
_create_plugin("GLTFGeometryLoaderPlugin", "gltfgeometryloader", "geometryloaders", [])
_create_plugin("GLTFSceneExportPlugin", "gltfsceneexport", "sceneparsers", [])
_create_plugin("GLTFSceneImportPlugin", "gltfsceneimport", "sceneparsers", [])
_create_plugin("OpenGLRendererPlugin", "openglrenderer", "renderers", [])
_create_plugin("Scene2DPlugin", "scene2d", "renderplugins", [])
_create_module("3DAnimation", ["3DRender", "3DCore", "Gui"])
_create_module("3DInput", ["3DCore", "GamePad", "Gui"])
_create_module("3DLogic", ["3DCore", "Gui"])
_create_module("3DExtras", ["3DRender", "3DInput", "3DLogic", "3DCore", "Gui"])
_create_module("3DQuick", ["3DCore", "Quick", "Gui", "Qml"])
_create_module("3DQuickAnimation", ["3DAnimation", "3DRender", "3DQuick", "3DCore", "Gui", "Qml"])
_create_module("3DQuickExtras", ["3DExtras", "3DInput", "3DQuick", "3DRender", "3DLogic", "3DCore", "Gui", "Qml"])
_create_module("3DQuickInput", ["3DInput", "3DQuick", "3DCore", "Gui", "Qml"])
_create_module("3DQuickRender", ["3DRender", "3DQuick", "3DCore", "Gui", "Qml"])
_create_module("3DQuickScene2D", ["3DRender", "3DQuick", "3DCore", "Gui", "Qml"])
if self.options.qtgamepad:
_create_module("Gamepad", ["Gui"])
if self.settings.os == "Linux":
_create_plugin("QEvdevGamepadBackendPlugin", "evdevgamepad", "gamepads", [])
if self.settings.os == "Macos":
_create_plugin("QDarwinGamepadBackendPlugin", "darwingamepad", "gamepads", [])
if self.settings.os =="Windows":
_create_plugin("QXInputGamepadBackendPlugin", "xinputgamepad", "gamepads", [])
if self.options.qtmultimedia:
multimedia_reqs = ["Network", "Gui"]
if self.options.with_libalsa:
multimedia_reqs.append("libalsa::libalsa")
if self.options.with_openal:
multimedia_reqs.append("openal::openal")
_create_module("Multimedia", multimedia_reqs)
_create_module("MultimediaWidgets", ["Multimedia", "Widgets", "Gui"])
if self.options.qtdeclarative and self.options.gui:
_create_module("MultimediaQuick", ["Multimedia", "Quick"])
_create_plugin("QM3uPlaylistPlugin", "qtmultimedia_m3u", "playlistformats", [])
if self.settings.os == "Linux":
_create_module("MultimediaGstTools", ["Multimedia", "MultimediaWidgets", "Gui"])
_create_plugin("CameraBinServicePlugin", "gstcamerabin", "mediaservice", [])
_create_plugin("QAlsaPlugin", "qtaudio_alsa", "audio", [])
_create_plugin("QGstreamerAudioDecoderServicePlugin", "gstaudiodecoder", "mediaservice", [])
_create_plugin("QGstreamerCaptureServicePlugin", "gstmediacapture", "mediaservice", [])
_create_plugin("QGstreamerPlayerServicePlugin", "gstmediaplayer", "mediaservice", [])
if self.settings.os == "Windows":
_create_plugin("AudioCaptureServicePlugin", "qtmedia_audioengine", "mediaservice", [])
_create_plugin("DSServicePlugin", "dsengine", "mediaservice", [])
_create_plugin("QWindowsAudioPlugin", "qtaudio_windows", "audio", [])
if self.settings.os == "Macos":
_create_plugin("AudioCaptureServicePlugin", "qtmedia_audioengine", "mediaservice", [])
_create_plugin("AVFMediaPlayerServicePlugin", "qavfmediaplayer", "mediaservice", [])
_create_plugin("AVFServicePlugin", "qavfcamera", "mediaservice", [])
_create_plugin("CoreAudioPlugin", "qtaudio_coreaudio", "audio", [])
if self.options.qtwebsockets:
_create_module("WebSockets", ["Network"])
if self.options.qtconnectivity:
_create_module("Bluetooth", ["Network"])
_create_module("Nfc", [])
if self.options.qtdatavis3d:
_create_module("DataVisualization", ["Gui"])
if self.options.qtnetworkauth:
_create_module("NetworkAuth", ["Network"])
if not self.options.shared:
if self.settings.os == "Windows":
self.cpp_info.components["qtCore"].system_libs.append("version") # qtcore requires "GetFileVersionInfoW" and "VerQueryValueW" which are in "Version.lib" library
self.cpp_info.components["qtCore"].system_libs.append("winmm") # qtcore requires "__imp_timeSetEvent" which is in "Winmm.lib" library
self.cpp_info.components["qtCore"].system_libs.append("netapi32") # qtcore requires "NetApiBufferFree" which is in "Netapi32.lib" library
self.cpp_info.components["qtCore"].system_libs.append("userenv") # qtcore requires "__imp_GetUserProfileDirectoryW " which is in "UserEnv.Lib" library
self.cpp_info.components["qtCore"].system_libs.append("ws2_32") # qtcore requires "WSAStartup " which is in "Ws2_32.Lib" library
self.cpp_info.components["qtNetwork"].system_libs.append("DnsApi") # qtnetwork from qtbase requires "DnsFree" which is in "Dnsapi.lib" library
if self.settings.os == "Macos":
self.cpp_info.components["qtCore"].frameworks.append("IOKit") # qtcore requires "_IORegistryEntryCreateCFProperty", "_IOServiceGetMatchingService" and much more which are in "IOKit" framework
self.cpp_info.components["qtCore"].frameworks.append("Cocoa") # qtcore requires "_OBJC_CLASS_$_NSApplication" and more, which are in "Cocoa" framework
self.cpp_info.components["qtCore"].frameworks.append("Security") # qtcore requires "_SecRequirementCreateWithString" and more, which are in "Security" framework
self.cpp_info.components["qtCore"].builddirs.append(os.path.join("bin","archdatadir","bin"))
self.cpp_info.components["qtCore"].build_modules["cmake_find_package"].append(self._cmake_executables_file)
self.cpp_info.components["qtCore"].build_modules["cmake_find_package_multi"].append(self._cmake_executables_file)
for m in os.listdir(os.path.join("lib", "cmake")):
module = os.path.join("lib", "cmake", m, "%sMacros.cmake" % m)
component_name = m.replace("Qt5", "qt")
self.cpp_info.components[component_name].build_modules["cmake_find_package"].append(module)
self.cpp_info.components[component_name].build_modules["cmake_find_package_multi"].append(module)
self.cpp_info.components[component_name].builddirs.append(os.path.join("lib", "cmake", m))
objects_dirs = glob.glob(os.path.join(self.package_folder, "lib", "objects-*/"))
for object_dir in objects_dirs:
for m in os.listdir(object_dir):
submodules_dir = os.path.join(object_dir, m)
component = "qt" + m[:m.find("_")]
for sub_dir in os.listdir(submodules_dir):
submodule_dir = os.path.join(submodules_dir, sub_dir)
obj_files = [os.path.join(submodule_dir, file) for file in os.listdir(submodule_dir)]
self.cpp_info.components[component].exelinkflags.extend(obj_files)
self.cpp_info.components[component].sharedlinkflags.extend(obj_files)
@staticmethod
def _remove_duplicate(l):
seen = set()
seen_add = seen.add
for element in itertools.filterfalse(seen.__contains__, l):
seen_add(element)
yield element
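    # Illustrative example (not part of the original recipe): _remove_duplicate
    # drops repeated link flags while keeping first-occurrence order, e.g.
    #   list(self._remove_duplicate(["-lz", "-lssl", "-lz"])) == ["-lz", "-lssl"]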
def _gather_libs(self, p):
        if p not in self.deps_cpp_info.deps:
return []
libs = ["-l" + i for i in self.deps_cpp_info[p].libs + self.deps_cpp_info[p].system_libs]
if tools.is_apple_os(self.settings.os):
libs += ["-framework " + i for i in self.deps_cpp_info[p].frameworks]
libs += self.deps_cpp_info[p].sharedlinkflags
for dep in self.deps_cpp_info[p].public_deps:
libs += self._gather_libs(dep)
return self._remove_duplicate(libs)
|
the-stack_106_16219
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
from matplotlib import colors
from bpnet.plot.utils import MidpointNormalize
class QuantileTruncateNormalizer:
def __init__(self, pmin=50, pmax=99):
self.pmin = pmin
self.pmax = pmax
def __call__(self, signal):
norm_signal = np.minimum(signal, np.percentile(signal, self.pmax))
norm_signal = np.maximum(norm_signal, np.percentile(signal, self.pmin))
return norm_signal
class RowQuantileNormalizer:
def __init__(self, pmin=50, pmax=99):
"""Row-normalize the profile matrix
Args:
pmin: minimum percentile
pmax: maximum percentile
"""
self.pmin = pmin
self.pmax = pmax
def __call__(self, signal):
s = signal.copy()
p50 = np.percentile(s, self.pmin, axis=1)
p99 = np.percentile(s, self.pmax, axis=1)
# mask all values < p50
s[s < p50[:, np.newaxis]] = np.nan
snorms = np.minimum(s / p99[:, np.newaxis], 1)
return snorms
def normalize(p, pmin=50, pmax=99):
"""Back-compatibility
"""
return RowQuantileNormalizer(pmin, pmax)(p)
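# Minimal usage sketch (illustrative only; the shapes and values below are
# assumptions, not taken from bpnet data): row-normalize a synthetic
# (n_seqlets, seq_len, 2) stranded profile before plotting it.
def _example_row_quantile_normalization():
    rng = np.random.default_rng(0)
    signal = rng.poisson(lam=2.0, size=(16, 120, 2)).astype(float)
    # values below each row's 50th percentile become NaN, the rest are scaled to <= 1
    return RowQuantileNormalizer(pmin=50, pmax=99)(signal)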
def heatmap_stranded_profile(signal, ax=None, figsize=(5, 20),
aspect=0.2, normalizer=RowQuantileNormalizer(),
interpolation='nearest', tick_step=25):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
norm_signal = normalizer(signal)
ax.imshow(norm_signal[:, :, 0], cmap=plt.cm.Reds, interpolation=interpolation, aspect=aspect)
ax.imshow(norm_signal[:, :, 1], alpha=0.5, cmap=plt.cm.Blues, interpolation=interpolation, aspect=aspect)
seq_len = signal.shape[1]
ticks = np.arange(0, seq_len + 1 - tick_step, tick_step)
ax.set_xticks(ticks)
ax.set_xticklabels(ticks - seq_len // 2)
ax.set_ylabel("Seqlet index")
ax.set_xlabel("Position")
return fig
def multiple_heatmap_stranded_profile(signal_dict, figsize=(20, 20), sort_idx=None, **kwargs):
"""Plot a dictionary of profiles
"""
tasks = list(signal_dict.keys())
fig, axes = plt.subplots(1, len(tasks), figsize=figsize)
# pre-sort
if sort_idx is None:
total_counts = sum([x.sum(axis=-1).sum(axis=-1) for x in signal_dict.values()])
sort_idx = np.argsort(-total_counts)
for i, (task, ax) in enumerate(zip(tasks, axes)):
heatmap_stranded_profile(signal_dict[task][sort_idx], ax=ax, **kwargs)
ax.set_title(task)
fig.subplots_adjust(wspace=0) # no space between plots
plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False) # no numbers
plt.setp([a.get_yaxis() for a in fig.axes[1:]], visible=False) # no numbers
return fig
def heatmap_contribution_profile(signal, ax=None, figsize=(5, 20), aspect=0.2, sort_idx=None, tick_step=25):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if sort_idx is None:
sort_idx = np.arange(signal.shape[0])
interpolation = 'nearest'
ax.imshow(signal[sort_idx],
cmap=plt.cm.RdBu, norm=MidpointNormalize(midpoint=0),
interpolation=interpolation, aspect=aspect)
seq_len = signal.shape[1]
ticks = np.arange(0, seq_len + 1 - tick_step, tick_step)
ax.set_xticks(ticks)
ax.set_xticklabels(ticks - seq_len // 2)
ax.set_ylabel("Seqlet index")
ax.set_xlabel("Position")
def multiple_heatmap_contribution_profile(signal_dict, sort_idx=None,
figsize=(20, 20), **kwargs):
"""Plot a dictionary of profiles
"""
tasks = list(signal_dict.keys())
fig, axes = plt.subplots(1, len(tasks), figsize=figsize)
# --------------------
# special. TODO - re-factor
if sort_idx is None:
sort_idx = np.arange([x for x in signal_dict.values()][0].shape[0])
for i, (task, ax) in enumerate(zip(tasks, axes)):
heatmap_contribution_profile(signal_dict[task][sort_idx],
ax=ax, **kwargs)
# --------------------
ax.set_title(task)
fig.subplots_adjust(wspace=0) # no space between plots
plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False) # no numbers
plt.setp([a.get_yaxis() for a in fig.axes[1:]], visible=False) # no numbers
return fig
def multiple_heatmaps(signal_dict, plot_fn, sort_idx=None, figsize=(20, 20), **kwargs):
tasks = list(signal_dict.keys())
fig, axes = plt.subplots(1, len(tasks), figsize=figsize)
if sort_idx is None:
sort_idx = np.arange([x for x in signal_dict.values()][0].shape[0])
for i, (task, ax) in enumerate(zip(tasks, axes)):
plot_fn(signal_dict[task][sort_idx],
ax=ax, **kwargs)
ax.set_title(task)
fig.subplots_adjust(wspace=0) # no space between plots
plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False) # no numbers
plt.setp([a.get_yaxis() for a in fig.axes[1:]], visible=False) # no numbers
return fig
def heatmap_sequence(one_hot, ax=None, sort_idx=None, aspect='auto',
figsize_tmpl=(8, 4), cbar=True, title=None):
"""Plot a heatmap of sequences
"""
if ax is None:
figsize = (figsize_tmpl[0] * one_hot.shape[1] / 200,
figsize_tmpl[1] * one_hot.shape[0] / 2000)
fig, ax = plt.subplots(figsize=figsize)
if sort_idx is None:
sort_idx = np.arange(one_hot.shape[0])
cmap = colors.ListedColormap(["red", "orange", "blue", "green"][::-1])
qrates = np.array(list("TGCA"))
bounds = np.linspace(-.5, 3.5, 5)
norm = colors.BoundaryNorm(bounds, 4)
fmt = mpl.ticker.FuncFormatter(lambda x, pos: qrates[::-1][norm(x)])
img = ax.imshow(one_hot.argmax(axis=-1)[sort_idx], aspect=aspect, cmap=cmap, norm=norm, alpha=0.8)
if cbar:
ax2_divider = make_axes_locatable(ax)
cax2 = ax2_divider.append_axes("top", size="5%", pad=0.05)
# cb2 = colorbar(im2, cax=cax2, orientation="horizontal")
cb2 = colorbar(img, cax=cax2, cmap=cmap, norm=norm, boundaries=bounds,
orientation="horizontal",
ticks=[0, 1, 2, 3], format=fmt)
cax2.xaxis.set_ticks_position("top")
seq_len = one_hot.shape[1]
ticks = np.arange(0, seq_len + 1, 25)
ax.set_xticks(ticks)
ax.set_xticklabels(ticks - seq_len // 2)
ax.set_ylabel("Seqlet index")
ax.set_xlabel("Position")
if title is not None:
ax.set_title(title)
return fig
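# Minimal usage sketch (synthetic one-hot data, invented for illustration only):
def _example_heatmap_sequence():
    rng = np.random.default_rng(0)
    idx = rng.integers(0, 4, size=(50, 70))
    one_hot = np.eye(4)[idx]  # (n_seqlets, seq_len, 4) one-hot encoded bases
    return heatmap_sequence(one_hot, title="random sequences")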
|
the-stack_106_16221
|
"""
Delete snapshot action for AWS RDS DB snapshot.
"""
from resourcehandlers.aws.models import AWSHandler
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
import boto3
import time
from django.db import IntegrityError
def generate_options_for_snapshot(server=None, **kwargs):
resource = kwargs.get('resource')
snapshots = []
region = resource.attributes.get(field__name='aws_region').value
rh_id = resource.attributes.get(field__name='aws_rh_id').value
db_cluster_identifier = resource.attributes.get(field__name='db_cluster_identifier').value
handler = AWSHandler.objects.get(id=rh_id)
rds = boto3.client('rds',
region_name=region,
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd
)
response = rds.describe_db_cluster_snapshots(
DBClusterIdentifier=db_cluster_identifier,
)
snapshots.extend([snapshot['DBClusterSnapshotIdentifier'] for snapshot in response['DBClusterSnapshots']])
if len(snapshots) == 0:
return []
return snapshots
def run(job, resource, **kwargs):
region = resource.attributes.get(field__name='aws_region').value
rh_id = resource.attributes.get(field__name='aws_rh_id').value
handler = AWSHandler.objects.get(id=rh_id)
snapshot_identifier = '{{ snapshot }}'
set_progress('Connecting to Amazon RDS')
rds = boto3.client('rds',
region_name=region,
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd
)
set_progress('Deleting snapshot "{}"'.format(snapshot_identifier))
rds.delete_db_cluster_snapshot(
DBClusterSnapshotIdentifier=snapshot_identifier
)
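    # Illustrative sketch (an assumption, not part of the original action): the
    # delete call above is asynchronous on the AWS side. If the job needed to
    # block until the snapshot is gone, the same describe call used in
    # generate_options_for_snapshot could be polled, e.g.:
    #   while snapshot_identifier in [
    #           s['DBClusterSnapshotIdentifier'] for s in
    #           rds.describe_db_cluster_snapshots()['DBClusterSnapshots']]:
    #       time.sleep(10)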
return "SUCCESS", "Snapshot has succesfully been deleted", ""
|
the-stack_106_16222
|
from lbrynet import conf
class ClientRequest(object):
def __init__(self, request_dict, response_identifier=None):
self.request_dict = request_dict
self.response_identifier = response_identifier
class ClientPaidRequest(ClientRequest):
def __init__(self, request_dict, response_identifier, max_pay_units):
ClientRequest.__init__(self, request_dict, response_identifier)
self.max_pay_units = max_pay_units
class ClientBlobRequest(ClientPaidRequest):
def __init__(self, request_dict, response_identifier, write_func, finished_deferred,
cancel_func, blob):
if blob.length is None:
max_pay_units = conf.settings['BLOB_SIZE']
else:
max_pay_units = blob.length
ClientPaidRequest.__init__(self, request_dict, response_identifier, max_pay_units)
self.write = write_func
self.finished_deferred = finished_deferred
self.cancel = cancel_func
self.blob = blob
|
the-stack_106_16225
|
import numpy as np
import pandas as pd
def check_df_col(df, column, name=None):
"""
Checks for the presence of a column (or columns) in a tidy
DataFrame with an informative error message. Passes silently,
otherwise raises error.
"""
if column is not None:
if type(column) != list:
column = [column]
for col in column:
if name is None:
error_message = f"The value '{col}' is not present in any of the columns of your DataFrame."
else:
error_message = f"Your {name} value '{col}' is not present in any of the columns of your DataFrame."
error_message += "\nYou may be looking for:\n " + str(list(df.columns))
assert col in df.columns, error_message
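# Example (hypothetical DataFrame and column names, for illustration only):
#   check_df_col(df, 'signal', name='value')
# passes silently when 'signal' is a column of df, and otherwise raises an
# AssertionError that lists the available columns.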
def check_replicates(df, variable, value, grouping):
"""Checks for the presence of replicates in the values of a dataset,
    given some experimental conditions. Returns True if the mean standard
    deviation of the values within each group is greater than zero,
    indicating that replicates were performed under the given criteria.
Parameters
----------
df : Pandas DataFrame in tidy format
The data set to be checked for replicates
variable : immutable object
Name of column of data frame for the independent variable,
indicating a specific experimental condition.
value : immutable object
Name of column of data frame for the dependent variable,
indicating an experimental observation.
    grouping : immutable object or list of immutable objects
Column name or list of column names that indicates how the
data set should be split.
Returns
-------
replicates : boolean
True if replicates are present.
    df_out : the DataFrame with an added column of group-averaged 'value'
        entries if replicates is True; otherwise the original DataFrame.
"""
# Unpack the experimental conditions into a single list of arguments
if type(grouping) != list:
grouping = [grouping]
args = [elem for elem in [variable, *grouping] if elem != None]
# Get stdev of argument groups
grouped = df.groupby(args)[value]
group_stdevs = grouped.std().reset_index()
group_stdev = group_stdevs[value].mean()
    # Determine if there are replicates (mean of group standard deviations > 0)
replicates = bool(group_stdev > 0)
# Average the values and return
    if replicates:
        df_mean = grouped.mean().reset_index()
        df_mean.columns = list(df_mean.columns[:-1]) + ['Mean of ' + str(value)]
        df_return = df.merge(df_mean)
        return replicates, df_return
    # No replicates: return the original DataFrame unchanged.
    return replicates, df
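# Minimal usage sketch (the column names and numbers below are invented for
# illustration, not taken from any real data set):
def _example_check_replicates():
    df = pd.DataFrame({
        'concentration': [1, 1, 2, 2],
        'signal': [0.9, 1.1, 1.9, 2.1],
        'strain': ['wt', 'wt', 'wt', 'wt'],
    })
    has_reps, df_out = check_replicates(df, 'concentration', 'signal', 'strain')
    # has_reps is True because each (concentration, strain) group has spread > 0,
    # and df_out gains a 'Mean of signal' column with the per-group averages.
    return has_reps, df_out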
|
the-stack_106_16226
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns=[
url(r'^$',views.landing,name='landing'),
url(r'^profile/$',views.profile,name='profile'),
url(r'^profile/edit/$',views.edit,name='edit'),
url(r'^businesses/$',views.biz,name='biz'),
url(r'^search/$',views.search_results,name='search_results'),
url(r'^post/$',views.post,name='post'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
the-stack_106_16227
|
from pygears.conf import safe_bind
from pygears.typing import TypingNamespacePlugin, Queue, Tuple, Union, typeof
def next_pos(type_list, comb, t):
if len(type_list) == 1:
yield comb + [t]
else:
yield from type_comb_rec(type_list[:-1], comb + [t])
def type_comb_rec(type_list, comb):
type_ = type_list[-1]
if typeof(type_, Union):
for t in type_.types:
yield from next_pos(type_list, comb, t)
else:
yield from next_pos(type_list, comb, type_)
def tuple_type_comb(type_):
type_list = [t for t in type_]
for t in type_comb_rec(type_list, []):
yield Tuple[tuple(reversed(t))]
def queue_type_comb(type_):
# type_list = [t for t in type_[0]]
for t in type_[0]:
yield Queue[t, type_.lvl]
# for t in type_comb_rec(type_list, []):
# yield Queue[tuple(reversed(t)), type_.lvl]
def expand(type_):
if typeof(type_, Tuple):
return Union[tuple(tuple_type_comb(type_))]
elif typeof(type_, Queue):
if typeof(type_[0], Tuple):
return Tuple[tuple(queue_type_comb(type_))]
elif typeof(type_[0], Union):
utypes = [Queue[t, type_.lvl] for t in type_[0].types]
return Union[tuple(utypes)]
else:
return type_
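# Illustrative sketch of the intent (u4/u8/u16 stand for arbitrary pygears types,
# chosen here only for illustration): expanding a Tuple distributes any Union
# element over the Tuple, and expanding a Queue of a Union lifts the Union out:
#   expand(Tuple[Union[u8, u16], u4]) -> Union[Tuple[u8, u4], Tuple[u16, u4]]
#   expand(Queue[Union[u8, u16]])     -> Union[Queue[u8], Queue[u16]]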
class TypeExpandPlugin(TypingNamespacePlugin):
@classmethod
def bind(cls):
safe_bind('gear/type_arith/expand', expand)
|
the-stack_106_16229
|
import base64
import io
import json
import os
import gdown
#import fastbook
#fastbook.setup_book()
import fastai
import pandas as pd
import requests
import torchtext
import nltk
import snscrape.modules.twitter as sntwitter
from copy import deepcopy
from torchvision import models
from torchvision import transforms
from PIL import Image
from django.shortcuts import render
from django.conf import settings
#from fastbook import *
from torchtext.data import get_tokenizer
from fastai.text.all import *
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
from nltk.corpus import wordnet
from nltk import FreqDist
from string import punctuation
import mmap
from .forms import TextEntryForm
from .download_pkls import *
from .tweet_manipulations import TweetManipulations
import pathlib
posixpath_temp = pathlib.PosixPath
# Hack: fastai pickles saved on Linux reference PosixPath; remap it so they can
# be unpickled on Windows.
pathlib.PosixPath = pathlib.WindowsPath
subs = ['academic-humanities', 'academic-stem', 'anime', 'astrology', 'conservative', 'hippie-spiritual', 'kpop', 'lgbtq', 'liberal', 'sports', 'tech-nerd']
path_cwd = os.getcwd()
path_df = 'static\\dataframes'
path_dls = 'static\\dataloaders'
path_models = 'static\\models'
path_nums200 = 'static\\nums200'
path_toks200 = 'static\\toks200'
path_tweets = 'static\\tweets-by-user'
max_tweets = 999
tweets_to_analyze = 100
num_to_return = 3
def get_tweets(df):
return L(df.iloc[i, 0] for i in range(0, df.shape[0]))
def subword(sz):
    # NOTE: this helper assumes module-level `txts` (a collection of tweet texts)
    # and `txt` (a single example text) exist before it is called; neither is
    # defined in this module.
    sp = SubwordTokenizer(vocab_sz=sz)
    sp.setup(txts)
    return ' '.join(first(sp([txt]))[:40])
class WorkWithModels:
d = None
t = None
df_eachsub = []
tkn_eachsub = []
txts_eachsub = []
toks200_eachsub = []
nums200_eachsub = []
dls_eachsub = []
learn_eachsub = []
# learn = None
df_c = None
tkn_c = None
toks200_c = None
nums200_c = None
dls_c = None
learn_c = None
subs_eachuser = dict()
df_user = None
txts_user = []
toks200_user = None
num_user = None
rare_words_user = []
alias_user = ''
def __init__(self, d, t):
self.d = d
self.t = t
# methods to get user's distinctive vocabulary
def get_user_assets_ready(self, username):
spacy = WordTokenizer()
tkn = Tokenizer(spacy)
self.df_user = pd.read_csv(os.path.join(path_tweets, 'tweets-' + username + '.csv'), index_col=0)
self.df_user.columns = ['Content']
self.txts_user = L(self.df_user.iloc[i, 0] for i in range(0, self.df_user.shape[0]))
self.toks200_user = self.txts_user.map(tkn)
self.num_user = Numericalize()
self.num_user.setup(self.toks200_user)
coll_repr(self.num_user.vocab,20)
def get_rare_words(self, username):
# coati: should specifically look through the *user's* texts, i.e. have user-specific assets loaded from
# .pkl's (or if you're only storing the user's vocabulary, a .txt)
with open(Path(str(path_df))/'common_words.txt', 'rb', 0) as f, \
mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as s:
for word in self.num_user.vocab:
if len(word) >= 4:
if word[0] != '@' and word[0:2] != 'xx' and word not in ['t.co', 'https']:
lemma = word
if word[-1:] in ['d', 's']: lemma = word[:-1]
elif word[-2:] in ['er', 'in', 'st', 'th', 'ty']: lemma = word[:-2]
elif word[-3:] in ['ing']: lemma = word[:-3]
                        if s.find(lemma[:-1].encode()) == -1:
self.rare_words_user.append([word, self.get_POS(word, username)])
# coati: maybe store these variables differently?
self.t.rare_words_user = self.rare_words_user
def get_POS(self, word, username):
for i in range(0, len(self.txts_user)):
tweet = self.txts_user[i].split()
if word in tweet:
index = tweet.index(word)
pos_tweet_predicted = nltk.tag.pos_tag(tweet)
pos_word_predicted = pos_tweet_predicted[index]
return pos_word_predicted[1]
return 'NN'
# coati: not currently using this
def get_syns_rare_words(self, username):
try:
for i in range(0, len(self.rare_words_user)):
word = self.rare_words_user[i][0]
syns = self.t.find_syns(word)
if len(syns) > 0:
print('Synonyms for ' + word + ': ' + ' '.join(syns))
except Exception as e:
print(e)
def get_vocab_of_learner(self, sub_index):
try:
num = Numericalize()
num.setup(self.toks200_eachsub[sub_index])
return num.vocab
except Exception as e:
print(e)
return []
def get_categorization_assets_ready(self):
print('Getting assets for categorization, hang tight................')
print('Loading dataframes...')
try:
self.df_c = torch.load(os.path.join(path_cwd, path_df, 'df_categorize.pkl'))
except Exception as e:
print(e)
spacy = WordTokenizer()
tkn = Tokenizer(spacy)
self.tkn_c = tkn
print('Loading txts...')
self.txts_c = L(self.df_c.iloc[i, 0] for i in range(0, self.df_c.shape[0]))
# print('Loading toks200...')
# try:
# #COATI: host this online so you can download it, currently it has no way of getting to the project
# self.toks200_c = torch.load(os.path.join(path_cwd, path_toks200, 'toks200_c.pkl'))
# except Exception as e:
# print(e)
# print('Loading nums200...')
# try:
# #COATI: host this online so you can download it, currently it has no way of getting to the project
# self.nums200_c = torch.load(os.path.join(path_cwd, path_nums200, 'nums200_c.pkl'))
# except Exception as e:
# print(e)
print('Loading dataloaders...')
try:
self.d.download_dls_c()
self.dls_c = torch.load(os.path.join(path_cwd, path_dls, 'dls-nlp-clas.pkl'))
except Exception as e:
print(e)
print('Loading learners...')
try:
self.d.download_learn_c_pth()
self.learn_c = text_classifier_learner(self.dls_c, AWD_LSTM, drop_mult = 0.5, metrics = accuracy).to_fp16()
self.learn_c.path = Path(str(path_cwd))/'static'
self.learn_c = self.learn_c.load('nlpmodel3_clas')
except Exception as e:
print(e)
print('Success!')
def get_generation_assets_ready(self):
print('Getting assets for tweet generation, hang tight................')
print('Loading dataframes...')
try:
self.df_eachsub = torch.load(os.path.join(path_cwd, path_df, 'df_eachsub_tweets.pkl'))
except Exception as e:
print(e)
spacy = WordTokenizer()
for i in range(0, len(subs)):
tkn = Tokenizer(spacy)
self.tkn_eachsub.append(tkn)
print('Loading txts...')
try:
self.txts_eachsub = torch.load(os.path.join(path_cwd, path_df, 'txts_eachsub.pkl'))
except Exception as e:
print(e)
# coati: TO DO................ store txts_eachsub.pkl on drive so you can download it, currently the program has
# no way of creating it
print('Loading toks200...')
try:
self.d.download_toks200()
self.toks200_eachsub = torch.load(os.path.join(path_cwd, path_toks200, 'toks200-tweets.pkl'))
except Exception as e:
print(e)
print('Loading nums200...')
try:
self.d.download_nums200()
self.nums200_eachsub = torch.load(os.path.join(path_cwd, path_nums200, 'nums200-eachsub.pkl'))
except Exception as e:
print(e)
print('Loading dataloaders...')
try:
for i in range(0, len(subs)):
filename = 'dls-nlp-' + subs[i] + '-ALT.pkl'
dls_thissub = torch.load(os.path.join(path_cwd, path_dls, filename))
self.dls_eachsub.append(dls_thissub)
except Exception as e:
print(e)
print('Loading learners...')
try:
self.d.download_all_models()
for i in range(0, len(subs)):
try:
filename = 'nlpmodel3-' + subs[i] # don't include ".pth" in filename --- model.learn() doesn't require it
learn = language_model_learner(
self.dls_eachsub[i], AWD_LSTM, drop_mult=0.3,
metrics=[accuracy, Perplexity()]).to_fp16()
learn.path = Path(str(path_cwd))/'static'
learn = learn.load(filename)
self.learn_eachsub.append(learn)
print('Successfully loaded model ' + str(i))
except:
print('Failed to load model ' + str(i))
print('Loaded')
except Exception as e:
print(e)
def download_user_tweets(self, username):
print('Downloading tweets by user ' + username + '...')
tweets_list = []
for i,tweet in enumerate(sntwitter.TwitterSearchScraper('from:' + username).get_items()):
if i > max_tweets:
break
if i == 0:
# setting user's alias to display name retrieved from their 1st tweet
self.alias_user = tweet.user.displayname
tweets_list.append([tweet.content])
# racc: tweets_list.append([tweet.date, tweet.id, tweet.content, tweet.user.username])
tweets_df = pd.DataFrame(tweets_list, columns=['Content'])
tweets_df.to_csv(os.path.join(path_tweets, 'tweets-' + username + '.csv'))
# coati: if this version of snscrape stops working, another way to do it:
# scraper = snscrape.modules.twitter.TwitterUserScraper('textfiles')
# for tweet in scraper.get_items():
# print(tweet.user.username)
# print(tweet.date)
# print(tweet.content)
# os.system('snscrape --max-results ' + str(max_tweets) + ' twitter-user ' + username + ' >tweets-by-user-' + username + '.txt')
def categorize_user(self, username):
subs_thisuser = []
try:
self.download_user_tweets(username)
# coati: uncomment this if you get rid of "get_user_assets()" method
# self.df_user = pd.read_csv(os.path.join(path_tweets, 'tweets-' + username + '.csv'), index_col=0)
# self.df_user.columns = ['Content']
preds_each_tweet = []
for i in range(0, tweets_to_analyze):
print('Analyzing tweet #', i)
preds_this_tweet = self.learn_c.predict(self.df_user.loc[i, 'Content'])[2]
print(preds_this_tweet)
preds_each_tweet.append(preds_this_tweet)
all_preds_each_categ = torch.stack(preds_each_tweet)
df_preds = pd.DataFrame(all_preds_each_categ.numpy())
df_preds.columns = subs
# predictions of subculture for all of the user's tweets
preds_overall_each_categ = []
for i in range(0, len(subs)):
pred_overall_this_categ = df_preds[subs[i]].mean()
preds_overall_each_categ.append(pred_overall_this_categ)
# sorting these overall predictions from most to least likely
preds_overall_each_categ_sorted = deepcopy(preds_overall_each_categ)
preds_overall_each_categ_sorted.sort(reverse=True)
for i in range(0, len(preds_overall_each_categ_sorted)):
cur_pred = preds_overall_each_categ_sorted[i]
orig_index_of_pred = preds_overall_each_categ.index(cur_pred)
print('Likelihood of being in group ' + subs[orig_index_of_pred] + ': ' + str(cur_pred))
# coati: currently just returning top 3 categories --- can make this mechanism more complex later
if i < num_to_return:
subs_thisuser.append(orig_index_of_pred)
except Exception as e:
print(e)
self.subs_eachuser[username] = subs_thisuser
# coati: SAVE THIS SOMEWHERE, like in a csv in the user's copy of the repo
def get_user_alias(self, username):
if self.alias_user == '':
            return username
else:
return self.alias_user
def get_user_styles(self, username):
# coati: for future: don't include links ("http...", "t.co...") or at's ("@") as uncapitalized tweets,
# check if user uses emojis
percent_capitalized = 0
percent_punctuated = 0
tweets_to_inspect = 30
tweets_uncapitalized = 0
tweets_capitalized = 0
tweets_punctuated = 0
try:
for i in range(0, tweets_to_inspect):
cur_tweet = self.df_user.loc[i, 'Content'].strip()
# for checking capitalization of the user's tweets, leave out ones that start with "@"
if re.match('^[A-Z]', cur_tweet):
tweets_capitalized = tweets_capitalized + 1
elif re.match('^[a-z]', cur_tweet):
tweets_uncapitalized = tweets_uncapitalized + 1
if cur_tweet[-1] in punctuation:
tweets_punctuated = tweets_punctuated + 1
percent_capitalized = tweets_capitalized / (tweets_capitalized + tweets_uncapitalized)
percent_punctuated = tweets_punctuated / tweets_to_inspect
return [percent_capitalized, percent_punctuated]
except Exception as e:
# might be a "divide by zero" error, so just return .5 for both
return [.5, .5]
def get_tweet_predictions(self, username, topic):
num_words = 20
# coati: for now just 1 sentence, but in the future you can generate multiple and pick the best one by
# some metric, like BLEU grammatical correctness
num_sentences = 1
subs_thisuser = self.subs_eachuser[username]
preds_manipulated_all_subs = []
for i in range(0, len(subs_thisuser)):
cur_sub = subs_thisuser[i]
intro = self.t.intro_from_prompt(topic, self.rare_words_user)
preds = self.learn_eachsub[cur_sub].predict(intro, num_words, temperature=0.75)
# racc: [self.learn_eachsub ... for _ in range(n_sentences)]
preds_manipulated = self.t.apply_manipulations(preds, topic, self.get_user_styles(username), self.rare_words_user)
preds_manipulated_all_subs.append(preds_manipulated)
return preds_manipulated_all_subs
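# Illustrative end-to-end sketch (hypothetical wiring; `downloader` comes from
# download_pkls and the exact call order is not enforced by the class). One
# sequence that satisfies the attribute dependencies above would be:
#   w = WorkWithModels(downloader, TweetManipulations())
#   w.get_categorization_assets_ready()
#   w.get_generation_assets_ready()
#   w.download_user_tweets('some_handle')
#   w.get_user_assets_ready('some_handle')
#   w.categorize_user('some_handle')
#   w.get_rare_words('some_handle')
#   tweets = w.get_tweet_predictions('some_handle', 'coffee')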
|
the-stack_106_16230
|
"""Admin suppor for inlines
Peter Cicman, Divio GmbH, 2008
"""
from django.utils.text import capfirst, get_text_list
from django.contrib.admin.util import flatten_fieldsets
from django.http import HttpResponseRedirect
from django.utils.encoding import force_unicode
import re
from copy import deepcopy
from django.conf import settings
from django import forms
from django.contrib import admin
from django.db.models import Model
from django.forms.util import ErrorList, ValidationError
from django.forms.models import BaseInlineFormSet, ModelFormMetaclass
from django.utils.translation import ugettext as _
from django.template.loader import find_template
from django.template import TemplateDoesNotExist
from multilingual.languages import get_default_language
from multilingual.utils import GLL
MULTILINGUAL_PREFIX = '_ml__trans_'
MULTILINGUAL_INLINE_PREFIX = '_ml__inline_trans_'
def gll(func):
def wrapped(cls, request, *args, **kwargs):
cls.use_language = request.GET.get('lang', request.GET.get('language', get_default_language()))
GLL.lock(cls.use_language)
resp = func(cls, request, *args, **kwargs)
GLL.release()
return resp
wrapped.__name__ = func.__name__
wrapped.__doc__ = func.__doc__
return wrapped
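# Illustrative usage sketch (an assumption about intent, not taken from this
# module): gll is meant to wrap admin view methods so the request's
# ?lang=/?language= parameter locks the global language for the duration of
# the view, e.g.
#   class SomeMultilingualAdmin(admin.ModelAdmin):
#       @gll
#       def change_view(self, request, object_id, extra_context=None):
#           ...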
def standard_get_fill_check_field(stdopts):
if hasattr(stdopts, 'translation_model'):
opts = stdopts.translation_model._meta
for field in opts.fields:
if field.name in ('language_code', 'master'):
continue
if not (field.blank or field.null):
return field.name
return None
def relation_hack(form, fields, prefix=''):
opts = form.instance._meta
localm2m = [m2m.attname for m2m in opts.local_many_to_many]
externalfk = [obj.field.related_query_name() for obj in opts.get_all_related_objects()]
externalm2m = [m2m.get_accessor_name() for m2m in opts.get_all_related_many_to_many_objects()]
for name, db_field in fields:
full_name = '%s%s' % (prefix, name)
if full_name in form.fields:
value = getattr(form.instance, name, '')
# check for (local) ForeignKeys
if isinstance(value, Model):
value = value.pk
# check for (local) many to many fields
elif name in localm2m:
value = value.all()
# check for (external) ForeignKeys
elif name in externalfk:
value = value.all()
# check for (external) many to many fields
elif name in externalm2m:
value = value.all()
form.fields[full_name].initial = value
class MultilingualInlineModelForm(forms.ModelForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
"""
Fill initial ML Fields
"""
super(MultilingualInlineModelForm, self).__init__(data, files, auto_id,
prefix, initial, error_class, label_suffix, empty_permitted, instance)
# only read initial data if the object already exists, not if we're adding it!
if self.instance.pk:
relation_hack(self, get_translated_fields(self.instance), MULTILINGUAL_INLINE_PREFIX)
class MultilingualInlineFormSet(BaseInlineFormSet):
def get_queryset(self):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_query_set()
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
if self.max_num > 0:
_queryset = qs[:self.max_num]
else:
_queryset = qs
return _queryset
def save_new(self, form, commit=True):
"""
        NOTE: the save_new method is completely overridden here; there's no
        other way to prevent a double save otherwise. Just assign the translated
        data to the object.
"""
kwargs = {self.fk.get_attname(): self.instance.pk}
new_obj = self.model(**kwargs)
self._prepare_multilingual_object(new_obj, form)
return forms.save_instance(form, new_obj, exclude=[self._pk_field.name], commit=commit)
def save_existing(self, form, instance, commit=True):
"""
        NOTE: the save_existing method is completely overridden here; there's no
        other way to prevent a double save otherwise. Just assign the translated
        data to the object.
"""
self._prepare_multilingual_object(instance, form)
return forms.save_instance(form, instance, exclude=[self._pk_field.name], commit=commit)
def _prepare_multilingual_object(self, obj, form):
opts = obj._meta
for realname, fieldname in self.ml_fields.items():
field = opts.get_field_by_name(realname)[0]
m = re.match(r'^%s(?P<field_name>.*)$' % MULTILINGUAL_INLINE_PREFIX, fieldname)
if m:
field.save_form_data(self.instance, form.cleaned_data[fieldname])
setattr(obj, realname, getattr(self.instance, realname.rsplit('_', 1)[0]))
class MultilingualInlineAdmin(admin.TabularInline):
formset = MultilingualInlineFormSet
form = MultilingualInlineModelForm
template = 'admin/multilingual/edit_inline/tabular.html'
# css class added to inline box
inline_css_class = None
use_language = None
fill_check_field = None
#TODO: add some nice template
def __init__(self, parent_model, admin_site):
super(MultilingualInlineAdmin, self).__init__(parent_model, admin_site)
if hasattr(self, 'use_fields'):
            # work around the admin fields structure validation
self.fields = self.use_fields
def get_formset(self, request, obj=None, **kwargs):
FormSet = super(MultilingualInlineAdmin, self).get_formset(request, obj, **kwargs)
FormSet.use_language = GLL.language_code
FormSet.ml_fields = {}
for name, field in get_translated_fields(self.model, GLL.language_code):
fieldname = '%s%s' % (MULTILINGUAL_INLINE_PREFIX, name)
FormSet.form.base_fields[fieldname] = self.formfield_for_dbfield(field, request=request)
FormSet.ml_fields[name] = fieldname
return FormSet
def queryset(self, request):
"""
        Filter out objects which don't have a translation in this language
"""
qs = super(MultilingualInlineAdmin, self).queryset(request)
        # Don't know what the hell I was thinking here, but this code breaks stuff:
#
# checkfield = self.get_fill_check_field()
# if checkfield is not None:
# kwargs = {str('%s_%s__isnull' % (checkfield, GLL.language_code)): False}
# from django.db.models.fields import CharField
# if isinstance(self.model._meta.translation_model._meta.get_field_by_name(checkfield)[0], CharField):
# kwargs[str('%s_%s__gt' % (checkfield, GLL.language_code))] = ''
# return qs.filter(**kwargs)
return qs.filter(translations__language_code=GLL.language_code).distinct()
def get_fill_check_field(self):
if self.fill_check_field is None:
self.fill_check_field = standard_get_fill_check_field(self.model._meta)
return self.fill_check_field
class MultilingualModelAdminForm(forms.ModelForm):
    # for rendering / saving multilingual fields connected to the model, takes place
    # when the per-language admin is used
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
"""
Fill up initial ML Fields
"""
super(MultilingualModelAdminForm, self).__init__(data, files, auto_id, prefix,
initial, error_class, label_suffix,
empty_permitted, instance)
        # only try to fill initial data if we are not adding an object!
if self.instance.pk:
fields = [(f, getattr(self.instance, "%s_%s" % (f, GLL.language_code), '')) for f in self.ml_fields]
relation_hack(self, fields)
def clean(self):
cleaned_data = super(MultilingualModelAdminForm, self).clean()
self.validate_ml_unique()
return cleaned_data
def validate_ml_unique(self):
form_errors = []
if not hasattr(self.instance._meta, 'translation_model'):
return
for check in self.instance._meta.translation_model._meta.unique_together[:]:
lookup_kwargs = {'language_code': GLL.language_code}
for field_name in check:
#local_name = "%s_%s" % (field_name, self.use_language)
if self.cleaned_data.get(field_name) is not None:
lookup_kwargs[field_name] = self.cleaned_data.get(field_name)
if len(check) == 2 and 'master' in check and 'language_code' in check:
continue
qs = self.instance._meta.translation_model.objects.filter(**lookup_kwargs)
if self.instance.pk is not None:
qs = qs.exclude(master=self.instance.pk)
if qs.count():
model_name = capfirst(self.instance._meta.verbose_name)
field_labels = []
for field_name in check:
if field_name == "language_code":
field_labels.append(_("language"))
elif field_name == "master":
continue
else:
field_labels.append(self.instance._meta.translation_model._meta.get_field_by_name(field_name)[0].verbose_name)
field_labels = get_text_list(field_labels, _('and'))
form_errors.append(
_(u"%(model_name)s with this %(field_label)s already exists.") % \
{'model_name': unicode(model_name),
'field_label': unicode(field_labels)}
)
if form_errors:
# Raise the unique together errors since they are considered
# form-wide.
raise ValidationError(form_errors)
def save(self, commit=True):
self._prepare_multilingual_object(self.instance, self)
return super(MultilingualModelAdminForm, self).save(commit)
def _prepare_multilingual_object(self, obj, form):
opts = self.instance._meta
for name in self.ml_fields:
field = opts.get_field_by_name(name)[0]
# respect save_form_data
field.save_form_data(self.instance, form.cleaned_data[name])
setattr(obj, "%s_%s" % (name, GLL.language_code), getattr(self.instance, name))
class MultilingualModelAdmin(admin.ModelAdmin):
# use special template to render tabs for languages on top
change_form_template = "admin/multilingual/change_form.html"
form = MultilingualModelAdminForm
_multilingual_model_admin = True
use_language = None
fill_check_field = None
_use_hacks = ['fieldsets', 'prepopulated_fields', 'readonly_fields']
class Media:
css = {
'all': ('%smultilingual/admin/css/style.css' % settings.MEDIA_URL,)
}
def __init__(self, model, admin_site):
for attr in self._use_hacks:
if hasattr(self, 'use_%s' % attr):
setattr(self, attr, getattr(self, 'use_%s' % attr))
super(MultilingualModelAdmin, self).__init__(model, admin_site)
def get_fill_check_field(self):
if self.fill_check_field is None:
self.fill_check_field = standard_get_fill_check_field(self.model._meta)
return self.fill_check_field
def get_form(self, request, obj=None, **kwargs):
        # assign language to inlines, so they know how to render
for inline in self.inline_instances:
if isinstance(inline, MultilingualInlineAdmin):
inline.use_language = GLL.language_code
Form = super(MultilingualModelAdmin, self).get_form(request, obj, **kwargs)
Form.ml_fields = {}
for name, field in get_default_translated_fields(self.model):
if not field.editable:
continue
form_field = self.formfield_for_dbfield(field, request=request)
local_name = "%s_%s" % (name, GLL.language_code)
Form.ml_fields[name] = form_field
Form.base_fields[name] = form_field
Form.use_language = GLL.language_code
return Form
def placeholder_plugin_filter(self, request, queryset):
"""
This is only used on models which use placeholders from the django-cms
"""
if not request:
return queryset
if GLL.is_active:
return queryset.filter(language=GLL.language_code)
return queryset
@gll
def change_view(self, *args, **kwargs):
return super(MultilingualModelAdmin, self).change_view(*args, **kwargs)
@gll
def add_view(self, *args, **kwargs):
return super(MultilingualModelAdmin, self).add_view(*args, **kwargs)
@gll
def delete_view(self, *args, **kwargs):
return super(MultilingualModelAdmin, self).delete_view(*args, **kwargs)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
# add context variables
filled_languages = []
fill_check_field = self.get_fill_check_field()
if obj and fill_check_field is not None:
from django.db.models.fields import CharField
kwargs = {'%s__isnull' % fill_check_field:False}
if isinstance(self.model._meta.translation_model._meta.get_field_by_name(fill_check_field)[0], CharField):
kwargs['%s__gt' % fill_check_field] = ''
filled_languages = [t[0] for t in obj.translations.filter(**kwargs).values_list('language_code')]
context.update({
'current_language_index': GLL.language_code,
'current_language_code': GLL.language_code,
'filled_languages': filled_languages,
'old_template': self.get_old_template(),
})
return super(MultilingualModelAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def get_old_template(self):
opts = self.model._meta
app_label = opts.app_label
search_templates = [
"admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
]
for template in search_templates:
try:
find_template(template)
return template
except TemplateDoesNotExist:
pass
def response_change(self, request, obj):
        # handle save & continue so that the same language stays selected
if request.POST.has_key("_continue"):
opts = obj._meta
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
self.message_user(request, msg + ' ' + _("You may edit it again below."))
lang, path = request.GET.get('language', get_default_language()), request.path
if lang:
lang = "language=%s" % lang
if request.REQUEST.has_key('_popup'):
path += "?_popup=1" + "&%s" % lang
else:
path += "?%s" % lang
return HttpResponseRedirect(path)
return super(MultilingualModelAdmin, self).response_change(request, obj)
def get_translated_fields(model, language=None):
meta = model._meta
if not hasattr(meta, 'translated_fields'):
if hasattr(meta, 'translation_model'):
meta = meta.translation_model._meta
else:
return
    # returns all the translatable fields, except for the default ones
if not language:
for name, (field, non_default) in meta.translated_fields.items():
if non_default:
yield name, field
else:
        # if language is defined, return the fields in the same order as they are
        # defined in the translation class
for field in meta.fields:
if field.primary_key:
continue
name = field.name + "_%s" % language
field = meta.translated_fields.get(name, None)
if field:
yield name, field[0]
def get_default_translated_fields(model):
if hasattr(model._meta, 'translation_model'):
for name, (field, non_default) in model._meta.translation_model._meta.translated_fields.items():
if not non_default:
yield name, field
|
the-stack_106_16232
|
#!/usr/bin/env python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the kotyacoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Kotyacoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Kotyacoin")
return os.path.expanduser("~/.kotyacoin")
def read_bitcoin_config(dbdir):
"""Read the kotyacoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "kotyacoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a kotyacoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        actual_fee = total_in - total_out
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
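        # A rough sketch of that priority check (sat_value and confirmations are
        # hypothetical helpers; the 57,600,000 cutoff is the historical "free relay"
        # priority threshold and may not match current policy):
        #
        #     priority = sum(sat_value(vin) * confirmations(vin) for vin in txinfo['vin']) / tx_size
        #     if priority < 57600000:
        #         sys.stderr.write("Warning: very low priority transaction\n")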
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get kotyacoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send kotyacoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of kotyacoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_16234
|
import math,time
from .velocity_to_duration import velocity_to_duration
class Feedforward_interpolation:
def __init__(self,
motion,
current_posture,
postures,
starting_velocity,
velocities):
self._motion_proxy = motion
joints = postures[0].keys()
initial_duration = velocity_to_duration(current_posture,
postures[0],
starting_velocity)
times = [initial_duration]
total_time = initial_duration
for index in range(1,len(postures)):
duration = (velocity_to_duration(postures[index-1],
postures[index],
velocities[index-1]))
total_time += duration
times.append(total_time)
times = [times]*len(joints)
joint_lists = []
for joint in joints:
joint_lists.append([posture[joint] for posture in postures])
self._mid = self._motion_proxy.post.angleInterpolation(joints,
joint_lists,
times,
True)
def is_running(self):
return self._motion_proxy.isRunning(self._mid)
def stop(self):
try:
self._motion_proxy.stop(self._mid)
except:
pass
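# A minimal usage sketch (assumes an ALMotion proxy and posture dicts keyed by joint
# name; the joint names and velocities below are illustrative only):
#
#     interp = Feedforward_interpolation(motion_proxy,
#                                        current_posture={"HeadYaw": 0.0},
#                                        postures=[{"HeadYaw": 0.5}, {"HeadYaw": -0.5}],
#                                        starting_velocity=0.3,
#                                        velocities=[0.3])
#     while interp.is_running():
#         time.sleep(0.05)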
|
the-stack_106_16235
|
import tensorflow as tf
import numpy as np
import argparse
import os
import json
import glob
import random
import collections
import math
import time
from PIL import Image
import cv2
import sys
sys.path.append("..")
from utils.losses import Losses
from utils.misc import blend_uv
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--mode", required=True, help="[texture / normal]")
parser.add_argument("--func", default="train", help="[train / freeze / test]")
parser.add_argument("--output_dir", default=None, help="where to put output files")
parser.add_argument("--seed", type=int)
parser.add_argument(
"--checkpoint",
default=None,
help="directory with checkpoint to resume training from or use for testing",
)
parser.add_argument("--pb_path", default=None, help="protobuf file path")
parser.add_argument(
"--max_steps", type=int, help="number of training steps (0 to disable)"
)
parser.add_argument("--max_epochs", type=int, help="number of training epochs")
parser.add_argument(
"--summary_freq",
type=int,
default=100,
help="update summaries every summary_freq steps",
)
parser.add_argument(
"--progress_freq",
type=int,
default=50,
help="display progress every progress_freq steps",
)
parser.add_argument(
"--trace_freq", type=int, default=0, help="trace execution every trace_freq steps"
)
parser.add_argument(
"--display_freq",
type=int,
default=0,
help="write current training images every display_freq steps",
)
parser.add_argument(
"--save_freq",
type=int,
default=5000,
help="save model every save_freq steps, 0 to disable",
)
parser.add_argument(
"--separable_conv",
action="store_true",
help="use separable convolutions in the generator",
)
parser.add_argument(
"--aspect_ratio",
type=float,
default=1.0,
help="aspect ratio of output images (width/height)",
)
parser.add_argument(
"--lab_colorization",
action="store_true",
help="split input image into brightness (A) and color (B)",
)
parser.add_argument(
"--batch_size", type=int, default=1, help="number of images in batch"
)
parser.add_argument(
"--which_direction", type=str, default="AtoB", choices=["AtoB", "BtoA"]
)
parser.add_argument(
"--ngf",
type=int,
default=64,
help="number of generator filters in first conv layer",
)
parser.add_argument(
"--ndf",
type=int,
default=64,
help="number of discriminator filters in first conv layer",
)
parser.add_argument(
"--scale_size",
type=int,
default=1024,
help="scale images to this size before cropping to 256x256",
)
parser.add_argument(
"--flip", dest="flip", action="store_true", help="flip images horizontally"
)
parser.add_argument(
"--no_flip",
dest="flip",
action="store_false",
help="don't flip images horizontally",
)
parser.set_defaults(flip=True)
parser.add_argument(
"--lr", type=float, default=0.001, help="initial learning rate for adam"
)
parser.add_argument("--beta1", type=float, default=0.5, help="momentum term of adam")
parser.add_argument(
"--l1_weight",
type=float,
default=100.0,
help="weight on L1 term for generator gradient",
)
parser.add_argument(
"--tv_weight",
type=float,
default=1e-3,
help="weight on TV term for generator gradient",
)
parser.add_argument(
"--cos_weight",
type=float,
default=1e-3,
help="weight on cosine distance for generator gradient",
)
parser.add_argument(
"--gan_weight",
type=float,
default=1.0,
help="weight on GAN term for generator gradient",
)
parser.add_argument(
"--base_tex_path",
type=str,
default="../resources/base_tex.png",
help="path to base texture file",
)
parser.add_argument(
"--base_normal_path",
type=str,
default="../resources/base_normal.png",
help="path to base normal file",
)
parser.add_argument(
"--mu_tex_path",
type=str,
default="../resources/mu_tex.npy",
help="path to mu texture",
)
parser.add_argument(
"--std_tex_path",
type=str,
default="../resources/std_tex.npy",
help="path to std texture",
)
parser.add_argument(
"--mu_norm_path",
type=str,
default="../resources/mu_norm.npy",
help="path to mu normal",
)
parser.add_argument(
"--std_norm_path",
type=str,
default="../resources/std_norm.npy",
help="path to std normal",
)
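# NOTE: --regional_mask_path is referenced by the texture pipeline below; the default
# path here is an assumption and may need to be adjusted to your checkout.
parser.add_argument(
    "--regional_mask_path",
    type=str,
    default="../resources/regional_mask.png",
    help="path to the regional mask used in texture mode (assumed default path)",
)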
# export options
parser.add_argument("--output_filetype", default="png", choices=["png", "jpeg"])
a = parser.parse_args()
EPS = 1e-12
CROP_SIZE = 1000
START_Y = 510
END_Y = 1510
START_X = 548
END_X = 1498
Examples = collections.namedtuple(
"Examples", "paths, inputs, targets, count, steps_per_epoch"
)
if a.func == "train":
if a.mode.startswith("tex"):
Model = collections.namedtuple(
"Model",
"outputs, predict_real, predict_fake, discrim_loss, discrim_grads_and_vars, gen_loss_GAN, gen_loss_L1, gen_loss_tv, gen_grads_and_vars, train",
)
else:
Model = collections.namedtuple(
"Model",
"outputs, predict_real, predict_fake, discrim_loss, discrim_grads_and_vars, gen_loss_GAN, gen_loss_L1, gen_loss_cos, gen_grads_and_vars, train",
)
elif a.func == "freeze":
Model = collections.namedtuple("Model", "outputs")
# mean and std of normal and texture
mu_texture = np.load(a.mu_tex_path)
mu_normal = np.load(a.mu_norm_path)
mu_texture = mu_texture[START_Y:END_Y, START_X:END_X, :]
mu_normal = mu_normal[START_Y:END_Y, START_X:END_X, :]
mu_texture = tf.constant(mu_texture, tf.float32)
mu_normal = tf.constant(mu_normal, tf.float32)
std_texture = np.load(a.std_tex_path)
std_normal = np.load(a.std_norm_path)
std_texture = std_texture[START_Y:END_Y, START_X:END_X, :]
std_normal = std_normal[START_Y:END_Y, START_X:END_X, :]
std_texture = tf.constant(std_texture, tf.float32)
std_normal = tf.constant(std_normal, tf.float32)
def normalize(image, mean=None, std=None):
if mean is None:
norm_image = image / 255.0 * 2 - 1
else:
norm_image = (image - mean) / std
return norm_image
def denormalize(image, mean=None, std=None):
if mean is None:
denorm_image = (image + 1) / 2 * 255
else:
denorm_image = image * std + mean
return denorm_image
def preprocess(image, mean=None, std=None):
with tf.name_scope("preprocess"):
# [0, 1] => [-1, 1]
if mean is None:
return image * 2 - 1
elif std is None:
return image * 255.0 - mean
else:
return (image * 255.0 - mean) / std
def deprocess(image, mean=None, std=None):
with tf.name_scope("deprocess"):
# [-1, 1] => [0, 1]
if mean is None:
return (image + 1) / 2
elif std is None:
return tf.clip_by_value((image + mean) / 255.0, 0, 1)
else:
return tf.clip_by_value((tf.multiply(image, std) + mean) / 255.0, 0, 1)
def preprocess_lab(lab):
with tf.name_scope("preprocess_lab"):
L_chan, a_chan, b_chan = tf.unstack(lab, axis=2)
# L_chan: black and white with input range [0, 100]
# a_chan/b_chan: color channels with input range ~[-110, 110], not exact
# [0, 100] => [-1, 1], ~[-110, 110] => [-1, 1]
return [L_chan / 50 - 1, a_chan / 110, b_chan / 110]
def deprocess_lab(L_chan, a_chan, b_chan):
with tf.name_scope("deprocess_lab"):
# this is axis=3 instead of axis=2 because we process individual images but deprocess batches
return tf.stack([(L_chan + 1) / 2 * 100, a_chan * 110, b_chan * 110], axis=3)
def augment(image, brightness):
# (a, b) color channels, combine with L channel and convert to rgb
a_chan, b_chan = tf.unstack(image, axis=3)
L_chan = tf.squeeze(brightness, axis=3)
lab = deprocess_lab(L_chan, a_chan, b_chan)
rgb = lab_to_rgb(lab)
return rgb
def discrim_conv(batch_input, out_channels, stride):
padded_input = tf.pad(
batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="CONSTANT"
)
return tf.layers.conv2d(
padded_input,
out_channels,
kernel_size=4,
strides=(stride, stride),
padding="valid",
kernel_initializer=tf.random_normal_initializer(0, 0.02),
)
def gen_conv(batch_input, out_channels):
# [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]
initializer = tf.random_normal_initializer(0, 0.02)
if a.separable_conv:
return tf.layers.separable_conv2d(
batch_input,
out_channels,
kernel_size=4,
strides=(2, 2),
padding="same",
depthwise_initializer=initializer,
pointwise_initializer=initializer,
)
else:
return tf.layers.conv2d(
batch_input,
out_channels,
kernel_size=4,
strides=(2, 2),
padding="same",
kernel_initializer=initializer,
)
def gen_deconv(batch_input, out_channels):
# [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]
initializer = tf.random_normal_initializer(0, 0.02)
if a.separable_conv:
_b, h, w, _c = batch_input.shape
resized_input = tf.image.resize_images(
batch_input, [h * 2, w * 2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
)
return tf.layers.separable_conv2d(
resized_input,
out_channels,
kernel_size=4,
strides=(1, 1),
padding="same",
depthwise_initializer=initializer,
pointwise_initializer=initializer,
)
else:
return tf.layers.conv2d_transpose(
batch_input,
out_channels,
kernel_size=4,
strides=(2, 2),
padding="same",
kernel_initializer=initializer,
)
def lrelu(x, a):
with tf.name_scope("lrelu"):
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
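        # check: for x >= 0 the two terms sum to x (linear part); for x < 0 they sum to a*x (leak part)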
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
def batchnorm(inputs):
if a.func == "train":
is_train = True
else:
is_train = True
return tf.layers.batch_normalization(
inputs,
axis=3,
epsilon=1e-5,
momentum=0.1,
training=is_train,
gamma_initializer=tf.random_normal_initializer(1.0, 0.02),
)
def check_image(image):
assertion = tf.assert_equal(
tf.shape(image)[-1], 3, message="image must have 3 color channels"
)
with tf.control_dependencies([assertion]):
image = tf.identity(image)
if image.get_shape().ndims not in (3, 4):
raise ValueError("image must be either 3 or 4 dimensions")
# make the last dimension 3 so that you can unstack the colors
shape = list(image.get_shape())
shape[-1] = 3
image.set_shape(shape)
return image
# based on https://github.com/torch/image/blob/9f65c30167b2048ecbe8b7befdc6b2d6d12baee9/generic/image.c
def rgb_to_lab(srgb):
with tf.name_scope("rgb_to_lab"):
srgb = check_image(srgb)
srgb_pixels = tf.reshape(srgb, [-1, 3])
with tf.name_scope("srgb_to_xyz"):
linear_mask = tf.cast(srgb_pixels <= 0.04045, dtype=tf.float32)
exponential_mask = tf.cast(srgb_pixels > 0.04045, dtype=tf.float32)
rgb_pixels = (srgb_pixels / 12.92 * linear_mask) + (
((srgb_pixels + 0.055) / 1.055) ** 2.4
) * exponential_mask
rgb_to_xyz = tf.constant(
[
# X Y Z
[0.412453, 0.212671, 0.019334], # R
[0.357580, 0.715160, 0.119193], # G
[0.180423, 0.072169, 0.950227], # B
]
)
xyz_pixels = tf.matmul(rgb_pixels, rgb_to_xyz)
# https://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions
with tf.name_scope("xyz_to_cielab"):
# convert to fx = f(X/Xn), fy = f(Y/Yn), fz = f(Z/Zn)
# normalize for D65 white point
xyz_normalized_pixels = tf.multiply(
xyz_pixels, [1 / 0.950456, 1.0, 1 / 1.088754]
)
epsilon = 6 / 29
linear_mask = tf.cast(
xyz_normalized_pixels <= (epsilon ** 3), dtype=tf.float32
)
exponential_mask = tf.cast(
xyz_normalized_pixels > (epsilon ** 3), dtype=tf.float32
)
fxfyfz_pixels = (
xyz_normalized_pixels / (3 * epsilon ** 2) + 4 / 29
) * linear_mask + (xyz_normalized_pixels ** (1 / 3)) * exponential_mask
# convert to lab
fxfyfz_to_lab = tf.constant(
[
# l a b
[0.0, 500.0, 0.0], # fx
[116.0, -500.0, 200.0], # fy
[0.0, 0.0, -200.0], # fz
]
)
lab_pixels = tf.matmul(fxfyfz_pixels, fxfyfz_to_lab) + tf.constant(
[-16.0, 0.0, 0.0]
)
return tf.reshape(lab_pixels, tf.shape(srgb))
def lab_to_rgb(lab):
with tf.name_scope("lab_to_rgb"):
lab = check_image(lab)
lab_pixels = tf.reshape(lab, [-1, 3])
# https://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions
with tf.name_scope("cielab_to_xyz"):
# convert to fxfyfz
lab_to_fxfyfz = tf.constant(
[
# fx fy fz
[1 / 116.0, 1 / 116.0, 1 / 116.0], # l
[1 / 500.0, 0.0, 0.0], # a
[0.0, 0.0, -1 / 200.0], # b
]
)
fxfyfz_pixels = tf.matmul(
lab_pixels + tf.constant([16.0, 0.0, 0.0]), lab_to_fxfyfz
)
# convert to xyz
epsilon = 6 / 29
linear_mask = tf.cast(fxfyfz_pixels <= epsilon, dtype=tf.float32)
exponential_mask = tf.cast(fxfyfz_pixels > epsilon, dtype=tf.float32)
xyz_pixels = (3 * epsilon ** 2 * (fxfyfz_pixels - 4 / 29)) * linear_mask + (
fxfyfz_pixels ** 3
) * exponential_mask
# denormalize for D65 white point
xyz_pixels = tf.multiply(xyz_pixels, [0.950456, 1.0, 1.088754])
with tf.name_scope("xyz_to_srgb"):
xyz_to_rgb = tf.constant(
[
# r g b
[3.2404542, -0.9692660, 0.0556434], # x
[-1.5371385, 1.8760108, -0.2040259], # y
[-0.4985314, 0.0415560, 1.0572252], # z
]
)
rgb_pixels = tf.matmul(xyz_pixels, xyz_to_rgb)
# avoid a slightly negative number messing up the conversion
rgb_pixels = tf.clip_by_value(rgb_pixels, 0.0, 1.0)
linear_mask = tf.cast(rgb_pixels <= 0.0031308, dtype=tf.float32)
exponential_mask = tf.cast(rgb_pixels > 0.0031308, dtype=tf.float32)
srgb_pixels = (rgb_pixels * 12.92 * linear_mask) + (
(rgb_pixels ** (1 / 2.4) * 1.055) - 0.055
) * exponential_mask
return tf.reshape(srgb_pixels, tf.shape(lab))
def load_examples():
if a.input_dir is None or not os.path.exists(a.input_dir):
raise Exception("input_dir does not exist")
input_paths = glob.glob(os.path.join(a.input_dir, "*.jpg"))
decode = tf.image.decode_jpeg
if len(input_paths) == 0:
input_paths = glob.glob(os.path.join(a.input_dir, "*.png"))
decode = tf.image.decode_png
if len(input_paths) == 0:
raise Exception("input_dir contains no image files")
def get_name(path):
name, _ = os.path.splitext(os.path.basename(path))
return name
# if the image names are numbers, sort by the value rather than asciibetically
# having sorted inputs means that the outputs are sorted in test mode
if all(get_name(path).isdigit() for path in input_paths):
input_paths = sorted(input_paths, key=lambda path: int(get_name(path)))
else:
input_paths = sorted(input_paths)
with tf.name_scope("load_images"):
path_queue = tf.train.string_input_producer(input_paths, shuffle=True)
reader = tf.WholeFileReader()
paths, contents = reader.read(path_queue)
raw_input = decode(contents)
raw_input = tf.image.convert_image_dtype(
raw_input, dtype=tf.float32
) # [0,255] => [0,1]
assertion = tf.assert_equal(
tf.shape(raw_input)[2], 3, message="image does not have 3 channels"
)
with tf.control_dependencies([assertion]):
raw_input = tf.identity(raw_input)
# raw_input.set_shape([None, None, 3])
if a.lab_colorization:
# load color and brightness from image, no B image exists here
lab = rgb_to_lab(raw_input)
L_chan, a_chan, b_chan = preprocess_lab(lab)
a_images = tf.expand_dims(L_chan, axis=2)
b_images = tf.stack([a_chan, b_chan], axis=2)
else:
# break apart image pair and move to range [-1, 1]
width = tf.shape(raw_input)[1] # [height, width, channels]
# width = raw_input.get_shape().as_list()[1]
print(width)
if a.mode.startswith("tex"):
# a_images = raw_input[:,:width//4,:]
# b_images = raw_input[:,width//2:3*width//4,:]
a_images = raw_input[:, : width // 4, :]
b_images = raw_input[:, width // 2 : 3 * width // 4, :]
# a_images = raw_input[:,:width//2,:]
# b_images = raw_input[:,width//2:,:]
# print('width = %d' % width)
a_images = preprocess(a_images, mu_texture, std_texture)
b_images = preprocess(b_images, mu_texture, std_texture)
else:
a_rgbimages = preprocess(
raw_input[:, : width // 4, :], mu_texture, std_texture
)
a_nolimages = preprocess(
raw_input[:, width // 4 : width // 2, :], mu_normal, std_normal
)
b_rgbimages = preprocess(
raw_input[:, width // 2 : 3 * width // 4, :],
mu_texture,
std_texture,
)
b_nolimages = preprocess(
raw_input[:, 3 * width // 4 :, :], mu_normal, std_normal
)
a_images = tf.concat([a_nolimages, b_rgbimages], axis=2)
b_images = b_nolimages
a_images.set_shape([None, None, 6])
b_images.set_shape([None, None, 3])
if a.which_direction == "AtoB":
inputs, targets = [a_images, b_images]
elif a.which_direction == "BtoA":
inputs, targets = [b_images, a_images]
else:
raise Exception("invalid direction")
# synchronize seed for image operations so that we do the same operations to both
# input and output images
seed = random.randint(0, 2 ** 31 - 1)
def transform(image, mask=None, aug=False):
r = image
m = mask
if a.flip:
r = tf.image.random_flip_left_right(r, seed=seed)
if m is not None:
m = tf.image.random_flip_left_right(m, seed=seed)
if aug:
r.set_shape((END_Y - START_Y, END_X - START_X, 3))
# area produces a nice downscaling, but does nearest neighbor for upscaling
# assume we're going to be doing downscaling here
r = tf.image.resize_images(
r,
[a.scale_size, int(round(a.scale_size * a.aspect_ratio))],
method=tf.image.ResizeMethod.AREA,
)
offset = [0, 0]
offset[0] = tf.cast(
tf.random_uniform([1], 0, a.scale_size - CROP_SIZE + 1, seed=seed),
dtype=tf.int32,
)[0]
offset[1] = tf.cast(
tf.random_uniform(
[1],
0,
int(round(a.scale_size * a.aspect_ratio))
- int(round(CROP_SIZE * a.aspect_ratio))
+ 1,
seed=seed,
),
dtype=tf.int32,
)[0]
if a.scale_size > CROP_SIZE:
r = tf.image.crop_to_bounding_box(
r,
offset[0],
offset[1],
CROP_SIZE,
int(round(CROP_SIZE * a.aspect_ratio)),
)
elif a.scale_size < CROP_SIZE:
raise Exception("scale size cannot be less than crop size")
return r
with tf.name_scope("input_images"):
if a.mode.startswith("tex"):
mask = Image.open(a.regional_mask_path)
mask = np.asarray(mask, np.float32) / 255.0
mask = mask[START_Y:END_Y, START_X:END_X, :]
input_images = transform(inputs, mask, True)
else:
input_images = transform(inputs)
with tf.name_scope("target_images"):
if a.mode.startswith("tex"):
target_images = transform(targets, mask, False)
else:
target_images = transform(targets)
paths_batch, inputs_batch, targets_batch = tf.train.batch(
[paths, input_images, target_images], batch_size=a.batch_size
)
steps_per_epoch = int(math.ceil(len(input_paths) / a.batch_size))
return Examples(
paths=paths_batch,
inputs=inputs_batch,
targets=targets_batch,
count=len(input_paths),
steps_per_epoch=steps_per_epoch,
)
def crop_and_concat(x1, x2):
with tf.name_scope("crop_and_concat"):
x1_shape = tf.shape(x1)
x2_shape = tf.shape(x2)
# offsets for the top left corner of the crop
offsets = [
0,
tf.cast((x1_shape[1] - x2_shape[1]) // 2, tf.int32),
tf.cast((x1_shape[2] - x2_shape[2]) // 2, tf.int32),
0,
]
size = [x1_shape[0], x2_shape[1], x2_shape[2], x1_shape[3]]
# x1_crop = tf.slice(x1, offsets, size)
x1_crop = tf.strided_slice(
x1, offsets, [size[0], offsets[1] + size[1], offsets[2] + size[2], size[3]]
)
return tf.concat([x1_crop, x2], 3)
def create_generator(generator_inputs, generator_outputs_channels):
layers = []
# encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]
with tf.variable_scope("encoder_1"):
output = gen_conv(generator_inputs, a.ngf)
layers.append(output)
layer_specs = [
a.ngf * 2, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
a.ngf * 4, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
a.ngf * 8, # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]
a.ngf * 8, # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]
a.ngf * 8, # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]
a.ngf * 8, # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]
a.ngf * 8, # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]
]
for out_channels in layer_specs:
with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
rectified = lrelu(layers[-1], 0.2)
# print(rectified.get_shape(), '!!!!!!!!!!!!!!!!!!')
# [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
convolved = gen_conv(rectified, out_channels)
output = batchnorm(convolved)
layers.append(output)
if a.func == "train":
layer_specs = [
(
a.ngf * 8,
0.5,
), # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]
(
a.ngf * 8,
0.5,
), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
(
a.ngf * 8,
0.5,
), # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]
(
a.ngf * 8,
0.0,
), # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]
(
a.ngf * 4,
0.0,
), # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]
(
a.ngf * 2,
0.0,
), # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]
(
a.ngf,
0.0,
), # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]
]
else:
layer_specs = [
(
a.ngf * 8,
0.0,
), # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]
(
a.ngf * 8,
0.0,
), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
(
a.ngf * 8,
0.0,
), # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]
(
a.ngf * 8,
0.0,
), # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]
(
a.ngf * 4,
0.0,
), # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]
(
a.ngf * 2,
0.0,
), # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]
(
a.ngf,
0.0,
), # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]
]
num_encoder_layers = len(layers)
for decoder_layer, (out_channels, dropout) in enumerate(layer_specs):
skip_layer = num_encoder_layers - decoder_layer - 1
with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
if decoder_layer == 0:
# first decoder layer doesn't have skip connections
# since it is directly connected to the skip_layer
input = layers[-1]
else:
input = crop_and_concat(
layers[-1], layers[skip_layer]
) # tf.concat([layers[-1], layers[skip_layer]], 3) #crop_and_concat(layers[-1], layers[skip_layer])
sshape = list(layers[skip_layer].get_shape())
sshape[-1] = sshape[-1] * 2
input.set_shape(sshape)
rectified = tf.nn.relu(input)
output = gen_deconv(rectified, out_channels)
output = batchnorm(output)
if dropout > 0.0:
output = tf.nn.dropout(output, keep_prob=1 - dropout)
layers.append(output)
# decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]
with tf.variable_scope("decoder_1"):
input = crop_and_concat(
layers[-1], layers[0]
) # tf.concat([layers[-1], layers[0]], 3) #crop_and_concat(layers[-1], layers[0])
sshape = list(layers[0].get_shape())
sshape[-1] = sshape[-1] * 2
input.set_shape(sshape)
rectified = tf.nn.relu(input)
output = gen_deconv(rectified, generator_outputs_channels)
if a.mode.startswith("tex"):
output = tf.tanh(output)
x1_shape = tf.shape(output)
x2_shape = tf.shape(generator_inputs)
# offsets for the top left corner of the crop
offsets = [
0,
tf.cast((x1_shape[1] - x2_shape[1]) // 2, tf.int32),
tf.cast((x1_shape[2] - x2_shape[2]) // 2, tf.int32),
0,
]
size = [x2_shape[0], x2_shape[1], x2_shape[2], x1_shape[3]]
output = tf.strided_slice(
output,
offsets,
            [size[0], offsets[1] + size[1], offsets[2] + size[2], size[3]],
)
sshape = list(generator_inputs.get_shape())
if a.mode.startswith("tex") is False:
sshape[-1] = 3
output.set_shape(sshape)
layers.append(output)
return layers[-1]
def create_test_model(inputs):
with tf.variable_scope("generator"):
out_channels = 3
outputs = create_generator(inputs, out_channels)
global_step = tf.train.get_or_create_global_step()
incr_global_step = tf.assign(global_step, global_step + 1)
return Model(
outputs=outputs,
)
def create_train_model(inputs, targets):
def create_discriminator(discrim_inputs, discrim_targets):
n_layers = 4
layers = []
# 2x [batch, height, width, in_channels] => [batch, height, width, in_channels * 2]
input = tf.concat([discrim_inputs, discrim_targets], axis=3)
# layer_1: [batch, 256, 256, in_channels * 2] => [batch, 128, 128, ndf]
with tf.variable_scope("layer_1"):
convolved = discrim_conv(input, a.ndf, stride=2)
rectified = lrelu(convolved, 0.2)
layers.append(rectified)
# layer_2: [batch, 128, 128, ndf] => [batch, 64, 64, ndf * 2]
# layer_3: [batch, 64, 64, ndf * 2] => [batch, 32, 32, ndf * 4]
# layer_4: [batch, 32, 32, ndf * 4] => [batch, 31, 31, ndf * 8]
for i in range(n_layers):
with tf.variable_scope("layer_%d" % (len(layers) + 1)):
out_channels = a.ndf * min(2 ** (i + 1), 8)
stride = 1 if i == n_layers - 1 else 2 # last layer here has stride 1
convolved = discrim_conv(layers[-1], out_channels, stride=stride)
normalized = batchnorm(convolved)
rectified = lrelu(normalized, 0.2)
layers.append(rectified)
# layer_5: [batch, 31, 31, ndf * 8] => [batch, 30, 30, 1]
with tf.variable_scope("layer_%d" % (len(layers) + 1)):
convolved = discrim_conv(rectified, out_channels=1, stride=1)
output = tf.sigmoid(convolved)
layers.append(output)
print(output.get_shape().as_list())
return layers[-1]
with tf.variable_scope("generator"):
out_channels = int(targets.get_shape()[-1])
outputs = create_generator(inputs, out_channels)
# create two copies of discriminator, one for real pairs and one for fake pairs
# they share the same underlying variables
with tf.name_scope("real_discriminator"):
with tf.variable_scope("discriminator"):
# 2x [batch, height, width, channels] => [batch, 30, 30, 1]
predict_real = create_discriminator(inputs, targets)
with tf.name_scope("fake_discriminator"):
with tf.variable_scope("discriminator", reuse=True):
# 2x [batch, height, width, channels] => [batch, 30, 30, 1]
predict_fake = create_discriminator(inputs, outputs)
with tf.name_scope("discriminator_loss"):
# minimizing -tf.log will try to get inputs to 1
# predict_real => 1
# predict_fake => 0
discrim_loss = tf.reduce_mean(
-(tf.log(predict_real + EPS) + tf.log(1 - predict_fake + EPS))
)
with tf.name_scope("generator_loss"):
# predict_fake => 1
# abs(targets - outputs) => 0
gen_loss_GAN = tf.reduce_mean(-tf.log(predict_fake + EPS))
gen_loss_L1 = tf.reduce_mean(tf.abs(targets - outputs))
gen_loss = gen_loss_GAN * a.gan_weight + gen_loss_L1 * a.l1_weight
if a.mode.startswith("tex"):
# NOTE: added by cyj to remove red dots
mask = Image.open(a.regional_mask_path)
mask = np.asarray(mask, np.float32) / 255.0
            mask = mask[START_Y:END_Y, START_X:END_X, :]
_, out_height, out_width, _ = outputs.get_shape().as_list()
tv_mask = np.expand_dims(1 - mask, 0)
gen_loss_tv = Losses.uv_tv_loss2(outputs, tv_mask, tv_mask)
gen_loss = gen_loss + a.tv_weight * gen_loss_tv
else:
targetc = tf.nn.l2_normalize(targets, axis=-1)
outputc = tf.nn.l2_normalize(outputs, axis=-1)
gen_loss_cos = tf.losses.cosine_distance(targetc, outputc, axis=-1)
print("gen_loss_cos", gen_loss_cos.get_shape().as_list())
gen_loss = gen_loss + gen_loss_cos * a.cos_weight
with tf.name_scope("discriminator_train"):
discrim_tvars = [
var
for var in tf.trainable_variables()
if var.name.startswith("discriminator")
]
discrim_optim = tf.train.AdamOptimizer(a.lr, a.beta1)
discrim_grads_and_vars = discrim_optim.compute_gradients(
discrim_loss, var_list=discrim_tvars
)
discrim_train = discrim_optim.apply_gradients(discrim_grads_and_vars)
with tf.name_scope("generator_train"):
with tf.control_dependencies([discrim_train]):
gen_tvars = [
var
for var in tf.trainable_variables()
if var.name.startswith("generator")
]
gen_optim = tf.train.AdamOptimizer(a.lr, a.beta1)
gen_grads_and_vars = gen_optim.compute_gradients(
gen_loss, var_list=gen_tvars
)
gen_train = gen_optim.apply_gradients(gen_grads_and_vars)
ema = tf.train.ExponentialMovingAverage(decay=0.99)
if a.mode.startswith("tex"):
update_losses = ema.apply(
[discrim_loss, gen_loss_GAN, gen_loss_L1, gen_loss_tv]
)
else:
update_losses = ema.apply(
[discrim_loss, gen_loss_GAN, gen_loss_L1, gen_loss_cos]
)
global_step = tf.train.get_or_create_global_step()
incr_global_step = tf.assign(global_step, global_step + 1)
if a.mode.startswith("tex"):
return Model(
predict_real=predict_real,
predict_fake=predict_fake,
discrim_loss=ema.average(discrim_loss),
discrim_grads_and_vars=discrim_grads_and_vars,
gen_loss_GAN=ema.average(gen_loss_GAN),
gen_loss_L1=ema.average(gen_loss_L1),
gen_loss_tv=ema.average(gen_loss_tv),
gen_grads_and_vars=gen_grads_and_vars,
outputs=outputs,
train=tf.group(update_losses, incr_global_step, gen_train),
)
else:
return Model(
predict_real=predict_real,
predict_fake=predict_fake,
discrim_loss=ema.average(discrim_loss),
discrim_grads_and_vars=discrim_grads_and_vars,
gen_loss_GAN=ema.average(gen_loss_GAN),
gen_loss_L1=ema.average(gen_loss_L1),
gen_loss_cos=ema.average(gen_loss_cos),
gen_grads_and_vars=gen_grads_and_vars,
outputs=outputs,
train=tf.group(update_losses, incr_global_step, gen_train),
)
def save_images(fetches, step=None):
image_dir = os.path.join(a.output_dir, "images")
if not os.path.exists(image_dir):
os.makedirs(image_dir)
filesets = []
for i, in_path in enumerate(fetches["paths"]):
name, _ = os.path.splitext(os.path.basename(in_path.decode("utf8")))
fileset = {"name": name, "step": step}
for kind in ["inputs", "outputs", "targets"]:
filename = name + "-" + kind + ".png"
if step is not None:
filename = "%08d-%s" % (step, filename)
fileset[kind] = filename
out_path = os.path.join(image_dir, filename)
contents = fetches[kind][i]
with open(out_path, "wb") as f:
f.write(contents)
filesets.append(fileset)
return filesets
def append_index(filesets, step=False):
index_path = os.path.join(a.output_dir, "index.html")
if os.path.exists(index_path):
index = open(index_path, "a")
else:
index = open(index_path, "w")
index.write("<html><body><table><tr>")
if step:
index.write("<th>step</th>")
index.write("<th>name</th><th>input</th><th>output</th><th>target</th></tr>")
for fileset in filesets:
index.write("<tr>")
if step:
index.write("<td>%d</td>" % fileset["step"])
index.write("<td>%s</td>" % fileset["name"])
for kind in ["inputs", "outputs", "targets"]:
index.write("<td><img src='images/%s'></td>" % fileset[kind])
index.write("</tr>")
return index_path
def train():
if a.seed is None:
a.seed = random.randint(0, 2 ** 31 - 1)
tf.set_random_seed(a.seed)
np.random.seed(a.seed)
random.seed(a.seed)
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
for k, v in a._get_kwargs():
print(k, "=", v)
with open(os.path.join(a.output_dir, "options.json"), "w") as f:
f.write(json.dumps(vars(a), sort_keys=True, indent=4))
examples = load_examples()
print("examples count = %d" % examples.count)
# inputs and targets are [batch_size, height, width, channels]
model = create_train_model(examples.inputs, examples.targets)
# undo colorization splitting on images that we use for display/output
if a.lab_colorization:
if a.which_direction == "AtoB":
# inputs is brightness, this will be handled fine as a grayscale image
# need to augment targets and outputs with brightness
targets = augment(examples.targets, examples.inputs)
outputs = augment(model.outputs, examples.inputs)
# inputs can be deprocessed normally and handled as if they are single channel
# grayscale images
inputs = deprocess(examples.inputs)
elif a.which_direction == "BtoA":
# inputs will be color channels only, get brightness from targets
inputs = augment(examples.inputs, examples.targets)
targets = deprocess(examples.targets)
outputs = deprocess(model.outputs)
else:
raise Exception("invalid direction")
else:
if a.mode.startswith("tex"):
inputs = deprocess(examples.inputs, mu_texture, std_texture)
targets = deprocess(examples.targets, mu_texture, std_texture)
outputs = deprocess(model.outputs, mu_texture, std_texture)
else:
inputs1 = deprocess(examples.inputs[:, :, :, :3], mu_normal, std_normal)
inputs2 = deprocess(examples.inputs[:, :, :, 3:], mu_texture, std_texture)
targets = deprocess(examples.targets, mu_normal, std_normal)
outputs = deprocess(model.outputs, mu_normal, std_normal)
inputs = tf.concat([inputs1, inputs2], axis=2)
def convert(image):
return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)
# reverse any processing on images so they can be written to disk or displayed to user
with tf.name_scope("convert_inputs"):
converted_inputs = convert(inputs)
with tf.name_scope("convert_targets"):
converted_targets = convert(targets)
with tf.name_scope("convert_outputs"):
converted_outputs = convert(outputs)
with tf.name_scope("encode_images"):
display_fetches = {
"paths": examples.paths,
"inputs": tf.map_fn(
tf.image.encode_png,
converted_inputs,
dtype=tf.string,
name="input_pngs",
),
"targets": tf.map_fn(
tf.image.encode_png,
converted_targets,
dtype=tf.string,
name="target_pngs",
),
"outputs": tf.map_fn(
tf.image.encode_png,
converted_outputs,
dtype=tf.string,
name="output_pngs",
),
}
# summaries
with tf.name_scope("inputs_summary"):
tf.summary.image("inputs", converted_inputs)
with tf.name_scope("targets_summary"):
tf.summary.image("targets", converted_targets)
with tf.name_scope("outputs_summary"):
tf.summary.image("outputs", converted_outputs)
with tf.name_scope("predict_real_summary"):
tf.summary.image(
"predict_real",
tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8),
)
with tf.name_scope("predict_fake_summary"):
tf.summary.image(
"predict_fake",
tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8),
)
tf.summary.scalar("discriminator_loss", model.discrim_loss)
tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)
if a.mode.startswith("tex"):
tf.summary.scalar("generator_loss_tv", model.gen_loss_tv)
else:
tf.summary.scalar("generator_loss_cos", model.gen_loss_cos)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name + "/values", var)
for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
tf.summary.histogram(var.op.name + "/gradients", grad)
with tf.name_scope("parameter_count"):
parameter_count = tf.reduce_sum(
[tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()]
)
saver = tf.train.Saver(max_to_keep=0)
logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
with sv.managed_session() as sess:
print("parameter_count =", sess.run(parameter_count))
if a.checkpoint is not None:
print("loading model from checkpoint")
checkpoint = tf.train.latest_checkpoint(a.checkpoint)
saver.restore(sess, checkpoint)
max_steps = 2 ** 32
if a.max_epochs is not None:
max_steps = examples.steps_per_epoch * a.max_epochs
if a.max_steps is not None:
max_steps = a.max_steps
# training
start = time.time()
for step in range(max_steps):
def should(freq):
return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)
options = None
run_metadata = None
if should(a.trace_freq):
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
fetches = {
"train": model.train,
"global_step": sv.global_step,
}
if should(a.progress_freq):
fetches["discrim_loss"] = model.discrim_loss
fetches["gen_loss_GAN"] = model.gen_loss_GAN
fetches["gen_loss_L1"] = model.gen_loss_L1
if a.mode.startswith("tex"):
fetches["gen_loss_tv"] = model.gen_loss_tv
else:
fetches["gen_loss_cos"] = model.gen_loss_cos
if should(a.summary_freq):
fetches["summary"] = sv.summary_op
if should(a.display_freq):
fetches["display"] = display_fetches
results = sess.run(fetches, options=options, run_metadata=run_metadata)
if should(a.summary_freq):
print("recording summary")
sv.summary_writer.add_summary(
results["summary"], results["global_step"]
)
if should(a.display_freq):
print("saving display images")
filesets = save_images(results["display"], step=results["global_step"])
append_index(filesets, step=True)
if should(a.trace_freq):
print("recording trace")
sv.summary_writer.add_run_metadata(
run_metadata, "step_%d" % results["global_step"]
)
if should(a.progress_freq):
# global_step will have the correct step count if we resume from a checkpoint
train_epoch = math.ceil(
results["global_step"] / examples.steps_per_epoch
)
train_step = (results["global_step"] - 1) % examples.steps_per_epoch + 1
rate = (step + 1) * a.batch_size / (time.time() - start)
remaining = (max_steps - step) * a.batch_size / rate
print(
"progress epoch %d step %d image/sec %0.1f remaining %dm"
% (train_epoch, train_step, rate, remaining / 60)
)
print("discrim_loss", results["discrim_loss"])
print("gen_loss_GAN", results["gen_loss_GAN"])
print("gen_loss_L1", results["gen_loss_L1"])
if a.mode.startswith("tex"):
print("gen_loss_tv", results["gen_loss_tv"])
else:
print("gen_loss_cos", results["gen_loss_cos"])
if should(a.save_freq):
print("saving model")
saver.save(
sess,
os.path.join(a.output_dir, "model"),
global_step=sv.global_step,
)
if sv.should_stop():
break
def freeze():
if a.mode.startswith("tex"):
input_image = tf.placeholder(
tf.float32, shape=[1, END_Y - START_Y, END_X - START_X, 3], name="inputs"
)
input_norm = normalize(input_image)
else:
input_tex = tf.placeholder(
tf.float32,
shape=[1, END_Y - START_Y, END_X - START_X, 3],
name="texture_inputs",
)
input_normal = tf.placeholder(
tf.float32,
shape=[1, END_Y - START_Y, END_X - START_X, 3],
name="normal_inputs",
)
input_tex_norm = normalize(input_tex, mu_texture, std_texture)
input_normal_norm = normalize(input_normal, mu_normal, std_normal)
input_norm = tf.concat([input_normal_norm, input_tex_norm], axis=3)
model = create_test_model(input_norm)
if a.mode.startswith("tex"):
output_image = denormalize(model.outputs)
else:
output_image = denormalize(model.outputs, mu_normal, std_normal)
output_image = tf.clip_by_value(output_image, 0, 255)
output_image = tf.identity(output_image, name="outputs")
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if a.checkpoint is not None:
print("loading model from checkpoint")
saver.restore(sess, a.checkpoint)
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, tf.get_default_graph().as_graph_def(), ["outputs"]
)
with tf.gfile.GFile(a.pb_path, "wb") as fp:
fp.write(output_graph_def.SerializeToString())
print("%d ops in the final graph" % len(output_graph_def.node))
def test_texture():
if os.path.exists(a.output_dir) is False:
os.makedirs(a.output_dir)
start = time.time()
base_uv = Image.open(a.base_tex_path)
base_uv = np.asarray(base_uv, np.float32)
with tf.Graph().as_default():
graph_def = tf.GraphDef()
with open(a.pb_path, "rb") as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
end1 = time.time()
with tf.Session() as sess:
tf.initialize_all_variables().run()
input_x = sess.graph.get_tensor_by_name("inputs:0")
output_x = sess.graph.get_tensor_by_name("outputs:0")
for path in glob.glob(os.path.join(a.input_dir, "*D*.png")):
print(path)
img = np.array(Image.open(path)).astype(np.float32)
crop_img = img[START_Y:END_Y, START_X:END_X]
crop_img = np.expand_dims(crop_img, 0)
result = sess.run(output_x, {input_x: crop_img})
result = np.clip(result, 0, 255)
start2 = time.time()
result = sess.run(output_x, {input_x: crop_img})
end2 = time.time()
result = result[0]
mask = np.zeros_like(img)
mask[START_Y:END_Y, START_X:END_X] = 1.0
face_tex = np.zeros_like(img)
face_tex[START_Y:END_Y, START_X:END_X] = result
img = blend_uv(
base_uv / 255, face_tex / 255, mask, match_color=True, times=7
)
img = img * 255
result = Image.fromarray(img.astype(np.uint8))
result.save(os.path.join(a.output_dir, path.split("/")[-1]))
def test_normal():
if os.path.exists(a.output_dir) is False:
os.makedirs(a.output_dir)
base_normal = Image.open(a.base_normal_path)
base_normal = np.asarray(base_normal, np.float32)
with tf.Graph().as_default():
graph_def = tf.GraphDef()
with open(a.pb_path, "rb") as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
with tf.Session() as sess:
tf.initialize_all_variables().run()
texture_input_x = sess.graph.get_tensor_by_name("texture_inputs:0")
normal_input_x = sess.graph.get_tensor_by_name("normal_inputs:0")
output_x = sess.graph.get_tensor_by_name("outputs:0")
# refined texture paths, fitted normal paths
texture_paths = sorted(glob.glob(os.path.join(a.output_dir, "*D.png")))
normal_paths = sorted(glob.glob(os.path.join(a.input_dir, "*N.png")))
for tex_path, norm_path in zip(texture_paths, normal_paths):
print(tex_path, norm_path)
tex_img = np.array(Image.open(tex_path)).astype(np.float32)
crop_tex_img = tex_img[START_Y:END_Y, START_X:END_X]
crop_tex_img = np.expand_dims(crop_tex_img, 0)
norm_img = np.array(Image.open(norm_path)).astype(np.float32)
crop_norm_img = norm_img[START_Y:END_Y, START_X:END_X]
crop_norm_img = np.expand_dims(crop_norm_img, 0)
result = sess.run(
output_x,
{texture_input_x: crop_tex_img, normal_input_x: crop_norm_img},
)
result = np.clip(result, 0, 255)
result = result[0]
mask = np.zeros_like(norm_img)
mask[START_Y:END_Y, START_X:END_X] = 1.0
face_norm = np.zeros_like(norm_img)
face_norm[START_Y:END_Y, START_X:END_X] = result
norm_img = blend_uv(
base_normal / 255, face_norm / 255, mask, match_color=False
)
norm_img = norm_img * 255
result = Image.fromarray(norm_img.astype(np.uint8))
result.save(os.path.join(a.output_dir, norm_path.split("/")[-1]))
if __name__ == "__main__":
if a.func == "train":
train()
elif a.func == "freeze":
freeze()
elif a.func == "test":
if a.mode.startswith("tex"):
test_texture()
else:
test_normal()
|
the-stack_106_16236
|
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for the scripts.pre_commit_linter module.
Do not use this module anywhere else in the code base!
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import functools
import inspect
import threading
import python_utils
def memoize(func):
"""Decorator which provides thread-safe, cached-access to the return values
of function calls.
NOTE: This function uses dicts to manage the cache. This means that all
values provided as arguments to func *must be hashable!*
Args:
func: callable.
Returns:
callable. The same func, but calls to it using the same arguments are
made exactly once.
"""
key_locks = {}
lock_for_key_locks = threading.Lock()
def threadsafe_access(key):
"""Returns a threading.Lock unique to the given key.
Args:
key: *. A hashable value.
Returns:
threading.Lock. A lock unique to the given key.
"""
# Use double-checked locking to prevent race-conditions.
if key not in key_locks:
with lock_for_key_locks:
if key not in key_locks:
key_locks[key] = threading.Lock()
return key_locks[key]
cache = {}
def get_from_cache(key, factory):
"""Returns and associates a factory-provided value to the given key if a
value isn't associated to it yet. Otherwise, returns the pre-existing
associated value.
Args:
key: *. A hashable value.
factory: callable. A value producer that takes no arguments.
Returns:
*. The result of factory(), or the last value to be associated to
key.
"""
if key in cache:
return cache[key]
with threadsafe_access(key):
if key not in cache:
cache[key] = factory()
return cache[key]
# In order to allow calls to functions with default arguments to use the
# same hash as calls which explicitly supply them, we fetch those default
# values and use them to build the kwargs that func will actually see.
arg_names, _, _, defaults = inspect.getargspec(func)
defaults = defaults if defaults is not None else ()
default_func_kwargs = dict(
python_utils.ZIP(arg_names[-len(defaults):], defaults))
@functools.wraps(func)
def memoized_func(*args, **kwargs):
"""The same func, but calls to it using the same argument values are
made exactly once.
Returns:
The value of func(*args, **kwargs).
"""
func_kwargs = default_func_kwargs.copy()
func_kwargs.update(kwargs)
key = (tuple(args), tuple(sorted(func_kwargs.items())))
return get_from_cache(key, factory=lambda: func(*args, **kwargs))
return memoized_func
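# A minimal usage sketch (illustrative only; `expensive_lookup` and `compute`
# are hypothetical names, not part of this module):
#
#     @memoize
#     def expensive_lookup(key, flag=False):
#         return compute(key, flag)
#
# Calls such as expensive_lookup('a') and expensive_lookup('a', flag=False)
# resolve to the same cache key, because default keyword values are folded
# into the key before the lookup, so the wrapped function runs only once.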
|
the-stack_106_16238
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.glance.common.constants import Messages
from cloudroast.glance.fixtures import ImagesFixture
class GetImagesSchema(ImagesFixture):
def test_get_images_schema(self):
"""
@summary: Get images json schema
1) Retrieve the images schema json resp file
2) Get images json schema
3) Verify the response status code is 200
4) Verify that the response body contains the expected images schema as
compared to the images schema json file
"""
with open(self.images.config.images_schema_json, 'r') as DATA:
images_schema_resp = DATA.read().rstrip()
resp = self.images.client.get_images_schema()
self.assertEqual(
resp.status_code, 200,
Messages.STATUS_CODE_MSG.format(200, resp.status_code))
self.assertEqual(resp.content, images_schema_resp)
|
the-stack_106_16239
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 00:08:33 2020
@author: hiroyasu
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
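# The helpers below perform stochastic-gradient updates for a biased matrix
# factorization model in which each rating is approximated as
#     Y_ij ~= mu + U_i . V_j + a_i + b_j
# and the per-sample objective is the L2-regularized halved squared error
#     (reg/2) * (||U_i||^2 + ||V_j||^2 + a_i^2 + b_j^2)
#         + (1/2) * ((Y_ij - mu) - (U_i . V_j + a_i + b_j))^2.
# Each grad_* function returns eta times the gradient of this objective with
# respect to one block of parameters (U_i, V_j, a_i or b_j).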
def grad_U(Ui,Yij,Vj,mu,ai,bj,reg,eta):
return eta*(reg*Ui-((Yij-mu)-(Ui@Vj+ai+bj))*Vj)
def grad_V(Ui,Yij,Vj,mu,ai,bj,reg,eta):
return eta*(reg*Vj-((Yij-mu)-(Ui@Vj+ai+bj))*Ui)
def grad_a(Ui,Yij,Vj,mu,ai,bj,reg,eta):
return eta*(reg*ai-((Yij-mu)-(Ui@Vj+ai+bj)))
def grad_b(Ui,Yij,Vj,mu,ai,bj,reg,eta):
return eta*(reg*bj-((Yij-mu)-(Ui@Vj+ai+bj)))
def get_err(U,V,Y,mu,a,b,idxb,reg=0.0):
error = 0
N = Y.shape[0]
for k in range(N):
i = Y[k,0]-1
j = Y[k,1]-1
if idxb == 1:
Yij = Y[k,2]-mu
Yij_hat = U[i,:]@V[j,:]+a[i]+b[j]
else:
Yij = Y[k,2]
Yij_hat = U[i,:]@V[j,:]
error += (Yij-Yij_hat)**2/2
error = error/N
return error
def train_model(M,N,K,eta,reg,Y,mu,idxb,eps=0.0001,max_epochs=300):
U = np.random.uniform(-0.5,0.5,(M,K))
V = np.random.uniform(-0.5,0.5,(N,K))
if idxb == 1:
a = np.random.uniform(-0.5,0.5,M)
b = np.random.uniform(-0.5,0.5,N)
else:
a = np.zeros(M)
b = np.zeros(N)
mu = 0
DataNum = Y.shape[0]
e = eps
err_tm1 = get_err(U,V,Y,mu,a,b,idxb)
eta_da = 0
eta_db = 0
for epoch in range(max_epochs):
for k in range(DataNum):
i = Y[k,0]-1
j = Y[k,1]-1
Yij = Y[k,2]
if idxb ==1:
eta_da = grad_a(U[i,:],Yij,V[j,:],mu,a[i],b[j],reg,eta)
eta_du = grad_U(U[i,:],Yij,V[j,:],mu,a[i],b[j],reg,eta)
if idxb ==1:
eta_db = grad_b(U[i,:],Yij,V[j,:],mu,a[i],b[j],reg,eta)
eta_dv = grad_V(U[i,:],Yij,V[j,:],mu,a[i],b[j],reg,eta)
U[i,:] = U[i,:]-eta_du
a[i] = a[i]-eta_da
V[j,:] = V[j,:]-eta_dv
b[j] = b[j]-eta_db
err_t = get_err(U,V,Y,mu,a,b,idxb)
Deltm1t = err_tm1-err_t
if epoch == 0:
Del01 = err_tm1-err_t
if Deltm1t/Del01 < e:
print(Deltm1t/Del01)
break
err_tm1 = err_t
return U,V,a,b,err_t
def GetErrors(M,N,K,Y_train,Y_test,mu,idxb):
regs = [0,10**-4,10**-3,10**-2,10**-1,1]
etas = [0.01,0.03,0.06,0.09]
E_ins = []
E_outs = []
for eta in etas:
E_ins_for_lambda = []
E_outs_for_lambda = []
for reg in regs:
print("Training model with M = %s,N = %s,k = %s,eta = %s,reg = %s"%(M,N,K,eta,reg))
U,V,a,b,e_in = train_model(M,N,K,eta,reg,Y_train,mu,idxb)
E_ins_for_lambda.append(e_in)
eout = get_err(U,V,Y_test,mu,a,b,idxb)
E_outs_for_lambda.append(eout)
E_ins.append(E_ins_for_lambda)
E_outs.append(E_outs_for_lambda)
return regs,etas,E_ins,E_outs
def SaveDict(filename,var):
output = open(filename,'wb')
pickle.dump(var,output)
output.close()
pass
def LoadDict(filename):
pkl_file = open(filename,'rb')
varout = pickle.load(pkl_file)
pkl_file.close()
return varout
if __name__ == "__main__":
df = pd.read_csv('data/movies.csv',index_col=0)
M = 943
N = 1682
Y_train = np.loadtxt('data/train.txt',dtype=int)
mu = np.mean(Y_train[:,2])
Y_test = np.loadtxt('data/test.txt',dtype=int)
K = 20
'''
regs,etas,E_ins1,E_outs1 = GetErrors(M,N,K,Y_train,Y_test,mu,0)
SaveDict('data/regs1.pkl',regs)
SaveDict('data/etas1.pkl',etas)
SaveDict('data/E_ins1.pkl',E_ins1)
SaveDict('data/E_outs1.pkl',E_outs1)
regs,etas,E_ins2,E_outs2 = GetErrors(M,N,K,Y_train,Y_test,mu,1)
SaveDict('data/regs2.pkl',regs)
SaveDict('data/etas2.pkl',etas)
SaveDict('data/E_ins2.pkl',E_ins2)
SaveDict('data/E_outs2.pkl',E_outs2)
'''
U,V,a,b,e_in = train_model(M,N,K,0.01,0.1,Y_train,mu,0)
eout = get_err(U,V,Y_test,mu,a,b,0)
np.save('data/U1.npy',U)
np.save('data/V1.npy',V)
U,V,a,b,e_in = train_model(M,N,K,0.03,0.1,Y_train,mu,1)
eout = get_err(U,V,Y_test,mu,a,b,1)
np.save('data/U2.npy',U)
np.save('data/V2.npy',V)
it = 5
i = Y_test[it,0]-1
j = Y_test[it,1]-1
y1 = Y_test[it,2]
y2 = U[i,:]@V[j,:]+a[i]+b[j]+mu
print(y1)
print(y2)
|
the-stack_106_16244
|
# -*- coding: UTF-8 -*-
import logging
import os
import re
from typing import Any, Dict, List, Optional, Text
from rasa_nlu import utils
from rasa_nlu.featurizers import Featurizer
from rasa_nlu.training_data import Message
from rasa_nlu.components import Component
from rasa_nlu.model import Metadata
from rasa_nlu.training_data.training_data import TrainingData
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.tokenizers import jieba_tokenizer
import jieba
from bert_serving.client import ConcurrentBertClient
import numpy as np
from tqdm import tqdm
logger = logging.getLogger(__name__)
class BertFeaturizer(Featurizer):
provides = []
requires = []
defaults = {
"ip": 'localhost',
"port": '8125',
"port_out": '5556',
"show_server_config": False,
"output_fmt": 'ndarray',
"check_version": False,
"identity": None,
"batch_size": 128
}
language_list = None
def __init__(self, component_config):
super(BertFeaturizer, self).__init__(component_config)
ip = self.component_config['ip']
port = self.component_config['port']
port_out = self.component_config['port_out']
show_server_config = self.component_config['show_server_config']
output_fmt = self.component_config['output_fmt']
check_version = self.component_config['check_version']
timeout = self.component_config['timeout']
identity = self.component_config['identity']
self.concurrent_bertClient = ConcurrentBertClient(
ip = ip,
port = int(port),
port_out = int(port_out),
show_server_config = show_server_config,
output_fmt = output_fmt,
check_version = check_version,
timeout = timeout,
identity = identity,
check_length= False
)
@classmethod
def required_packages(cls) -> List[Text]:
return ["numpy", "bert_serving"]
@classmethod
def load(cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any
) -> "Component":
return cls(meta)
def _get_message_text(self, messages):
# all_tokens = [message.data['tokens'] for message in messages]
all_tokens = [list(jieba.cut(message.text)) for message in messages]
bert_embedding = self.concurrent_bertClient.encode(all_tokens, is_tokenized=True)
return np.squeeze(bert_embedding)
def train(self, training_data: TrainingData, cfg: RasaNLUModelConfig = None, **kwargs: Any) -> None:
batch_size = self.component_config['batch_size']
epochs = len(training_data.intent_examples) // batch_size + \
int(len(training_data.intent_examples) % batch_size > 0)
for ep in tqdm(range(epochs), desc="Epochs"):
end_index = (ep+1) * batch_size
start_index = ep * batch_size
examples = training_data.intent_examples[start_index: end_index]
tokens = self._get_message_text(examples)
X = np.array(tokens)
for index, example in enumerate(examples):
example.set("text_features", self._combine_with_existing_text_features(example, X[index]))
def process(self, message: Message, **kwargs) -> None:
features = self._get_message_text([message])
message.set("text_features", self._combine_with_existing_text_features(message, features))
|
the-stack_106_16245
|
import unittest
from pathlib import Path
import pandas as pd
import json
import lusid
import lusid.models as models
from lusidfeature import lusid_feature
from lusidtools import cocoon as cocoon
from lusidtools.cocoon.utilities import create_scope_id
import datetime
from dateutil.tz import tzutc
import logging
logger = logging.getLogger()
class CocoonTestPortfolioGroup(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.portfolio_scope = create_scope_id()
secrets_file = Path(__file__).parent.parent.parent.joinpath("secrets.json")
cls.api_factory = lusid.utilities.ApiClientFactory(
api_secrets_filename=secrets_file
)
cls.unique_portfolios = pd.read_csv(
Path(__file__).parent.joinpath(
"data/port_group_tests/test_1_pg_create_with_portfolios.csv"
)
)["FundCode"].tolist()
def create_portfolio_model(code):
model = models.CreateTransactionPortfolioRequest(
display_name=code,
code=code,
base_currency="GBP",
description="Paper transaction portfolio",
created="2020-02-25T00:00:00Z",
)
return model
try:
for code in cls.unique_portfolios:
cls.api_factory.build(
lusid.api.TransactionPortfoliosApi
).create_portfolio(
scope=cls.portfolio_scope,
create_transaction_portfolio_request=create_portfolio_model(code),
)
except lusid.exceptions.ApiException as e:
if e.status == 404:
logger.error(f"The portfolio {code} already exists")
def log_error_requests_title(cls, domain, responses):
if len(responses.get(domain, {}).get("errors", [])) > 0:
for error in responses[domain]["errors"]:
return logger.error(json.loads(error.body)["title"])
def csv_to_data_frame_with_scope(cls, csv, scope):
data_frame = pd.read_csv(Path(__file__).parent.joinpath(csv))
data_frame["Scope"] = scope
return data_frame
def cocoon_load_from_dataframe(
cls,
scope,
data_frame,
mapping_optional={"values.scope": "Scope", "values.code": "FundCode",},
property_columns=[],
properties_scope=None,
):
return cocoon.cocoon.load_from_data_frame(
api_factory=cls.api_factory,
scope=scope,
data_frame=data_frame,
mapping_required={
"code": "PortGroupCode",
"display_name": "PortGroupDisplayName",
},
mapping_optional=mapping_optional,
file_type="portfolio_group",
property_columns=property_columns,
properties_scope=properties_scope,
)
@lusid_feature("T5-1")
def test_01_pg_create_with_portfolios(self) -> None:
"""
Test description:
------------------
Here we test adding multiple new portfolio groups with multiple portfolios.
Expected outcome:
-----------------
We expect one successful request/response per portfolio group with multiple portfolios.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_1_pg_create_with_portfolios.csv",
self.portfolio_scope,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame
)
self.log_error_requests_title("portfolio_groups", responses)
# Test that there is a successful request per line in the dataframe
self.assertEqual(
len(
[
port_group
for nested_group in [
port_group.portfolios
for port_group in responses["portfolio_groups"]["success"]
]
for port_group in nested_group
]
),
len(data_frame),
)
# Test that all the portfolios in the dataframe are in the request
self.assertEqual(
first=sorted(
[
code.to_dict()
for code in responses["portfolio_groups"]["success"][0].portfolios
],
key=lambda item: item.get("code"),
),
second=sorted(
[
lusid.models.ResourceId(
code=data_frame["FundCode"][1], scope=data_frame["Scope"][1]
).to_dict(),
lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=data_frame["Scope"][0]
).to_dict(),
],
key=lambda item: item.get("code"),
),
)
self.assertEqual(
first=responses["portfolio_groups"]["success"][1].portfolios,
second=[
lusid.models.ResourceId(
code=data_frame["FundCode"][2], scope=data_frame["Scope"][2]
)
],
)
@lusid_feature("T5-2")
def test_02_pg_create_with_no_portfolio(self) -> None:
"""
Test description:
-----------------
Here we test adding one new portfolio group with no portfolios.
Expected outcome:
-----------------
We expect one successful new portfolio group with no portfolios.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_2_pg_create_with_no_portfolio.csv",
self.portfolio_scope,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame, mapping_optional={}
)
self.log_error_requests_title("portfolio_groups", responses)
# Check that the portfolio group is created
self.assertEqual(
first=len(
[
port_group._id
for port_group in responses["portfolio_groups"]["success"]
]
),
second=len(data_frame),
)
# Check that the correct portfolio group code is used
self.assertEqual(
first=responses["portfolio_groups"]["success"][0].id,
second=lusid.models.ResourceId(
scope=test_case_scope, code=data_frame["PortGroupCode"].tolist()[0]
),
)
# Check that the portfolio group request has no portfolios
self.assertTrue(
len(responses["portfolio_groups"]["success"][0].portfolios) == 0
)
@lusid_feature("T5-3")
def test_03_pg_create_multiple_groups_no_portfolio(self) -> None:
"""
Test description:
-----------------
Here we test adding multiple new portfolio groups with no portfolios.
Expected outcome
-----------------
We expect successful requests/responses for multiple new portfolio groups.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_3_pg_create_multiple_groups_no_portfolio.csv",
self.portfolio_scope,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame, mapping_optional={}
)
self.log_error_requests_title("portfolio_groups", responses)
# Check that there is a request per portfolio group
self.assertEqual(
first=len(
[
port_group._id
for port_group in responses["portfolio_groups"]["success"]
]
),
second=len(data_frame),
)
# Check that the portfolio group code matches the code in the dataframe
self.assertEqual(
first=responses["portfolio_groups"]["success"][1].id,
second=lusid.models.ResourceId(
scope=test_case_scope, code=data_frame["PortGroupCode"].tolist()[1]
),
)
@lusid_feature("T5-4")
def test_04_pg_create_with_portfolio_not_exist(self):
"""
Test description:
-----------------
Here we test attempting to add a portfolio which does not exist to a portfolio group.
Expected outcome:
-----------------
We expect the entire request to fail with a PortfolioNotFound error.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_4_pg_create_with_portfolio_not_exist.csv",
self.portfolio_scope,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame
)
self.log_error_requests_title("portfolio_groups", responses)
# Check that LUSID cannot find the portfolio
self.assertEqual(
json.loads(responses["portfolio_groups"]["errors"][0].body)["name"],
"PortfolioNotFound",
)
# Check there are no successful requests
self.assertEqual(len(responses["portfolio_groups"]["success"]), 0)
@lusid_feature("T5-5")
def test_05_pg_create_with_duplicate_portfolios(self):
"""
Test description:
-----------------
Here we test attempting to add two of the same portfolios to a portfolio group.
Expected result:
----------------
We expect that each unique portfolio gets added and duplicates should be ignored.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_5_pg_create_with_duplicate_portfolios.csv",
self.portfolio_scope,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame
)
self.log_error_requests_title("portfolio_groups", responses)
data_frame.drop_duplicates(inplace=True)
# Check that there is a request for each unique portfolio
self.assertEqual(
len(
[
port_group
for nested_group in [
port_group.portfolios
for port_group in responses["portfolio_groups"]["success"]
]
for port_group in nested_group
]
),
len(data_frame),
)
# Check that a request is generated with unique portfolios only
self.assertEqual(
first=sorted(
[
code.to_dict()
for code in responses["portfolio_groups"]["success"][0].portfolios
],
key=lambda item: item.get("code"),
),
second=sorted(
[
lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=data_frame["Scope"][0]
).to_dict(),
lusid.models.ResourceId(
code=data_frame["FundCode"][1], scope=data_frame["Scope"][1]
).to_dict(),
],
key=lambda item: item.get("code"),
),
)
@lusid_feature("T5-6")
def test_06_pg_create_duplicate_port_group(self):
"""
Test description:
-----------------
Here we test creating the same portfolio group twice.
Expected results
-----------------
We expect one successful request for the portfolio group.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_6_pg_create_duplicate_port_group.csv",
self.portfolio_scope,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame, mapping_optional={}
)
self.log_error_requests_title("portfolio_groups", responses)
# Check for one successful request
self.assertEqual(len(responses["portfolio_groups"]["success"]), 1)
# Check the successful request has same code as dataframe portfolio group
self.assertEqual(
first=responses["portfolio_groups"]["success"][0].id,
second=lusid.models.ResourceId(
scope=test_case_scope, code=data_frame["PortGroupCode"].tolist()[0]
),
)
@lusid_feature("T5-7")
def test_07_pg_create_with_properties(self) -> None:
"""
Test description:
-----------------
Here we test creating a portfolio group with properties.
Expected output:
----------------
The response contains the upserted properties.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_7_pg_create_with_properties.csv",
self.portfolio_scope,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope,
data_frame=data_frame,
property_columns=["location", "MifidFlag"],
properties_scope=test_case_scope,
)
self.log_error_requests_title("portfolio_groups", responses)
response_with_properties = self.api_factory.build(
lusid.api.PortfolioGroupsApi
).get_group_properties(
scope=test_case_scope, code=data_frame["PortGroupCode"].tolist()[0],
)
self.assertEqual(
{
"PortfolioGroup/"
+ test_case_scope
+ "/location": lusid.models.ModelProperty(
key="PortfolioGroup/" + test_case_scope + "/location",
value=lusid.models.PropertyValue(label_value="UK"),
effective_from=datetime.datetime.min.replace(tzinfo=tzutc()),
effective_until=datetime.datetime.max.replace(tzinfo=tzutc()),
),
"PortfolioGroup/"
+ test_case_scope
+ "/MifidFlag": lusid.models.ModelProperty(
key="PortfolioGroup/" + test_case_scope + "/MifidFlag",
value=lusid.models.PropertyValue(label_value="Y"),
effective_from=datetime.datetime.min.replace(tzinfo=tzutc()),
effective_until=datetime.datetime.max.replace(tzinfo=tzutc()),
),
},
response_with_properties.properties,
)
@lusid_feature("T5-8")
def test_08_pg_add_bad_portfolio(self):
"""
Description:
------------
Here we test add a portfolio which does not exist to a current portfolio group.
Expected results:
-----------------
The portfolio group is returned without the portfolios added.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_8_pg_add_bad_portfolio.csv",
self.portfolio_scope,
)
# Create the portfolio group as a separate request
port_group_request = lusid.models.CreatePortfolioGroupRequest(
code=data_frame["PortGroupCode"][0],
display_name=data_frame["PortGroupCode"][0],
)
self.api_factory.build(lusid.api.PortfolioGroupsApi).create_portfolio_group(
scope=test_case_scope, create_portfolio_group_request=port_group_request
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame
)
self.log_error_requests_title("portfolio_groups", responses)
self.assertTrue(len(responses["portfolio_groups"]["errors"]) == 0)
self.assertEqual(
first=responses["portfolio_groups"]["success"][0].id,
second=lusid.models.ResourceId(
scope=test_case_scope, code=data_frame["PortGroupCode"].tolist()[0]
),
)
@lusid_feature("T5-9")
def test_09_pg_add_duplicate_portfolio(self) -> None:
"""
Description:
------------
Here we test adding duplicate portfolios to a portfolio group.
Expected outcome:
-----------------
We expect only the one unique portfolio to be added.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_9_pg_add_duplicate_portfolio.csv",
self.portfolio_scope,
)
# Create the portfolio group as a separate request
port_group_request = lusid.models.CreatePortfolioGroupRequest(
code=data_frame["PortGroupCode"][0],
display_name=data_frame["PortGroupCode"][0],
)
self.api_factory.build(lusid.api.PortfolioGroupsApi).create_portfolio_group(
scope=test_case_scope, create_portfolio_group_request=port_group_request
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame
)
self.log_error_requests_title("portfolio_groups", responses)
self.assertEqual(
first=responses["portfolio_groups"]["success"][0].portfolios[0],
second=lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=data_frame["Scope"][0]
),
)
@lusid_feature("T5-10")
def test_10_pg_add_no_new_portfolio(self) -> None:
"""
Test description:
------------
Here we test adding an existing portfolio to a portfolio group.
Expected result:
----------------
The portfolio group response should be returned with one unmodified portfolio.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_10_pg_add_no_new_portfolio.csv",
self.portfolio_scope,
)
port_group_request = lusid.models.CreatePortfolioGroupRequest(
code=data_frame["PortGroupCode"][0],
display_name=data_frame["PortGroupCode"][0],
values=[
lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=self.portfolio_scope
)
],
)
self.api_factory.build(lusid.api.PortfolioGroupsApi).create_portfolio_group(
scope=test_case_scope, create_portfolio_group_request=port_group_request,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame,
)
self.log_error_requests_title("portfolio_groups", responses)
self.assertEqual(
first=responses["portfolio_groups"]["success"][0].portfolios[0],
second=lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=data_frame["Scope"][0]
),
)
@lusid_feature("T5-11")
def test_11_pg_add_bad_and_good_portfolios(self):
"""
Test description:
-----------------
Here we test updating a portfolio group with good and bad portfolios.
Expected result:
-----------------
Good portfolios should be added and bad ones not added.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_11_pg_add_bad_and_good_portfolios.csv",
self.portfolio_scope,
)
# Create the portfolio group as a separate request
port_group_request = lusid.models.CreatePortfolioGroupRequest(
code=data_frame["PortGroupCode"][0],
display_name=data_frame["PortGroupCode"][0],
)
self.api_factory.build(lusid.api.PortfolioGroupsApi).create_portfolio_group(
scope=test_case_scope, create_portfolio_group_request=port_group_request
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame
)
self.log_error_requests_title("portfolio_groups", responses)
remove_dupe_df = data_frame[~data_frame["FundCode"].str.contains("BAD_PORT")]
self.assertEqual(
first=sorted(
[
code.to_dict()
for code in responses["portfolio_groups"]["success"][0].portfolios
],
key=lambda item: item.get("code"),
),
second=sorted(
[
lusid.models.ResourceId(
code=remove_dupe_df["FundCode"].tolist()[0],
scope=self.portfolio_scope,
).to_dict(),
lusid.models.ResourceId(
code=remove_dupe_df["FundCode"].tolist()[1],
scope=self.portfolio_scope,
).to_dict(),
],
key=lambda item: item.get("code"),
),
)
@lusid_feature("T5-12")
def test_12_pg_add_portfolios_different_scopes(self) -> None:
"""
Test description:
-----------------
Here we test adding portfolios with multiple scopes.
Expected outcome:
-----------------
The request should be successful and return portfolios from multiple scopes.
"""
test_case_scope = create_scope_id()
data_frame = self.csv_to_data_frame_with_scope(
"data/port_group_tests/test_12_pg_add_portfolios_different_scopes.csv",
self.portfolio_scope,
)
port_scope_for_test = create_scope_id()
self.api_factory.build(lusid.api.TransactionPortfoliosApi).create_portfolio(
scope=port_scope_for_test,
create_transaction_portfolio_request=models.CreateTransactionPortfolioRequest(
display_name=data_frame["FundCode"][0],
code=data_frame["FundCode"][0],
base_currency="GBP",
),
)
port_group_request = lusid.models.CreatePortfolioGroupRequest(
code=data_frame["PortGroupCode"][0],
display_name=data_frame["PortGroupCode"][0],
values=[
lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=port_scope_for_test
)
],
)
self.api_factory.build(lusid.api.PortfolioGroupsApi).create_portfolio_group(
scope=test_case_scope, create_portfolio_group_request=port_group_request,
)
responses = self.cocoon_load_from_dataframe(
scope=test_case_scope, data_frame=data_frame,
)
self.log_error_requests_title("portfolio_groups", responses)
self.assertTrue(
expr=all(
[
id in responses["portfolio_groups"]["success"][0].portfolios
for id in [
lusid.models.ResourceId(
code=data_frame["FundCode"][1], scope=data_frame["Scope"][1]
),
lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=port_scope_for_test
),
lusid.models.ResourceId(
code=data_frame["FundCode"][0], scope=data_frame["Scope"][0]
),
lusid.models.ResourceId(
code=data_frame["FundCode"][2], scope=data_frame["Scope"][2]
),
]
]
)
)
|
the-stack_106_16247
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A DDPG Agent.
Implements the Deep Deterministic Policy Gradient (DDPG) algorithm from
"Continuous control with deep reinforcement learning" - Lilicrap et al.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import tensorflow as tf
from tf_agents.agents import tf_agent
from tf_agents.policies import actor_policy
from tf_agents.policies import ou_noise_policy
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
class DdpgInfo(collections.namedtuple(
'DdpgInfo', ('actor_loss', 'critic_loss'))):
pass
@gin.configurable
class DdpgAgent(tf_agent.TFAgent):
"""A DDPG Agent."""
def __init__(self,
time_step_spec,
action_spec,
actor_network,
critic_network,
actor_optimizer=None,
critic_optimizer=None,
ou_stddev=1.0,
ou_damping=1.0,
target_update_tau=1.0,
target_update_period=1,
dqda_clipping=None,
td_errors_loss_fn=None,
gamma=1.0,
reward_scale_factor=1.0,
gradient_clipping=None,
debug_summaries=False,
summarize_grads_and_vars=False,
train_step_counter=None,
name=None):
"""Creates a DDPG Agent.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
actor_network: A tf_agents.network.Network to be used by the agent. The
network will be called with call(observation, step_type).
critic_network: A tf_agents.network.Network to be used by the agent. The
network will be called with call(observation, action, step_type).
actor_optimizer: The optimizer to use for the actor network.
critic_optimizer: The optimizer to use for the critic network.
ou_stddev: Standard deviation for the Ornstein-Uhlenbeck (OU) noise added
in the default collect policy.
ou_damping: Damping factor for the OU noise added in the default collect
policy.
target_update_tau: Factor for soft update of the target networks.
target_update_period: Period for soft update of the target networks.
dqda_clipping: when computing the actor loss, clips the gradient dqda
element-wise between [-dqda_clipping, dqda_clipping]. Does not perform
clipping if dqda_clipping == 0.
td_errors_loss_fn: A function for computing the TD errors loss. If None,
a default value of elementwise huber_loss is used.
gamma: A discount factor for future rewards.
reward_scale_factor: Multiplicative scale for the reward.
gradient_clipping: Norm length to clip gradients.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
name: The name of this agent. All variables in this module will fall
under that name. Defaults to the class name.
"""
tf.Module.__init__(self, name=name)
self._actor_network = actor_network
self._target_actor_network = self._actor_network.copy(
name='TargetActorNetwork')
self._critic_network = critic_network
self._target_critic_network = self._critic_network.copy(
name='TargetCriticNetwork')
self._actor_optimizer = actor_optimizer
self._critic_optimizer = critic_optimizer
self._ou_stddev = ou_stddev
self._ou_damping = ou_damping
self._target_update_tau = target_update_tau
self._target_update_period = target_update_period
self._dqda_clipping = dqda_clipping
self._td_errors_loss_fn = (
td_errors_loss_fn or common.element_wise_huber_loss)
self._gamma = gamma
self._reward_scale_factor = reward_scale_factor
self._gradient_clipping = gradient_clipping
self._update_target = self._get_target_updater(
target_update_tau, target_update_period)
policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec, action_spec=action_spec,
actor_network=self._actor_network, clip=True)
collect_policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec, action_spec=action_spec,
actor_network=self._actor_network, clip=False)
collect_policy = ou_noise_policy.OUNoisePolicy(
collect_policy,
ou_stddev=self._ou_stddev,
ou_damping=self._ou_damping,
clip=True)
super(DdpgAgent, self).__init__(
time_step_spec,
action_spec,
policy,
collect_policy,
train_sequence_length=2 if not self._actor_network.state_spec else None,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter)
def _initialize(self):
common.soft_variables_update(
self._critic_network.variables,
self._target_critic_network.variables,
tau=1.0)
common.soft_variables_update(
self._actor_network.variables,
self._target_actor_network.variables,
tau=1.0)
def _get_target_updater(self, tau=1.0, period=1):
"""Performs a soft update of the target network parameters.
For each weight w_s in the original network, and its corresponding
weight w_t in the target network, a soft update is:
w_t = (1 - tau) * w_t + tau * w_s
Args:
tau: A float scalar in [0, 1]. Default `tau=1.0` means hard update.
period: Step interval at which the target networks are updated.
Returns:
An operation that performs a soft update of the target network parameters.
"""
with tf.name_scope('get_target_updater'):
def update():
# TODO(b/124381161): What about observation normalizer variables?
critic_update = common.soft_variables_update(
self._critic_network.variables,
self._target_critic_network.variables, tau)
actor_update = common.soft_variables_update(
self._actor_network.variables, self._target_actor_network.variables,
tau)
return tf.group(critic_update, actor_update)
return common.Periodically(update, period, 'periodic_update_targets')
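# Worked illustration (values assumed here, not taken from the code): with
# tau=0.005 and period=1, every call moves each target weight a small step
# toward the online network, w_t <- 0.995 * w_t + 0.005 * w_s, whereas the
# default tau=1.0 simply copies the online weights (a hard update) every
# `period` train steps.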
def _experience_to_transitions(self, experience):
transitions = trajectory.to_transition(experience)
# Remove time dim if we are not using a recurrent network.
if not self._actor_network.state_spec:
transitions = tf.nest.map_structure(lambda x: tf.squeeze(x, [1]),
transitions)
time_steps, policy_steps, next_time_steps = transitions
actions = policy_steps.action
return time_steps, actions, next_time_steps
def _train(self, experience, weights=None):
time_steps, actions, next_time_steps = self._experience_to_transitions(
experience)
# TODO(b/124382524): Apply a loss mask or filter boundary transitions.
critic_variables = self._critic_network.variables
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert critic_variables, 'No critic variables to optimize.'
tape.watch(critic_variables)
critic_loss = self.critic_loss(time_steps, actions, next_time_steps,
weights=weights)
tf.debugging.check_numerics(critic_loss, 'Critic loss is inf or nan.')
critic_grads = tape.gradient(critic_loss, critic_variables)
self._apply_gradients(critic_grads, critic_variables,
self._critic_optimizer)
actor_variables = self._actor_network.variables
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert actor_variables, 'No actor variables to optimize.'
tape.watch(actor_variables)
actor_loss = self.actor_loss(time_steps, weights=weights)
tf.debugging.check_numerics(actor_loss, 'Actor loss is inf or nan.')
actor_grads = tape.gradient(actor_loss, actor_variables)
self._apply_gradients(actor_grads, actor_variables, self._actor_optimizer)
self.train_step_counter.assign_add(1)
self._update_target()
# TODO(b/124382360): Compute per element TD loss and return in loss_info.
total_loss = actor_loss + critic_loss
return tf_agent.LossInfo(total_loss,
DdpgInfo(actor_loss, critic_loss))
def _apply_gradients(self, gradients, variables, optimizer):
# Tuple is used for py3, where zip is a generator producing values once.
grads_and_vars = tuple(zip(gradients, variables))
if self._gradient_clipping is not None:
grads_and_vars = eager_utils.clip_gradient_norms(grads_and_vars,
self._gradient_clipping)
if self._summarize_grads_and_vars:
eager_utils.add_variables_summaries(grads_and_vars,
self.train_step_counter)
eager_utils.add_gradients_summaries(grads_and_vars,
self.train_step_counter)
optimizer.apply_gradients(grads_and_vars)
@common.function
def critic_loss(self,
time_steps,
actions,
next_time_steps,
weights=None):
"""Computes the critic loss for DDPG training.
Args:
time_steps: A batch of timesteps.
actions: A batch of actions.
next_time_steps: A batch of next timesteps.
weights: Optional scalar or element-wise (per-batch-entry) importance
weights.
Returns:
critic_loss: A scalar critic loss.
"""
with tf.name_scope('critic_loss'):
target_actions, _ = self._target_actor_network(
next_time_steps.observation, next_time_steps.step_type)
target_critic_net_input = (next_time_steps.observation, target_actions)
target_q_values, _ = self._target_critic_network(
target_critic_net_input, next_time_steps.step_type)
td_targets = tf.stop_gradient(
self._reward_scale_factor * next_time_steps.reward +
self._gamma * next_time_steps.discount * target_q_values)
critic_net_input = (time_steps.observation, actions)
q_values, _ = self._critic_network(critic_net_input,
time_steps.step_type)
critic_loss = self._td_errors_loss_fn(td_targets, q_values)
if nest_utils.is_batched_nested_tensors(
time_steps, self.time_step_spec, num_outer_dims=2):
# Do a sum over the time dimension.
critic_loss = tf.reduce_sum(critic_loss, axis=1)
if weights is not None:
critic_loss *= weights
critic_loss = tf.reduce_mean(critic_loss)
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='critic_loss', data=critic_loss, step=self.train_step_counter)
if self._debug_summaries:
td_errors = td_targets - q_values
common.generate_tensor_summaries('td_errors', td_errors,
self.train_step_counter)
common.generate_tensor_summaries('td_targets', td_targets,
self.train_step_counter)
common.generate_tensor_summaries('q_values', q_values,
self.train_step_counter)
return critic_loss
@common.function
def actor_loss(self, time_steps, weights=None):
"""Computes the actor_loss for DDPG training.
Args:
time_steps: A batch of timesteps.
weights: Optional scalar or element-wise (per-batch-entry) importance
weights.
# TODO(b/124383618): Add an action norm regularizer.
Returns:
actor_loss: A scalar actor loss.
"""
with tf.name_scope('actor_loss'):
actions, _ = self._actor_network(time_steps.observation,
time_steps.step_type)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(actions)
q_values, _ = self._critic_network((time_steps.observation, actions),
time_steps.step_type)
actions = tf.nest.flatten(actions)
dqdas = tape.gradient([q_values], actions)
actor_losses = []
for dqda, action in zip(dqdas, actions):
if self._dqda_clipping is not None:
dqda = tf.clip_by_value(dqda, -1 * self._dqda_clipping,
self._dqda_clipping)
loss = common.element_wise_squared_loss(
tf.stop_gradient(dqda + action), action)
if nest_utils.is_batched_nested_tensors(
time_steps, self.time_step_spec, num_outer_dims=2):
# Sum over the time dimension.
loss = tf.reduce_sum(loss, axis=1)
if weights is not None:
loss *= weights
loss = tf.reduce_mean(loss)
actor_losses.append(loss)
actor_loss = tf.add_n(actor_losses)
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='actor_loss', data=actor_loss, step=self.train_step_counter)
return actor_loss
|
the-stack_106_16250
|
from datetime import timedelta
from feast import Entity, FeatureView, Field, RedshiftSource, ValueType
from feast.types import Float32, Int64
# Define an entity for the driver. Entities can be thought of as primary keys used to
# retrieve features. Entities are also used to join multiple tables/views during the
# construction of feature vectors
driver = Entity(
# Name of the entity. Must be unique within a project
name="driver",
# The join key of an entity describes the storage level field/column on which
# features can be looked up. The join key is also used to join feature
# tables/views when building feature vectors
join_key="driver_id",
# The storage level type for an entity
value_type=ValueType.INT64,
)
# Indicates a data source from which feature values can be retrieved. Sources are queried when building training
# datasets or materializing features into an online store.
driver_stats_source = RedshiftSource(
# The Redshift table where features can be found
table="feast_driver_hourly_stats",
# The event timestamp is used for point-in-time joins and for ensuring only
# features within the TTL are returned
timestamp_field="event_timestamp",
# The (optional) created timestamp is used to ensure there are no duplicate
# feature rows in the offline store or when building training datasets
created_timestamp_column="created",
# The Redshift database containing the source table.
database="%REDSHIFT_DATABASE%",
)
# Feature views are a grouping based on how features are stored in either the
# online or offline store.
driver_stats_fv = FeatureView(
# The unique name of this feature view. Two feature views in a single
# project cannot have the same name
name="driver_hourly_stats",
# The list of entities specifies the keys required for joining or looking
# up features from this feature view. The reference provided in this field
# correspond to the name of a defined entity (or entities)
entities=["driver"],
# The timedelta is the maximum age that each feature value may have
# relative to its lookup time. For historical features (used in training),
# TTL is relative to each timestamp provided in the entity dataframe.
# TTL also allows for eviction of keys from online stores and limits the
# amount of historical scanning required for historical feature values
# during retrieval
ttl=timedelta(weeks=52),
# The list of features defined below act as a schema to both define features
# for both materialization of features into a store, and are used as references
# during retrieval for building a training dataset or serving features
schema=[
Field(name="conv_rate", dtype=Float32),
Field(name="acc_rate", dtype=Float32),
Field(name="avg_daily_trips", dtype=Int64),
],
# Batch sources are used to find feature values. In the case of this feature
# view we will query a source table on Redshift for driver statistics
# features
source=driver_stats_source,
# Tags are user defined key/value pairs that are attached to each
# feature view
tags={"team": "driver_performance"},
)
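# A minimal retrieval sketch (illustrative only; it assumes this repo has been
# applied with `feast apply`, that driver_id 1001 has been materialized into
# the online store, and that FeatureStore is pointed at this repo):
#
#     from feast import FeatureStore
#
#     store = FeatureStore(repo_path=".")
#     online_features = store.get_online_features(
#         features=[
#             "driver_hourly_stats:conv_rate",
#             "driver_hourly_stats:avg_daily_trips",
#         ],
#         entity_rows=[{"driver_id": 1001}],
#     ).to_dict()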
|
the-stack_106_16251
|
#
# %CopyrightBegin%
#
# Copyright Ericsson AB 2013-2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# %CopyrightEnd%
#
import re

has_exited = False
def stop_handler (event):
global has_exited
if isinstance(event, gdb.SignalEvent):
print("exit code: %s" % (event.stop_signal))
has_exited = True
gdb.events.stop.connect (stop_handler)
gdb.execute('continue')
while not has_exited:
r = gdb.execute('when', to_string=True)
m = re.match("[^0-9]*([0-9]+)", r)
if m:
event = int(m.group(1));
gdb.execute('start ' + str(event + 1));
gdb.execute('continue')
gdb.events.stop.disconnect (stop_handler)
gdb.execute('file ' + str(gdb.parse_and_eval("$etp_beam_executable")))
gdb.execute('break main')
gdb.execute('reverse-continue')
|
the-stack_106_16254
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import yaml
from .utils import *
def readConfig(configfile, output_dir=None, input_dir=None, backup_dir=None, interval=None, logfile=None, loglevel=None, env=None, logger=None):
""" Read a config file or return a default config """
if not env:
env = os.environ.copy()
default_config = {
# Default "schema" prefix
"type_prefix" : "http://www.arkivverket.no/standarder/noark5/arkivstruktur/",
# Default subject prefix
"subject_prefix" : "http://sesam.io/sys1/",
# Default id elements
"ids" : ["systemID", "arkivskaperID"],
# ObjectElements are the names of XML elements that should produce new RDF resources
# The form is Key : idElement - if the idElement begins with a "@" it is an attribute,
# else it is a element name. If there are several, only the last value is used.
# if idElement is None, it is treated as a blank node in the output
# if no "subject_prefix" is given, the default is used
"ObjectElements" : {
},
"output_dir" : "",
"logfile" : logfile,
"loglevel" : loglevel
}
config_file = configfile.strip()
if not os.path.isabs(config_file):
root_folder = getCurrDir()
config_file = os.path.join(root_folder, config_file)
if logger:
logger.debug("Reading config file from '%s'..." % config_file)
if os.path.isfile(config_file):
stream = open(config_file, 'r')
config = yaml.safe_load(stream)
stream.close()
else:
config = {}
msg = "Could not find config file '%s'. Using defaults." % config_file
if logger:
logger.warning(msg)
default_config.update(config)
# Command line overrides config
if output_dir:
default_config["output_dir"] = output_dir
if input_dir:
default_config["input_dir"] = input_dir
if backup_dir:
default_config["backup_dir"] = backup_dir
if not os.path.isabs(default_config["output_dir"]):
root_folder = getCurrDir()
default_config["output_dir"] = os.path.join(root_folder, default_config["output_dir"])
if not os.path.isabs(default_config["input_dir"]):
root_folder = getCurrDir()
default_config["input_dir"] = os.path.join(root_folder, default_config["input_dir"])
if not os.path.isabs(default_config["backup_dir"]):
root_folder = env.get("SESAM_DATA", getCurrDir())
default_config["backup_dir"] = os.path.join(root_folder, default_config["backup_dir"])
if default_config["logfile"] is not None and not os.path.isabs(default_config["logfile"]):
log_folder = getCurrDir()
assertDir(log_folder)
default_config["logfile"] = os.path.join(log_folder, default_config["logfile"])
if not default_config["loglevel"]:
default_config["loglevel"] = loglevel
if interval is not None and interval > 0:
default_config["interval"] = interval
assertDir(default_config["input_dir"])
assertDir(default_config["output_dir"])
assertDir(default_config["backup_dir"])
return default_config
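# Example of a YAML config file this reader understands (illustrative values
# only; every key is optional and falls back to the defaults above):
#
#     type_prefix: "http://www.arkivverket.no/standarder/noark5/arkivstruktur/"
#     subject_prefix: "http://sesam.io/sys1/"
#     ids: ["systemID", "arkivskaperID"]
#     ObjectElements:
#       arkiv: "systemID"
#     input_dir: "input"
#     output_dir: "output"
#     backup_dir: "backup"
#
# Keys present in the file override the defaults, and the output_dir,
# input_dir, backup_dir and interval command-line arguments override the file.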
|
the-stack_106_16255
|
# ------------------------------------------------------------------------------
# pose.pytorch
# Copyright (c) 2018-present Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import _init_paths
from config import cfg
from config import update_config
from core.loss import JointsMSELoss
#from core.function import validate
from utils.utils import create_logger
import dataset
import models
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
# philly
parser.add_argument('--modelDir',
help='model directory',
type=str,
default='')
parser.add_argument('--logDir',
help='log directory',
type=str,
default='')
parser.add_argument('--dataDir',
help='data directory',
type=str,
default='')
parser.add_argument('--prevModelDir',
help='prev Model directory',
type=str,
default='')
args = parser.parse_args()
return args
def copy_prev_models(prev_models_dir, model_dir):
import shutil
vc_folder = '/hdfs/' \
+ '/' + os.environ['PHILLY_VC']
source = prev_models_dir
# If path is set as "sys/jobs/application_1533861538020_2366/models" prefix with the location of vc folder
source = vc_folder + '/' + source if not source.startswith(vc_folder) \
else source
destination = model_dir
if os.path.exists(source) and os.path.exists(destination):
for file in os.listdir(source):
source_file = os.path.join(source, file)
destination_file = os.path.join(destination, file)
if not os.path.exists(destination_file):
print("=> copying {0} to {1}".format(
source_file, destination_file))
shutil.copytree(source_file, destination_file)
else:
print('=> {} or {} does not exist'.format(source, destination))
def main():
args = parse_args()
update_config(cfg, args)
if args.prevModelDir and args.modelDir:
# copy pre models for philly
copy_prev_models(args.prevModelDir, args.modelDir)
logger, final_output_dir, tb_log_dir = create_logger(
cfg, args.cfg, 'valid')
logger.info('########################################')
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
cfg, is_train=False
)
if cfg.TEST.MODEL_FILE:
#logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
else:
model_state_file = os.path.join(
final_output_dir, 'final_state.pth'
)
#logger.info('=> loading model from {}'.format(model_state_file))
model.load_state_dict(torch.load(model_state_file))
model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
# define loss function (criterion) and optimizer
criterion = JointsMSELoss(
use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT
).cuda()
# Data loading code
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
transforms.Compose([
transforms.ToTensor(),
normalize,
])
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
shuffle=False,
num_workers=cfg.WORKERS,
pin_memory=True
)
### importing train/validate functions
if not cfg.MODEL.SPATIOTEMPORAL_POSE_AGGREGATION:
from core.function import validate
else:
from core.function_PoseAgg import validate
####
# evaluate on validation set
#logger.info('### Method: {} ###'.format(cfg.EXPERIMENT_NAME))
validate(cfg, valid_loader, valid_dataset, model, criterion,
final_output_dir, tb_log_dir)
if __name__ == '__main__':
main()
|
the-stack_106_16256
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "molssi_train/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
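# Illustrative note on the registry above (no additional functionality): a function
# decorated with @register_vcs_handler("git", "pieces_from_vcs") becomes reachable as
# HANDLERS["git"]["pieces_from_vcs"], so callers can dispatch by VCS name and method
# string instead of importing each handler directly.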
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
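# Worked examples for render_pep440 (illustrative only, derived from the logic above):
#   closest-tag="1.2", distance=0, dirty=False  ->  "1.2"
#   closest-tag="1.2", distance=3, dirty=True   ->  "1.2+3.g<short>.dirty"
#   closest-tag=None,  distance=7, dirty=False  ->  "0+untagged.7.g<short>"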
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
the-stack_106_16257
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2009-2017 German Aerospace Center (DLR) and others.
# This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v2.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v20.html
# @file runner.py
# @author Lena Kalleske
# @author Daniel Krajzewicz
# @author Michael Behrisch
# @author Jakob Erdmann
# @date 2009-03-26
# @version $Id$
from __future__ import absolute_import
from __future__ import print_function
from select import select
import termios
import os
import sys
import optparse
import subprocess
import random
import time
import cv2
import curses
class Runner:
def __init__(self):
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary # noqa
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
import traci
options = self.get_options()
# this script has been called from the command line. It will start sumo as a
# server, then connect and run
if options.nogui:
sumoBinary = checkBinary('sumo')
else:
sumoBinary = checkBinary('sumo-gui')
# first, generate the route file for this simulation
self.generate_routefile()
# this is the normal way of using traci. sumo is started as a
# subprocess and then the python script connects and runs
traci.start([sumoBinary, "-c", "data/cross.sumocfg",
"--tripinfo-output", "tripinfo.xml"])
self.run()
stdscr = curses.initscr()
curses.cbreak()
stdscr.keypad(1)
stdscr.refresh()
key = ''
# we need to import python modules from the $SUMO_HOME/tools directory
def generate_routefile(self):
random.seed(42) # make tests reproducible
N = 3600 # number of time steps
# demand per second from different directions
pWE = 1. / 10
pEW = 1. / 11
pNS = 1. / 30
with open("data/cross_auto.rou.xml", "w") as routes:
print("""<routes>
<vTypeDistribution id="mixed">
<vType id="car" vClass="passenger" speedDev="0.2" latAlignment="compact" probability="0.3"/>
<vType id="moped" vClass="moped" speedDev="0.4" latAlignment="compact" probability="0.7"/>
</vTypeDistribution>
<route id="r0" edges="51o 1i 2o 52i"/>
<route id="r1" edges="51o 1i 4o 54i"/>
<route id="r2" edges="51o 1i 3o 53i"/>
<route id="r3" edges="54o 4i 3o 53i"/>
<route id="r4" edges="54o 4i 1o 51i"/>
<route id="r5" edges="54o 4i 2o 52i"/>
<route id="r6" edges="52o 2i 1o 51i"/>
<route id="r7" edges="52o 2i 4o 54i"/>
<route id="r8" edges="52o 2i 3o 53i"/>
<route id="r9" edges="53o 3i 4o 54i"/>
<route id="r10" edges="53o 3i 1o 51i"/>
<route id="r11" edges="53o 3i 2o 52i"/>
<flow id="mixed1" begin="0" end="1500" number="100" route="r0" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed2" begin="0" end="1500" number="100" route="r1" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed3" begin="0" end="1500" number="100" route="r2" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed4" begin="0" end="1500" number="100" route="r3" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed5" begin="0" end="1500" number="100" route="r4" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed6" begin="0" end="1500" number="100" route="r5" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed7" begin="0" end="1500" number="100" route="r6" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed8" begin="0" end="1500" number="100" route="r7" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed9" begin="0" end="1500" number="100" route="r8" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed10" begin="0" end="1500" number="100" route="r9" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed11" begin="0" end="1500" number="100" route="r10" type="mixed" departLane="random" departPosLat="random"/>
<flow id="mixed12" begin="0" end="1500" number="100" route="r11" type="mixed" departLane="random" departPosLat="random"/>
</routes>""", file=routes)
lastVeh = 0
vehNr = 0
''' for i in range(N):
if random.uniform(0, 1) < pWE:
print(' <vehicle id="right_%i" type="typeWE" route="right" depart="%i" />' % (
vehNr, i), file=routes)
vehNr += 1
lastVeh = i
if random.uniform(0, 1) < pEW:
print(' <vehicle id="left_%i" type="typeWE" route="left" depart="%i" />' % (
vehNr, i), file=routes)
vehNr += 1
lastVeh = i
if random.uniform(0, 1) < pNS:
print(' <vehicle id="down_%i" type="typeNS" route="down" depart="%i" color="1,0,0"/>' % (
vehNr, i), file=routes)
vehNr += 1
lastVeh = i
print("</routes>", file=routes)'''
# The program looks like this
# <tlLogic id="0" type="static" programID="0" offset="0">
# the locations of the tls are NESW
# <phase duration="31" state="GrGr"/>
# <phase duration="6" state="yryr"/>
# <phase duration="31" state="rGrG"/>
# <phase duration="6" state="ryry"/>
# </tlLogic>
def run(self):
import traci
"""execute the TraCI control loop"""
step = 0
phase = 0
states = [0,1,2,3,4,5,6,7]
traci.trafficlight.setPhase("0", 0)
while traci.simulation.getMinExpectedNumber() > 0:
traci.simulationStep()
action = random.choice(states)
traci.trafficlight.setPhase("0", action)
phase = traci.trafficlight.getPhase("0")
#print(phase)
# timeout = 0.2
# print("Time right now - ",step)
# rlist, wlist, xlist = select([sys.stdin],[],[],timeout)
# if rlist:
# print("Key pressed - ")
# print(rlist)
# traci.vehicle.addFull(vehID='left_'+str(step),routeID='r0',typeID='car',depart='triggered',departLane='random',departPos='random')
# termios.tcflush(sys.stdin,termios.TCIFLUSH)
'''
key = fstdscr.getch()
stdscr.addch(20,25,key)
stdscr.refresh()
if key == curses.KEY_RIGHT:
stdscr.addstr(2, 20, "Up")
traci.vehicle.addFull(vehID='right_'+str(step),routeID='r0',typeID='car',depart='triggered',departLane='random',departPos='random')
elif key == curses.KEY_DOWN:
stdscr.addstr(3, 20, "Down")
traci.vehicle.addFull(vehID='down_'+str(step),routeID='r3',typeID='car',depart='triggered',departLane='random',departPos='random')
elif key == curses.KEY_LEFT:
stdscr.addstr(4, 20, "Left")
traci.vehicle.addFull(vehID='left_'+str(step),routeID='r6',typeID='car',depart='triggered',departLane='random',departPos='random')
elif key == curses.KEY_UP:
stdscr.addstr(5, 20, "Up")
traci.vehicle.addFull(vehID='up_'+str(step),routeID='r9',typeID='car',depart='triggered',departLane='random',departPos='random')
'''
step += 1
#.curses.endwin()
traci.close()
sys.stdout.flush()
def get_options(self):
optParser = optparse.OptionParser()
optParser.add_option("--nogui", action="store_true",
default=False, help="run the commandline version of sumo")
options, args = optParser.parse_args()
return options
def take_action(self):
traci.trafficlight.setPhase("0", traci.trafficlight.getPhase("0") + 1)
# this is the main entry point of this script
'''
if __name__ == "__main__":
options = get_options()
# this script has been called from the command line. It will start sumo as a
# server, then connect and run
if options.nogui:
sumoBinary = checkBinary('sumo')
else:
sumoBinary = checkBinary('sumo-gui')
# first, generate the route file for this simulation
generate_routefile()
# this is the normal way of using traci. sumo is started as a
# subprocess and then the python script connects and runs
traci.start([sumoBinary, "-c", "data/cross.sumocfg",
"--tripinfo-output", "tripinfo.xml"])
run()'''
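# Usage sketch (hypothetical; nothing in this file instantiates the class itself):
#   from runner import Runner
#   Runner()   # generates routes, starts SUMO via traci.start and runs the control loop
# The --nogui flag parsed in get_options selects the command-line 'sumo' binary
# instead of 'sumo-gui'.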
|
the-stack_106_16258
|
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import (namedtuple, OrderedDict)
http_methods = ('GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'PATCH', 'OPTIONS')
media_types = ('application/json', 'application/xml')
# based on https://docs.gitlab.com/ee/api/api_resources.html
# please add, modify resource details here, and make entry in service-to-resource mappings and in services
resource_config_dict = {
'ProjectID': OrderedDict([
('/projects/{owner}%2F{repo}', {
http_methods[0]: {
'path_params': ('owner', 'repo'),
'query_params': None,
'request_media_type': media_types[0],
'response_media_type': media_types[0],
},
}),
]),
'ListBranches': OrderedDict([
('/projects/{id}/repository/branches', {
http_methods[0]: {
'path_params': ('id', ),
'query_params': None,
'request_media_type': media_types[0],
'response_media_type': media_types[0],
},
}),
]),
}
resource = namedtuple('service', 'rest_resource mount_point http_method')
# service-to-resource mappings
project_id = resource('ProjectID', list(resource_config_dict['ProjectID'].keys())[0], http_methods[0])
list_branches = resource('ListBranches', list(resource_config_dict['ListBranches'].keys())[0], http_methods[0])
# Transtats GitLab support operates on resources listed here
resources = {
'project_id': project_id,
'list_branches': list_branches,
}
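# Illustrative lookup sketch (the owner/repo values are placeholders):
#   service = resources['project_id']
#   url_path = service.mount_point.format(owner='example-group', repo='example-repo')
#   # -> '/projects/example-group%2Fexample-repo', to be requested with service.http_method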
|
the-stack_106_16259
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
CASCI analytical nuclear gradients
Ref.
J. Comput. Chem., 5, 589
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad.mp2 import _shell_prange
from pyscf.scf import cphf
def kernel(mc, mo_coeff=None, ci=None, atmlst=None, mf_grad=None, verbose=None):
if mo_coeff is None: mo_coeff = mc._scf.mo_coeff
if ci is None: ci = mc.ci
if mf_grad is None: mf_grad = mc._scf.nuc_grad_method()
assert(isinstance(ci, numpy.ndarray))
mol = mc.mol
ncore = mc.ncore
ncas = mc.ncas
nocc = ncore + ncas
nelecas = mc.nelecas
nao, nmo = mo_coeff.shape
nao_pair = nao * (nao+1) // 2
mo_energy = mc._scf.mo_energy
mo_occ = mo_coeff[:,:nocc]
mo_core = mo_coeff[:,:ncore]
mo_cas = mo_coeff[:,ncore:nocc]
neleca, nelecb = mol.nelec
assert(neleca == nelecb)
orbo = mo_coeff[:,:neleca]
orbv = mo_coeff[:,neleca:]
casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)
dm_core = numpy.dot(mo_core, mo_core.T) * 2
dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))
aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_coeff, mo_cas), compact=False)
aapa = aapa.reshape(ncas,ncas,nmo,ncas)
vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))
h1 = mc.get_hcore()
vhf_c = vj[0] - vk[0] * .5
vhf_a = vj[1] - vk[1] * .5
# Imat = h1_{pi} gamma1_{iq} + h2_{pijk} gamma_{iqkj}
Imat = numpy.zeros((nmo,nmo))
Imat[:,:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c + vhf_a, mo_occ)) * 2
Imat[:,ncore:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c, mo_cas, casdm1))
Imat[:,ncore:nocc] += lib.einsum('uviw,vuwt->it', aapa, casdm2)
aapa = vj = vk = vhf_c = vhf_a = h1 = None
ee = mo_energy[:,None] - mo_energy
zvec = numpy.zeros_like(Imat)
zvec[:ncore,ncore:neleca] = Imat[:ncore,ncore:neleca] / -ee[:ncore,ncore:neleca]
zvec[ncore:neleca,:ncore] = Imat[ncore:neleca,:ncore] / -ee[ncore:neleca,:ncore]
zvec[nocc:,neleca:nocc] = Imat[nocc:,neleca:nocc] / -ee[nocc:,neleca:nocc]
zvec[neleca:nocc,nocc:] = Imat[neleca:nocc,nocc:] / -ee[neleca:nocc,nocc:]
zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
vhf = mc._scf.get_veff(mol, zvec_ao) * 2
xvo = reduce(numpy.dot, (orbv.T, vhf, orbo))
xvo += Imat[neleca:,:neleca] - Imat[:neleca,neleca:].T
def fvind(x):
x = x.reshape(xvo.shape)
dm = reduce(numpy.dot, (orbv, x, orbo.T))
v = mc._scf.get_veff(mol, dm + dm.T)
v = reduce(numpy.dot, (orbv.T, v, orbo))
return v * 2
dm1resp = cphf.solve(fvind, mo_energy, mc._scf.mo_occ, xvo, max_cycle=30)[0]
zvec[neleca:,:neleca] = dm1resp
zeta = numpy.einsum('ij,j->ij', zvec, mo_energy)
zeta = reduce(numpy.dot, (mo_coeff, zeta, mo_coeff.T))
zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
p1 = numpy.dot(mo_coeff[:,:neleca], mo_coeff[:,:neleca].T)
vhf_s1occ = reduce(numpy.dot, (p1, mc._scf.get_veff(mol, zvec_ao), p1))
Imat[:ncore,ncore:neleca] = 0
Imat[ncore:neleca,:ncore] = 0
Imat[nocc:,neleca:nocc] = 0
Imat[neleca:nocc,nocc:] = 0
Imat[neleca:,:neleca] = Imat[:neleca,neleca:].T
im1 = reduce(numpy.dot, (mo_coeff, Imat, mo_coeff.T))
casci_dm1 = dm_core + dm_cas
hf_dm1 = mc._scf.make_rdm1(mo_coeff, mc._scf.mo_occ)
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
diag_idx = numpy.arange(nao)
diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx
casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)
dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,
(0, nao, 0, nao)).reshape(ncas**2,nao,nao)
dm2buf = lib.pack_tril(dm2buf)
dm2buf[:,diag_idx] *= .5
dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)
casdm2 = casdm2_cc = None
if atmlst is None:
atmlst = range(mol.natm)
aoslices = mol.aoslice_by_atom()
de = numpy.zeros((len(atmlst),3))
max_memory = mc.max_memory - lib.current_memory()[0]
blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))
blksize = min(nao, max(2, blksize))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
h1ao = hcore_deriv(ia)
de[k] += numpy.einsum('xij,ij->x', h1ao, casci_dm1)
de[k] += numpy.einsum('xij,ij->x', h1ao, zvec_ao)
vhf1 = numpy.zeros((3,nao,nao))
q1 = 0
for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):
q0, q1 = q1, q1 + nf
dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])
shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)
eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',
shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)
de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2
for i in range(3):
eri1tmp = lib.unpack_tril(eri1[i].reshape((p1-p0)*nf,-1))
eri1tmp = eri1tmp.reshape(p1-p0,nf,nao,nao)
de[k,i] -= numpy.einsum('ijkl,ij,kl', eri1tmp, hf_dm1[p0:p1,q0:q1], zvec_ao) * 2
de[k,i] -= numpy.einsum('ijkl,kl,ij', eri1tmp, hf_dm1, zvec_ao[p0:p1,q0:q1]) * 2
de[k,i] += numpy.einsum('ijkl,il,kj', eri1tmp, hf_dm1[p0:p1], zvec_ao[q0:q1])
de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, hf_dm1[q0:q1], zvec_ao[p0:p1])
#:vhf1c, vhf1a = mf_grad.get_veff(mol, (dm_core, dm_cas))
#:de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], casci_dm1[p0:p1]) * 2
#:de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2
de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1]) * 2
de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1])
de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1]) * 2
de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1])
eri1 = eri1tmp = None
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])
de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], im1[:,p0:p1])
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1]) * 2
de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], zeta[:,p0:p1]) * 2
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf_s1occ[p0:p1]) * 2
de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], vhf_s1occ[:,p0:p1]) * 2
de += mf_grad.grad_nuc(mol, atmlst)
return de
def as_scanner(mcscf_grad, state=0):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function requires one argument
"mol" as input and returns energy and first order nuclear derivatives.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples:
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1.1', verbose=0)
>>> mc_grad_scanner = mcscf.CASCI(scf.RHF(mol), 4, 4).nuc_grad_method().as_scanner()
>>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))
>>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))
'''
from pyscf import gto
if isinstance(mcscf_grad, lib.GradScanner):
return mcscf_grad
logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)
class CASCI_GradScanner(mcscf_grad.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, state=state, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
mc_scanner = self.base
if (mc_scanner.fcisolver.nroots > 1 and
state >= mc_scanner.fcisolver.nroots):
raise ValueError('State ID greater than the number of CASCI roots')
# TODO: Check root flip
e_tot = mc_scanner(mol)
if mc_scanner.fcisolver.nroots > 1:
e_tot = e_tot[state]
ci = mc_scanner.ci[state]
else:
ci = mc_scanner.ci
self.mol = mol
de = self.kernel(ci=ci, state=state, **kwargs)
return e_tot, de
return CASCI_GradScanner(mcscf_grad)
class Gradients(lib.StreamObject):
'''Non-relativistic CASCI analytical nuclear gradients'''
def __init__(self, mc):
self.base = mc
self.mol = mc.mol
self.stdout = mc.stdout
self.verbose = mc.verbose
self.max_memory = mc.max_memory
self.state = 0 # of which the gradients to be computed.
self.atmlst = None
self.de = None
self._keys = set(self.__dict__.keys())
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('\n')
if not self.base.converged:
log.warn('Ground state CASCI not converged')
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
if self.state != 0 and self.base.fcisolver.nroots > 1:
log.info('State ID = %d', self.state)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
def kernel(self, mo_coeff=None, ci=None, atmlst=None, mf_grad=None,
state=None, verbose=None):
cput0 = (time.clock(), time.time())
log = logger.new_logger(self, verbose)
if ci is None: ci = self.base.ci
if isinstance(ci, (list, tuple)):
if state is None:
state = self.state
else:
self.state = state
ci = ci[state]
logger.info(self, 'Multiple roots are found in CASCI solver. '
'Nuclear gradients of root %d are computed.', state)
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
self.de = kernel(self.base, mo_coeff, ci, atmlst, mf_grad, log)
log.timer('CASCI gradients', *cput0)
self._finalize()
return self.de
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s gradients ---------------',
self.base.__class__.__name__)
rhf_grad._write(self, self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
as_scanner = as_scanner
Grad = Gradients
from pyscf import mcscf
mcscf.casci.CASCI.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
mol = gto.Mole()
mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
mol.build()
mf = scf.RHF(mol).run(conv_tol=1e-14)
mc = mcscf.CASCI(mf, 4, 4).run()
g1 = mc.Gradients().kernel()
print(lib.finger(g1) - -0.066025991364829367)
mcs = mc.as_scanner()
mol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')
e1 = mcs(mol)
mol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')
e2 = mcs(mol)
print(g1[1,2], (e1-e2)/0.002*lib.param.BOHR)
|
the-stack_106_16262
|
"""
Debug printer
Handles logging at different debug levels
"""
from colorama import Fore, Style, init
init()
LOOP_TEMPLATE = """
{tab_level}{HEADER_COLOR}[{id}] {name}{END_COLOR}
{tab_level} * Required? {VALUE_COLOR}{req}{END_COLOR}
{tab_level} * Max repeat: {VALUE_COLOR}{repeat}{END_COLOR}
"""
SEGMENT_TEMPLATE = """
{tab_level}{HEADER_COLOR}[{id}] {name}{END_COLOR}
{tab_level} * Required? {VALUE_COLOR}{req}{END_COLOR}
{tab_level} * Max uses: {VALUE_COLOR}{max_uses}{END_COLOR}
{tab_level} * Syntax rules: {VALUE_COLOR}{syntax_rules}{END_COLOR}
{tab_level} * Notes: {VALUE_COLOR}{notes}{END_COLOR}
"""
ELEMENT_TEMPLATE = """
{tab_level}{HEADER_COLOR}{index}{name}{END_COLOR}
{tab_level} * Required? {VALUE_COLOR}{req}{END_COLOR}
{tab_level} * Data type: {VALUE_COLOR}{data_type}{END_COLOR}
{tab_level} * Data type options: {VALUE_COLOR}{data_type_ids}{END_COLOR}
{tab_level} * Length (min: {VALUE_COLOR}{length[min]}{END_COLOR}, max: {VALUE_COLOR}{length[max]}{END_COLOR})
{tab_level} * Notes: {VALUE_COLOR}{notes}{END_COLOR}
"""
class DebugMaster(object):
""" Auto-instantiated as Debug to provide a single point of contact """
def __init__(self):
self.level = 3
self.tags = {
"ERROR": "{}[ ERROR ]{} ".format(Fore.RED+Style.BRIGHT, Style.RESET_ALL),
"WARNING": "{}[WARNING]{} ".format(Fore.YELLOW+Style.BRIGHT, Style.RESET_ALL),
"MESSAGE": "{}[MESSAGE]{} ".format(Fore.CYAN+Style.BRIGHT, Style.RESET_ALL)
}
def log(self, message, level=1):
""" Creates a custom message at the specified level """
if level <= self.level:
print("\n" + message)
def log_error(self, message):
""" Creates an error-level log messsage """
self.log(self.tags["ERROR"] + message, 1)
def log_warning(self, message):
""" Creates a warning-level log messsage """
self.log(self.tags["WARNING"] + message, 2)
def log_message(self, message):
""" Creates a message-level log messsage """
self.log(self.tags["MESSAGE"] + message, 3)
def explain(self, structure):
print(self.level)
if self.level <= 1:
return # Only explain if debugging level is 2+
# Decide which type of structure this is
if type(structure) is list:
for segment in structure:
self.explain_segment(segment)
elif type(structure) is dict and "type" in structure:
if structure["type"] == "segment":
self.explain_segment(structure)
elif structure["type"] == "element":
self.explain_element("", structure)
elif structure["type"] == "loop":
self.explain_loop(structure)
else:
raise TypeError("Expected either a loop, a segment, an element, or a list of segments.")
def explain_segment(self, segment, tab_level = ""):
if self.level <= 1:
return # Only explain if debugging level is 2+
print(Fore.CYAN + "\n" + tab_level + "-- [Segment] --" + Fore.RESET)
if segment["type"] == "segment":
# Parse syntax rules into human-readable format
syntax_rules_list = []
if "syntax" in segment:
for rule in segment["syntax"]:
# Troubleshooting
if "rule" not in rule or "criteria" not in rule:
raise ValueError("Invalid rule definition in segment {}: {}".format(segment["id"], rule))
if len(rule["criteria"]) < 2:
raise ValueError("Invalid criteria for syntax rule {} in segment {}: Expected two or more values".format(rule["rule"], segment["id"]))
if rule["rule"] == "ATLEASTONE":
required_elements = ", ".join(["{}{:02d}".format(segment["id"], e) for e in rule["criteria"]])
syntax_rules_list.append("At least one of {} is required".format(required_elements))
elif rule["rule"] == "ALLORNONE":
required_elements = ", ".join(["{}{:02d}".format(segment["id"], e) for e in rule["criteria"]])
syntax_rules_list.append("If one of {} is present, the rest are required".format(required_elements))
elif rule["rule"] == "IFATLEASTONE":
first_element = "{}{:02d}".format(segment["id"], rule["criteria"][0])
required_elements = ", ".join(["{}{:02d}".format(segment["id"], e) for e in rule["criteria"][1:]])
syntax_rules_list.append("If {} is present, at least one of {} are required".format(first_element, required_elements))
# Print template
print(SEGMENT_TEMPLATE.format(
tab_level=tab_level,
syntax_rules="; ".join(syntax_rules_list),
HEADER_COLOR=Fore.CYAN+Style.BRIGHT,
VALUE_COLOR=Fore.YELLOW+Style.BRIGHT,
END_COLOR=Fore.RESET+Style.RESET_ALL,
**segment))
# Print elements section
print(Fore.CYAN + tab_level + " -- [Elements] --" + Fore.RESET)
for i, element in enumerate(segment["elements"]):
self.explain_element("{}{:02d}: ".format(segment["id"], i+1), element, tab_level + " ")
# End segment
print(Fore.CYAN + tab_level + "--------------------" + Fore.RESET)
def explain_element(self, index, element, tab_level = ""):
if self.level <= 1:
return # Only explain if debugging level is 2+
# Print template
print(ELEMENT_TEMPLATE.format(
tab_level=tab_level,
index=index,
HEADER_COLOR=Fore.GREEN,
VALUE_COLOR=Fore.YELLOW+Style.BRIGHT,
END_COLOR=Fore.RESET+Style.RESET_ALL,
**element))
def explain_loop(self, loop, tab_level=""):
if self.level <= 1:
return # Only explain if debugging level is 2+
print(Fore.RED + "-- [Loop] --" + Style.RESET_ALL)
print(LOOP_TEMPLATE.format(
tab_level=tab_level,
HEADER_COLOR=Fore.RED+Style.BRIGHT,
VALUE_COLOR=Fore.YELLOW+Style.BRIGHT,
END_COLOR=Fore.RESET+Style.RESET_ALL,
**loop))
for segment in loop["segments"]:
self.explain_segment(segment, " ")
print(Fore.RED + "------------" + Style.RESET_ALL)
Debug = DebugMaster()
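# Minimal usage sketch (values are placeholders; assumes this module is importable as 'debug'):
#   from debug import Debug
#   Debug.level = 2                    # 1 = errors only, 2 = + warnings, 3 = + messages
#   Debug.log_warning("segment X12 is missing element 03")
#   Debug.explain(some_segment_dict)   # pretty-prints a segment/element/loop definition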
|
the-stack_106_16263
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import itertools
import pathlib
import sys
from typing import Iterable, Iterator, NamedTuple, Sequence, Tuple
from bentoml.types import (
ApiFuncArgs,
AwsLambdaEvent,
BatchApiFuncArgs,
FileLike,
HTTPRequest,
InferenceTask,
)
class BaseInputAdapter:
"""
InputAdapter is an abstraction layer between user defined API callback function
and prediction request input in a variety of different forms, such as HTTP request
body, command line arguments or AWS Lambda event object.
"""
HTTP_METHODS = ["POST", "GET"]
BATCH_MODE_SUPPORTED = True
SINGLE_MODE_SUPPORTED = True
def __init__(self, http_input_example=None, **base_config):
self._config = base_config
self._http_input_example = http_input_example
@property
def config(self):
return self._config
@property
def request_schema(self):
"""
:return: OpenAPI json schema for the HTTP API endpoint created with this input
adapter
"""
return {"application/json": {"schema": {"type": "object"}}}
@property
def pip_dependencies(self):
"""
:return: List of PyPI package names required by this InputAdapter
"""
return []
def from_http_request(self, req: HTTPRequest) -> InferenceTask:
"""
Handles HTTP requests, convert it into InferenceTask
"""
raise NotImplementedError()
def from_aws_lambda_event(self, event: AwsLambdaEvent) -> InferenceTask:
"""
Handles AWS lambda events, convert it into InferenceTask
"""
raise NotImplementedError()
def from_cli(self, cli_args: Tuple[str, ...]) -> Iterator[InferenceTask]:
"""
Handles CLI command, generate InferenceTask
"""
raise NotImplementedError()
def extract_user_func_args(
self, tasks: Iterable[InferenceTask]
) -> BatchApiFuncArgs:
"""
Extract args that user API function is expecting from InferenceTask
"""
raise NotImplementedError()
def iter_batch_args(
self,
batch_args: BatchApiFuncArgs,
tasks: InferenceTask = None, # pylint: disable=unused-argument
) -> Iterator[ApiFuncArgs]:
"""
Extract args that user API function is expecting from InferenceTask
"""
return iter(zip(*batch_args))
COLOR_FAIL = '\033[91m'
def exit_cli(err_msg: str = "", exit_code: int = None):
if exit_code is None:
exit_code = 1 if err_msg else 0
if err_msg:
print(f"{COLOR_FAIL}{err_msg}", file=sys.stderr)
sys.exit(exit_code)
class CliInputParser(NamedTuple):
arg_names: Tuple[str]
file_arg_names: Tuple[str]
arg_strs: Tuple[str]
file_arg_strs: Tuple[str]
parser: argparse.ArgumentParser
@classmethod
@functools.lru_cache()
def get(cls, input_names: Tuple[str] = None):
arg_names = (
tuple(f"input_{n}" for n in input_names) if input_names else ("input",)
)
arg_strs = tuple(f'--{n.replace("_", "-")}' for n in arg_names)
file_arg_names = (
tuple(f"input_file_{n}" for n in input_names)
if input_names
else ("input_file",)
)
file_arg_strs = tuple(f'--{n.replace("_", "-")}' for n in file_arg_names)
parser = argparse.ArgumentParser()
for name in itertools.chain(arg_strs, file_arg_strs):
parser.add_argument(name, nargs="+")
return cls(arg_names, file_arg_names, arg_strs, file_arg_strs, parser)
def parse(self, args: Sequence[str]) -> Iterator[Tuple[FileLike]]:
try:
parsed, _ = self.parser.parse_known_args(args)
except SystemExit:
parsed = None
inputs = tuple(getattr(parsed, name, None) for name in self.arg_names)
file_inputs = tuple(getattr(parsed, name, None) for name in self.file_arg_names)
if any(inputs) and any(file_inputs):
exit_cli(
'''
Conflict arguments:
--input* and --input-file* should not be provided at same time
'''
)
if not all(inputs) and not all(file_inputs):
exit_cli(
f'''
Insufficient arguments:
({' '.join(self.arg_strs)}) or
({' '.join(self.file_arg_strs)})
are required
'''
)
if all(inputs):
if functools.reduce(lambda i, j: len(i) == len(j), inputs):
for input_ in zip(*inputs):
yield tuple(FileLike(bytes_=i.encode()) for i in input_)
else:
exit_cli(
f'''
Arguments length mismatch:
Each ({' '.join(self.arg_strs)})
should have same amount of inputs
'''
)
if all(file_inputs):
if len({len(i) for i in file_inputs}) == 1:  # every --input-file* flag must list the same number of values
for input_ in zip(*file_inputs):
uris = (pathlib.Path(fpath).absolute().as_uri() for fpath in input_)
yield tuple(FileLike(uri=uri) for uri in uris)
else:
exit_cli(
f'''
Arguments length mismatch:
Each ({' '.join(self.file_arg_strs)})
should have same amount of inputs
'''
)
def parse_cli_inputs(
args: Sequence[str], input_names: Sequence[str] = None
) -> Iterator[Tuple[FileLike]]:
'''
Parse CLI args and iter each pair of inputs in bytes.
>>> parse_cli_inputs("--input-x '1' '2' --input-y 'a' 'b'".split(' '), ('x', 'y'))
>>> parse_cli_inputs(
>>> "--input-file-x 1.jpg 2.jpg --input-file-y 1.label 2.label".split(' '),
>>> ('x', 'y'))
'''
parser = CliInputParser.get(tuple(input_names))
return parser.parse(args)
def parse_cli_input(cli_args: Iterable[str]) -> Iterator[FileLike]:
'''
Parse CLI args and iter each input in bytes.
>>> parse_cli_input('--input {"input":1} {"input":2}'.split(' '))
OR
>>> parse_cli_inputs("--input-file 1.jpg 2.jpg 3.jpg".split(' '))
'''
parser = argparse.ArgumentParser()
input_g = parser.add_mutually_exclusive_group(required=True)
input_g.add_argument('--input', nargs="+", type=str)
input_g.add_argument('--input-file', nargs="+")
parsed_args, _ = parser.parse_known_args(list(cli_args))
inputs = tuple(
parsed_args.input if parsed_args.input_file is None else parsed_args.input_file
)
is_file = parsed_args.input_file is not None
if is_file:
for input_ in inputs:
uri = pathlib.Path(input_).absolute().as_uri()
yield FileLike(uri=uri)
else:
for input_ in inputs:
rv = FileLike(bytes_=input_.encode())
yield rv
return _
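# Usage sketch for the CLI helpers above (input values are placeholders):
#   for f in parse_cli_input(['--input', '{"text": "hello"}', '{"text": "world"}']):
#       ...  # each f is a FileLike wrapping the raw bytes
#   for x, y in parse_cli_inputs('--input-x 1 2 --input-y a b'.split(' '), ('x', 'y')):
#       ...  # x and y are FileLike objects paired positionally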
|
the-stack_106_16264
|
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from django.conf import settings
from optparse import make_option
import os.path
from subprocess import call
import tempfile
class Command(BaseCommand):
args = '<dem_path>'
help = 'Load DEM data (projecting and clipping it if necessary).\n'
help += 'You may need to create a GDAL Virtual Raster if your DEM is '
help += 'composed of several files.\n'
can_import_settings = True
option_list = BaseCommand.option_list + (
make_option('--replace',
action='store_true',
default=False,
help='Replace existing DEM if any.'),
)
def handle(self, *args, **options):
try:
from osgeo import gdal, ogr, osr
except ImportError:
msg = 'GDAL Python bindings are not available. Can not proceed.'
raise CommandError(msg)
try:
ret = call('raster2pgsql -G', shell=True)
if ret != 0:
raise Exception('raster2pgsql failed with exit code %d' % ret)
except Exception as e:
msg = 'Caught %s: %s' % (e.__class__.__name__, e,)
raise CommandError(msg)
self.stdout.write('-- Checking input DEM ------------------\n')
# Validate arguments
if len(args) != 1:
self.stdout.write(self.usage('loaddem'))
return
# Obtain DEM path
dem_path = args[0]
# Open GDAL dataset
if not os.path.exists(dem_path):
raise CommandError('DEM file does not exist at: %s' % dem_path)
ds = gdal.Open(dem_path)
if ds is None:
raise CommandError('DEM format is not recognized by GDAL.')
# GDAL dataset check 1: ensure dataset has a known SRS
if ds.GetProjection() == '':
raise CommandError('DEM coordinate system is unknown.')
wkt_box = 'POLYGON(({0} {1}, {2} {1}, {2} {3}, {0} {3}, {0} {1}))'
# Obtain dataset SRS
srs_r = osr.SpatialReference()
srs_r.ImportFromWkt(ds.GetProjection())
# Obtain project SRS
srs_p = osr.SpatialReference()
srs_p.ImportFromEPSG(settings.SRID)
# Obtain dataset BBOX
gt = ds.GetGeoTransform()
if gt is None:
raise CommandError('DEM extent is unknown.')
xsize = ds.RasterXSize
ysize = ds.RasterYSize
minx = gt[0]
miny = gt[3] + ysize * gt[5]
maxx = gt[0] + xsize * gt[1]
maxy = gt[3]
bbox_wkt = wkt_box.format(minx, miny, maxx, maxy)
bbox_r = ogr.CreateGeometryFromWkt(bbox_wkt, srs_r)
bbox_r.TransformTo(srs_p)
# Obtain project BBOX
bbox_wkt = wkt_box.format(*settings.SPATIAL_EXTENT)
bbox_p = ogr.CreateGeometryFromWkt(bbox_wkt, srs_p)
# GDAL dataset check 2: ensure dataset bbox matches project extent
if not bbox_p.Intersects(bbox_r):
raise CommandError('DEM file does not match project extent (%s <> %s).' % (bbox_r, bbox_p))
# Allow GDAL objects to be garbage-collected
ds = None
srs_p = None
srs_r = None
bbox_r = None
bbox_p = None
# Check if DEM table already exists
cur = connection.cursor()
sql = 'SELECT * FROM raster_columns WHERE r_table_name = \'mnt\''
cur.execute(sql)
dem_exists = cur.rowcount != 0
cur.close()
# Obtain replace mode
replace = options['replace']
# What to do with existing DEM (if any)
if dem_exists and replace:
# Drop table
cur = connection.cursor()
sql = 'DROP TABLE mnt'
cur.execute(sql)
cur.close()
elif dem_exists and not replace:
raise CommandError('DEM file exists, use --replace to overwrite')
self.stdout.write('Everything looks fine, we can start loading DEM\n')
# Unfortunately, PostGISRaster driver in GDAL does not have write mode
# so far. Therefore, we relay parameters to standard commands using
# subprocesses.
# Step 1: process raster (clip, project)
new_dem = tempfile.NamedTemporaryFile()
cmd = 'gdalwarp -t_srs EPSG:%d -te %f %f %f %f %s %s' % (settings.SRID,
settings.SPATIAL_EXTENT[0],
settings.SPATIAL_EXTENT[1],
settings.SPATIAL_EXTENT[2],
settings.SPATIAL_EXTENT[3],
dem_path,
new_dem.name)
try:
self.stdout.write('\n-- Relaying to gdalwarp ----------------\n')
self.stdout.write(cmd)
ret = call(cmd, shell=True)
if ret != 0:
raise Exception('gdalwarp failed with exit code %d' % ret)
except Exception as e:
new_dem.close()
msg = 'Caught %s: %s' % (e.__class__.__name__, e,)
raise CommandError(msg)
self.stdout.write('DEM successfully clipped/projected.\n')
# Step 2: Convert to PostGISRaster format
output = tempfile.NamedTemporaryFile() # SQL code for raster creation
cmd = 'raster2pgsql -c -C -I -M -t 100x100 %s mnt' % new_dem.name
try:
self.stdout.write('\n-- Relaying to raster2pgsql ------------\n')
self.stdout.write(cmd)
ret = call(cmd, stdout=output.file, shell=True)
if ret != 0:
raise Exception('raster2pgsql failed with exit code %d' % ret)
except Exception as e:
output.close()
msg = 'Caught %s: %s' % (e.__class__.__name__, e,)
raise CommandError(msg)
finally:
new_dem.close()
self.stdout.write('DEM successfully converted to SQL.\n')
# Step 3: Dump SQL code into database
self.stdout.write('\n-- Loading DEM into database -----------\n')
cur = connection.cursor()
output.file.seek(0)
for sql_line in output.file:
cur.execute(sql_line)
cur.close()
output.close()
self.stdout.write('DEM successfully loaded.\n')
return
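# Hypothetical management-command invocations (paths are placeholders):
#   python manage.py loaddem /data/dem/region.vrt
#   python manage.py loaddem /data/dem/region.tif --replace   # drop and reload the 'mnt' raster table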
|
the-stack_106_16266
|
#!/usr/bin/env python
# encoding: utf-8
"""
predict.py
Created by Shuailong on 2016-12-3.
Validate the correctness of the algorithm.
"""
from __future__ import print_function
from time import time
from keras.models import load_model
import os
from utils import true_accuracy
from utils import token2word
from dataset import get_data
from train import data_generator
from train import MODEL_FILE, MODEL_DIR
def main():
start_time = time()
print('\nGetting data...')
data = get_data()
X_test = data['X_test']
X_test_feats = data['X_test_feats']
y_test = data['y_test']
tag_index = data['tag_index']
tag_size = len(tag_index)
word_index = data['word_index']
index_word = {}
for word, index in word_index.items():
index_word[index] = word
index_tag = {}
for tag, index in tag_index.items():
index_tag[index] = tag
print('\nLoading model...')
model = load_model(os.path.join(MODEL_DIR, MODEL_FILE), custom_objects={'true_accuracy': true_accuracy})
print('\nPredicting...')
samples = 1 # only 1 works for now
prob = model.predict_generator(data_generator(X_test, X_test_feats, y_test, tag_size, shuffle=False),
val_samples=samples)
predict = prob.argmax(axis=-1)
for i in range(samples):
words = token2word(X_test[i], word_index)
gold_tags = token2word(y_test[i], tag_index)
tags = token2word(predict[i], tag_index)
print('\n--------- Sample {}----------'.format(i))
print('len(words): {} '.format(len(words),))
assert len(words) == len(gold_tags) and len(words) == len(tags)
print('Sentence:')
print(words)
print('Gold labeling:')
print(gold_tags)
print('Model labeling:')
print(tags)
seconds = time() - start_time
minutes = seconds / 60
print('[Finished in {} seconds ({} minutes)]'.format(str(round(seconds, 1)),
str(round(minutes, 1))))
if __name__ == '__main__':
main()
|
the-stack_106_16268
|
# Given the list motions=[1,1] which means the robot
# moves right and then right again, compute the posterior
# distribution if the robot first senses red, then moves
# right one, then senses green, then moves right again,
# starting with a uniform prior distribution.
p = [0.2, 0.2, 0.2, 0.2, 0.2]
world = ['green', 'red', 'red', 'green', 'green']
measurements = ['red', 'green']
motions = [1, 1]
pHit = 0.6
pMiss = 0.2
pExact = 0.8
pOvershoot = 0.1
pUndershoot = 0.1
def sense(p, Z):
q = []
for i in range(len(p)):
hit = (Z == world[i])
q.append(p[i] * (hit * pHit + (1-hit) * pMiss))
s = sum(q)
for i in range(len(q)):
q[i] = q[i] / s
return q
def move(p, U):
length = len(p)
q = length * [0]
for i in range(length):
value = p[(i-U) % length]
q[i] += value * pExact
q[(i - 1) % length] += value * pUndershoot
q[(i + 1) % length] += value * pOvershoot
return q
for i in range(len(motions)):
p = sense(p, measurements[i])
p = move(p, motions[i])
print(p)
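# Note on the loop above (descriptive only): each iteration first applies Bayes' rule in
# sense() -- multiply the prior by pHit where the measurement matches the world colour and
# pMiss elsewhere, then renormalise -- and then applies total probability in move(), a
# cyclic convolution with the (pUndershoot, pExact, pOvershoot) motion kernel, so the
# printed p is the posterior after two sense/move cycles.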
|
the-stack_106_16270
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.wasb_delete_blob_operator import WasbDeleteBlobOperator
from tests.compat import mock
class TestWasbDeleteBlobOperator(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = WasbDeleteBlobOperator(
task_id='wasb_operator',
dag=self.dag,
**self._config
)
self.assertEqual(operator.container_name,
self._config['container_name'])
self.assertEqual(operator.blob_name, self._config['blob_name'])
self.assertEqual(operator.is_prefix, False)
self.assertEqual(operator.ignore_if_missing, False)
operator = WasbDeleteBlobOperator(
task_id='wasb_operator',
dag=self.dag,
is_prefix=True,
ignore_if_missing=True,
**self._config
)
self.assertEqual(operator.is_prefix, True)
self.assertEqual(operator.ignore_if_missing, True)
@mock.patch('airflow.contrib.operators.wasb_delete_blob_operator.WasbHook',
autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = WasbDeleteBlobOperator(
task_id='wasb_operator',
dag=self.dag,
is_prefix=True,
ignore_if_missing=True,
**self._config
)
operator.execute(None)
mock_instance.delete_file.assert_called_once_with(
'container', 'blob', True, True
)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_16272
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Eval independent train DQN on Atari environments.
Additional flags are available such as `--replay_buffer_capacity` and
`--n_step_update`.
"""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from tf_agents.environments import suite_atari
from abps import baseline_runners
FLAGS = flags.FLAGS
# AtariPreprocessing runs 4 frames at a time, max-pooling over the last 2
# frames. We need to account for this when computing things like update
# intervals.
ATARI_FRAME_SKIP = 4
def get_run_args():
"""Builds a dict of run arguments from flags."""
run_args = {}
run_args['is_eval'] = FLAGS.is_eval
if FLAGS.n_step_update:
run_args['n_step_update'] = FLAGS.n_step_update
if FLAGS.enable_functions:
run_args['enable_functions'] = FLAGS.enable_functions
if FLAGS.dqn_type:
run_args['dqn_type'] = FLAGS.dqn_type
if FLAGS.learning_rate:
run_args['learning_rate'] = FLAGS.learning_rate
if FLAGS.hparam_path:
run_args['hparam_path'] = FLAGS.hparam_path
if FLAGS.eval_parallel_size:
run_args['eval_parallel_size'] = FLAGS.eval_parallel_size
if FLAGS.num_iterations:
run_args['num_iterations'] = FLAGS.num_iterations
# evaler specific args
if FLAGS.eval_episode_per_iteration:
run_args['eval_episode_per_iteration'] = FLAGS.eval_episode_per_iteration
if FLAGS.eval_interval_secs:
run_args['eval_interval_secs'] = FLAGS.eval_interval_secs
if FLAGS.eval_epsilon_greedy:
run_args['eval_epsilon_greedy'] = FLAGS.eval_epsilon_greedy
if FLAGS.ucb_coeff:
run_args['ucb_coeff'] = FLAGS.ucb_coeff
if FLAGS.num_worker:
run_args['num_worker'] = FLAGS.num_worker
if FLAGS.eval_agents:
run_args['eval_agents'] = FLAGS.eval_agents.split(',')
logging.info('found eval agents:%s', run_args['eval_agents'])
# if FLAGS.select_policy_way:
# run_args['select_policy_way'] = FLAGS.select_policy_way
return run_args
def main(_):
logging.set_verbosity(logging.INFO)
tf.enable_resource_variables()
if FLAGS.select_policy_way == 'independent':
runner = baseline_runners.EvalRunner(
root_dir=FLAGS.root_dir,
env_name=suite_atari.game(name=FLAGS.game_name),
**get_run_args())
runner.run()
if __name__ == '__main__':
flags.mark_flag_as_required('root_dir')
app.run(main)
|
the-stack_106_16273
|
import matplotlib.pyplot as plt
if __name__=="__main__":
with open('data.csv') as f:
raw=f.read().split("\n")
times,temps,freqs=[],[],[]
for line in raw:
if "," in line and not line.startswith("#"):
a,b,c=line.strip().split(",")
times.append(float(a)/60)
temps.append(float(b))
freqs.append(float(c))
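            # descriptive note: the check below is a crude spike filter; once more
            # than 10 samples exist, a jump of more than 50 relative to freqs[-5]
            # is treated as a glitch and the newest value is replaced with the
            # previous reading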
if len(freqs)>10:
if abs(freqs[-1]-freqs[-5])>50:
freqs[-1]=freqs[-2]
plt.figure(figsize=(10,10))
ax=plt.subplot(211)
plt.grid()
plt.plot(times,temps)
plt.subplot(212,sharex=ax)
plt.grid()
plt.plot(times,freqs)
plt.figure(figsize=(10,10))
plt.plot(temps,freqs)
plt.show()
print("DONE")
|
the-stack_106_16276
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
from collections import deque
import networkx as nx
from mo.graph.graph import Node, create_edge
from mo.middle.pattern_match import apply_pattern
from mo.utils.graph import bfs_search, pseudo_topological_sort
def get_nodes_with_attributes(graph: nx.MultiDiGraph, **attrs: dict):
node_attrs = graph.nodes(data=True)
return [n for n, d in node_attrs if all(a in d.items() for a in attrs.items())]
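# Example (illustrative): get_nodes_with_attributes(graph, kind='op', op='Split')
# returns the names of all nodes whose attribute dict contains both of those items.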
def reverse_dfs(graph: nx.MultiDiGraph, node_name: str, update_func: callable, visited: set = None):
d = deque()
if visited is None:
visited = set()
visited.add(node_name)
d.appendleft(node_name)
while len(d) != 0:
cur_node = d.popleft()
update_func(graph, cur_node)
for in_node_name, _ in graph.in_edges(cur_node):
if in_node_name not in visited:
visited.add(in_node_name)
d.append(in_node_name)
def mark_input_nodes(graph: nx.MultiDiGraph, node_name: str, key: str, value):
for input, _ in graph.in_edges(node_name):
graph.node[input][key] = value
def mark_output_nodes(graph: nx.MultiDiGraph, node_name: str, key: str, value):
for output, _ in graph.out_edges(node_name):
graph.node[output][key] = value
def mark_output_reachable_nodes(graph: nx.MultiDiGraph):
"""
    Mark each node as output reachable or not. A node is considered output reachable if it is connected to
one of the nodes that has attribute is_output=True.
"""
nx.set_node_attributes(graph, name='is_output_reachable', values=False)
outputs = get_nodes_with_attributes(graph, is_output=True)
log.debug('The following nodes are seeded as output reachable:\n{}'.format('\n'.join(sorted(map(str, outputs)))))
nx.set_node_attributes(graph, name='is_output_reachable', values={n: True for n in outputs})
for output_name in outputs:
reverse_dfs(graph, output_name,
lambda graph, node_name: mark_input_nodes(graph, node_name, 'is_output_reachable', True), set())
def mark_undead_nodes(graph: nx.MultiDiGraph, undead_types: list):
"""
Mark output nodes and nodes of the specific type as undead, meaning that they should survive the dead nodes
elimination phase. Then mark all children nodes of the undead nodes (except children of inputs) as undead.
:param graph: graph to operate on.
:param undead_types: list of node types that should be marked as undead.
    :return: updated graph where each node has the attribute 'is_undead'.
"""
nx.set_node_attributes(graph, name='is_undead', values=False)
# mark output nodes as undead
outputs = get_nodes_with_attributes(graph, is_output=True)
nx.set_node_attributes(graph, name='is_undead', values={n: True for n in outputs})
# mark specifically defined with node type set of nodes
for type in undead_types:
node_of_specific_type = get_nodes_with_attributes(graph, type=type)
nx.set_node_attributes(graph, name='is_undead', values={n: True for n in node_of_specific_type})
undead_nodes = get_nodes_with_attributes(graph, is_undead=True)
# propagate 'undead' attribute to children nodes of undead nodes if the node produces constant value
for node_name in bfs_search(graph, undead_nodes):
if graph.node[node_name]['is_undead']:
for _, dst_node_name in graph.out_edges(node_name):
node_attrs = graph.node[dst_node_name]
if 'kind' in node_attrs and node_attrs['kind'] == 'data' and node_attrs['value'] is not None:
graph.node[dst_node_name]['is_undead'] = True
# mark input nodes as undead
inputs = get_nodes_with_attributes(graph, is_input=True)
nx.set_node_attributes(graph, name='is_undead', values={n: True for n in inputs})
def mark_const_producer_nodes(graph: nx.MultiDiGraph):
"""
Mark nodes that produce constant values.
:param graph: graph to operate on.
    :return: None; the 'is_const_producer' attribute is set on the graph nodes in place.
"""
nx.set_node_attributes(graph, name='is_const_producer', values=True)
for n in pseudo_topological_sort(graph):
node = Node(graph, n)
for input, output, attrs in graph.in_edges(n, data=True):
if 'control_flow_edge' in attrs and attrs['control_flow_edge']:
graph.node[input]['is_const_producer'] = False
graph.node[output]['is_const_producer'] = False
if not node.has('value') or node.value is None:
for input, _ in graph.in_edges(n):
graph.node[input]['is_const_producer'] = False
def eliminate_dead_nodes(graph: nx.MultiDiGraph):
nodes_to_remove = set()
for node_name, node_attrs in graph.nodes(data=True):
if not node_attrs['is_output_reachable'] or (node_attrs['is_const_producer'] and not node_attrs['is_undead']):
nodes_to_remove.add(node_name)
log.debug('Removing the following dead nodes: {}'.format('\n'.join(sorted(map(str, nodes_to_remove)))))
graph.remove_nodes_from(nodes_to_remove)
def graph_clean_up(graph: nx.MultiDiGraph, undead_node_types: list = []):
mark_output_reachable_nodes(graph)
mark_undead_nodes(graph, undead_node_types)
mark_const_producer_nodes(graph)
eliminate_dead_nodes(graph)
def graph_clean_up_tf(graph: nx.MultiDiGraph):
graph_clean_up(graph, ['TFCustomSubgraphCall'])
def remove_identity_action(graph: nx.MultiDiGraph, matches: dict):
remove_op_node(graph, matches['identity'])
# TODO: unit tests
def merge_data_nodes(graph: nx.MultiDiGraph, survived: Node, removed: Node):
if survived.has_and_set('is_output'):
graph.node[removed.id].update({'is_output': True})
for u, v, d in list(graph.in_edges(removed.id, data=True)):
graph.add_edges_from([(u, survived.id, d)])
graph.remove_edge(u, v)
for u, v, d in list(graph.out_edges(removed.id, data=True)):
graph.add_edges_from([(survived.id, v, d)])
graph.remove_edge(u, v)
for attr in graph.node[removed.id]:
        if attr not in ['name']:
# We need to save debug info from removed data node
if attr == 'fw_tensor_debug_info':
if not survived.has_valid(attr):
survived[attr] = []
for fw_tensor_debug_info in removed[attr]:
survived[attr].append(fw_tensor_debug_info)
else:
survived[attr] = removed[attr]
# TODO: unit tests
def remove_op_node(graph: nx.MultiDiGraph, identity: Node):
input = identity.in_node()
output = [v for _, v in graph.out_edges(identity.id)]
assert len(output) == 1
output = Node(graph, output[0])
graph.remove_edge(input.id, identity.id)
graph.remove_edge(identity.id, output.id)
merge_data_nodes(graph, output, input)
# we just have saved all output edges from 'input' by reconnecting them to 'output', now we can delete 'input'
log.debug('Removing op node: {}'.format(identity.id))
graph.remove_node(identity.id)
graph.remove_node(input.id)
def remove_op_nodes(graph: nx.MultiDiGraph, attrs: dict):
op_attrs = {'kind': 'op'}
op_attrs.update(attrs)
apply_pattern(
graph,
nodes=[('identity', op_attrs)],
edges=[],
action=remove_identity_action
)
def remove_edges_for_nodes(graph: nx.MultiDiGraph, node_attrs: dict, edge_attrs: dict):
for node in graph.nodes():
node = Node(graph, node)
if all([node.has(attr) and node[attr] == node_attrs[attr] for attr in node_attrs]):
nodes_edges = node.in_nodes_edges()
for port in nodes_edges:
src_node, edge = nodes_edges[port]
if all([attr in edge and edge[attr] == edge_attrs[attr] for attr in edge_attrs]):
graph.remove_edge(src_node.id, node.id)
def remove_useless_split_action(graph: nx.MultiDiGraph, matches: dict):
split_node = matches['split']
input = split_node.in_node(1)
output = split_node.out_node()
graph.remove_edge(input.id, split_node.id)
for u, v, d in list(graph.out_edges(output.id, data=True)):
graph.add_edges_from([(input.id, v, d)])
graph.remove_edge(u, v)
def remove_useless_split(graph: nx.MultiDiGraph):
apply_pattern(
graph,
nodes=[('split', {'kind': 'op', 'op': 'Split', 'num_split': 1})],
edges=[],
action=remove_useless_split_action
)
def remove_node_from_graph(graph: nx.MultiDiGraph, previous_node: Node, removing_node: Node):
if len(removing_node.out_nodes()) > 0:
last_node_out = removing_node.out_node(0)
edge_data = graph.get_edge_data(removing_node.id, last_node_out.id)
out_port = edge_data[0]['out']
in_port = edge_data[0]['in']
graph.remove_edge(previous_node.id, removing_node.id)
graph.remove_edge(removing_node.id, last_node_out.id)
create_edge(previous_node, last_node_out, out_port=out_port, in_port=in_port)
graph.remove_node(removing_node.id)
|
the-stack_106_16279
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from threading import Thread
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
from traits.api import Instance, Int, Property, String, Bool
from traitsui.api import Controller, UItem, TabularEditor, VGroup, UReadonly
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.tabular_adapter import TabularAdapter
from pychron.core.helpers.formatting import floatfmt
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.envisage.resources import icon
from pychron.pychron_constants import LIGHT_GREEN, LIGHT_RED, LIGHT_YELLOW
from pychron.startup_test.tester import TestResult
COLOR_MAP = {'Passed': LIGHT_GREEN,
'Skipped': 'lightblue',
'Failed': LIGHT_RED,
'Invalid': LIGHT_YELLOW}
ICON_MAP = {'Passed': 'green_ball',
'Skipped': 'gray_ball',
'Failed': 'red_ball',
'Invalid': 'yellow_ball'}
class ResultsAdapter(TabularAdapter):
columns = [('', 'result_image'),
('Plugin', 'plugin'),
('Name', 'name'),
('Duration (s)', 'duration'),
('Result', 'result')]
plugin_width = Int(200)
name_width = Int(190)
duration_width = Int(80)
duration_text = Property
result_image_image = Property
result_image_text = Property
def _get_result_image_text(self):
return ''
def _get_result_image_image(self):
return icon(ICON_MAP[self.item.result])
def get_bg_color(self, obj, trait, row, column=0):
return COLOR_MAP[self.item.result]
def _get_duration_text(self):
return floatfmt(self.item.duration) # '{:0.5f}'.format(self.item.duration)
class ResultsView(Controller):
model = Instance('pychron.startup_test.tester.StartupTester')
auto_close = 5
selected = Instance(TestResult, ())
base_help_str = 'Select any row to cancel auto close. Auto close in {}'
help_str = String
_auto_closed = False
_cancel_auto_close = False
can_cancel = Bool(True)
def _selected_changed(self, new):
self._cancel_auto_close = bool(new)
def _timer_func(self):
delay = self.auto_close
st = time.time()
while 1:
time.sleep(0.25)
ct = time.time() - st
if ct > delay or self._cancel_auto_close:
break
self.help_str = self.base_help_str.format(delay - int(ct))
if self._cancel_auto_close:
self.help_str = 'Auto close canceled'
else:
invoke_in_main_thread(self._do_auto_close)
def init(self, info):
if self.auto_close and self.model.all_passed:
t = Thread(target=self._timer_func)
t.start()
# do_after(self.auto_close * 1000, self._do_auto_close)
else:
self.help_str = ''
def closed(self, info, is_ok):
import sys
if not self._auto_closed and not is_ok:
if confirm(info.ui.control, 'Are you sure you want to Quit?') == YES:
self.model.info('User quit because of Startup fail')
sys.exit()
else:
if not self.model.ok_close():
if confirm(info.ui.control, 'Pychron is not communicating with a Spectrometer.\n'
'Are you sure you want to enter '
'Spectrometer Simulation mode?') != YES:
sys.exit()
def _do_auto_close(self):
if not self._cancel_auto_close:
self._auto_closed = True
try:
self.info.ui.dispose()
except AttributeError:
pass
def traits_view(self):
if self.can_cancel:
buttons = ['OK', 'Cancel']
else:
buttons = ['OK']
v = okcancel_view(VGroup(UItem('results', editor=TabularEditor(adapter=ResultsAdapter(),
editable=False,
selected='controller.selected')),
VGroup(UReadonly('controller.selected.description'),
show_border=True,
label='Description'),
VGroup(UReadonly('controller.selected.error'),
show_border=True,
visible_when='controller.selected.error',
label='Error'),
VGroup(UReadonly('controller.help_str'),
show_border=True,
visible_when='controller.help_str')),
title='Test Results',
buttons=buttons,
height=500,
width=650)
return v
# ============= EOF =============================================
|
the-stack_106_16280
|
import textwrap
import tkinter as tk
from tkinter import font as tk_font
from tkinter import ttk
from thonny import get_workbench
from thonny.codeview import CodeView
from thonny.config_ui import ConfigurationPage
from thonny.ui_utils import create_string_var
class ThemeAndFontConfigurationPage(ConfigurationPage):
def __init__(self, master):
self._original_family = get_workbench().get_option("view.editor_font_family")
self._original_size = get_workbench().get_option("view.editor_font_size")
self._original_ui_theme = get_workbench().get_option("view.ui_theme")
self._original_syntax_theme = get_workbench().get_option("view.syntax_theme")
ConfigurationPage.__init__(self, master)
self._family_variable = create_string_var(
self._original_family, modification_listener=self._update_appearance
)
self._size_variable = create_string_var(
self._original_size, modification_listener=self._update_appearance
)
self._ui_theme_variable = create_string_var(
self._original_ui_theme, modification_listener=self._update_appearance
)
self._syntax_theme_variable = create_string_var(
self._original_syntax_theme, modification_listener=self._update_appearance
)
ttk.Label(self, text="UI theme").grid(row=1, column=0, sticky="w", pady=(10, 0))
self._ui_theme_combo = ttk.Combobox(
self,
exportselection=False,
textvariable=self._ui_theme_variable,
state="readonly",
height=15,
values=get_workbench().get_usable_ui_theme_names(),
)
self._ui_theme_combo.grid(row=2, column=0, sticky="nsew", padx=(0, 10))
ttk.Label(self, text="Syntax theme").grid(
row=1, column=1, sticky="w", pady=(10, 0)
)
self._syntax_theme_combo = ttk.Combobox(
self,
exportselection=False,
textvariable=self._syntax_theme_variable,
state="readonly",
height=15,
values=get_workbench().get_syntax_theme_names(),
)
self._syntax_theme_combo.grid(row=2, column=1, sticky="nsew", padx=(0, 10))
ttk.Label(self, text="Editor font").grid(
row=1, column=2, sticky="w", pady=(10, 0)
)
self._family_combo = ttk.Combobox(
self,
exportselection=False,
state="readonly",
height=15,
textvariable=self._family_variable,
values=self._get_families_to_show(),
)
self._family_combo.grid(row=2, column=2, sticky=tk.NSEW, padx=(0, 10))
ttk.Label(self, text="Size").grid(row=1, column=3, sticky="w", pady=(10, 0))
self._size_combo = ttk.Combobox(
self,
width=4,
exportselection=False,
textvariable=self._size_variable,
state="readonly",
height=15,
values=[str(x) for x in range(3, 73)],
)
self._size_combo.grid(row=2, column=3, sticky="nsew")
ttk.Label(self, text="Editor preview").grid(
row=3, column=0, sticky="w", pady=(10, 0), columnspan=4
)
self._preview_codeview = CodeView(
self,
height=10,
font="EditorFont",
# relief="sunken",
# borderwidth=1,
)
self._preview_codeview.set_content(
textwrap.dedent(
"""
def foo(bar):
if bar is None: # This is a comment
print("The answer is", 33)
unclosed_string = "blah, blah
"""
).strip()
)
self._preview_codeview.grid(
row=4, column=0, columnspan=4, sticky=tk.NSEW, pady=(0, 5)
)
(
ttk.Label(
self, text="NB! Some style elements change only after restarting Thonny"
).grid(row=5, column=0, columnspan=4, sticky="w", pady=(0, 5))
)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=1)
self.rowconfigure(4, weight=1)
def apply(self):
# don't do anything, as preview already did the thing
return
def cancel(self):
if (
getattr(self._family_variable, "modified")
or getattr(self._size_variable, "modified")
or getattr(self._ui_theme_variable, "modified")
or getattr(self._syntax_theme_variable, "modified")
):
get_workbench().set_option("view.ui_theme", self._original_ui_theme)
get_workbench().set_option("view.syntax_theme", self._original_syntax_theme)
get_workbench().set_option("view.editor_font_size", self._original_size)
get_workbench().set_option("view.editor_font_family", self._original_family)
get_workbench().reload_themes()
get_workbench().update_fonts()
def _update_appearance(self):
get_workbench().set_option("view.ui_theme", self._ui_theme_variable.get())
get_workbench().set_option(
"view.syntax_theme", self._syntax_theme_variable.get()
)
get_workbench().set_option(
"view.editor_font_size", int(self._size_variable.get())
)
get_workbench().set_option(
"view.editor_font_family", self._family_variable.get()
)
get_workbench().reload_themes()
get_workbench().update_fonts()
def _get_families_to_show(self):
# In Linux, families may contain duplicates (actually different fonts get same names)
return sorted(set(filter(lambda name: name[0].isalpha(), tk_font.families())))
def load_plugin() -> None:
get_workbench().add_configuration_page(
"Theme & Font", ThemeAndFontConfigurationPage
)
|
the-stack_106_16281
|
from typing import List, Dict, Tuple
import ROOT as r
from tqdm import tqdm
from . import calibrationUtils as util
import os
class CalibrationData:
def __init__(self, image_dir_path: str, MPPC_high_voltage: str) -> None:
self._image_dir_path: str = image_dir_path
self._HV: str = MPPC_high_voltage
self._hists_VadcHigh: List[r.TH1D] = [None for _ in range(64)]
self._fitted_adc_means: List[List[float]] = [None for _ in range(64)]
self._fitted_adc_mean_errors: List[List[float]] = [None for _ in range(64)]
def set_hist(self, detector_ch: int, cal_root_file_path: str, cal_ch: int) -> None:
hist = util.getHistMPPC(cal_root_file_path, cal_ch)
hist.SetTitle("{0} [{1}ch];ADC;Events".format(cal_root_file_path.split('/')[-1], detector_ch))
self._hists_VadcHigh[detector_ch] = hist
def fit_multi_gaus(
self,
ch,
peak_search_range=(0, 1500),
fitting_range=(0, 1500),
peak_search_sigma=10
) -> None:
hist = self._hists_VadcHigh[ch]
# determine hist showing range
xmin = 0
xmax = 4096
while self._hists_VadcHigh[ch].GetBinContent(xmin) == 0:
xmin += 1
while self._hists_VadcHigh[ch].GetBinContent(xmax) == 0:
xmax -= 1
# fit
ret_adc_means, ret_adc_mean_errors = util.getFittedParams(
hist,
peak_search_range,
fitting_range,
(xmin, xmax),
peak_search_sigma
)
# set to member variable & save hist as image
self._fitted_adc_means[ch] = ret_adc_means
self._fitted_adc_mean_errors[ch] = ret_adc_mean_errors
self.save_hist_as_png(ch)
def save_hist_as_png(self, ch):
canvas = r.TCanvas()
self._hists_VadcHigh[ch].Draw()
canvas.SaveAs("{0}/{1}/hist.png".format(self._image_dir_path, ch))
class CalibrationDatas:
def __init__(self) -> None:
self._calbDatas: Dict[str, CalibrationData] = {}
self._HVs: List[str] = []
self._calb_line_TGraphs: Dict[str, List[r.TGraphErrors]] = {}
self._calb_line_TF1s: Dict[str, List[r.TF1]] = {}
self._calb_line_TCanvases: Dict[str, List[r.TCanvas]] = {}
self._HV_one_photon_TGraphs: List[r.TGraphErrors] = [None for _ in range(64)]
self._HV_one_photon_TF1s: List[r.TF1] = [None for _ in range(64)]
self._pedestal_data_path: str = None
self._pedestal_adc_means: List[float] = [None for _ in range(64)]
self._pedestal_adc_mean_errors: List[float] = [None for _ in range(64)]
self._initial_photon_number_s: Dict[str, List[int]] = {}
def set_calb_data(self, img_dir_path: str, HV: str) -> None:
self._calbDatas[HV] = CalibrationData(img_dir_path, HV)
self._HVs.append(HV)
self._calb_line_TCanvases[HV] = [None for _ in range(64)]
self._calb_line_TGraphs[HV] = [None for _ in range(64)]
self._calb_line_TF1s[HV] = [None for _ in range(64)]
self._initial_photon_number_s[HV] = [None for _ in range(64)]
self.make_dirs()
def get_calb_data(self, HV: str) -> CalibrationData:
return self._calbDatas[HV]
def fit_adc_nphoton_line(self, HV, ch, initial_photon_num):
# init graph
n_points = len(self._calbDatas[HV]._fitted_adc_means[ch])
photon_nums = [initial_photon_num + i for i in range(n_points)]
photon_num_errors = [0 for _ in range(n_points)]
adc_means = self._calbDatas[HV]._fitted_adc_means[ch]
adc_mean_errors = self._calbDatas[HV]._fitted_adc_mean_errors[ch]
graph = util.TPGraphErrors(
n_points,
photon_nums,
adc_means,
photon_num_errors,
adc_mean_errors
)
graph.SetTitle("{}ch;Photon Number;ADC Value".format(ch))
graph.SetMarkerStyle(8)
graph.SetMarkerSize(1)
# init liner function for fitting and fit
f_fit = r.TF1("f_liner", "[0]*x + [1]", 0, 20)
graph.Fit(f_fit, "R")
# init axis for Tgraph
photon_num_range = (0, photon_nums[-1] + 1)
adc_range = tuple(map(f_fit.Eval, photon_num_range))
axis = r.TH2D(
"axis", "{}ch;Photon Number;ADC Value".format(ch),
0, *photon_num_range,
0, *adc_range
)
axis.SetStats(0)
# draw to canvas
canvas = r.TCanvas()
axis.Draw("AXIS")
graph.Draw("P SAME")
# set to member function & save canvas as png
self._calb_line_TCanvases[HV][ch] = canvas
self._calb_line_TGraphs[HV][ch] = graph
self._calb_line_TF1s[HV][ch] = f_fit
self.save_calb_line_TCanvas(HV, ch)
def fit_all_adc_nphoton_line(self):
for HV in self._HVs:
for ch in range(64):
initial_photon_num = self._initial_photon_number_s[HV][ch]
self.fit_adc_nphoton_line(HV, ch, initial_photon_num)
def save_calb_line_TCanvas(self, HV, ch):
save_str = "{0}/{1}/graph_photon_adc.png".format(
self._calbDatas[HV]._image_dir_path,
ch
)
self._calb_line_TCanvases[HV][ch].SaveAs(save_str)
def fit_HV_one_photon(self, ch):
# fetch graph attr
n_points = len(self._HVs)
HVs = []
HV_errors = []
one_photon_adc_widthes = []
one_photon_adc_width_errors = []
for HV in self._HVs:
HVs.append(float(HV))
one_photon_adc_widthes.append(
self._calb_line_TF1s[HV][ch].GetParameter(0)
)
HV_errors.append(0)
one_photon_adc_width_errors.append(
self._calb_line_TF1s[HV][ch].GetParError(0)
)
# init graph
graph = util.TPGraphErrors(
n_points,
HVs,
one_photon_adc_widthes,
HV_errors,
one_photon_adc_width_errors
)
graph.SetTitle("{}ch;MPPC HV [V];ADC/One Photon".format(ch))
graph.SetMarkerStyle(8)
graph.SetMarkerSize(1)
# init liner function for fitting and fit
f_fit = r.TF1("f_liner", "[0]*x + [1]", 0, 60)
graph.Fit(f_fit, "R")
# set to class member variable & save graph as png
self._HV_one_photon_TGraphs[ch] = graph
self._HV_one_photon_TF1s[ch] = f_fit
self.save_HV_one_photon_TGraph(ch)
def save_HV_one_photon_TGraph(self, ch):
save_str = "{0}/{1}/HV_one_photon_TGraph.png"
canvas = r.TCanvas()
self._HV_one_photon_TGraphs[ch].Draw("AP")
for HV in self._HVs:
canvas.SaveAs(save_str.format(self._calbDatas[HV]._image_dir_path, ch))
def make_dirs(self):
for HV in self._HVs:
os.makedirs(self._calbDatas[HV]._image_dir_path, exist_ok=True)
for i in range(64):
os.makedirs("{0}/{1}".format(self._calbDatas[HV]._image_dir_path, i), exist_ok=True)
def set_pedestal_data(self, pedestal_data_path):
self._pedestal_data_path = pedestal_data_path
hists = [util.getHistMPPC(self._pedestal_data_path, ch) for ch in range(64)]
funcs = [r.TF1("", "gaus", 0, 4096) for _ in range(64)]
for ch in range(64):
hists[ch].Fit(funcs[ch], "R")
self._pedestal_adc_means[ch] = funcs[ch].GetParameter(1)
self._pedestal_adc_mean_errors[ch] = funcs[ch].GetParError(1)
def determine_initial_photon_number(self, HV, ch):
fitted_means = self._calbDatas[HV]._fitted_adc_means[ch]
pedestal_mean = self._pedestal_adc_means[ch]
diff_ped_to_ini = fitted_means[0] - pedestal_mean
aprox_width = fitted_means[1] - fitted_means[0]
initial_photon_number = round(diff_ped_to_ini / aprox_width)
self._initial_photon_number_s[HV][ch] = initial_photon_number
def determine_all_initial_photon_number(self):
for HV in self._HVs:
for ch in range(64):
self.determine_initial_photon_number(HV, ch)
def print_fitted_pedestal(self):
for HV in self._HVs:
print("========== {}V ==========".format(HV))
for ch in range(64):
print(self._calb_line_TF1s[HV][ch].Eval(0))
def get_HV_from_one_photon(self, ch, one_photon_width) -> float:
a = self._HV_one_photon_TF1s[ch].GetParameter(0)
b = self._HV_one_photon_TF1s[ch].GetParameter(1)
return (one_photon_width - b) / a
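    # Worked example with hypothetical fit parameters: if the linear fit for this
    # channel is one_photon_width = a*HV + b with a = 2.0 and b = -40.0, then
    # get_HV_from_one_photon(ch, 60.0) = (60.0 - (-40.0)) / 2.0 = 50.0 V.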
def make_yml_InputDAC(self, target_width) -> None:
HV_target_s = [self.get_HV_from_one_photon(ch, target_width) for ch in range(64)]
HV_ref = HV_target_s[0]
HV_diff_s = [HV_ref - HV for HV in HV_target_s]
DAC_bit_s = [256 + 128 + int(HV_diff / (4.5/256)) for HV_diff in HV_diff_s]
out_str = "# setHV {}\n".format(HV_ref)
out_str += "---\n"
out_str += "EASIROC1:\n"
out_str += " Input 8-bit DAC:\n"
for ch in range(0, 32):
out_str += " - {}\n".format(DAC_bit_s[ch])
out_str += "EASIROC2:\n"
out_str += " Input 8-bit DAC:\n"
for ch in range(32, 64):
out_str += " - {}\n".format(DAC_bit_s[ch])
with open("InputDAC.yml", 'w') as f:
f.write(out_str)
def set_InputDAC_mesurement_data(self, d: Dict[float, List[List[List[float]]]]) -> None:
"""
d[setHV][ch][0] = [DAC_value (256-511)]\n
d[setHV][ch][1] = [DAC_voltage]
"""
self._InputDAC_mesurement_data = d
os.makedirs("InputDAC_fit", exist_ok=True)
def set_setHV_to_realHV(self, d: Dict[float, float]) -> None:
self._setHV_to_realHV = d
def fit_InputDAC_vaule_voltage_line(
self,
setHV: float,
ch: int,
fit_range: Tuple[float] = (258, 350)
) -> None:
n_points = len(self._InputDAC_mesurement_data[setHV][ch][0])
MPPC_HVs = [self._setHV_to_realHV[setHV] for _ in range(n_points)]
truth_HVs = [
MPPC_HV - DAC_V
for MPPC_HV, DAC_V in zip(MPPC_HVs, self._InputDAC_mesurement_data[setHV][ch][1])
]
g = util.TPGraphErrors(
n_points,
self._InputDAC_mesurement_data[setHV][ch][0],
truth_HVs,
[0 for _ in range(n_points)],
[0 for _ in range(n_points)]
)
f = r.TF1(
"InputDAC_vaule_voltage_{}_{}".format(setHV, ch),
"[0]*x+[1]",
fit_range[0],
fit_range[1]
)
c = r.TCanvas()
g.Fit(f, "R")
g.Draw("AP")
c.SaveAs("InputDAC_fit/setHV{0}_ch{1}.png".format(setHV, ch))
|
the-stack_106_16282
|
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess # nosec: B404, we deliberately use this module
import shlex
from wca.metrics import MetricName, Measurements
log = logging.getLogger(__name__)
# returns tuple with potential read and write bandwidth in GB/s
# all values are theoretical and might deviate from real-world measurements
# arguments: power in Watts, number of NVDIMMs in interleave set, size in GB
def _calculate_bandwidth(power, count, size):
# bandwidth per NVDIMM type: 10W, 12W, 15W, 18W
# zeroes are where there is no data available
power_set = (10, 12, 15, 18)
read_sdp = (2.4, 4.3, 6.8, 0)
write_sdp = (0.88, 1.48, 1.85, 0)
read_ddp = (0, 4.1, 6.8, 8.3)
write_ddp = (0, 1.45, 2.3, 3.0)
read_qdp = (0, 2.65, 5.3, 7.8)
write_qdp = (0, 1.02, 1.89, 2.68)
use_power_index = 0
for index, it_power in enumerate(power_set):
if int(power) >= int(it_power):
use_power_index = index
# used simplified sizes
if size < 200:
read = read_sdp[use_power_index]
write = write_sdp[use_power_index]
elif 200 <= size < 500:
read = read_ddp[use_power_index]
write = write_ddp[use_power_index]
else:
read = read_qdp[use_power_index]
write = write_qdp[use_power_index]
read = round(count * read, 2)
write = round(count * write, 2)
return read, write
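# Illustrative example (values follow directly from the lookup tables above): six
# 512 GB modules on a 15 W average power budget fall into the QDP row at the 15 W
# column, so _calculate_bandwidth(15, 6, 512) returns (31.8, 11.34) GB/s for read
# and write respectively.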
def _get_ipmctl():
"""Execute ipmctl to get platform information"""
try:
# nosec: B603. We deliberately use 'subprocess'. There is a permanent input.
ipmctl_region = subprocess.check_output( # nosec
shlex.split('ipmctl show -a -u B -region')).decode("utf-8")
# typical output. Executing without root yields empty result as there were no pmem modules
# ---ISetID=0x5876eeb8014a2444---
# SocketID=0x0000
# PersistentMemoryType=AppDirect
# Capacity=541165879296 B
# FreeCapacity=541165879296 B
# HealthState=Healthy
# DimmID=0x0001, 0x0101
# ---ISetID=0x4e66eeb83e4c2444---
# SocketID=0x0001
# PersistentMemoryType=AppDirect
# Capacity=541165879296 B
# FreeCapacity=541165879296 B
# HealthState=Healthy
# DimmID=0x1001, 0x1101
# nosec: B603. We deliberately use 'subprocess'. There is a permanent input.
ipmctl_dimm = subprocess.check_output( # nosec
shlex.split('ipmctl show -u B -d '
'AvgPowerBudget,Capacity,'
'SocketID -dimm')).decode("utf-8")
# typical output. Executing without root yields empty result as there were no pmem modules
# ---DimmID=0x0001---
# Capacity=271070789632 B
# SocketID=0x0000
# AvgPowerBudget=15000 mW
# ---DimmID=0x0101---
# Capacity=271070789632 B
# SocketID=0x0000
# AvgPowerBudget=15000 mW
except FileNotFoundError:
log.warning('ipmctl unavailable, cannot read memory mode size')
return None, None
except subprocess.CalledProcessError:
log.warning('ipmctl unavailable, cannot read memory mode size')
return None, None
return ipmctl_region, ipmctl_dimm
def _get_ipmctl_dimm_info(ipmctl_dimm):
"""Parse information from ipmctl dimm output"""
socket_nvdimms = dict()
avg_power_per_nvdimm, capacity_per_nvdimm = None, None
for line in ipmctl_dimm.split():
if line.startswith('Capacity'):
# values should be the same for each nvdimm
capacity_per_nvdimm = line.split('=')[1]
elif line.startswith('AvgPowerBudget'):
# values should be the same for each nvdimm
avg_power_per_nvdimm = int(line.split('=')[1]) / 1000
elif line.startswith('SocketID'):
socket_string = line.split('=')[1]
if socket_string in socket_nvdimms:
socket_nvdimms[socket_string] += 1
else:
socket_nvdimms[socket_string] = 1
return avg_power_per_nvdimm, int(capacity_per_nvdimm), socket_nvdimms
def _get_ipmctl_region_info(ipmctl_region):
"""Parse information from ipmctl region output"""
regions = dict()
iset_id = ''
for line in ipmctl_region.replace(' ', '').split('\n'):
if line.startswith('---ISetID'):
iset_id = line.split('=')[1].replace('-', '')
regions[iset_id] = {}
elif line.startswith(SOCKET):
regions[iset_id].update({SOCKET: line.split('=')[1]})
elif line.startswith(CAPACITY):
regions[iset_id].update(
{CAPACITY: line.split('=')[1].replace(' B', '')})
elif line.startswith(DIMM):
regions[iset_id].update({DIMM: line.split('=')[1].split(', ')})
return regions
SOCKET = 'SocketID'
CAPACITY = 'Capacity'
DIMM = 'DimmID'
def get_bandwidth() -> Measurements:
ipmctl_region, ipmctl_dimm = _get_ipmctl()
if ipmctl_region is None and ipmctl_dimm is None:
return {}
measurements = {MetricName.PLATFORM_NVDIMM_READ_BANDWIDTH_BYTES_PER_SECOND: {},
MetricName.PLATFORM_NVDIMM_WRITE_BANDWIDTH_BYTES_PER_SECOND: {}}
avg_power_per_nvdimm, capacity_per_nvdimm, socket_nvdimms = _get_ipmctl_dimm_info(ipmctl_dimm)
regions = _get_ipmctl_region_info(ipmctl_region)
GB = 1e9
capacity_per_nvdimm_in_gigabytes = capacity_per_nvdimm / GB
def socket_to_label(socket):
"""
Convert socket representation from hex to decimal.
example: '0x003' -> '3'
"""
return str(int(socket, 16))
for region in regions:
nvdimm_count = len(regions[region][DIMM])
rwt = _calculate_bandwidth(avg_power_per_nvdimm, nvdimm_count,
capacity_per_nvdimm_in_gigabytes)
socket_label = socket_to_label(regions[region][SOCKET])
measurements[MetricName.PLATFORM_NVDIMM_READ_BANDWIDTH_BYTES_PER_SECOND].update(
{socket_label: rwt[0] * GB})
measurements[MetricName.PLATFORM_NVDIMM_WRITE_BANDWIDTH_BYTES_PER_SECOND].update(
{socket_label: rwt[1] * GB})
if not regions:
for socket in socket_nvdimms:
rwt = _calculate_bandwidth(avg_power_per_nvdimm,
socket_nvdimms[socket],
capacity_per_nvdimm_in_gigabytes)
socket_label = socket_to_label(socket)
measurements[MetricName.PLATFORM_NVDIMM_READ_BANDWIDTH_BYTES_PER_SECOND].update(
{socket_label: rwt[0] * GB})
measurements[MetricName.PLATFORM_NVDIMM_WRITE_BANDWIDTH_BYTES_PER_SECOND].update(
{socket_label: rwt[1] * GB})
measurements[MetricName.PLATFORM_CAPACITY_PER_NVDIMM_BYTES] = capacity_per_nvdimm
measurements[MetricName.PLATFORM_AVG_POWER_PER_NVDIMM_WATTS] = avg_power_per_nvdimm
return measurements
|
the-stack_106_16283
|
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional
import synapse.metrics
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, UserID
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class UserDirectoryHandler(StateDeltasHandler):
"""Handles queries and updates for the user_directory.
N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY
When a local user searches the user_directory, we report two kinds of users:
- users this server can see are joined to a world_readable or publicly
joinable room, and
- users belonging to a private room shared by that local user.
The two cases are tracked separately in the `users_in_public_rooms` and
`users_who_share_private_rooms` tables. Both kinds of users have their
username and avatar tracked in a `user_directory` table.
This handler has three responsibilities:
1. Forwarding requests to `/user_directory/search` to the UserDirectoryStore.
2. Providing hooks for the application to call when local users are added,
removed, or have their profile changed.
3. Listening for room state changes that indicate remote users have
joined or left a room, or that their profile has changed.
"""
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.store = hs.get_datastores().main
self.server_name = hs.hostname
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
self.update_user_directory = hs.config.server.update_user_directory
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
self.spam_checker = hs.get_spam_checker()
self.auth = hs.get_auth()
# The current position in the current_state_delta stream
self.pos: Optional[int] = None
# Guard to ensure we only process deltas one at a time
self._is_processing = False
if self.update_user_directory:
self.notifier.add_replication_callback(self.notify_new_event)
# We kick this off so that we don't have to wait for a change before
# we start populating the user directory
self.clock.call_later(0, self.notify_new_event)
async def search_users(
self, user_id: str, search_term: str, limit: int
) -> JsonDict:
"""Searches for users in directory
Returns:
dict of the form::
{
"limited": <bool>, # whether there were more results or not
"results": [ # Ordered by best match first
{
"user_id": <user_id>,
"display_name": <display_name>,
"avatar_url": <avatar_url>
}
]
}
"""
results = await self.store.search_user_dir(user_id, search_term, limit)
# Remove any spammy users from the results.
non_spammy_users = []
for user in results["results"]:
if not await self.spam_checker.check_username_for_spam(user):
non_spammy_users.append(user)
results["results"] = non_spammy_users
filtered_users = []
is_vip = await self.auth.is_vip(UserID.from_string(user_id))
if not is_vip:
logger.warning(results)
for user in results["results"]:
if not await self.auth.is_vip(UserID.from_string(user["user_id"])):
filtered_users.append(user)
results["results"] = filtered_users
return results
def notify_new_event(self) -> None:
"""Called when there may be more deltas to process"""
if not self.update_user_directory:
return
if self._is_processing:
return
async def process() -> None:
try:
await self._unsafe_process()
finally:
self._is_processing = False
self._is_processing = True
run_as_background_process("user_directory.notify_new_event", process)
async def handle_local_profile_change(
self, user_id: str, profile: ProfileInfo
) -> None:
"""Called to update index of our local user profiles when they change
irrespective of any rooms the user may be in.
"""
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
if await self.store.should_include_local_user_in_dir(user_id):
await self.store.update_profile_in_user_dir(
user_id, profile.display_name, profile.avatar_url
)
async def handle_local_user_deactivated(self, user_id: str) -> None:
"""Called when a user ID is deactivated"""
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
await self.store.remove_from_user_dir(user_id)
async def _unsafe_process(self) -> None:
# If self.pos is None then means we haven't fetched it from DB
if self.pos is None:
self.pos = await self.store.get_user_directory_stream_pos()
# If still None then the initial background update hasn't happened yet.
if self.pos is None:
return None
room_max_stream_ordering = self.store.get_room_max_stream_ordering()
if self.pos > room_max_stream_ordering:
# apparently, we've processed more events than exist in the database!
# this can happen if events are removed with history purge or similar.
logger.warning(
"Event stream ordering appears to have gone backwards (%i -> %i): "
"rewinding user directory processor",
self.pos,
room_max_stream_ordering,
)
self.pos = room_max_stream_ordering
# Loop round handling deltas until we're up to date
while True:
with Measure(self.clock, "user_dir_delta"):
room_max_stream_ordering = self.store.get_room_max_stream_ordering()
if self.pos == room_max_stream_ordering:
return
logger.debug(
"Processing user stats %s->%s", self.pos, room_max_stream_ordering
)
max_pos, deltas = await self.store.get_current_state_deltas(
self.pos, room_max_stream_ordering
)
logger.debug("Handling %d state deltas", len(deltas))
await self._handle_deltas(deltas)
self.pos = max_pos
# Expose current event processing position to prometheus
synapse.metrics.event_processing_positions.labels("user_dir").set(
max_pos
)
await self.store.update_user_directory_stream_pos(max_pos)
async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None:
"""Called with the state deltas to process"""
for delta in deltas:
typ = delta["type"]
state_key = delta["state_key"]
room_id = delta["room_id"]
event_id = delta["event_id"]
prev_event_id = delta["prev_event_id"]
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
# For join rule and visibility changes we need to check if the room
# may have become public or not and add/remove the users in said room
if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
await self._handle_room_publicity_change(
room_id, prev_event_id, event_id, typ
)
elif typ == EventTypes.Member:
await self._handle_room_membership_event(
room_id,
prev_event_id,
event_id,
state_key,
)
else:
logger.debug("Ignoring irrelevant type: %r", typ)
async def _handle_room_publicity_change(
self,
room_id: str,
prev_event_id: Optional[str],
event_id: Optional[str],
typ: str,
) -> None:
"""Handle a room having potentially changed from/to world_readable/publicly
joinable.
Args:
room_id: The ID of the room which changed.
prev_event_id: The previous event before the state change
event_id: The new event after the state change
typ: Type of the event
"""
logger.debug("Handling change for %s: %s", typ, room_id)
if typ == EventTypes.RoomHistoryVisibility:
publicness = await self._get_key_change(
prev_event_id,
event_id,
key_name="history_visibility",
public_value=HistoryVisibility.WORLD_READABLE,
)
elif typ == EventTypes.JoinRules:
publicness = await self._get_key_change(
prev_event_id,
event_id,
key_name="join_rule",
public_value=JoinRules.PUBLIC,
)
else:
raise Exception("Invalid event type")
if publicness is MatchChange.no_change:
logger.debug("No change")
return
# There's been a change to or from being world readable.
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
room_id
)
logger.debug("Publicness change: %r, is_public: %r", publicness, is_public)
if publicness is MatchChange.now_true and not is_public:
# If we became world readable but room isn't currently public then
# we ignore the change
return
elif publicness is MatchChange.now_false and is_public:
# If we stopped being world readable but are still public,
# ignore the change
return
users_in_room = await self.store.get_users_in_room(room_id)
# Remove every user from the sharing tables for that room.
for user_id in users_in_room:
await self.store.remove_user_who_share_room(user_id, room_id)
# Then, re-add all remote users and some local users to the tables.
# NOTE: this is not the most efficient method, as _track_user_joined_room sets
# up local_user -> other_user and other_user_whos_local -> local_user,
# which when ran over an entire room, will result in the same values
# being added multiple times. The batching upserts shouldn't make this
# too bad, though.
for user_id in users_in_room:
if not self.is_mine_id(
user_id
) or await self.store.should_include_local_user_in_dir(user_id):
await self._track_user_joined_room(room_id, user_id)
async def _handle_room_membership_event(
self,
room_id: str,
prev_event_id: str,
event_id: str,
state_key: str,
) -> None:
"""Process a single room membershp event.
We have to do two things:
1. Update the room-sharing tables.
This applies to remote users and non-excluded local users.
2. Update the user_directory and user_directory_search tables.
This applies to remote users only, because we only become aware of
           them (and any profile changes) by listening to these events.
The rest of the application knows exactly when local users are
created or their profile changed---it will directly call methods
on this class.
"""
joined = await self._get_key_change(
prev_event_id,
event_id,
key_name="membership",
public_value=Membership.JOIN,
)
# Both cases ignore excluded local users, so start by discarding them.
is_remote = not self.is_mine_id(state_key)
if not is_remote and not await self.store.should_include_local_user_in_dir(
state_key
):
return
if joined is MatchChange.now_false:
# Need to check if the server left the room entirely, if so
# we might need to remove all the users in that room
is_in_room = await self.store.is_host_joined(room_id, self.server_name)
if not is_in_room:
logger.debug("Server left room: %r", room_id)
# Fetch all the users that we marked as being in user
# directory due to being in the room and then check if
# need to remove those users or not
user_ids = await self.store.get_users_in_dir_due_to_room(room_id)
for user_id in user_ids:
await self._handle_remove_user(room_id, user_id)
else:
logger.debug("Server is still in room: %r", room_id)
await self._handle_remove_user(room_id, state_key)
elif joined is MatchChange.no_change:
# Handle any profile changes for remote users.
# (For local users the rest of the application calls
# `handle_local_profile_change`.)
if is_remote:
await self._handle_possible_remote_profile_change(
state_key, room_id, prev_event_id, event_id
)
elif joined is MatchChange.now_true: # The user joined
# This may be the first time we've seen a remote user. If
# so, ensure we have a directory entry for them. (For local users,
# the rest of the application calls `handle_local_profile_change`.)
if is_remote:
await self._upsert_directory_entry_for_remote_user(state_key, event_id)
await self._track_user_joined_room(room_id, state_key)
async def _upsert_directory_entry_for_remote_user(
self, user_id: str, event_id: str
) -> None:
"""A remote user has just joined a room. Ensure they have an entry in
the user directory. The caller is responsible for making sure they're
remote.
"""
event = await self.store.get_event(event_id, allow_none=True)
# It isn't expected for this event to not exist, but we
# don't want the entire background process to break.
if event is None:
return
logger.debug("Adding new user to dir, %r", user_id)
await self.store.update_profile_in_user_dir(
user_id, event.content.get("displayname"), event.content.get("avatar_url")
)
async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
"""Someone's just joined a room. Update `users_in_public_rooms` or
`users_who_share_private_rooms` as appropriate.
The caller is responsible for ensuring that the given user should be
included in the user directory.
"""
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
room_id
)
if is_public:
await self.store.add_users_in_public_rooms(room_id, (user_id,))
else:
users_in_room = await self.store.get_users_in_room(room_id)
other_users_in_room = [
other
for other in users_in_room
if other != user_id
and (
not self.is_mine_id(other)
or await self.store.should_include_local_user_in_dir(other)
)
]
to_insert = set()
# First, if they're our user then we need to update for every user
if self.is_mine_id(user_id):
for other_user_id in other_users_in_room:
to_insert.add((user_id, other_user_id))
# Next we need to update for every local user in the room
for other_user_id in other_users_in_room:
if self.is_mine_id(other_user_id):
to_insert.add((other_user_id, user_id))
if to_insert:
await self.store.add_users_who_share_private_room(room_id, to_insert)
async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
"""Called when when someone leaves a room. The user may be local or remote.
(If the person who left was the last local user in this room, the server
is no longer in the room. We call this function to forget that the remaining
remote users are in the room, even though they haven't left. So the name is
a little misleading!)
        Args:
            room_id: The room that the user left, or that stopped being public.
            user_id: The user to remove from this room's entries in the sharing
                tables.
        """
logger.debug("Removing user %r from room %r", user_id, room_id)
# Remove user from sharing tables
await self.store.remove_user_who_share_room(user_id, room_id)
# Additionally, if they're a remote user and we're no longer joined
# to any rooms they're in, remove them from the user directory.
if not self.is_mine_id(user_id):
rooms_user_is_in = await self.store.get_user_dir_rooms_user_is_in(user_id)
if len(rooms_user_is_in) == 0:
logger.debug("Removing user %r from directory", user_id)
await self.store.remove_from_user_dir(user_id)
async def _handle_possible_remote_profile_change(
self,
user_id: str,
room_id: str,
prev_event_id: Optional[str],
event_id: Optional[str],
) -> None:
"""Check member event changes for any profile changes and update the
database if there are. This is intended for remote users only. The caller
is responsible for checking that the given user is remote.
"""
if not prev_event_id or not event_id:
return
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
event = await self.store.get_event(event_id, allow_none=True)
if not prev_event or not event:
return
if event.membership != Membership.JOIN:
return
prev_name = prev_event.content.get("displayname")
new_name = event.content.get("displayname")
# If the new name is an unexpected form, do not update the directory.
if not isinstance(new_name, str):
new_name = prev_name
prev_avatar = prev_event.content.get("avatar_url")
new_avatar = event.content.get("avatar_url")
# If the new avatar is an unexpected form, do not update the directory.
if not isinstance(new_avatar, str):
new_avatar = prev_avatar
if prev_name != new_name or prev_avatar != new_avatar:
await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
|
the-stack_106_16284
|
from contextlib import contextmanager
from functools import partial
import torch
import numpy as np
from torchvision.transforms import Resize
from PIL import Image
import gym
from utils.general_utils import ParamDict, AttrDict
from utils.pytorch_utils import ar2ten, ten2ar
class BaseEnvironment(gym.core.Env):
"""Implements basic environment interface."""
# TODO add frame skip interface
@contextmanager
def val_mode(self):
"""Sets validation parameters if desired. To be used like: with env.val_mode(): ...<do something>..."""
pass; yield; pass
def _default_hparams(self):
default_dict = ParamDict({
'device': None, # device that all tensors should get transferred to
'screen_width': 400, # width of rendered images
'screen_height': 400, # height of rendered images
})
return default_dict
def reset(self):
"""Resets all internal variables of the environment."""
raise NotImplementedError
def step(self, action):
"""Performs one environment step. Returns dict <next observation, reward, done, info>."""
raise NotImplementedError
def render(self, mode='rgb_array'):
"""Renders current environment state. Mode {'rgb_array', 'none'}."""
raise NotImplementedError
def _wrap_observation(self, obs):
"""Process raw observation from the environment before return."""
return np.asarray(obs, dtype=np.float32)
@property
def agent_params(self):
"""Parameters for agent that can be handed over after env is constructed."""
return AttrDict()
class GymEnv(BaseEnvironment):
"""Wrapper around openai/gym environments."""
def __init__(self):
self._hp = self._default_hparams()
self._env = self._make_env(self._hp.name)
print ('Making env in envs/gym/env.py')
from mujoco_py.builder import MujocoException
self._mj_except = MujocoException
def _default_hparams(self):
default_dict = ParamDict({
'name': None, # name of openai/gym environment
'reward_norm': 1., # reward normalization factor
'punish_reward': -100, # reward used when action leads to simulation crash
'unwrap_time': True, # removes time limit wrapper from envs so that done is not set on timeout
})
return super()._default_hparams().overwrite(default_dict)
def reset(self):
obs = self._env.reset()
return self._wrap_observation(obs)
def step(self, action):
if isinstance(action, torch.Tensor): action = ten2ar(action)
try:
obs, reward, done, info = self._env.step(action)
reward = reward / self._hp.reward_norm
except self._mj_except:
# this can happen when agent drives simulation to unstable region (e.g. very fast speeds)
print("Catch env exception!")
obs = self.reset()
reward = self._hp.punish_reward # this avoids that the agent is going to these states again
done = np.array(True) # terminate episode (observation will get overwritten by env reset)
info = {}
return self._wrap_observation(obs), reward, np.array(done), info
def render(self, mode='rgb_array'):
# TODO make env render in the correct size instead of downsizing after for performance
img = Resize((self._hp.screen_height, self._hp.screen_width))(Image.fromarray(self._render_raw(mode=mode)))
return np.array(img) / 255.
def _make_env(self, id):
"""Instantiates the environment given the ID."""
import gym
from gym import wrappers
env = gym.make(id)
if isinstance(env, wrappers.TimeLimit) and self._hp.unwrap_time:
# unwraps env to avoid this bug: https://github.com/openai/gym/issues/1230
env = env.env
return env
def get_episode_info(self):
"""Allows to return logging info about latest episode (sindce last reset)."""
if hasattr(self._env, "get_episode_info"):
return self._env.get_episode_info()
return AttrDict()
def _render_raw(self, mode):
"""Returns rendering as uint8 in range [0...255]"""
return self._env.render(mode=mode)
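# Usage sketch (illustrative; assumes the 'name' hyperparameter is overridden with a
# registered gym id via the ParamDict override machinery, which is not shown in this
# file):
#   env = GymEnv()                     # _hp.name must name a registered gym env
#   obs = env.reset()
#   obs, reward, done, info = env.step(env._env.action_space.sample())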
|
the-stack_106_16286
|
"""LMM testing code"""
import unittest
import scipy as SP
import pdb
import limix.deprecated as dlimix
from .covar import Acovar_test
class CCovSqexpARD_test(unittest.TestCase,Acovar_test):
"""test class for CCovSqexpARD"""
def setUp(self):
SP.random.seed(1)
self.n=10
self.n_dim=10
X=SP.rand(self.n,self.n_dim)
self.C = dlimix.CCovSqexpARD(self.n_dim)
self.name = 'CCovSqexpARD'
self.C.setX(X)
K = self.C.K()
self.n_params=self.C.getNumberParams()
params=SP.exp(SP.randn(self.n_params))
self.C.setParams(params)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_16287
|
from semantic_aware_models.dataset.movielens.movielens_data_model import ItemUnstructuredDataModel
from semantic_aware_models.dataset.movielens.movielens_data_model import ItemStructuredDataModel
import torch
class DeepCBRSDataModel:
def __init__(self):
pass
# Reads the text descriptions associated to each item from a given path.
# Parameters:
# train_path: folder in which item descriptions are stored
# Output:
# dictionary structure which contains the following data:
# items: item descriptions
# item2pos: dictionary which maps item ids to position in the dataset
# pos2item: dictionary which maps position in the dataset to item ids
# token2id: dictionary which maps tokens to word identifiers
# max_item_len: maximum number of words in a text description
def read_items_data(self, train_path):
items = dict()
item2pos = dict()
pos2item = dict()
token2id = dict()
num_items = 1
num_tokens = 1
max_item_len = 0
items_unstructured_data_model = ItemUnstructuredDataModel(train_path, separator='::')
item_ids = items_unstructured_data_model.get_item_ids()
descriptions = items_unstructured_data_model.get_description()
for idx, description in enumerate(descriptions):
item_id = item_ids[idx]
item2pos[item_id] = num_items
pos2item[num_items] = item_id
words = description.split(' ')
# print('words: ', words)
item_words = list()
for word in words:
# print('word: ', word)
if word not in token2id:
token2id[word] = num_tokens
num_tokens += 1
item_words.append(token2id[word])
# print('item_words: ', item_words)
len_words = len(item_words)
if len_words > max_item_len:
max_item_len = len_words
items[item_id] = item_words
num_items += 1
return {'items': items, 'item2pos': item2pos, 'pos2item': pos2item, 'token2id': token2id, 'max_item_len': max_item_len}
# Pads item description according to the maximum number of tokens in the item descriptions.
def pad_items_data(self, items_data):
data = torch.zeros(int(len(items_data['items']) + 1), int(items_data['max_item_len']), dtype=torch.int64) # :zero()
output = dict()
for item_id, tokens in items_data['items'].items():
for i, token in enumerate(tokens):
data[items_data['item2pos'][item_id]][i] = token
output[item_id]= data[items_data['item2pos'][item_id]]
return output
# Loads genres metadata associated to each item.
# Parameters:
# - genres_filename: name of the file containing genres information for each item in JSON format
# - item2pos: maps item ids to item position in the dataset
def load_items_genres(self, genres_filename, item2pos):
genre2id = dict()
id2genre = dict()
genres = dict()
item2pos = dict()
pos2item = dict()
num_genres = 1
max_num_genres = 0
items_structured_data_model = ItemStructuredDataModel(genres_filename, separator='::')
item_ids = items_structured_data_model.get_item_ids()
data = items_structured_data_model.get_genres()
# print('data: ', data)
for idx, item_genres_str in enumerate(data):
item_genres = str(item_genres_str).split('|')
item_id = item_ids[idx]
item2pos[item_id] = idx+1
pos2item[idx+1] = item_id
if item_id:
item = int(item_id)
item_mapped_genres = list()
len_item_genres = len(item_genres)
if len_item_genres > max_num_genres:
max_num_genres = len_item_genres
for item_genre in item_genres:
if item_genre not in genre2id:
genre2id[item_genre] = num_genres
id2genre[num_genres] = item_genre
num_genres += 1
item_mapped_genres.append(genre2id[item_genre])
genres[item] = item_mapped_genres
return {'genres': genres, 'genre2id': genre2id, 'id2genre': id2genre, 'pos2item': pos2item, 'item2pos': item2pos, 'max_num_genres': max_num_genres}
# Pads item genres according to the maximum number of genres associated to each item
def pad_genres_data(self, genres_data):
non_retrieved_genres = 3
data = torch.zeros(len(genres_data['genres']) + non_retrieved_genres, genres_data['max_num_genres'], dtype=torch.int64) # :zero()
output = dict()
# print('genres_data[genres]: ', genres_data['genres'])
for item_pos, genres in genres_data['genres'].items():
# print('genres: ', genres)
for i, genre in enumerate(genres):
data[genres_data['item2pos'][item_pos]][i] = genre
output[item_pos] = data[genres_data['item2pos'][item_pos]]
return output
# Loads authors metadata associated to each item.
# Parameters:
# - authors_filename: name of the file containing authors information for each item in JSON format
# - item2pos: maps item ids to item position in the dataset
def load_items_authors(self, authors_filename, item2pos):
author2id = dict()
id2author = dict()
authors = dict()
item2pos = dict()
pos2item = dict()
num_authors = 1
max_num_authors = 0
items_structured_data_model = ItemStructuredDataModel(authors_filename, separator='::')
item_ids = items_structured_data_model.get_item_ids()
data = items_structured_data_model.get_starring()
#print('data: ', data)
for idx, item_authors_str in enumerate(data):
item_authors = str(item_authors_str).split('|')
item_id = item_ids[idx]
item2pos[item_id] = idx + 1
pos2item[idx + 1] = item_id
if item_id:
item = int(item_id)
item_mapped_authors = list()
len_item_authors = len(item_authors)
if len_item_authors > max_num_authors:
max_num_authors = len_item_authors
for item_author in item_authors:
if item_author not in author2id:
author2id[item_author] = num_authors
id2author[num_authors] = item_author
num_authors += 1
item_mapped_authors.append(author2id[item_author])
authors[item] = item_mapped_authors
return {'authors': authors, 'author2id': author2id, 'id2author': id2author, 'pos2item': pos2item, 'item2pos': item2pos, 'max_num_authors': max_num_authors}
# Pads item authors according to the maximum number of authors associated to each item
def pad_authors_data(self, authors_data):
non_retrieved_authors = 359
data = torch.zeros(len(authors_data['authors']) + non_retrieved_authors, authors_data['max_num_authors'], dtype=torch.int64) # :zero()
output = dict()
for item_pos, authors in authors_data['authors'].items():
for i, author in enumerate(authors):
data[authors_data['item2pos'][item_pos]][i] = author
output[item_pos] = data[authors_data['item2pos'][item_pos]]
return output
# Loads directors metadata associated to each item.
# Parameters:
# - directors_filename: name of the file containing directors information for each item in JSON format
# - item2pos: maps item ids to item position in the dataset
def load_items_directors(self, directors_filename, item2pos):
director2id = dict()
id2director = dict()
directors = dict()
item2pos = dict()
pos2item = dict()
num_directors = 1
max_num_directors = 0
items_structured_data_model = ItemStructuredDataModel(directors_filename, separator='::')
item_ids = items_structured_data_model.get_item_ids()
data = items_structured_data_model.get_director()
#print('data: ', data)
for idx, item_directors_str in enumerate(data):
item_directors = str(item_directors_str).split('|')
item_id = item_ids[idx]
item2pos[item_id] = idx + 1
pos2item[idx + 1] = item_id
if item_id:
item = int(item_id)
item_mapped_directors = list()
len_item_directors = len(item_directors)
if len_item_directors > max_num_directors:
max_num_directors = len_item_directors
for item_director in item_directors:
if item_director not in director2id:
director2id[item_director] = num_directors
id2director[num_directors] = item_director
num_directors += 1
item_mapped_directors.append(director2id[item_director])
directors[item] = item_mapped_directors
return {'directors': directors, 'director2id': director2id, 'id2director': id2director, 'pos2item': pos2item, 'item2pos': item2pos, 'max_num_directors': max_num_directors}
# Pads item directors according to the maximum number of directors associated to each item
def pad_directors_data(self, directors_data):
non_retrieved_directors = 359
data = torch.zeros(len(directors_data['directors']) + non_retrieved_directors, directors_data['max_num_directors'], dtype=torch.int64) # :zero()
output=dict()
for item_pos, directors in directors_data["directors"].items():
for i, director in enumerate(directors):
data[directors_data['item2pos'][item_pos]][i] = director
output[item_pos] = data[directors_data['item2pos'][item_pos]]
return output
# Loads wiki categories metadata associated to each item.
# Parameters:
# - wiki_categories_filename: name of the file containing wiki categories information for each item in JSON format
# - item2pos: maps item ids to item position in the dataset
def load_items_wiki_categories(self, wiki_categories_filename, item2pos):
wiki_category2id = dict()
id2wiki_category = dict()
wiki_categories = dict()
item2pos = dict()
pos2item = dict()
num_wiki_categories = 1
max_num_wiki_categories = 0
items_structured_data_model = ItemStructuredDataModel(wiki_categories_filename, separator='::')
item_ids = items_structured_data_model.get_item_ids()
data = items_structured_data_model.get_subject()
#print('data: ', data)
for idx, item_wiki_categories_str in enumerate(data):
item_wiki_categories = str(item_wiki_categories_str).split('|')
item_id = item_ids[idx]
item2pos[item_id] = idx + 1
pos2item[idx + 1] = item_id
if item_id:
item = int(item_id)
item_mapped_wiki_categories =list()
len_item_wiki_categories = len(item_wiki_categories)
if len_item_wiki_categories > max_num_wiki_categories:
max_num_wiki_categories = len_item_wiki_categories
for item_wiki_category in item_wiki_categories:
if item_wiki_category not in wiki_category2id:
wiki_category2id[item_wiki_category] = num_wiki_categories
id2wiki_category[num_wiki_categories] = item_wiki_category
num_wiki_categories += 1
item_mapped_wiki_categories.append(wiki_category2id[item_wiki_category])
wiki_categories[item] = item_mapped_wiki_categories
return {'wiki_categories': wiki_categories, 'wiki_category2id': wiki_category2id, 'id2wiki_category': id2wiki_category, 'pos2item': pos2item, 'item2pos': item2pos,
'max_num_wiki_categories': max_num_wiki_categories}
# Pads item wiki categories according to the maximum number of wiki categories associated to each item
def pad_wiki_categories_data(self, wiki_categories_data):
non_retrieved_wiki_categories = 359
data = torch.zeros(len(wiki_categories_data['wiki_categories']) + non_retrieved_wiki_categories, wiki_categories_data['max_num_wiki_categories'], dtype=torch.int64) # :zero()
output = dict()
for item_pos, wiki_categories in wiki_categories_data['wiki_categories'].items():
for i, wiki_category in enumerate(wiki_categories):
data[wiki_categories_data['item2pos'][item_pos]][i] = wiki_category
output[item_pos] = data[wiki_categories_data['item2pos'][item_pos]]
return output
# Loads properties metadata associated to each item.
# Parameters:
# - properties_filename: name of the file containing properties information for each item in JSON format
# - item2pos: maps item ids to item position in the dataset
def load_items_properties(self, properties_filename, item2pos):
property2id = dict()
id2property = dict()
properties = dict()
item2pos = dict()
pos2item = dict()
num_properties = 1
max_num_properties = 0
items_structured_data_model = ItemStructuredDataModel(properties_filename, separator='::')
data = list() # TODO ¿?
item_ids = items_structured_data_model.get_item_ids()
for idx, item_properties_str in enumerate(data):
item_properties = str(item_properties_str).split('|')
item_id = item_ids[idx]
if item_id:
item = int(item_id)
item_mapped_properties = list()
item2pos[item_id] = idx + 1
pos2item[idx + 1] = item_id
len_item_properties = len(item_properties)
if len_item_properties > max_num_properties:
max_num_properties = len_item_properties
for item_property in item_properties:
if item_property not in property2id:
property2id[item_property] = num_properties
id2property[num_properties] = item_property
num_properties += 1
item_mapped_properties.append(property2id[item_property])
properties[item] = item_mapped_properties
return {'properties': properties,'property2id': property2id,'id2property': id2property, 'pos2item': pos2item, 'item2pos': item2pos, 'max_num_properties': max_num_properties}
# Pads item properties according to the maximum number of properties associated to each item:
def pad_properties_data(self, properties_data):
non_retrieved_properties = 359
data = torch.zeros(len(properties_data['properties']) + non_retrieved_properties, properties_data['max_num_properties'], dtype=torch.int64) # :zero()
output=dict()
for item_pos, properties in properties_data['properties'].items():
for i, property in enumerate(properties):
data[properties_data['item2pos'][item_pos]][i] = property
output[item_pos] = data[properties_data['item2pos'][item_pos]]
return output
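# Hedged usage sketch (not part of the original class): reading and padding item
# descriptions and genres with DeepCBRSDataModel. The MovieLens-style file paths below
# are assumptions for illustration only.
if __name__ == '__main__':
    data_model = DeepCBRSDataModel()
    items_data = data_model.read_items_data('data/movies_descriptions.dat')
    padded_items = data_model.pad_items_data(items_data)
    genres_data = data_model.load_items_genres('data/movies.dat', items_data['item2pos'])
    padded_genres = data_model.pad_genres_data(genres_data)
    print(len(padded_items), items_data['max_item_len'], genres_data['max_num_genres'])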
|
the-stack_106_16288
|
from __future__ import division
from pymer4.utils import con2R, R2con, get_resource_path, result_to_table
import pandas as pd
import numpy as np
from pymer4.models import Lm
import os
def test_con2R():
x = np.array([[-1, 0, 0, 1], [-0.5, -0.5, 0.5, 0.5], [-3 / 3, 1 / 3, 1 / 3, 1 / 3]])
out = con2R(x)
assert out.shape == (4, 3)
names = ["1 v s4", "1+2 vs 3+4", "1 vs 2+3+4"]
out = con2R(x, names=names)
assert isinstance(out, pd.DataFrame)
assert [x == y for x, y in zip(out.columns, names)]
assert out.shape == (4, 3)
out = con2R(np.array([-1, 0, 1]))
assert np.allclose(
out, np.array([[-0.5, 0.40824829], [0.0, -0.81649658], [0.5, 0.40824829]])
)
def test_result_to_table():
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
model = Lm("DV ~ IV1 + IV3", data=df)
model.fit(summarize=False)
formatted = result_to_table(model, drop_intercept=False)
assert isinstance(formatted, pd.DataFrame)
assert formatted.shape == (3, 6)
assert set(["Predictor", "b", "ci", "t", "df", "p"]) == set(formatted.columns)
assert formatted.iloc[0, -1] == "< .001"
formatted = result_to_table(model, drop_intercept=True)
assert isinstance(formatted, pd.DataFrame)
assert formatted.shape == (2, 6)
|
the-stack_106_16290
|
import numpy as np
from random import sample, shuffle, randint
'''
create batches
'''
def create_batches(data_):
qr = list(zip(data_['q'], data_['r'], data_['respect']))
batches = {}
for qi,ri,respecti in qr:
lqi, lri = len(qi), len(ri)
if (lqi,lri) in batches:
batchi = batches[(lqi,lri)]
else:
batchi = []
batchi += [(qi, ri, respecti)]
batches[(lqi,lri)] = batchi
return [ batches[k] for k in batches ]
'''
split data into train (80%), test (20%)
'''
def split_dataset(batches, ratio = [0.8, 0.2] ):
nbatches = len(batches)
num_train = int(ratio[0]*nbatches)
# shuffle batches
shuffle(batches) # why do i even bother to write comments!
trainset = batches[:num_train]
testset = batches[num_train:]
return trainset, testset
'''
generate batches, by random sampling a bunch of items
yield (x_gen, y_gen)
'''
def rand_batch_gen(dataset):
while True: # use try catch here; just repeat idx=.. and batch=...
# why do you us try-catch? clip the idx value based on the data at hand. can't we do that?
# i did that.. but not working; weird exception - out of bounds; you try fixing it then!
# shall I run this once?sure
idx = randint(0, len(dataset)-1) # choose a random batch id
batch = dataset[idx] # fetch the batch
bx = [bi[0] for bi in batch]
by = [bi[1] for bi in batch]
br = [bi[2] for bi in batch]
yield ( np.array(bx, dtype=np.int32).reshape([len(bx), len(bx[0])]),
np.array(by, dtype=np.int32).reshape([len(by), len(by[0])]),
np.array(br, dtype=np.int32) )
'''
a generic decode function
inputs : sequence, lookup
'''
def decode(sequence, lookup, separator=''): # 0 used for padding, is ignored
return separator.join([ lookup[element] for element in sequence if element ])
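# Hedged usage sketch (not part of the original module): wiring the helpers above
# together. The toy data_ dict below is an assumption that mimics the expected
# {'q': ..., 'r': ..., 'respect': ...} structure of padded id sequences.
if __name__ == '__main__':
    data_ = {
        'q': [[1, 2, 3], [4, 5, 6], [7, 8]],
        'r': [[9, 10], [11, 12], [13, 14, 15]],
        'respect': [0, 1, 0],
    }
    batches = create_batches(data_)
    trainset, testset = split_dataset(batches)
    gen = rand_batch_gen(trainset if trainset else batches)
    bx, by, br = next(gen)
    print(bx.shape, by.shape, br.shape)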
|
the-stack_106_16291
|
"""
HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21).
Based on wsgiref.simple_server which is part of the standard library since 2.5.
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE!
"""
import logging
import socket
import socketserver
import sys
from wsgiref import simple_server
from django.core.exceptions import ImproperlyConfigured
from django.core.handlers.wsgi import LimitedStream
from django.core.wsgi import get_wsgi_application
from django.utils.module_loading import import_string
__all__ = ('WSGIServer', 'WSGIRequestHandler')
logger = logging.getLogger('django.server')
def get_internal_wsgi_application():
"""
Load and return the WSGI application as configured by the user in
``settings.WSGI_APPLICATION``. With the default ``startproject`` layout,
this will be the ``application`` object in ``projectname/wsgi.py``.
This function, and the ``WSGI_APPLICATION`` setting itself, are only useful
for Django's internal server (runserver); external WSGI servers should just
be configured to point to the correct application object directly.
If settings.WSGI_APPLICATION is not set (is ``None``), return
whatever ``django.core.wsgi.get_wsgi_application`` returns.
"""
from django.conf import settings
app_path = getattr(settings, 'WSGI_APPLICATION')
if app_path is None:
return get_wsgi_application()
try:
return import_string(app_path)
except ImportError as err:
raise ImproperlyConfigured(
"WSGI application '%s' could not be loaded; "
"Error importing module." % app_path
) from err
def is_broken_pipe_error():
exc_type, _, _ = sys.exc_info()
return issubclass(exc_type, (
BrokenPipeError,
ConnectionAbortedError,
ConnectionResetError,
))
class WSGIServer(simple_server.WSGIServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
request_queue_size = 10
def __init__(self, *args, ipv6=False, allow_reuse_address=True, **kwargs):
if ipv6:
self.address_family = socket.AF_INET6
self.allow_reuse_address = allow_reuse_address
super().__init__(*args, **kwargs)
def handle_error(self, request, client_address):
if is_broken_pipe_error():
logger.info("- Broken pipe from %s\n", client_address)
else:
super().handle_error(request, client_address)
class ThreadedWSGIServer(socketserver.ThreadingMixIn, WSGIServer):
"""A threaded version of the WSGIServer"""
daemon_threads = True
class ServerHandler(simple_server.ServerHandler):
http_version = '1.1'
def __init__(self, stdin, stdout, stderr, environ, **kwargs):
"""
Use a LimitedStream so that unread request data will be ignored at
the end of the request. WSGIRequest uses a LimitedStream but it
shouldn't discard the data since the upstream servers usually do this.
This fix applies only for testserver/runserver.
"""
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
super().__init__(LimitedStream(stdin, content_length), stdout, stderr, environ, **kwargs)
def cleanup_headers(self):
super().cleanup_headers()
# HTTP/1.1 requires support for persistent connections. Send 'close' if
# the content length is unknown to prevent clients from reusing the
# connection.
if 'Content-Length' not in self.headers:
self.headers['Connection'] = 'close'
# Persistent connections require threading server.
elif not isinstance(self.request_handler.server, socketserver.ThreadingMixIn):
self.headers['Connection'] = 'close'
# Mark the connection for closing if it's set as such above or if the
# application sent the header.
if self.headers.get('Connection') == 'close':
self.request_handler.close_connection = True
def close(self):
self.get_stdin()._read_limited()
super().close()
def handle_error(self):
# Ignore broken pipe errors, otherwise pass on
if not is_broken_pipe_error():
super().handle_error()
class WSGIRequestHandler(simple_server.WSGIRequestHandler):
protocol_version = 'HTTP/1.1'
def address_string(self):
# Short-circuit parent method to not call socket.getfqdn
return self.client_address[0]
def log_message(self, format, *args):
extra = {
'request': self.request,
'server_time': self.log_date_time_string(),
}
if args[1][0] == '4':
# 0x16 = Handshake, 0x03 = SSL 3.0 or TLS 1.x
if args[0].startswith('\x16\x03'):
extra['status_code'] = 500
logger.error(
"You're accessing the development server over HTTPS, but "
"it only supports HTTP.\n", extra=extra,
)
return
if args[1].isdigit() and len(args[1]) == 3:
status_code = int(args[1])
extra['status_code'] = status_code
if status_code >= 500:
level = logger.error
elif status_code >= 400:
level = logger.warning
else:
level = logger.info
else:
level = logger.info
level(format, *args, extra=extra)
def get_environ(self):
# Strip all headers with underscores in the name before constructing
# the WSGI environ. This prevents header-spoofing based on ambiguity
# between underscores and dashes both normalized to underscores in WSGI
# env vars. Nginx and Apache 2.4+ both do this as well.
for k in self.headers:
if '_' in k:
del self.headers[k]
return super().get_environ()
def handle(self):
self.close_connection = True
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
try:
self.connection.shutdown(socket.SHUT_WR)
except (AttributeError, OSError):
pass
def handle_one_request(self):
"""Copy of WSGIRequestHandler.handle() but with different ServerHandler"""
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging & connection closing
handler.run(self.server.get_app())
def run(addr, port, wsgi_handler, ipv6=False, threading=False, server_cls=WSGIServer):
server_address = (addr, port)
if threading:
httpd_cls = type('WSGIServer', (socketserver.ThreadingMixIn, server_cls), {})
else:
httpd_cls = server_cls
httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=ipv6)
if threading:
# ThreadingMixIn.daemon_threads indicates how threads will behave on an
# abrupt shutdown; like quitting the server by the user or restarting
# by the auto-reloader. True means the server will not wait for thread
# termination before it quits. This will make auto-reloader faster
# and will prevent the need to kill the server manually if a thread
# isn't terminating correctly.
httpd.daemon_threads = True
httpd.set_app(wsgi_handler)
httpd.serve_forever()
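# Hedged usage sketch (not part of Django itself): starting the development server
# defined above. Assumes a configured settings module; 'mysite.settings' is a
# placeholder.
if __name__ == '__main__':
    import os
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
    import django
    django.setup()
    run('127.0.0.1', 8000, get_internal_wsgi_application(), threading=True)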
|
the-stack_106_16292
|
"""Functions for validating JWT Bearer tokens."""
from connexion.exceptions import Unauthorized
import logging
from typing import (Dict, Iterable, List, Optional)
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from flask import current_app
import jwt
from jwt.exceptions import InvalidKeyError
import requests
from requests.exceptions import ConnectionError
import json
# Get logger instance
logger = logging.getLogger(__name__)
def validate_token(token: str) -> Dict:
"""
Validate JSON Web Token (JWT) Bearer token.
Returns:
Token information.
Raises:
connexion.exceptions.Unauthorized: Raised if JWT could not be
successfully validated.
"""
# Set parameters defined by OpenID Connect specification
# Cf. https://openid.net/specs/openid-connect-discovery-1_0.html
oidc_suffix_config: str = ".well-known/openid-configuration"
oidc_config_claim_userinfo: str = 'userinfo_endpoint'
oidc_config_claim_public_keys: str = 'jwks_uri'
# Fetch security parameters
conf = current_app.config['FOCA'].security.auth
add_key_to_claims: bool = conf.add_key_to_claims
allow_expired: bool = conf.allow_expired
audience: Optional[Iterable[str]] = conf.audience
claim_identity: str = conf.claim_identity
claim_issuer: str = conf.claim_issuer
algorithms: Iterable[str] = conf.algorithms
validation_methods: List[str] = [e.value for e in conf.validation_methods]
validation_checks: str = conf.validation_checks.value
# Ensure that validation methods are configured
if not len(validation_methods):
raise Unauthorized(
"Authentication is enabled, but no JWT validation methods "
"configured"
)
# Decode JWT
try:
claims = jwt.decode(
jwt=token,
verify=False,
algorithms=algorithms,
)
except Exception as e:
raise Unauthorized("JWT could not be decoded") from e
logger.debug(f"Decoded claims: {claims}")
# Verify existence of issuer claim
if claim_issuer not in claims:
raise Unauthorized(
f"Required issuer claim not available: {claim_issuer}"
)
# Get OIDC configuration
url = f"{claims[claim_issuer].rstrip('/')}/{oidc_suffix_config}"
logger.debug(f"Issuer's configuration URL: {url}")
try:
oidc_config = requests.get(url)
oidc_config.raise_for_status()
except Exception as e:
raise Unauthorized(
"Could not fetch issuer's configuration from: {url}"
) from e
# Validate token
passed_any = False
for method in validation_methods:
logger.debug(f"Validating JWT via method: {method}")
try:
if method == 'userinfo':
validate_jwt_userinfo(
token=token,
url=oidc_config.json()[oidc_config_claim_userinfo],
)
if method == 'public_key':
validate_jwt_public_key(
token=token,
url=oidc_config.json()[oidc_config_claim_public_keys],
algorithms=algorithms,
add_key_to_claims=add_key_to_claims,
audience=audience,
allow_expired=allow_expired,
)
except Exception as e:
if validation_checks == 'all':
raise Unauthorized(
"Insufficient number of JWT validation checks passed"
) from e
continue
passed_any = True
if validation_checks == 'any':
break
if not passed_any:
raise Unauthorized("No JWT validation checks passed")
# Verify existence of specified identity claim
if claim_identity not in claims:
raise Unauthorized(
f"Required identity claim '{claim_identity} not available"
)
# Log result
logger.debug(f"Access granted to user: {claims[claim_identity]}")
# Return token info
return {
'jwt': token,
'claims': claims,
'user_id': claims[claim_identity],
'scope': claims.get('scope', ""),
}
def validate_jwt_userinfo(
token: str,
url: str,
header_name: str = 'Authorization',
prefix: str = 'Bearer',
) -> None:
"""Validate JSON Web Token (JWT) via an OpenID Connect-compliant
identity provider's user info endpoint.
Args:
token: JSON Web Token (JWT).
url: URL to OpenID Connect identity provider's user info endpoint.
header_name: Name of the request header field at which the service is
expecting the JWT. Cf. `prefix`.
prefix: Prefix that the app expects to precede the JWT, separated
by whitespace. Together, prefix and JWT constitute the value of
the request header field specified by `--header-name`.
Raises:
requests.exceptions.ConnectionError: Raised if the identity provider's
user info or configuration endpoints could not be reached.
"""
logger.debug(f"Issuer's user info endpoint URL: {url}")
headers = {f"{header_name}": f"{prefix} {token}"}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except Exception as e:
raise ConnectionError(f"Could not connect to endpoint '{url}'") from e
logger.debug("Validation via user info endpoint succeeded")
def validate_jwt_public_key(
token: str,
url: str,
algorithms: Iterable[str] = ['RS256'],
add_key_to_claims: bool = True,
audience: Optional[Iterable[str]] = None,
allow_expired: bool = False,
claim_key_id: str = 'kid',
) -> None:
"""Validate JSON Web Token (JWT) via an OpenID Connect-compliant
identity provider's public key.
Args:
token: JSON Web Token (JWT).
url: URL to OpenID Connect identity provider's public keys endpoint.
algorithms: Lists the JWT-signing algorithms supported by the app.
add_key_to_claims: Whether to allow the application to add the identity
provider's corresponding JSON Web Key (JWK), in PEM format, to the
dictionary of claims when handling requests to
`@jwt_validation`-decorated endpoints.
audience: List of audiences that the app identifies itself with. If
specified, JSON Web Tokens (JWT) that do not contain any of the
specified audiences are rejected. Set to `None` to disable audience
validation.
allow_expired: Allow/disallow expired JSON Web Tokens (JWT).
claim_key_id: The JSON Web Token (JWT) claim used to specify the
identifier of the JSON Web Key (JWK) used to issue that token.
Returns:
None. The token is validated in place; an exception is raised if
validation fails.
Raises:
KeyError: Raised if the JSON Web Key (JWK) identifier used was not
found among the issuer's public JWK set.
Unauthorized: Raised if token could not be decoded.
"""
logger.debug(f"Issuer's JWK set endpoint URL: {url}")
# Obtain identity provider's public keys
public_keys = get_public_keys(
url=url,
pem=False,
claim_key_id=claim_key_id,
)
# Extract JWT header claims, if available
try:
header_claims = jwt.get_unverified_header(token)
logger.debug(f"Decoded header claims: {header_claims}")
except Exception:
logger.debug("Could not extract JWT header claims")
header_claims = {}
# Set used JWK identifier, if available
try:
jwk_id = header_claims[claim_key_id]
except KeyError:
logger.debug("JWT key ID not specified, trying all available JWKs")
jwk_id = False
# Verify that used JWK exists and remove all other JWKs
if jwk_id:
try:
public_keys = {jwk_id: public_keys[jwk_id]}
except KeyError:
raise KeyError("JWT key ID not found among issuer's JWKs")
# Set validations
validation_options = {}
if audience is None:
validation_options['verify_aud'] = False
if allow_expired:
validation_options['verify_exp'] = False
# Try public keys one after the other
used_key = {}
claims = {}
for key in public_keys.values():
used_key = key
# Decode JWT and validate via public key
try:
claims = jwt.decode(
jwt=token,
verify=True,
key=key,
algorithms=algorithms,
audience=audience,
options=validation_options,
)
# Wrong or faulty key was used; try next one
except InvalidKeyError as e:
logger.debug(
"JWT could not be decoded with current JWK '{key}': "
f"{type(e).__name__}: {e}"
)
# Key seems okay but token is invalid
except Exception as e:
raise Unauthorized("JWT could not be validated") from e
# Do not try other keys if token was decoded
if claims:
break
# Verify that token was decoded
if not claims:
raise Unauthorized("JWT could not be validated with issuer's JWKs")
# Add public key to claims
if add_key_to_claims:
claims['public_key'] = used_key
# Log success and return claims
logger.debug("Validation via issuer's public keys succeeded")
def get_public_keys(
url: str,
pem: bool = False,
claim_key_id: str = 'kid',
claim_keys: str = 'keys',
) -> Dict[str, RSAPublicKey]:
"""Obtain the identity provider's public JSON Web Key (JWK) set.
Args:
url: Endpoint providing the identity provider's JSON Web Key (JWK) set.
pem: Whether public JSON Web Keys (JWK) shall be returned in
Privacy-Enhanced Mail (PEM) format rather than as JSON dumps.
claim_key_id: The JWT claim encoding a JSON Web Key (JWK) identifier.
claim_keys: The claim under which the JSON Web Key (JWK) set is listed in the provider's response.
Returns:
JSON Web Key (JWK) public keys mapped to their identifiers.
Raises:
requests.exceptions.ConnectionError: Raised if the identity provider's
JWK set or configuration could not be reached.
"""
# Get JWK sets from identity provider
try:
response = requests.get(url)
response.raise_for_status()
except Exception as e:
raise ConnectionError(f"Could not connect to endpoint '{url}'") from e
# Iterate over JWK set and store public keys in dictionary
public_keys = {}
for jwk in response.json().get(claim_keys, []):
try:
key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(jwk))
# Ensure key is public
if not isinstance(key, RSAPublicKey):
logger.warning(f"JSON Web Key '{jwk}' is not public.")
continue
# Convert to PEM if requested
if pem:
key = key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
).decode('utf-8').encode('unicode_escape').decode('utf-8')
public_keys[jwk[claim_key_id]] = key
except Exception as e:
logger.warning(
f"JSON Web Key '{jwk}' could not be processed: "
f"{type(e).__name__}: {e}"
)
# Return dictionary of public keys
return public_keys
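# Hedged usage sketch (not part of the original module): fetching an issuer's public
# JWKs outside of the Flask/FOCA app context. The issuer URL below is a placeholder
# and requires network access to a real OpenID Connect provider.
if __name__ == '__main__':
    issuer = 'https://login.example.org'  # placeholder issuer
    config = requests.get(
        f"{issuer.rstrip('/')}/.well-known/openid-configuration"
    ).json()
    keys = get_public_keys(url=config['jwks_uri'], pem=True)
    for key_id, pem_key in keys.items():
        print(key_id, pem_key[:40], '...')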
|
the-stack_106_16293
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# KEA and SKIPJACK Algorithms in CMS
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc2876.txt
#
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5751
id_fortezzaConfidentialityAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.4')
id_fortezzaWrap80 = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.23')
id_kEAKeyEncryptionAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.24')
id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22')
class Skipjack_Parm(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('initialization-vector', univ.OctetString())
)
# Update the Algorithm Identifier map in rfc5280.py.
_algorithmIdentifierMapUpdate = {
id_fortezzaConfidentialityAlgorithm: Skipjack_Parm(),
id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(),
}
rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
# Update the SMIMECapabilities Attribute map in rfc5751.py
_smimeCapabilityMapUpdate = {
id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(),
}
rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
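# Hedged usage sketch (not part of the original RFC 2876 module): DER-encode and
# decode the Skipjack parameter structure defined above with pyasn1's codec.
if __name__ == '__main__':
    from pyasn1.codec.der import decoder, encoder
    parm = Skipjack_Parm()
    parm['initialization-vector'] = univ.OctetString(hexValue='0001020304050607')
    substrate = encoder.encode(parm)
    decoded, rest = decoder.decode(substrate, asn1Spec=Skipjack_Parm())
    assert not rest
    assert decoded['initialization-vector'] == parm['initialization-vector']
    print(id_fortezzaConfidentialityAlgorithm, substrate.hex())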
|
the-stack_106_16294
|
from __future__ import print_function, division
import numpy as np
import sys
from pyscf.nao.m_color import color as bc
from pyscf.nao.m_system_vars_dos import system_vars_dos, system_vars_pdos
from pyscf.nao.m_siesta2blanko_csr import _siesta2blanko_csr
from pyscf.nao.m_siesta2blanko_denvec import _siesta2blanko_denvec
from pyscf.nao.m_siesta_ion_add_sp2 import _siesta_ion_add_sp2
from pyscf.nao.m_ao_log import ao_log_c
#
#
#
def get_orb2m(sv):
orb2m = np.empty(sv.norbs, dtype='int64')
orb = 0
for atom,sp in enumerate(sv.atom2sp):
for mu,j in enumerate(sv.sp_mu2j[sp]):
for m in range(-j,j+1): orb2m[orb],orb = m,orb+1
return orb2m
#
#
#
def get_orb2j(sv):
orb2j = np.empty(sv.norbs, dtype='int64')
orb = 0
for atom,sp in enumerate(sv.atom2sp):
for mu,j in enumerate(sv.sp_mu2j[sp]):
for m in range(-j,j+1): orb2j[orb],orb = j,orb+1
return orb2j
#
#
#
def diag_check(sv, atol=1e-5, rtol=1e-4):
from pyscf.nao.m_sv_diag import sv_diag
ksn2e = sv.xml_dict['ksn2e']
ac = True
for k,kvec in enumerate(sv.xml_dict["k2xyzw"]):
for spin in range(sv.nspin):
e,x = sv_diag(sv, kvec=kvec[0:3], spin=spin)
eref = ksn2e[k,spin,:]
acks = np.allclose(eref,e,atol=atol,rtol=rtol)
ac = ac and acks
if(not acks):
aerr = sum(abs(eref-e))/len(e)
print("diag_check: "+bc.RED+str(k)+' '+str(spin)+' '+str(aerr)+bc.ENDC)
return ac
#
#
#
def overlap_check(sv, tol=1e-5, **kvargs):
over = sv.overlap_coo(**kvargs).tocsr()
diff = (sv.hsx.s4_csr-over).sum()
summ = (sv.hsx.s4_csr+over).sum()
ac = diff/summ<tol
if not ac: print(diff, summ)
return ac
#
#
#
class system_vars_c():
def __init__(self):
"""
Constructor of system_vars class: so far can be initialized
with SIESTA orbitals and Hamiltonian and wavefunctions
"""
self.state = 'call an initialize method...'
#
#
#
def init_xyzlike(self, atom, label='pyscf'):
""" This is simple constructor which only initializes geometry info """
from pyscf.lib import logger
from pyscf.data import chemical_symbols
self.verbose = logger.NOTE # To be similar to Mole object...
self.stdout = sys.stdout
self.symmetry = False
self.symmetry_subgroup = None
self.label = label
atom2charge = [atm[0] for atm in atom]
self.atom2coord = np.array([atm[1] for atm in atom])
self.sp2charge = list(set(atom2charge))
self.sp2symbol = [chemical_symbols[z] for z in self.sp2charge]
self.atom2sp = [self.sp2charge.index(charge) for charge in atom2charge]
self.natm=self.natoms=len(self.atom2sp)
self.atom2s = None
self.nspin = 1
self.state = 'should be useful for something'
return self
#
#
#
def init_pyscf_gto(self, gto, label='pyscf', **kvargs):
"""Interpret previous pySCF calculation"""
from pyscf.lib import logger
self.verbose = logger.NOTE # To be similar to Mole object...
self.stdout = sys.stdout
self.symmetry = False
self.symmetry_subgroup = None
self.label = label
self.mol=gto # Only some data must be copied, not the whole object. Otherwise, an eventual deepcopy(...) may fail.
self.natm=self.natoms = gto.natm
a2s = [gto.atom_symbol(ia) for ia in range(gto.natm) ]
self.sp2symbol = sorted(list(set(a2s)))
self.nspecies = len(self.sp2symbol)
self.atom2sp = np.empty((gto.natm), dtype='int64')
for ia,sym in enumerate(a2s): self.atom2sp[ia] = self.sp2symbol.index(sym)
self.sp2charge = [-999]*self.nspecies
for ia,sp in enumerate(self.atom2sp): self.sp2charge[sp]=gto.atom_charge(ia)
self.ao_log = ao_log_c().init_ao_log_gto_suggest_mesh(gto, self, **kvargs)
self.atom2coord = np.zeros((self.natm, 3))
for ia,coord in enumerate(gto.atom_coords()): self.atom2coord[ia,:]=coord # must be in Bohr already?
self.atom2s = np.zeros((self.natm+1), dtype=np.int64)
for atom,sp in enumerate(self.atom2sp): self.atom2s[atom+1]=self.atom2s[atom]+self.ao_log.sp2norbs[sp]
self.norbs = self.norbs_sc = self.atom2s[-1]
self.nspin = 1
self.ucell = 20.0*np.eye(3)
self.atom2mu_s = np.zeros((self.natm+1), dtype=np.int64)
for atom,sp in enumerate(self.atom2sp): self.atom2mu_s[atom+1]=self.atom2mu_s[atom]+self.ao_log.sp2nmult[sp]
self._atom = gto._atom
self.basis = gto.basis
self.init_libnao()
self.state = 'should be useful for something'
return self
#
#
#
def init_ase_atoms(self, Atoms, label=None, **kvargs):
""" Initialise system vars using siesta file and Atom object from ASE."""
from pyscf.nao.m_siesta_xml import siesta_xml
from pyscf.nao.m_siesta_wfsx import siesta_wfsx_c
from pyscf.nao.m_siesta_ion_xml import siesta_ion_xml
from pyscf.nao.m_siesta_hsx import siesta_hsx_c
self.label = 'ase' if label is None else label
self.xml_dict = siesta_xml(self.label)
self.wfsx = siesta_wfsx_c(self.label)
self.hsx = siesta_hsx_c(self.label, **kvargs)
self.norbs_sc = self.wfsx.norbs if self.hsx.orb_sc2orb_uc is None else len(self.hsx.orb_sc2orb_uc)
try:
import ase
except ImportError:
print('no ASE installed: try via siesta.xml')
self.init_siesta_xml(**kvargs)
self.Atoms = Atoms
##### The parameters as fields
self.sp2ion = []
species = []
for sp in Atoms.get_chemical_symbols():
if sp not in species:
species.append(sp)
self.sp2ion.append(siesta_ion_xml(sp+'.ion.xml'))
_siesta_ion_add_sp2(self, self.sp2ion)
self.ao_log = ao_log_c().init_ao_log_ion(self.sp2ion)
self.natm=self.natoms= Atoms.get_positions().shape[0]
self.norbs = self.wfsx.norbs
self.nspin = self.wfsx.nspin
self.nkpoints = self.wfsx.nkpoints
strspecie2sp = {}
for sp in range(len(self.wfsx.sp2strspecie)): strspecie2sp[self.wfsx.sp2strspecie[sp]] = sp
self.atom2sp = np.empty((self.natoms), dtype='int64')
for i, sp in enumerate(Atoms.get_chemical_symbols()):
self.atom2sp[i] = strspecie2sp[sp]
self.atom2s = np.zeros((self.natm+1), dtype=np.int64)
for atom,sp in enumerate(self.atom2sp): self.atom2s[atom+1]=self.atom2s[atom]+self.ao_log.sp2norbs[sp]
self.atom2mu_s = np.zeros((self.natm+1), dtype=np.int64)
for atom,sp in enumerate(self.atom2sp): self.atom2mu_s[atom+1]=self.atom2mu_s[atom]+self.ao_log.sp2nmult[sp]
orb2m = get_orb2m(self)
_siesta2blanko_csr(orb2m, self.hsx.s4_csr, self.hsx.orb_sc2orb_uc)
for s in range(self.nspin):
_siesta2blanko_csr(orb2m, self.hsx.spin2h4_csr[s], self.hsx.orb_sc2orb_uc)
for k in range(self.nkpoints):
for s in range(self.nspin):
for n in range(self.norbs):
_siesta2blanko_denvec(orb2m, self.wfsx.X[k,s,n,:,:])
self.sp2symbol = [str(ion['symbol'].replace(' ', '')) for ion in self.sp2ion]
self.sp2charge = self.ao_log.sp2charge
self.state = 'should be useful for something'
return self
#
#
#
def init_siesta_xml(self, label='siesta', cd='.', **kvargs):
from pyscf.nao.m_siesta_xml import siesta_xml
from pyscf.nao.m_siesta_wfsx import siesta_wfsx_c
from pyscf.nao.m_siesta_ion_xml import siesta_ion_xml
from pyscf.nao.m_siesta_hsx import siesta_hsx_c
from timeit import default_timer as timer
"""
Initialise system var using only the siesta files (siesta.xml in particular is needed)
System variables:
-----------------
label (string): calculation label
chdir (string): calculation directory
xml_dict (dict): information extracted from the xml siesta output, see m_siesta_xml
wfsx: class use to extract the information about wavefunctions, see m_siesta_wfsx
hsx: class to store a sparse representation of hamiltonian and overlap, see m_siesta_hsx
norbs_sc (integer): number of orbital
ucell (array, float): unit cell
sp2ion (list): species to ions, list of the species associated to the information from the ion files, see m_siesta_ion_xml
ao_log: Atomic orbital on an logarithmic grid, see m_ao_log
atom2coord (array, float): array containing the coordinates of each atom.
natm, natoms (integer): number of atoms
norbs (integer): number of orbitals
nspin (integer): number of spin
nkpoints (integer): number of kpoints
fermi_energy (float): Fermi energy
atom2sp (list): atom to species, list associating the atoms to their species number
atom2s: atom -> first atomic orbital in a global orbital counting
atom2mu_s: atom -> first multiplett (radial orbital) in a global counting of radial orbitals
sp2symbol (list): list associating the species to their symbol
sp2charge (list): list associating the species to their charge
state (string): this is an internal information on the current status of the class
"""
self.label = label
self.cd = cd
self.xml_dict = siesta_xml(cd+'/'+self.label+'.xml')
self.wfsx = siesta_wfsx_c(label, cd, **kvargs)
self.hsx = siesta_hsx_c(cd+'/'+self.label+'.HSX', **kvargs)
self.norbs_sc = self.wfsx.norbs if self.hsx.orb_sc2orb_uc is None else len(self.hsx.orb_sc2orb_uc)
self.ucell = self.xml_dict["ucell"]
##### The parameters as fields
self.sp2ion = []
for sp in self.wfsx.sp2strspecie: self.sp2ion.append(siesta_ion_xml(cd+'/'+sp+'.ion.xml'))
_siesta_ion_add_sp2(self, self.sp2ion)
self.ao_log = ao_log_c().init_ao_log_ion(self.sp2ion)
self.atom2coord = self.xml_dict['atom2coord']
self.natm=self.natoms=len(self.xml_dict['atom2sp'])
self.norbs = self.wfsx.norbs
self.nspin = self.wfsx.nspin
self.nkpoints = self.wfsx.nkpoints
self.fermi_energy = self.xml_dict['fermi_energy']
strspecie2sp = {}
# initialise a dictionary with species string as key
# associated to the specie number
for sp,strsp in enumerate(self.wfsx.sp2strspecie): strspecie2sp[strsp] = sp
# list of atoms associated to their species number
self.atom2sp = np.empty((self.natm), dtype=np.int64)
for o,atom in enumerate(self.wfsx.orb2atm):
self.atom2sp[atom-1] = strspecie2sp[self.wfsx.orb2strspecie[o]]
self.atom2s = np.zeros((self.natm+1), dtype=np.int64)
for atom,sp in enumerate(self.atom2sp):
self.atom2s[atom+1]=self.atom2s[atom]+self.ao_log.sp2norbs[sp]
# atom2mu_s: atom -> first multiplett (radial orbital) in a global counting of radial orbitals
self.atom2mu_s = np.zeros((self.natm+1), dtype=np.int64)
for atom,sp in enumerate(self.atom2sp):
self.atom2mu_s[atom+1]=self.atom2mu_s[atom]+self.ao_log.sp2nmult[sp]
orb2m = self.get_orb2m()
_siesta2blanko_csr(orb2m, self.hsx.s4_csr, self.hsx.orb_sc2orb_uc)
for s in range(self.nspin):
_siesta2blanko_csr(orb2m, self.hsx.spin2h4_csr[s], self.hsx.orb_sc2orb_uc)
#t1 = timer()
for k in range(self.nkpoints):
for s in range(self.nspin):
for n in range(self.norbs):
_siesta2blanko_denvec(orb2m, self.wfsx.x[k,s,n,:,:])
#t2 = timer(); print(t2-t1, 'rsh wfsx'); t1 = timer()
self.sp2symbol = [str(ion['symbol'].replace(' ', '')) for ion in self.sp2ion]
self.sp2charge = self.ao_log.sp2charge
self.init_libnao()
self.state = 'should be useful for something'
# Trying to be similar to mole object from pySCF
self.nelectron = self.hsx.nelec
self.spin = self.nspin
self.verbose = 1
self.stdout = sys.stdout
self.symmetry = False
self.symmetry_subgroup = None
self._built = True
self.max_memory = 20000
self.incore_anyway = False
self._atom = [(self.sp2symbol[sp], list(self.atom2coord[ia,:])) for ia,sp in enumerate(self.atom2sp)]
return self
def init_gpaw(self, calc, label="gpaw", cd='.', **kvargs):
"""
use the data from a GPAW LCAO calculations as input to
initialize system variables.
Input parameters:
-----------------
calc: GPAW calculator
label (optional, string): label used for the calculations
chdir (optional, string): path to the directory in which the gpaw
data are stored
kvargs (optional, dict): dictionary of optional arguments
We may need a list of optional arguments!
Example:
--------
from ase import Atoms
from gpaw import GPAW
fname = os.path.dirname(os.path.abspath(__file__))+'/h2o.gpw'
if os.path.isfile(fname):
# Import data from a previous gpaw calculations
calc = GPAW(fname, txt=None) # read previous calculation if the file exists
else:
# Run first gpaw to initialize the calculator
from gpaw import PoissonSolver
atoms = Atoms('H2O', positions=[[0.0,-0.757,0.587], [0.0,+0.757,0.587], [0.0,0.0,0.0]])
atoms.center(vacuum=3.5)
convergence = {'density': 1e-7} # Increase accuracy of density for ground state
poissonsolver = PoissonSolver(eps=1e-14, remove_moment=1 + 3) # Increase accuracy of Poisson Solver and apply multipole corrections up to l=1
calc = GPAW(basis='dzp', xc='LDA', h=0.3, nbands=23, convergence=convergence, poissonsolver=poissonsolver, mode='lcao', txt=None) # nbands must be equal to norbs (in this case 23)
atoms.set_calculator(calc)
atoms.get_potential_energy() # Do SCF the ground state
calc.write(fname, mode='all') # write DFT output
from pyscf.nao import system_vars_c
sv = system_vars_c().init_gpaw(calc)
"""
try:
import ase
import gpaw
except:
raise ValueError("ASE and GPAW must be installed for using system_vars_gpaw")
from pyscf.nao.m_system_vars_gpaw import system_vars_gpaw
return system_vars_gpaw(self, calc, label="gpaw", chdir='.', **kvargs)
# More functions for similarity with Mole
def atom_symbol(self, ia): return self.sp2symbol[self.atom2sp[ia]]
def atom_charge(self, ia): return self.sp2charge[self.atom2sp[ia]]
def atom_charges(self): return np.array([self.sp2charge[sp] for sp in self.atom2sp], dtype='int64')
def atom_coord(self, ia): return self.atom2coord[ia,:]
def atom_coords(self): return self.atom2coord
def nao_nr(self): return self.norbs
def atom_nelec_core(self, ia): return self.sp2charge[self.atom2sp[ia]]-self.ao_log.sp2valence[self.atom2sp[ia]]
def intor_symmetric(self, type_str):
""" Uff ... """
if type_str.lower()=='cint1e_ovlp_sph':
mat = self.overlap_coo().todense()
else:
raise RuntimeError('not implemented...')
return mat
# More functions for convenience (see PDoS)
def get_orb2j(self): return get_orb2j(self)
def get_orb2m(self): return get_orb2m(self)
def dos(self, zomegas): return system_vars_dos(self, zomegas)
def pdos(self, zomegas): return system_vars_pdos(self, zomegas)
def overlap_coo(self, **kvargs): # Compute overlap matrix for the given system
from pyscf.nao import overlap_coo
return overlap_coo(self, **kvargs)
def overlap_lil(self, **kvargs): # Compute overlap matrix in list of lists format
from pyscf.nao.m_overlap_lil import overlap_lil
return overlap_lil(self, **kvargs)
def dipole_coo(self, **kvargs): # Compute dipole matrix elements for the given system
from pyscf.nao.m_dipole_coo import dipole_coo
return dipole_coo(self, **kvargs)
def overlap_check(self, tol=1e-5, **kvargs): # Works only after init_siesta_xml(), extend ?
return overlap_check(self, tol=1e-5, **kvargs)
def diag_check(self, atol=1e-5, rtol=1e-4, **kvargs): # Works only after init_siesta_xml(), extend ?
return diag_check(self, atol, rtol, **kvargs)
def vxc_lil(self, dm, xc_code, **kvargs): # Compute exchange-correlation potentials
from pyscf.nao.m_vxc_lil import vxc_lil
return vxc_lil(self, dm, xc_code, deriv=1, **kvargs)
def exc(self, dm, xc_code, **kvargs): # Compute exchange-correlation energies
from pyscf.nao.m_exc import exc
return exc(self, dm, xc_code, **kvargs)
def build_3dgrid(self, level=3):
""" Build a global grid and weights for a molecular integration (integration in 3-dimensional coordinate space) """
from pyscf import dft
from pyscf.nao.m_gauleg import leggauss_ab
grid = dft.gen_grid.Grids(self)
grid.level = level # precision as implemented in pyscf
grid.radi_method=leggauss_ab
atom2rcut=np.zeros(self.natoms)
for ia,sp in enumerate(self.atom2sp): atom2rcut[ia] = self.ao_log.sp2rcut[sp]
grid.build(atom2rcut=atom2rcut)
return grid
def dens_elec(self, coords, dm):
""" Compute electronic density for a given density matrix and on a given set of coordinates """
from pyscf.nao.m_dens_libnao import dens_libnao
from pyscf.nao.m_init_dm_libnao import init_dm_libnao
from pyscf.nao.m_init_dens_libnao import init_dens_libnao
if not self.init_sv_libnao : raise RuntimeError('not self.init_sv_libnao')
if init_dm_libnao(dm) is None : raise RuntimeError('init_dm_libnao(dm) is None')
if init_dens_libnao()!=0 : raise RuntimeError('init_dens_libnao()!=0')
return dens_libnao(coords, self.nspin)
def init_libnao(self, wfsx=None):
""" Initialization of data on libnao site """
from pyscf.nao.m_libnao import libnao
from pyscf.nao.m_sv_chain_data import sv_chain_data
from ctypes import POINTER, c_double, c_int64, c_int32
if wfsx is None:
data = sv_chain_data(self)
# (nkpoints, nspin, norbs, norbs, nreim)
size_x = np.array([1, self.nspin, self.norbs, self.norbs, 1], dtype=np.int32)
libnao.init_sv_libnao.argtypes = (POINTER(c_double), POINTER(c_int64), POINTER(c_int32))
libnao.init_sv_libnao(data.ctypes.data_as(POINTER(c_double)), c_int64(len(data)), size_x.ctypes.data_as(POINTER(c_int32)))
self.init_sv_libnao = True
else:
size_x = np.zeros(len(self.wfsx.x.shape), dtype=np.int32)
for i, sh in enumerate(self.wfsx.x.shape):
size_x[i] = sh
data = sv_chain_data(self)
libnao.init_sv_libnao.argtypes = (POINTER(c_double), POINTER(c_int64), POINTER(c_int32))
libnao.init_sv_libnao(data.ctypes.data_as(POINTER(c_double)), c_int64(len(data)), size_x.ctypes.data_as(POINTER(c_int32)))
self.init_sv_libnao = True
return self
def dens_elec_vec(self, coords, dm):
""" Electronic density: python vectorized version """
from m_dens_elec_vec import dens_elec_vec
return dens_elec_vec(self, coords, dm)
def get_occupations(self, telec=None, ksn2e=None, fermi_energy=None):
""" Compute occupations of electron levels according to Fermi-Dirac distribution """
from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
Telec = self.hsx.telec if telec is None else telec
ksn2E = self.wfsx.ksn2e if ksn2e is None else ksn2e
Fermi = self.fermi_energy if fermi_energy is None else fermi_energy
ksn2fd = fermi_dirac_occupations(Telec, ksn2E, Fermi)
ksn2fd = (3.0-self.nspin)*ksn2fd
return ksn2fd
#
# Example of reading pySCF orbitals.
#
if __name__=="__main__":
from pyscf import gto
from pyscf.nao.m_system_vars import system_vars_c
import matplotlib.pyplot as plt
""" Interpreting small Gaussian calculation """
mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0; Be 1 0 0', basis='ccpvtz') # coordinates in Angstrom!
sv = system_vars_c(gto=mol, tol=1e-8, nr=512, rmin=1e-5)
print(sv.ao_log.sp2norbs)
print(sv.ao_log.sp2nmult)
print(sv.ao_log.sp2rcut)
print(sv.ao_log.sp_mu2rcut)
print(sv.ao_log.nr)
print(sv.ao_log.rr[0:4], sv.ao_log.rr[-1:-5:-1])
print(sv.ao_log.psi_log[0].shape, sv.ao_log.psi_log_rl[0].shape)
sp = 0
for mu,[ff,j] in enumerate(zip(sv.ao_log.psi_log[sp], sv.ao_log.sp_mu2j[sp])):
nc = abs(ff).max()
if j==0 : plt.plot(sv.ao_log.rr, ff/nc, '--', label=str(mu)+' j='+str(j))
if j>0 : plt.plot(sv.ao_log.rr, ff/nc, label=str(mu)+' j='+str(j))
plt.legend()
#plt.xlim(0.0, 10.0)
#plt.show()
|