repo_name | path | copies | size | content | license
---|---|---|---|---|---|
sanjayankur31/nest-simulator | examples/nest/Potjans_2014/spike_analysis.py | 20 | 6437 | # -*- coding: utf-8 -*-
#
# spike_analysis.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Merges spike files, produces raster plots, calculates and plots firing rates
import numpy as np
import glob
import matplotlib.pyplot as plt
import os
import re
datapath = '.'
# get simulation time and numbers of neurons recorded from sim_params.sli
with open(os.path.join(datapath, 'sim_params.sli'), 'r') as f:
sim_params_contents = f.read()
T = float(re.search(r'/t_sim (.+) def', sim_params_contents).group(1))
record_frac = re.search(r'/record_fraction_neurons_spikes (.+) def', sim_params_contents).group(1) == 'true'
if record_frac:
frac_rec = float(re.search(r'/frac_rec_spikes (.+) def', sim_params_contents).group(1))
else:
n_rec = int(re.search(r'/n_rec_spikes (.+) def', sim_params_contents).group(1))
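# NB: the regular expressions above assume SLI parameter definitions of the
# form "/t_sim 1000.0 def" or "/record_fraction_neurons_spikes true def";
# the values quoted in this note are illustrative, not taken from an actual
# sim_params.sli file.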
T_start = 200. # starting point of analysis (to avoid transients)
# load node IDs
node_ids = np.loadtxt(os.path.join(datapath, 'population_nodeIDs.dat'), dtype=int)
print('Global IDs:')
print(node_ids)
print()
# number of populations
num_pops = len(node_ids)
print('Number of populations:')
print(num_pops)
print()
# first node ID in each population
raw_first_node_ids = [node_ids[i][0] for i in np.arange(len(node_ids))]
# population sizes
pop_sizes = [node_ids[i][1] - node_ids[i][0] + 1 for i in np.arange(len(node_ids))]
# numbers of neurons for which spikes were recorded
if record_frac:
rec_sizes = [int(pop_sizes[i] * frac_rec) for i in range(len(pop_sizes))]
else:
rec_sizes = [n_rec] * len(pop_sizes)
# first node ID of each population once device node IDs are dropped
first_node_ids = [int(1 + np.sum(pop_sizes[:i]))
for i in np.arange(len(pop_sizes))]
# last node ID of each population once device node IDs are dropped
last_node_ids = [int(np.sum(pop_sizes[:i + 1]))
for i in np.arange(len(pop_sizes))]
# convert lists to a nicer format, i.e. [[2/3e, 2/3i], []....]
Pop_sizes = [pop_sizes[i:i + 2] for i in range(0, len(pop_sizes), 2)]
print('Population sizes:')
print(Pop_sizes)
print()
Raw_first_node_ids = [raw_first_node_ids[i:i + 2] for i in range(0, len(raw_first_node_ids), 2)]
First_node_ids = [first_node_ids[i:i + 2] for i in range(0, len(first_node_ids), 2)]
Last_node_ids = [last_node_ids[i:i + 2] for i in range(0, len(last_node_ids), 2)]
# total number of neurons in the simulation
num_neurons = last_node_ids[len(last_node_ids) - 1]
print('Total number of neurons:')
print(num_neurons)
print()
# load spikes from gdf files, correct node IDs and merge them in population files,
# and store spike trains
# will contain neuron id resolved spike trains
neuron_spikes = [[] for i in np.arange(num_neurons + 1)]
# container for population-resolved spike data
spike_data = [[[], []], [[], []], [[], []], [[], []], [[], []], [[], []],
[[], []], [[], []]]
counter = 0
for layer in ['0', '1', '2', '3']:
for population in ['0', '1']:
output = os.path.join(datapath,
'population_spikes-{}-{}.gdf'.format(layer,
population))
file_pattern = os.path.join(datapath,
'spikes_{}_{}*'.format(layer, population))
files = glob.glob(file_pattern)
print('Merge ' + str(
len(files)) + ' spike files from L' + layer + 'P' + population)
if files:
merged_file = open(output, 'w')
for f in files:
data = open(f, 'r')
nest_version = next(data)
backend_version = next(data)
column_header = next(data)
for l in data:
a = l.split()
a[0] = int(a[0])
a[1] = float(a[1])
raw_first_node_id = Raw_first_node_ids[int(layer)][int(population)]
first_node_id = First_node_ids[int(layer)][int(population)]
a[0] = a[0] - raw_first_node_id + first_node_id
if (a[1] > T_start): # discard data in the start-up phase
spike_data[counter][0].append(num_neurons - a[0])
spike_data[counter][1].append(a[1] - T_start)
neuron_spikes[a[0]].append(a[1] - T_start)
converted_line = str(a[0]) + '\t' + str(a[1]) + '\n'
merged_file.write(converted_line)
data.close()
merged_file.close()
counter += 1
clrs = ['0', '0.5', '0', '0.5', '0', '0.5', '0', '0.5']
plt.ion()
# raster plot
plt.figure(1)
counter = 1
for j in np.arange(num_pops):
for i in np.arange(first_node_ids[j], first_node_ids[j] + rec_sizes[j]):
plt.plot(neuron_spikes[i],
np.ones_like(neuron_spikes[i]) + sum(rec_sizes) - counter,
'k o', ms=1, mfc=clrs[j], mec=clrs[j])
counter += 1
plt.xlim(0, T - T_start)
plt.ylim(0, sum(rec_sizes))
plt.xlabel(r'time (ms)')
plt.ylabel(r'neuron id')
plt.savefig(os.path.join(datapath, 'rasterplot.png'))
# firing rates
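# Population-averaged rate in spikes/s: total spike count of the recorded
# neurons in a population, divided by (number of recorded neurons * analysis
# window in ms), multiplied by 1e3 to convert from 1/ms to 1/s.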
rates = []
temp = 0
for i in np.arange(num_pops):
for j in np.arange(first_node_ids[i], last_node_ids[i]):
temp += len(neuron_spikes[j])
rates.append(temp / (rec_sizes[i] * (T - T_start)) * 1e3)
temp = 0
print()
print('Firing rates:')
print(rates)
plt.figure(2)
ticks = np.arange(num_pops)
plt.bar(ticks, rates, width=0.9, color='k')
xticklabels = ['L2/3e', 'L2/3i', 'L4e', 'L4i', 'L5e', 'L5i', 'L6e', 'L6i']
plt.setp(plt.gca(), xticks=ticks + 0.5, xticklabels=xticklabels)
plt.xlabel(r'subpopulation')
plt.ylabel(r'firing rate (spikes/s)')
plt.savefig(os.path.join(datapath, 'firing_rates.png'))
plt.show()
| gpl-2.0 |
bartosh/zipline | tests/test_bar_data.py | 3 | 45350 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta, time
from itertools import chain
from nose_parameterized import parameterized
import numpy as np
from numpy import nan
from numpy.testing import assert_almost_equal
import pandas as pd
from toolz import concat
from zipline._protocol import handle_non_market_minutes
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
RESTRICTION_STATES,
)
from zipline.testing import (
MockDailyBarReader,
create_daily_df_for_asset,
create_minute_df_for_asset,
str_to_seconds,
)
from zipline.testing.fixtures import (
WithCreateBarData,
WithDataPortal,
ZiplineTestCase,
)
from zipline.utils.calendars import get_calendar
from zipline.utils.calendars.trading_calendar import days_at_time
OHLC = ["open", "high", "low", "close"]
OHLCP = OHLC + ["price"]
ALL_FIELDS = OHLCP + ["volume", "last_traded"]
# offsets used in test data
field_info = {
"open": 1,
"high": 2,
"low": -1,
"close": 0
}
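# In the synthetic bars exercised by the assertions below, each OHLC field
# equals (minute index + 1) plus the offset above (e.g. at minute index idx
# the close is idx + 1 and the high is idx + 3); this note describes the test
# fixtures, not real market data.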
def str_to_ts(dt_str):
return pd.Timestamp(dt_str, tz='UTC')
class WithBarDataChecks(object):
def assert_same(self, val1, val2):
try:
self.assertEqual(val1, val2)
except AssertionError:
if val1 is pd.NaT:
self.assertTrue(val2 is pd.NaT)
elif np.isnan(val1):
self.assertTrue(np.isnan(val2))
else:
raise
def check_internal_consistency(self, bar_data):
df = bar_data.current([self.ASSET1, self.ASSET2], ALL_FIELDS)
asset1_multi_field = bar_data.current(self.ASSET1, ALL_FIELDS)
asset2_multi_field = bar_data.current(self.ASSET2, ALL_FIELDS)
for field in ALL_FIELDS:
asset1_value = bar_data.current(self.ASSET1, field)
asset2_value = bar_data.current(self.ASSET2, field)
multi_asset_series = bar_data.current(
[self.ASSET1, self.ASSET2], field
)
# make sure all the different query forms are internally
# consistent
self.assert_same(multi_asset_series.loc[self.ASSET1], asset1_value)
self.assert_same(multi_asset_series.loc[self.ASSET2], asset2_value)
self.assert_same(df.loc[self.ASSET1][field], asset1_value)
self.assert_same(df.loc[self.ASSET2][field], asset2_value)
self.assert_same(asset1_multi_field[field], asset1_value)
self.assert_same(asset2_multi_field[field], asset2_value)
# also verify that bar_data doesn't expose anything bad
for field in ["data_portal", "simulation_dt_func", "data_frequency",
"_views", "_universe_func", "_last_calculated_universe",
"_universe_last_updatedat"]:
with self.assertRaises(AttributeError):
getattr(bar_data, field)
class TestMinuteBarData(WithCreateBarData,
WithBarDataChecks,
WithDataPortal,
ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
END_DATE = ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp(
'2016-01-07',
tz='UTC',
)
ASSET_FINDER_EQUITY_SIDS = 1, 2, 3, 4, 5
SPLIT_ASSET_SID = 3
ILLIQUID_SPLIT_ASSET_SID = 4
HILARIOUSLY_ILLIQUID_ASSET_SID = 5
@classmethod
def make_equity_minute_bar_data(cls):
# asset1 has trades every minute
# asset2 has trades every 10 minutes
# split_asset trades every minute
# illiquid_split_asset trades every 10 minutes
for sid in (1, cls.SPLIT_ASSET_SID):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
)
for sid in (2, cls.ILLIQUID_SPLIT_ASSET_SID):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
10,
)
yield cls.HILARIOUSLY_ILLIQUID_ASSET_SID, create_minute_df_for_asset(
cls.trading_calendar,
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
50,
)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
6: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'ICEUS',
},
7: {
'symbol': 'CLK06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'ICEUS',
},
},
orient='index',
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds("2016-01-06"),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds("2016-01-06"),
'ratio': 0.5,
'sid': cls.ILLIQUID_SPLIT_ASSET_SID,
},
])
@classmethod
def init_class_fixtures(cls):
super(TestMinuteBarData, cls).init_class_fixtures()
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.ILLIQUID_SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.ILLIQUID_SPLIT_ASSET_SID,
)
cls.HILARIOUSLY_ILLIQUID_ASSET = cls.asset_finder.retrieve_asset(
cls.HILARIOUSLY_ILLIQUID_ASSET_SID,
)
cls.ASSETS = [cls.ASSET1, cls.ASSET2]
def test_current_session(self):
regular_minutes = self.trading_calendar.minutes_for_sessions_in_range(
self.equity_minute_bar_days[0],
self.equity_minute_bar_days[-1]
)
bts_minutes = days_at_time(
self.equity_minute_bar_days,
time(8, 45),
"US/Eastern"
)
# some other non-market-minute
three_oh_six_am_minutes = days_at_time(
self.equity_minute_bar_days,
time(3, 6),
"US/Eastern"
)
all_minutes = [regular_minutes, bts_minutes, three_oh_six_am_minutes]
for minute in list(concat(all_minutes)):
bar_data = self.create_bardata(lambda: minute)
self.assertEqual(
self.trading_calendar.minute_to_session_label(minute),
bar_data.current_session
)
def test_current_session_minutes(self):
first_day_minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[0]
)
for minute in first_day_minutes:
bar_data = self.create_bardata(lambda: minute)
np.testing.assert_array_equal(
first_day_minutes,
bar_data.current_session_minutes
)
def test_minute_before_assets_trading(self):
# grab minutes that include the day before the asset start
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.previous_session_label(
self.equity_minute_bar_days[0]
)
)
# this entire day is before either asset has started trading
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.check_internal_consistency(bar_data)
self.assertFalse(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.can_trade(self.ASSET2))
self.assertFalse(bar_data.is_stale(self.ASSET1))
self.assertFalse(bar_data.is_stale(self.ASSET2))
for field in ALL_FIELDS:
for asset in self.ASSETS:
asset_value = bar_data.current(asset, field)
if field in OHLCP:
self.assertTrue(np.isnan(asset_value))
elif field == "volume":
self.assertEqual(0, asset_value)
elif field == "last_traded":
self.assertTrue(asset_value is pd.NaT)
def test_regular_minute(self):
minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[0]
)
for idx, minute in enumerate(minutes):
# day2 has prices
# (every minute for asset1, every 10 minutes for asset2)
# asset1:
# opens: 2-391
# high: 3-392
# low: 0-389
# close: 1-390
# volume: 100-3900 (by 100)
# asset2 is the same thing, but with only every 10th minute
# populated.
# this test covers the "IPO morning" case, because asset2 only
# has data starting on the 10th minute.
bar_data = self.create_bardata(
lambda: minute,
)
self.check_internal_consistency(bar_data)
asset2_has_data = (((idx + 1) % 10) == 0)
self.assertTrue(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.is_stale(self.ASSET1))
if idx < 9:
self.assertFalse(bar_data.can_trade(self.ASSET2))
self.assertFalse(bar_data.is_stale(self.ASSET2))
else:
self.assertTrue(bar_data.can_trade(self.ASSET2))
if asset2_has_data:
self.assertFalse(bar_data.is_stale(self.ASSET2))
else:
self.assertTrue(bar_data.is_stale(self.ASSET2))
for field in ALL_FIELDS:
asset1_value = bar_data.current(self.ASSET1, field)
asset2_value = bar_data.current(self.ASSET2, field)
# now check the actual values
if idx == 0 and field == "low":
# first low value is 0, which is interpreted as NaN
self.assertTrue(np.isnan(asset1_value))
else:
if field in OHLC:
self.assertEqual(
idx + 1 + field_info[field],
asset1_value
)
if asset2_has_data:
self.assertEqual(
idx + 1 + field_info[field],
asset2_value
)
else:
self.assertTrue(np.isnan(asset2_value))
elif field == "volume":
self.assertEqual((idx + 1) * 100, asset1_value)
if asset2_has_data:
self.assertEqual((idx + 1) * 100, asset2_value)
else:
self.assertEqual(0, asset2_value)
elif field == "price":
self.assertEqual(idx + 1, asset1_value)
if asset2_has_data:
self.assertEqual(idx + 1, asset2_value)
elif idx < 9:
# no price to forward fill from
self.assertTrue(np.isnan(asset2_value))
else:
# forward-filled price
self.assertEqual((idx // 10) * 10, asset2_value)
elif field == "last_traded":
self.assertEqual(minute, asset1_value)
if idx < 9:
self.assertTrue(asset2_value is pd.NaT)
elif asset2_has_data:
self.assertEqual(minute, asset2_value)
else:
last_traded_minute = minutes[(idx // 10) * 10]
self.assertEqual(
last_traded_minute - timedelta(minutes=1),
asset2_value
)
def test_minute_of_last_day(self):
minutes = self.trading_calendar.minutes_for_session(
self.equity_daily_bar_days[-1],
)
# this is the last day the assets exist
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertTrue(bar_data.can_trade(self.ASSET1))
self.assertTrue(bar_data.can_trade(self.ASSET2))
def test_minute_after_assets_stopped(self):
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.next_session_label(
self.equity_minute_bar_days[-1]
)
)
last_trading_minute = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[-1]
)[-1]
# this entire day is after both assets have stopped trading
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertFalse(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.can_trade(self.ASSET2))
self.assertFalse(bar_data.is_stale(self.ASSET1))
self.assertFalse(bar_data.is_stale(self.ASSET2))
self.check_internal_consistency(bar_data)
for field in ALL_FIELDS:
for asset in self.ASSETS:
asset_value = bar_data.current(asset, field)
if field in OHLCP:
self.assertTrue(np.isnan(asset_value))
elif field == "volume":
self.assertEqual(0, asset_value)
elif field == "last_traded":
self.assertEqual(last_trading_minute, asset_value)
def test_get_value_is_unadjusted(self):
# verify there is a split for SPLIT_ASSET
splits = self.adjustment_reader.get_adjustments_for_sid(
"splits",
self.SPLIT_ASSET.sid
)
self.assertEqual(1, len(splits))
split = splits[0]
self.assertEqual(
split[0],
pd.Timestamp("2016-01-06", tz='UTC')
)
        # ... but that it's not applied when using spot value
minutes = self.trading_calendar.minutes_for_sessions_in_range(
self.equity_minute_bar_days[0],
self.equity_minute_bar_days[1]
)
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertEqual(
idx + 1,
bar_data.current(self.SPLIT_ASSET, "price")
)
def test_get_value_is_adjusted_if_needed(self):
# on cls.days[1], the first 9 minutes of ILLIQUID_SPLIT_ASSET are
# missing. let's get them.
day0_minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[0]
)
day1_minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[1]
)
for idx, minute in enumerate(day0_minutes[-10:-1]):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertEqual(
380,
bar_data.current(self.ILLIQUID_SPLIT_ASSET, "price")
)
bar_data = self.create_bardata(
lambda: day0_minutes[-1],
)
self.assertEqual(
390,
bar_data.current(self.ILLIQUID_SPLIT_ASSET, "price")
)
for idx, minute in enumerate(day1_minutes[0:9]):
bar_data = self.create_bardata(
lambda: minute,
)
# should be half of 390, due to the split
self.assertEqual(
195,
bar_data.current(self.ILLIQUID_SPLIT_ASSET, "price")
)
def test_get_value_at_midnight(self):
# make sure that if we try to get a minute price at a non-market
# minute, we use the previous market close's timestamp
day = self.equity_minute_bar_days[1]
eight_fortyfive_am_eastern = \
pd.Timestamp("{0}-{1}-{2} 8:45".format(
day.year, day.month, day.day),
tz='US/Eastern'
)
bar_data = self.create_bardata(
lambda: day,
)
bar_data2 = self.create_bardata(
lambda: eight_fortyfive_am_eastern,
)
with handle_non_market_minutes(bar_data), \
handle_non_market_minutes(bar_data2):
for bd in [bar_data, bar_data2]:
for field in ["close", "price"]:
self.assertEqual(
390,
bd.current(self.ASSET1, field)
)
# make sure that if the asset didn't trade at the previous
# close, we properly ffill (or not ffill)
self.assertEqual(
350,
bd.current(self.HILARIOUSLY_ILLIQUID_ASSET, "price")
)
self.assertTrue(
np.isnan(bd.current(self.HILARIOUSLY_ILLIQUID_ASSET,
"high"))
)
self.assertEqual(
0,
bd.current(self.HILARIOUSLY_ILLIQUID_ASSET, "volume")
)
def test_get_value_during_non_market_hours(self):
# make sure that if we try to get the OHLCV values of ASSET1 during
# non-market hours, we don't get the previous market minute's values
bar_data = self.create_bardata(
simulation_dt_func=lambda:
pd.Timestamp("2016-01-06 4:15", tz="US/Eastern"),
)
self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "open")))
self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "high")))
self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "low")))
self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "close")))
self.assertEqual(0, bar_data.current(self.ASSET1, "volume"))
# price should still forward fill
self.assertEqual(390, bar_data.current(self.ASSET1, "price"))
def test_can_trade_equity_same_cal_outside_lifetime(self):
# verify that can_trade returns False for the session before the
# asset's first session
session_before_asset1_start = \
self.trading_calendar.previous_session_label(
self.ASSET1.start_date
)
minutes_for_session = self.trading_calendar.minutes_for_session(
session_before_asset1_start
)
# for good measure, check the minute before the session too
minutes_to_check = chain(
[minutes_for_session[0] - pd.Timedelta(minutes=1)],
minutes_for_session
)
for minute in minutes_to_check:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertFalse(bar_data.can_trade(self.ASSET1))
# after asset lifetime
session_after_asset1_end = self.trading_calendar.next_session_label(
self.ASSET1.end_date
)
bts_after_asset1_end = session_after_asset1_end.replace(
hour=8, minute=45
).tz_convert(None).tz_localize("US/Eastern")
minutes_to_check = chain(
self.trading_calendar.minutes_for_session(
session_after_asset1_end
),
[bts_after_asset1_end]
)
for minute in minutes_to_check:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertFalse(bar_data.can_trade(self.ASSET1))
def test_can_trade_equity_same_cal_exchange_closed(self):
# verify that can_trade returns true for minutes that are
# outside the asset's calendar (assuming the asset is alive and
# there is a last price), because the asset is alive on the
# next market minute.
minutes = self.trading_calendar.minutes_for_sessions_in_range(
self.ASSET1.start_date,
self.ASSET1.end_date
)
for minute in minutes:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertTrue(bar_data.can_trade(self.ASSET1))
def test_can_trade_equity_same_cal_no_last_price(self):
# self.HILARIOUSLY_ILLIQUID_ASSET's first trade is at
# 2016-01-05 15:20:00+00:00. Make sure that can_trade returns false
# for all minutes in that session before the first trade, and true
# for all minutes afterwards.
minutes_in_session = \
self.trading_calendar.minutes_for_session(self.ASSET1.start_date)
for minute in minutes_in_session[0:49]:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertFalse(bar_data.can_trade(
self.HILARIOUSLY_ILLIQUID_ASSET)
)
for minute in minutes_in_session[50:]:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertTrue(bar_data.can_trade(
self.HILARIOUSLY_ILLIQUID_ASSET)
)
def test_is_stale_during_non_market_hours(self):
bar_data = self.create_bardata(
lambda: self.equity_minute_bar_days[1],
)
with handle_non_market_minutes(bar_data):
self.assertTrue(bar_data.is_stale(self.HILARIOUSLY_ILLIQUID_ASSET))
def test_overnight_adjustments(self):
# verify there is a split for SPLIT_ASSET
splits = self.adjustment_reader.get_adjustments_for_sid(
"splits",
self.SPLIT_ASSET.sid
)
self.assertEqual(1, len(splits))
split = splits[0]
self.assertEqual(
split[0],
pd.Timestamp("2016-01-06", tz='UTC')
)
# Current day is 1/06/16
day = self.equity_daily_bar_days[1]
eight_fortyfive_am_eastern = \
pd.Timestamp("{0}-{1}-{2} 8:45".format(
day.year, day.month, day.day),
tz='US/Eastern'
)
bar_data = self.create_bardata(
lambda: eight_fortyfive_am_eastern,
)
expected = {
'open': 391 / 2.0,
'high': 392 / 2.0,
'low': 389 / 2.0,
'close': 390 / 2.0,
'volume': 39000 * 2.0,
'price': 390 / 2.0,
}
with handle_non_market_minutes(bar_data):
for field in OHLCP + ['volume']:
value = bar_data.current(self.SPLIT_ASSET, field)
# Assert the price is adjusted for the overnight split
self.assertEqual(value, expected[field])
def test_can_trade_restricted(self):
"""
Test that can_trade will return False for a sid if it is restricted
on that dt
"""
minutes_to_check = [
(str_to_ts("2016-01-05 14:31"), False),
(str_to_ts("2016-01-06 14:31"), False),
(str_to_ts("2016-01-07 14:31"), True),
(str_to_ts("2016-01-07 15:00"), False),
(str_to_ts("2016-01-07 15:30"), True),
]
rlm = HistoricalRestrictions([
Restriction(1, str_to_ts('2016-01-05'),
RESTRICTION_STATES.FROZEN),
Restriction(1, str_to_ts('2016-01-07'),
RESTRICTION_STATES.ALLOWED),
Restriction(1, str_to_ts('2016-01-07 15:00'),
RESTRICTION_STATES.FROZEN),
Restriction(1, str_to_ts('2016-01-07 15:30'),
RESTRICTION_STATES.ALLOWED),
])
for info in minutes_to_check:
bar_data = self.create_bardata(
simulation_dt_func=lambda: info[0],
restrictions=rlm,
)
self.assertEqual(bar_data.can_trade(self.ASSET1), info[1])
class TestMinuteBarDataFuturesCalendar(WithCreateBarData,
WithBarDataChecks,
ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
END_DATE = ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp(
'2016-01-07',
tz='UTC',
)
ASSET_FINDER_EQUITY_SIDS = [1]
@classmethod
def make_equity_minute_bar_data(cls):
# asset1 has trades every minute
yield 1, create_minute_df_for_asset(
cls.trading_calendar,
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
6: {
'symbol': 'CLH16',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2016-01-04', tz='UTC'),
'notice_date': pd.Timestamp('2016-01-19', tz='UTC'),
'expiration_date': pd.Timestamp('2016-02-19', tz='UTC'),
'exchange': 'ICEUS',
},
7: {
'symbol': 'FVH16',
'root_symbol': 'FV',
'start_date': pd.Timestamp('2016-01-04', tz='UTC'),
'notice_date': pd.Timestamp('2016-01-22', tz='UTC'),
'expiration_date': pd.Timestamp('2016-02-22', tz='UTC'),
'auto_close_date': pd.Timestamp('2016-01-20', tz='UTC'),
'exchange': 'CME',
},
},
orient='index',
)
@classmethod
def init_class_fixtures(cls):
super(TestMinuteBarDataFuturesCalendar, cls).init_class_fixtures()
cls.trading_calendar = get_calendar('CME')
def test_can_trade_multiple_exchange_closed(self):
nyse_asset = self.asset_finder.retrieve_asset(1)
ice_asset = self.asset_finder.retrieve_asset(6)
        # minutes we're going to check (to verify that the same bardata
# can check multiple exchange calendars, all times Eastern):
# 2016-01-05:
# 20:00 (minute before ICE opens)
# 20:01 (first minute of ICE session)
# 20:02 (second minute of ICE session)
# 00:00 (Cinderella's ride becomes a pumpkin)
# 2016-01-06:
# 9:30 (minute before NYSE opens)
# 9:31 (first minute of NYSE session)
# 9:32 (second minute of NYSE session)
# 15:59 (second-to-last minute of NYSE session)
# 16:00 (last minute of NYSE session)
# 16:01 (minute after NYSE closed)
# 17:59 (second-to-last minute of ICE session)
# 18:00 (last minute of ICE session)
# 18:01 (minute after ICE closed)
# each row is dt, whether-nyse-is-open, whether-ice-is-open
minutes_to_check = [
(pd.Timestamp("2016-01-05 20:00", tz="US/Eastern"), False, False),
(pd.Timestamp("2016-01-05 20:01", tz="US/Eastern"), False, True),
(pd.Timestamp("2016-01-05 20:02", tz="US/Eastern"), False, True),
(pd.Timestamp("2016-01-06 00:00", tz="US/Eastern"), False, True),
(pd.Timestamp("2016-01-06 9:30", tz="US/Eastern"), False, True),
(pd.Timestamp("2016-01-06 9:31", tz="US/Eastern"), True, True),
(pd.Timestamp("2016-01-06 9:32", tz="US/Eastern"), True, True),
(pd.Timestamp("2016-01-06 15:59", tz="US/Eastern"), True, True),
(pd.Timestamp("2016-01-06 16:00", tz="US/Eastern"), True, True),
(pd.Timestamp("2016-01-06 16:01", tz="US/Eastern"), False, True),
(pd.Timestamp("2016-01-06 17:59", tz="US/Eastern"), False, True),
(pd.Timestamp("2016-01-06 18:00", tz="US/Eastern"), False, True),
(pd.Timestamp("2016-01-06 18:01", tz="US/Eastern"), False, False),
]
for info in minutes_to_check:
# use the CME calendar, which covers 24 hours
bar_data = self.create_bardata(
simulation_dt_func=lambda: info[0],
)
series = bar_data.can_trade([nyse_asset, ice_asset])
self.assertEqual(info[1], series.loc[nyse_asset])
self.assertEqual(info[2], series.loc[ice_asset])
def test_can_trade_delisted(self):
"""
Test that can_trade returns False for an asset on or after its auto
close date.
"""
auto_closing_asset = self.asset_finder.retrieve_asset(7)
# Our asset's auto close date is 2016-01-20, which means that as of the
# market open for the 2016-01-20 session, `can_trade` should return
# False.
minutes_to_check = [
(pd.Timestamp('2016-01-19 00:00:00', tz='UTC'), True),
(pd.Timestamp('2016-01-19 23:00:00', tz='UTC'), True),
(pd.Timestamp('2016-01-19 23:01:00', tz='UTC'), False),
(pd.Timestamp('2016-01-19 23:59:00', tz='UTC'), False),
(pd.Timestamp('2016-01-20 00:00:00', tz='UTC'), False),
(pd.Timestamp('2016-01-20 00:01:00', tz='UTC'), False),
(pd.Timestamp('2016-01-21 00:00:00', tz='UTC'), False),
]
for info in minutes_to_check:
bar_data = self.create_bardata(simulation_dt_func=lambda: info[0])
self.assertEqual(bar_data.can_trade(auto_closing_asset), info[1])
class TestDailyBarData(WithCreateBarData,
WithBarDataChecks,
WithDataPortal,
ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
END_DATE = ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp(
'2016-01-11',
tz='UTC',
)
CREATE_BARDATA_DATA_FREQUENCY = 'daily'
sids = ASSET_FINDER_EQUITY_SIDS = set(range(1, 9))
SPLIT_ASSET_SID = 3
ILLIQUID_SPLIT_ASSET_SID = 4
MERGER_ASSET_SID = 5
ILLIQUID_MERGER_ASSET_SID = 6
DIVIDEND_ASSET_SID = 7
ILLIQUID_DIVIDEND_ASSET_SID = 8
@classmethod
def make_equity_info(cls):
frame = super(TestDailyBarData, cls).make_equity_info()
frame.loc[[1, 2], 'end_date'] = pd.Timestamp('2016-01-08', tz='UTC')
return frame
@classmethod
def make_splits_data(cls):
return pd.DataFrame.from_records([
{
'effective_date': str_to_seconds("2016-01-06"),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds("2016-01-07"),
'ratio': 0.5,
'sid': cls.ILLIQUID_SPLIT_ASSET_SID,
},
])
@classmethod
def make_mergers_data(cls):
return pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2016-01-06'),
'ratio': 0.5,
'sid': cls.MERGER_ASSET_SID,
},
{
'effective_date': str_to_seconds('2016-01-07'),
'ratio': 0.6,
'sid': cls.ILLIQUID_MERGER_ASSET_SID,
}
])
@classmethod
def make_dividends_data(cls):
return pd.DataFrame.from_records([
{
# only care about ex date, the other dates don't matter here
'ex_date':
pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
'amount': 2.0,
'sid': cls.DIVIDEND_ASSET_SID,
},
{
'ex_date':
pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
'amount': 4.0,
'sid': cls.ILLIQUID_DIVIDEND_ASSET_SID,
}],
columns=[
'ex_date',
'record_date',
'declared_date',
'pay_date',
'amount',
'sid',
]
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader()
@classmethod
def make_equity_daily_bar_data(cls):
for sid in cls.sids:
asset = cls.asset_finder.retrieve_asset(sid)
yield sid, create_daily_df_for_asset(
cls.trading_calendar,
asset.start_date,
asset.end_date,
interval=2 - sid % 2
)
@classmethod
def init_class_fixtures(cls):
super(TestDailyBarData, cls).init_class_fixtures()
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.ILLIQUID_SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.ILLIQUID_SPLIT_ASSET_SID,
)
cls.MERGER_ASSET = cls.asset_finder.retrieve_asset(
cls.MERGER_ASSET_SID,
)
cls.ILLIQUID_MERGER_ASSET = cls.asset_finder.retrieve_asset(
cls.ILLIQUID_MERGER_ASSET_SID,
)
cls.DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
cls.DIVIDEND_ASSET_SID,
)
cls.ILLIQUID_DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
cls.ILLIQUID_DIVIDEND_ASSET_SID,
)
cls.ASSETS = [cls.ASSET1, cls.ASSET2]
def get_last_minute_of_session(self, session_label):
return self.trading_calendar.open_and_close_for_session(
session_label
)[1]
def test_current_session(self):
for session in self.trading_calendar.sessions_in_range(
self.equity_daily_bar_days[0],
self.equity_daily_bar_days[-1]
):
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.get_last_minute_of_session(
session
)
)
self.assertEqual(session, bar_data.current_session)
def test_day_before_assets_trading(self):
# use the day before self.bcolz_daily_bar_days[0]
minute = self.get_last_minute_of_session(
self.trading_calendar.previous_session_label(
self.equity_daily_bar_days[0]
)
)
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.check_internal_consistency(bar_data)
self.assertFalse(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.can_trade(self.ASSET2))
self.assertFalse(bar_data.is_stale(self.ASSET1))
self.assertFalse(bar_data.is_stale(self.ASSET2))
for field in ALL_FIELDS:
for asset in self.ASSETS:
asset_value = bar_data.current(asset, field)
if field in OHLCP:
self.assertTrue(np.isnan(asset_value))
elif field == "volume":
self.assertEqual(0, asset_value)
elif field == "last_traded":
self.assertTrue(asset_value is pd.NaT)
def test_semi_active_day(self):
# on self.equity_daily_bar_days[0], only asset1 has data
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.get_last_minute_of_session(
self.equity_daily_bar_days[0]
),
)
self.check_internal_consistency(bar_data)
self.assertTrue(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.can_trade(self.ASSET2))
# because there is real data
self.assertFalse(bar_data.is_stale(self.ASSET1))
# because there has never been a trade bar yet
self.assertFalse(bar_data.is_stale(self.ASSET2))
self.assertEqual(3, bar_data.current(self.ASSET1, "open"))
self.assertEqual(4, bar_data.current(self.ASSET1, "high"))
self.assertEqual(1, bar_data.current(self.ASSET1, "low"))
self.assertEqual(2, bar_data.current(self.ASSET1, "close"))
self.assertEqual(200, bar_data.current(self.ASSET1, "volume"))
self.assertEqual(2, bar_data.current(self.ASSET1, "price"))
self.assertEqual(self.equity_daily_bar_days[0],
bar_data.current(self.ASSET1, "last_traded"))
for field in OHLCP:
self.assertTrue(np.isnan(bar_data.current(self.ASSET2, field)),
field)
self.assertEqual(0, bar_data.current(self.ASSET2, "volume"))
self.assertTrue(
bar_data.current(self.ASSET2, "last_traded") is pd.NaT
)
def test_fully_active_day(self):
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.get_last_minute_of_session(
self.equity_daily_bar_days[1]
),
)
self.check_internal_consistency(bar_data)
# on self.equity_daily_bar_days[1], both assets have data
for asset in self.ASSETS:
self.assertTrue(bar_data.can_trade(asset))
self.assertFalse(bar_data.is_stale(asset))
self.assertEqual(4, bar_data.current(asset, "open"))
self.assertEqual(5, bar_data.current(asset, "high"))
self.assertEqual(2, bar_data.current(asset, "low"))
self.assertEqual(3, bar_data.current(asset, "close"))
self.assertEqual(300, bar_data.current(asset, "volume"))
self.assertEqual(3, bar_data.current(asset, "price"))
self.assertEqual(
self.equity_daily_bar_days[1],
bar_data.current(asset, "last_traded")
)
def test_last_active_day(self):
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.get_last_minute_of_session(
self.equity_daily_bar_days[-1]
),
)
self.check_internal_consistency(bar_data)
for asset in self.ASSETS:
if asset in (1, 2):
self.assertFalse(bar_data.can_trade(asset))
else:
self.assertTrue(bar_data.can_trade(asset))
self.assertFalse(bar_data.is_stale(asset))
if asset in (1, 2):
assert_almost_equal(nan, bar_data.current(asset, "open"))
assert_almost_equal(nan, bar_data.current(asset, "high"))
assert_almost_equal(nan, bar_data.current(asset, "low"))
assert_almost_equal(nan, bar_data.current(asset, "close"))
assert_almost_equal(0, bar_data.current(asset, "volume"))
assert_almost_equal(nan, bar_data.current(asset, "price"))
else:
self.assertEqual(6, bar_data.current(asset, "open"))
self.assertEqual(7, bar_data.current(asset, "high"))
self.assertEqual(4, bar_data.current(asset, "low"))
self.assertEqual(5, bar_data.current(asset, "close"))
self.assertEqual(500, bar_data.current(asset, "volume"))
self.assertEqual(5, bar_data.current(asset, "price"))
def test_after_assets_dead(self):
session = self.END_DATE
bar_data = self.create_bardata(
simulation_dt_func=lambda: session,
)
self.check_internal_consistency(bar_data)
for asset in self.ASSETS:
self.assertFalse(bar_data.can_trade(asset))
self.assertFalse(bar_data.is_stale(asset))
for field in OHLCP:
self.assertTrue(np.isnan(bar_data.current(asset, field)))
self.assertEqual(0, bar_data.current(asset, "volume"))
last_traded_dt = bar_data.current(asset, "last_traded")
if asset in (self.ASSET1, self.ASSET2):
self.assertEqual(self.equity_daily_bar_days[3],
last_traded_dt)
@parameterized.expand([
("split", 2, 3, 3, 1.5),
("merger", 2, 3, 3, 1.8),
("dividend", 2, 3, 3, 2.88)
])
def test_get_value_adjustments(self,
adjustment_type,
liquid_day_0_price,
liquid_day_1_price,
illiquid_day_0_price,
illiquid_day_1_price_adjusted):
"""Test the behaviour of spot prices during adjustments."""
table_name = adjustment_type + 's'
liquid_asset = getattr(self, (adjustment_type.upper() + "_ASSET"))
illiquid_asset = getattr(
self,
("ILLIQUID_" + adjustment_type.upper() + "_ASSET")
)
# verify there is an adjustment for liquid_asset
adjustments = self.adjustment_reader.get_adjustments_for_sid(
table_name,
liquid_asset.sid
)
self.assertEqual(1, len(adjustments))
adjustment = adjustments[0]
self.assertEqual(
adjustment[0],
pd.Timestamp("2016-01-06", tz='UTC')
)
        # ... but that it's not applied when using spot value
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.equity_daily_bar_days[0],
)
self.assertEqual(
liquid_day_0_price,
bar_data.current(liquid_asset, "price")
)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.equity_daily_bar_days[1],
)
self.assertEqual(
liquid_day_1_price,
bar_data.current(liquid_asset, "price")
)
# ... except when we have to forward fill across a day boundary
# ILLIQUID_ASSET has no data on days 0 and 2, and a split on day 2
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.equity_daily_bar_days[1],
)
self.assertEqual(
illiquid_day_0_price, bar_data.current(illiquid_asset, "price")
)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.equity_daily_bar_days[2],
)
# 3 (price from previous day) * 0.5 (split ratio)
self.assertAlmostEqual(
illiquid_day_1_price_adjusted,
bar_data.current(illiquid_asset, "price")
)
def test_can_trade_restricted(self):
"""
Test that can_trade will return False for a sid if it is restricted
on that dt
"""
minutes_to_check = [
(pd.Timestamp("2016-01-05", tz="UTC"), False),
(pd.Timestamp("2016-01-06", tz="UTC"), False),
(pd.Timestamp("2016-01-07", tz="UTC"), True),
]
rlm = HistoricalRestrictions([
Restriction(1, str_to_ts('2016-01-05'),
RESTRICTION_STATES.FROZEN),
Restriction(1, str_to_ts('2016-01-07'),
RESTRICTION_STATES.ALLOWED),
])
for info in minutes_to_check:
bar_data = self.create_bardata(
simulation_dt_func=lambda: info[0],
restrictions=rlm
)
self.assertEqual(bar_data.can_trade(self.ASSET1), info[1])
| apache-2.0 |
DTMilodowski/LiDAR_canopy | src/BALI_synthesis_contribution/canopy_profiles_oilpalm.py | 1 | 6736 | ###############################################################################################################
# This driver function analyses the sensitivity of the LiDAR-based metrics to spatial scale and point density
###############################################################################################################
import numpy as np
import sys
from matplotlib import pyplot as plt
from matplotlib import rcParams
sys.path.append('../')
import LiDAR_io as io
import LiDAR_tools as lidar
import LiDAR_MacHorn_LAD_profiles as LAD1
#---------------------------------------------------------------------------------------------------------------
# Some filenames & params
las_list = '/home/dmilodow/DataStore_DTM/BALI/LiDAR/Data/SAFE_las_files/las_list_full_path.txt' ## CHANGE AS REQUIRED
las_files = np.genfromtxt(las_list,delimiter=',',dtype='S256')
n_files = las_files.size
plot = 'OP'
print(plot)
plot_width = 100.
sample_res = 20.
max_height = 80
layer_thickness = 1.
n_layers = np.ceil(max_height/layer_thickness)
minimum_height = 2.
subplot_width=20.
kappa = 0.70
heights = np.arange(0,max_height,layer_thickness)+layer_thickness
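# The MacArthur-Horn approach used below (via LiDAR_MacHorn_LAD_profiles)
# estimates the leaf area in each height bin as proportional to the log of the
# ratio of pulses entering the bin to pulses passing through it, scaled by
# 1/kappa; this is a rough sketch of the standard formulation, not a verbatim
# description of the implementation in that module.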
#---------------------------------------------------------------------------------------------------------------
# Load the data etc.
center = [550140.405,512920.137]
polygon = np.array([[center[0]-plot_width/2.,center[1]-plot_width/2.],
[center[0]+plot_width/2.,center[1]-plot_width/2.],
[center[0]+plot_width/2.,center[1]+plot_width/2.],
[center[0]-plot_width/2.,center[1]+plot_width/2.]]) # simple square bounding box applied for all sensitivity analyses
plot_lidar_pts, starting_ids, trees = io.load_lidar_data_by_polygon(las_list,polygon,max_pts_per_tree = 5*10**5, laz_files=False)
N_trees = len(trees)
plot_lidar_pts[plot_lidar_pts[:,2]<0,2]=0
n_returns = plot_lidar_pts.shape[0]
# Set up the sampling grid at the chosen spatial resolution (sample_res)
print("generating sample grid")
# Now create the subplot grids
rows = int(plot_width/sample_res)
cols = int(plot_width/sample_res)
x=np.arange(0,plot_width+sample_res,sample_res)+[polygon[0,0]]
y=np.arange(0,plot_width+sample_res,sample_res)+[polygon[0,1]]
x_prime,y_prime=np.asarray(np.meshgrid(x,y))
count = 0
subplot_polygons = []
row_idx = []
col_idx = []
for i in range(0,rows):
for j in range(0,cols):
bbox = [ [x_prime[i,j], x_prime[i+1,j], x_prime[i+1,j+1], x_prime[i,j+1], x_prime[i,j]],
[y_prime[i,j], y_prime[i+1,j], y_prime[i+1,j+1], y_prime[i,j+1], y_prime[i,j]] ]
subplot_polygons.append( np.asarray(bbox).transpose() )
row_idx.append(i)
col_idx.append(j)
subplots=np.asarray(subplot_polygons)
#-----------------------------------------------------
# now get highest return in each grid cell to define the CHM
print("generating canopy profiles")
n_subplots = subplots.shape[0]
#------------------------------------------------------------------------------------
# CLIP DATA TO PLOT
# clip LiDAR point cloud to plot level (this makes subsequent processing much faster)
print("canopy height = ", np.percentile(plot_lidar_pts[plot_lidar_pts[:,3]==1,2],99), "m")
#------------------------------------------------------------------------------------
# SET UP ARRAYS TO HOST RESULTS
MacArthurHorn_LAD={}
LAD_MH = np.zeros((n_subplots, heights.size))
#------------------------------------------------------------------------------------
# LOOP THROUGH SUBPLOTS, CALCULATING CANOPY PROFILES
pt_count = 0.
# for each of the subplots, clip point cloud and model PAD and get the metrics
for pp in range(0,n_subplots):
# query the tree to locate points of interest
# note that we will only have one tree for the number of points in sensitivity analysis
centre_x = np.mean(subplots[pp][0:4,0])
centre_y = np.mean(subplots[pp][0:4,1])
radius = np.sqrt(sample_res**2/2.)
ids = trees[0].query_ball_point([centre_x,centre_y], radius)
sp_pts = lidar.filter_lidar_data_by_polygon(plot_lidar_pts[ids],subplots[pp],filter_by_first_return_location=False)
pt_count += sp_pts.shape[0]
# now get MacArthur-Horn profiles
heights,first_return_profile,n_ground_returns = LAD1.bin_returns(sp_pts, max_height, layer_thickness)
LAD_MH[pp,:] = LAD1.estimate_LAD_MacArthurHorn(first_return_profile,
n_ground_returns,
layer_thickness,
kappa,
zero_nodata=False)
# Check for columns for which no pulses hit ground without interception.
    # Below the depth at which the canopy is no longer penetrated by first
    # returns, the estimated LAD values become infinite. Thus we need to expand the search radius
# iteratively, so that we can build up a full vertical profile. Note that
# this potentially gives rise to coarsening effective resolution down the
# profile, but this seems preferable to more crude gap-filling schemes.
nodata_test = np.any(~np.isfinite(LAD_MH[pp]))
centre_x = np.mean(subplots[pp][0:4,0])
centre_y = np.mean(subplots[pp][0:4,1])
while nodata_test:
# expand neighbourhood for point cloud sample
ids = trees[0].query_ball_point([centre_x,centre_y], radius)
sp_pts_iter = plot_lidar_pts[ids]
# get MacArthur-Horn profiles
nodata_gaps = ~np.isfinite(LAD_MH[pp])
heights,first_return_profile,n_ground_returns = LAD1.bin_returns(sp_pts_iter, max_height, layer_thickness)
LAD_MH[pp,nodata_gaps] = LAD1.estimate_LAD_MacArthurHorn(first_return_profile,
n_ground_returns,
layer_thickness,
kappa,
zero_nodata=False)[nodata_gaps]
# update check
radius+=1.
nodata_test = np.any(~np.isfinite(LAD_MH[pp]))
print("average point density = ", pt_count/10.**4, " pts/m^2")
#------------------------------------------------------------------------------------
# CLEANING AND STORING
# now we have looped through and created the different profiles, need to account for any NaN's and apply minimum height
# to the LAD distributions
# - remove all profile values below minimum height prior to comparison
mask = heights <= 2.
LAD_MH[:,mask]=np.nan
MacArthurHorn_LAD[plot] = LAD_MH.copy()
#----------------------------------------------------------------------------
np.savez('lidar_canopy_profiles_adaptive_OP.npz',(MacArthurHorn_LAD))
| gpl-3.0 |
mjvakili/ccppabc | ccppabc/code/test_subvol_xi.py | 1 | 6067 | import numpy as np
import matplotlib.pyplot as plt
from halotools.sim_manager import CachedHaloCatalog
from halotools.empirical_models import PrebuiltHodModelFactory
from halotools.mock_observables import tpcf
from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle
from util import mask_func
def mk_id_column(table=None):
# set up ids from 0 to 63 for the box split into 4 along each edge
edges = np.linspace(0, 750, 4)
xs = table["halo_x"]
ys = table["halo_y"]
zs = table["halo_z"]
subvol_ids = np.empty(xs.shape)
for i in xrange(len(xs)):
xi = np.where(edges < xs[i])[0][-1]
yi = np.where(edges < ys[i])[0][-1]
zi = np.where(edges < zs[i])[0][-1]
subvol_ids[i] = zi * 16 + yi * 4 + xi
return subvol_ids
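# Worked example (hypothetical halo): with edges [0, 250, 500, 750], a halo at
# (x, y, z) = (300, 10, 600) falls in cell (xi, yi, zi) = (1, 0, 2), giving
# subvol_id = 2 * 16 + 0 * 4 + 1 = 33.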
def random_shifter(index):
zi = (index / 16) * 250.
i2 = index % 16
yi = (i2 / 4) * 250.
i3 = i2 % 4
xi = (i3) * 250.
return xi , yi , zi
def mk_id_column_2(table=None):
edges = np.linspace(0, 500, 2)
xs = table["halo_x"]
ys = table["halo_y"]
zs = table["halo_z"]
subvol_ids = np.empty(xs.shape)
for i in xrange(len(xs)):
xi = np.where(edges < xs[i])[0][-1]
yi = np.where(edges < ys[i])[0][-1]
zi = np.where(edges < zs[i])[0][-1]
subvol_ids[i] = zi * 4 + yi * 2 + xi
return subvol_ids
def random_shifter_2(index):
zi = (index / 4) * 500.
i2 = index % 4
yi = (i2 / 2) * 500.
i3 = i2 % 2
xi = (i3) * 500.
return xi , yi , zi
xi_bin = np.concatenate([np.array([0.15]),
np.logspace(np.log10(0.5),
np.log10(20.), 15)])
rmax = xi_bin.max()
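# The 'Natural' estimator requested in the tpcf calls below is, in the usual
# convention, xi(r) = DD(r) / RR(r) - 1, where DD and RR are the normalised
# data-data and random-random pair counts; the uniform randoms generated next
# supply RR.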
# Get those randoms
num_randoms = 50 * 800000
xran = np.random.uniform(0, 1000, num_randoms)
yran = np.random.uniform(0, 1000, num_randoms)
zran = np.random.uniform(0, 1000, num_randoms)
full_randoms = np.vstack((xran, yran, zran)).T
# Get the full box mock
model = PrebuiltHodModelFactory('zheng07', threshold=-21)
halocat = CachedHaloCatalog(simname='multidark', redshift=0,
halo_finder='rockstar')
model.populate_mock(halocat, enforce_PBC=False)
pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
# Get full tpcf
print "getting full vol tpcf..."
xi_full_pc = tpcf(pos, xi_bin,
randoms=full_randoms,
do_auto=True, do_cross=False,
max_sample_size=int(pos.shape[0]),
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
print "done"
Nsub = 8
# Now set up for subvol boxes
num_randoms = 50 * 8000
xran = np.random.uniform(0, 250, num_randoms)
yran = np.random.uniform(0, 250, num_randoms)
zran = np.random.uniform(0, 250, num_randoms)
sub_randoms = np.vstack((xran, yran, zran)).T
sub_model = PrebuiltHodModelFactory('zheng07')
sub_model.new_haloprop_func_dict = {'sim_subvol': mk_id_column}
sub_halocat = CachedHaloCatalog(simname='multidark', redshift=0,
halo_finder='rockstar')
print "starting with 250 mpc subvols:"
sub_xi_list = []
for ii in range(1,Nsub+1):
print ii
simsubvol = lambda x: mask_func(x, ii)
sub_model.populate_mock(sub_halocat,
masking_function=simsubvol, enforce_PBC=False)
sub_pos = three_dim_pos_bundle(sub_model.mock.galaxy_table,
'x', 'y', 'z')
xi, yi, zi = random_shifter(ii)
temp_randoms = sub_randoms.copy()
temp_randoms[:,0] += xi
temp_randoms[:,1] += yi
temp_randoms[:,2] += zi
sub_xi = tpcf(sub_pos, xi_bin,
randoms=temp_randoms,
do_auto=True,
do_cross=False,
max_sample_size=int(sub_pos.shape[0]),
estimator='Natural',
                  approx_cell1_size=[rmax, rmax, rmax],
                  approx_cellran_size=[rmax, rmax, rmax])
sub_xi_list.append(sub_xi)
# And again for the larger subvols
num_randoms = 50 * 100000
xran = np.random.uniform(0, 500, num_randoms)
yran = np.random.uniform(0, 500, num_randoms)
zran = np.random.uniform(0, 500, num_randoms)
sub_randoms = np.vstack((xran, yran, zran)).T
sub_model = PrebuiltHodModelFactory('zheng07')
sub_model.new_haloprop_func_dict = {'sim_subvol': mk_id_column_2}
sub_halocat = CachedHaloCatalog(simname='multidark', redshift=0,
halo_finder='rockstar')
print "starting with 500 mpc subvols"
sub_xi_list_2 = []
for ii in range(1,Nsub+1):
print ii
simsubvol = lambda x: mask_func(x, ii)
sub_model.populate_mock(sub_halocat,
masking_function=simsubvol, enforce_PBC=False)
sub_pos = three_dim_pos_bundle(sub_model.mock.galaxy_table,
'x', 'y', 'z')
xi, yi, zi = random_shifter_2(ii)
temp_randoms = sub_randoms.copy()
temp_randoms[:,0] += xi
temp_randoms[:,1] += yi
temp_randoms[:,2] += zi
sub_xi = tpcf(sub_pos, xi_bin,
randoms=temp_randoms,
do_auto=True,
do_cross=False,
max_sample_size=int(sub_pos.shape[0]),
estimator='Natural',
                  approx_cell1_size=[rmax, rmax, rmax],
                  approx_cellran_size=[rmax, rmax, rmax])
sub_xi_list_2.append(sub_xi)
# And plot!
fig = plt.figure()
ax = fig.add_subplot(111)
print "plotting..."
lines = []
for ii in range(Nsub):
    # plot small vol ratio
    rat1 = sub_xi_list[ii] / xi_full_pc
    lines.append(ax.plot(0.5*(xi_bin[:-1] + xi_bin[1:]), rat1,
                         color='g', alpha=0.2))
    # plot large vol ratio
    rat2 = sub_xi_list_2[ii] / xi_full_pc
    lines.append(ax.plot(0.5*(xi_bin[:-1] + xi_bin[1:]), rat2,
                         color='r', alpha=0.2))
ax.set_xscale('log')
ax.set_xlabel('r')
ax.set_ylabel('xi ratio')
fig.savefig("subvol_compare.pdf")
| mit |
vermouthmjl/scikit-learn | sklearn/gaussian_process/kernels.py | 24 | 66334 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower und upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
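# Illustrative usage (RBF, WhiteKernel and ConstantKernel are defined further
# down in this module): a composite kernel such as
#     1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
# is itself a Kernel whose free hyperparameters are exposed jointly through
# its ``theta`` and ``bounds`` properties.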
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
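# Illustrative example (hypothetical values): a scalar, tunable hyperparameter
# can be declared as Hyperparameter("length_scale", "numeric", (1e-5, 1e5)),
# whereas passing bounds="fixed" excludes it from optimization.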
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
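def _example_kernel_algebra():
    # Illustrative sketch, not part of the library API: shows the operator
    # overloading (__add__/__mul__/__pow__) and the log-transformed theta and
    # bounds described above.  ConstantKernel, RBF and WhiteKernel are defined
    # further below in this module, so call this only after the module has been
    # fully imported.
    import numpy as np
    kernel = ConstantKernel(2.0) * RBF(length_scale=1.5) + WhiteKernel(0.1)
    print(kernel)                 # 1.41**2 * RBF(length_scale=1.5) + WhiteKernel(noise_level=0.1)
    print(kernel.theta)           # log([2.0, 1.5, 0.1]), one entry per non-fixed hyperparameter
    print(np.exp(kernel.bounds))  # bounds are log-transformed in the same way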
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        k_dims = self.kernels[0].n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
    def hyperparameters(self):
        """Returns a list of all hyperparameter specifications."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
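def _example_nested_params():
    # Illustrative sketch, not part of the library API: composite kernels expose
    # their sub-kernel parameters under the "k1__"/"k2__" prefixes used by
    # get_params and set_params (see Kernel.set_params above).  ConstantKernel
    # and RBF are defined further below; call this after the module is imported.
    kernel = ConstantKernel(constant_value=2.0) * RBF(length_scale=1.0)
    print(sorted(kernel.get_params().keys()))
    # ['k1', 'k1__constant_value', 'k1__constant_value_bounds',
    #  'k2', 'k2__length_scale', 'k2__length_scale_bounds']
    kernel.set_params(k2__length_scale=5.0)
    print(kernel.k2.length_scale)  # 5.0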
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
    def hyperparameters(self):
        """Returns a list of all hyperparameter specifications."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
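def _example_noise_model():
    # Illustrative sketch, not part of the library API: a typical GP covariance
    # combining the kernels above -- ConstantKernel sets the signal variance and
    # WhiteKernel adds i.i.d. noise.  As implemented above, the noise term only
    # enters k(X, X) when Y is None; passing Y explicitly yields a noise-free
    # value.  RBF is defined just below; call this after the module is imported.
    import numpy as np
    kernel = ConstantKernel(2.0) * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.5)
    X = np.array([[0.0], [1.0]])
    print(kernel.diag(X))      # [2.5, 2.5]: signal variance 2.0 plus noise level 0.5
    print(kernel(X, X)[0, 0])  # 2.0: WhiteKernel returns zeros when Y is given explicitly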
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
                self.length_scale = np.asarray(length_scale, dtype=float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
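def _example_rbf_by_hand():
    # Illustrative sketch, not part of the library API: reproduce one RBF entry
    # by hand to make the docstring formula concrete.
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 2.0]])
    length_scale = 1.5
    K = RBF(length_scale=length_scale)(X)
    sq_dist = np.sum((X[0] - X[1]) ** 2)                # squared Euclidean distance = 5
    k_01 = np.exp(-0.5 * sq_dist / length_scale ** 2)   # exp(-0.5 * 5 / 2.25)
    print(K[0, 1])   # ~0.3292
    print(k_01)      # same value, computed by hand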
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
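def _example_matern_nu_half():
    # Illustrative sketch, not part of the library API: for nu=0.5 the Matern
    # kernel reduces to the absolute-exponential kernel exp(-d / length_scale).
    import numpy as np
    from scipy.spatial.distance import cdist
    rng = np.random.RandomState(0)
    X = rng.normal(size=(4, 2))
    K = Matern(length_scale=2.0, nu=0.5)(X)
    K_manual = np.exp(-cdist(X, X) / 2.0)
    print(np.max(np.abs(K - K_manual)))  # ~0, up to floating point noise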
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
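def _example_rq_large_alpha():
    # Illustrative sketch, not part of the library API: for large alpha the
    # RationalQuadratic kernel approaches an RBF kernel with the same
    # length_scale, since (1 + x/alpha)^(-alpha) -> exp(-x) as alpha -> inf.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.normal(size=(4, 2))
    K_rq = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)
    K_rbf = RBF(length_scale=1.0)(X)
    print(np.max(np.abs(K_rq - K_rbf)))  # on the order of 1e-6 or smaller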
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where l is a scalar is
    supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 * (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
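def _example_periodicity():
    # Illustrative sketch, not part of the library API: points that are exactly
    # one period apart are perfectly correlated, because sin(pi * d / periodicity)
    # vanishes for d = periodicity.
    import numpy as np
    kernel = ExpSineSquared(length_scale=1.0, periodicity=3.0)
    X = np.array([[0.0], [3.0], [1.0]])
    K = kernel(X)
    print(K[0, 1])  # 1.0: x=0 and x=3 are one full period apart
    print(K[0, 2])  # < 1.0: x=0 and x=1 are not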
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . .
. , D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
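def _example_dot_product():
    # Illustrative sketch, not part of the library API: the DotProduct kernel is
    # simply sigma_0^2 + x_i . x_j, so the diagonal equals the squared row norms
    # plus sigma_0^2.
    import numpy as np
    X = np.array([[1.0, 2.0], [3.0, 0.5]])
    kernel = DotProduct(sigma_0=1.0)
    print(kernel(X))       # [[6.0, 5.0], [5.0, 10.25]]
    print(kernel.diag(X))  # [6.0, 10.25]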
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
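def _example_gradient_check():
    # Illustrative sketch, not part of the library API: cross-check an analytic
    # kernel gradient against the finite-difference helper above.  Both gradients
    # are taken with respect to the log-transformed hyperparameters (theta).
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.normal(size=(5, 2))
    kernel = RBF(length_scale=1.5)
    K, K_gradient = kernel(X, eval_gradient=True)
    K_gradient_approx = _approx_fprime(
        kernel.theta, lambda theta: kernel.clone_with_theta(theta)(X), 1e-10)
    print(np.max(np.abs(K_gradient - K_gradient_approx)))  # small (finite-difference error)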
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
| bsd-3-clause |
michelrobijns/vandevooren | vandevooren.py | 1 | 3229 | #!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
def van_de_vooren(tau, E, alpha):
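    # tau is the trailing-edge angle in degrees, E the thickness coefficient of
    # the Van de Vooren conformal mapping, and alpha the angle of attack in
    # degrees (interpretation assumed from the formulas below); returns
    # unit-chord airfoil coordinates x, y and the surface pressure coefficient cp.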
tau = np.radians(tau)
alpha = np.radians(alpha)
TL = 0.5
AK = (2 * np.pi - tau) / np.pi
A = 2 * TL * np.power(E + 1, AK - 1) / np.power(2, AK)
angles = np.arange(361)
x = np.zeros(np.size(angles))
y = np.zeros(np.size(angles))
cp = np.zeros(np.size(angles))
for i in angles:
        if i == 0 or i == 360:
X = 1
Y = 0
CP = 1
x[i] = X
y[i] = Y
            if AK == 2 and (i == 0 or i == 360):
                continue
cp[i] = CP
TH = i / 180 * np.pi
R1 = sqrt((A * (np.cos(TH) - 1)) ** 2 + (A * np.sin(TH)) ** 2)
R2 = sqrt((A * (np.cos(TH) - E)) ** 2 + (A * np.sin(TH)) ** 2)
if (TH == 0):
TH1 = np.pi / 2
else:
TH1 = np.arctan((A * np.sin(TH)) / (A * (np.cos(TH) - 1))) + np.pi
if ((np.cos(TH) - E) < 0 and np.sin(TH) > 0):
TH2 = np.arctan((A * np.sin(TH)) / (A * (np.cos(TH) - E))) + np.pi
elif ((np.cos(TH) - E) < 0 and np.sin(TH) < 0):
TH2 = np.arctan((A * np.sin(TH)) / (A * (np.cos(TH) - E))) + np.pi
elif ((np.cos(TH) - E) > 0 and np.sin(TH) < 0):
TH2 = np.arctan((A * np.sin(TH)) / (A * (np.cos(TH) - E))) + 2 * np.pi
else:
TH2 = np.arctan((A * np.sin(TH)) / (A * (np.cos(TH) - E)))
# Compute transformed positions
COM1 = (((R1 ** AK) / (R2 ** (AK - 1)) ) /
((np.cos((AK - 1)*TH2)) ** 2 + (np.sin((AK - 1) * TH2)) ** 2))
X = (COM1 * (np.cos(AK * TH1) * np.cos((AK - 1) * TH2) +
np.sin(AK * TH1) * np.sin((AK - 1) * TH2)) + TL)
Y = (COM1 * (np.sin(AK * TH1) * np.cos((AK - 1) * TH2) -
np.cos(AK * TH1) * np.sin((AK - 1) * TH2)) + TL)
x[i] = X
y[i] = Y
# Compute transformed pressure distribution
A1 = np.cos((AK - 1) * TH1) * np.cos(AK * TH2) + np.sin((AK - 1)
* TH1) * np.sin(AK * TH2)
B1 = np.sin((AK - 1) * TH1) * np.cos(AK * TH2) - np.cos((AK - 1)
* TH1) * np.sin(AK * TH2)
C1 = (np.cos(AK * TH2)) ** 2 + (np.sin(AK * TH2)) ** 2
P = A * (1 - AK + AK * E)
D1 = A1 * (A * np.cos(TH) - P) - B1 * A * np.sin(TH)
D2 = A1 * A * np.sin(TH) + B1 * (A * np.cos(TH) - P)
TEMP = 2 * C1 * (np.sin(alpha) - np.sin(alpha - TH)) / (D1 ** 2 + D2 ** 2)
COM2 = TEMP * (R2 ** AK) / (R1 ** (AK - 1))
VX = D1 * np.sin(TH) + D2 * np.cos(TH)
VY = -(D1 * np.cos(TH) - D2 * np.sin(TH))
CP = 1 - COM2 ** 2 * (VX ** 2 + VY ** 2)
cp[i] = CP
length = np.amax(x) - np.amin(x)
thickness = np.amax(y) - np.amin(y)
# Move the leading edge to the origin
x = x - np.amin(x)
y = y - y[-1]
# Scale the airfoil to unit length
x = x / length
y = y / length
return x, y, cp
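def example_usage():
    # Illustrative sketch (not part of the original script), using assumed
    # parameter values: a 15-degree trailing-edge angle, thickness coefficient
    # 0.05 and 5 degrees angle of attack.  Plots the airfoil shape and the
    # surface pressure distribution.
    x, y, cp = van_de_vooren(tau=15.0, E=0.05, alpha=5.0)
    _, (ax1, ax2) = plt.subplots(2, 1)
    ax1.plot(x, y)
    ax1.set_aspect('equal')
    ax1.set_title('Van de Vooren airfoil')
    ax2.plot(x, -cp)
    ax2.set_xlabel('x/c')
    ax2.set_ylabel('-Cp')
    plt.tight_layout()
    plt.show()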
def main():
pass
if __name__ == '__main__':
main()
| mit |
3manuek/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
    The centroid for the samples corresponding to each class is the point
    from which the sum of the distances (according to the metric) of all
    samples that belong to that particular class is minimized.
    If the "manhattan" metric is provided, this centroid is the median;
    for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in a csc
        # format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
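def _example_shrink_threshold():
    # Illustrative sketch, not part of the estimator API: shrink_threshold
    # soft-thresholds each centroid toward the overall data centroid, which can
    # zero out uninformative features (the "shrunken centroids" method cited above).
    import numpy as np
    X = np.array([[-1., -1.], [-2., -1.], [-3., -2.], [1., 1.], [2., 1.], [3., 2.]])
    y = np.array([1, 1, 1, 2, 2, 2])
    clf = NearestCentroid(shrink_threshold=0.1)
    clf.fit(X, y)
    print(clf.centroids_)
    print(clf.predict([[-0.8, -1.0]]))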
| bsd-3-clause |
GitYiheng/reinforcement_learning_test | test01_cartpendulum/Feb/t7_cartpole_mc_plot.py | 1 | 4579 | import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib.pyplot as plt
# Hyperparameters
RANDOM_NUMBER_SEED = 2
# ENVIRONMENT = "CartPole-v0"
# ENVIRONMENT = "CartPole-v1"
ENVIRONMENT = "morph-v0"
MAX_EPISODES = 1000
HIDDEN_LAYER = True
HIDDEN_SIZE = 6
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
alpha = 0.02205 # Learning rate
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 50 # Generate a video at this interval
CONSECUTIVE_TARGET = 50 # Including previous 50 rewards
env = gym.make(ENVIRONMENT)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = env.observation_space.shape[0]
try:
output_size = env.action_space.shape[0]
except AttributeError:
output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
if HIDDEN_LAYER:
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
else:
dist_W = tf.get_variable("W1", shape=[input_size, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
dist = tf.tanh(tf.matmul(x, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
optimizer = tf.train.RMSPropOptimizer(alpha)
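# REINFORCE-style surrogate loss: minimizing -(return-to-go * log-probability)
# of the sampled actions performs gradient ascent on the expected return.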
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
global HIDDEN_LAYER
if HIDDEN_LAYER:
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
else:
w1 = session.run(dist_W)
b1 = session.run(dist_B)
print(w1, b1)
returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
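    # expected_R[t] is the discounted return-to-go: the total discounted return
    # minus what was already accumulated before step t (see run_episode above).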
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_returns = np.mean(running_returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
env.close()
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
returns_for_plot = np.array(returns)
plt.plot(returns_for_plot)
plt.title("Returns")
plt.show()
| mit |
bzero/statsmodels | statsmodels/examples/ex_emplike_1.py | 34 | 3682 | """
This is a basic tutorial on how to conduct basic empirical likelihood
inference for descriptive statistics. If matplotlib is installed
it also generates plots.
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
print('Welcome to El')
np.random.seed(634) # No significance of the seed.
# Let's first generate some univariate data.
univariate = np.random.standard_normal(30)
# Now let's play with it
# Initiate an empirical likelihood descriptive statistics instance
eldescriptive = sm.emplike.DescStat(univariate)
# Empirical likelihood is (typically) a method of inference,
# not estimation. Therefore, there is no attribute eldescriptive.mean
# However, we can check the mean:
eldescriptive_mean = eldescriptive.endog.mean() #.42
#Let's conduct a hypothesis test to see if the mean is 0
print('Hypothesis test results for the mean:')
print(eldescriptive.test_mean(0))
# The first value is the -2 * log-likelihood ratio, which is distributed
#chi2. The second value is the p-value.
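# For example, the pair can be unpacked as: llr, pval = eldescriptive.test_mean(0)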
# Let's see what the variance is:
eldescriptive_var = eldescriptive.endog.var() # 1.01
#Let's test if the variance is 1:
print('Hypothesis test results for the variance:')
print(eldescriptive.test_var(1))
# Let's test if Skewness and Kurtosis are 0
print('Hypothesis test results for Skewness:')
print(eldescriptive.test_skew(0))
print('Hypothesis test results for the Kurtosis:')
print(eldescriptive.test_kurt(0))
# Note that the skewness and kurtosis tests take longer. This is because
# we have to optimize over the nuisance parameters (mean, variance).
# We can also test jointly for skewness and kurtosis
print(' Joint Skewness-Kurtosis test')
print(eldescriptive.test_joint_skew_kurt(0, 0))
# Let's try and get some confidence intervals
print('Confidence interval for the mean')
print(eldescriptive.ci_mean())
print('Confidence interval for the variance')
print(eldescriptive.ci_var())
print('Confidence interval for skewness')
print(eldescriptive.ci_skew())
print('Confidence interval for kurtosis')
print(eldescriptive.ci_kurt())
# if matplotlib is installed, we can get a contour plot for the mean
# and variance.
mean_variance_contour = eldescriptive.plot_contour(-.5, 1.2, .2, 2.5, .05, .05)
# This returns a figure instance. Just type mean_var_contour.show()
# to see the plot.
# Once you close the plot, we can start some multivariate analysis.
x1 = np.random.exponential(2, (30, 1))
x2 = 2 * x1 + np.random.chisquare(4, (30, 1))
mv_data = np.concatenate((x1, x2), axis=1)
mv_elmodel = sm.emplike.DescStat(mv_data)
# For multivariate data, the only methods are mv_test_mean,
# mv_mean_contour, ci_corr and test_corr.
# Let's test the hypothesis that x1 has a mean of 2 and x2 has a mean of 7
print('Multivariate mean hypothesis test')
print(mv_elmodel.mv_test_mean(np.array([2, 7])))
# Now let's get the confidence interval for correlation
print('Correlation Coefficient CI')
print(mv_elmodel.ci_corr())
# Note how this took much longer than previous functions. That is
# because the function is optimizing over 4 nuisance parameters.
# We can also do a hypothesis test for correlation
print('Hypothesis test for correlation')
print(mv_elmodel.test_corr(.7))
# Finally, let's create a contour plot for the means of the data
means_contour = mv_elmodel.mv_mean_contour(1, 3, 6,9, .15,.15, plot_dta=1)
# This also returns a fig so we can type mean_contour.show() to see the figure
# Sometimes, the data is very dispersed and we would like to see the confidence
# intervals without the plotted data. Let's see the difference when we set
# plot_dta=0
means_contour2 = mv_elmodel.mv_mean_contour(1, 3, 6,9, .05,.05, plot_dta=0)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e335.py | 2 | 5407 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 1800, 1800],
min_off_durations=[12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.5,
one_target_per_seq=True,
n_seq_per_batch=16,
# subsample_target=8,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=32 + 16 + 8,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-2,
learning_rate_changes_by_iteration={
250: 1e-3
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
net_dict_copy['layers_config'] = [
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
SiderZhang/p2pns3 | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
hlin117/statsmodels | statsmodels/sandbox/pca.py | 33 | 7098 | #Copyright (c) 2008 Erik Tollerud ([email protected])
from statsmodels.compat.python import zip
import numpy as np
from math import pi
class Pca(object):
"""
A basic class for Principal Component Analysis (PCA).
p is the number of dimensions, while N is the number of data points
"""
_colors=('r','g','b','c','y','m','k') #defaults
def __calc(self):
A = self.A
M=A-np.mean(A,axis=0)
N=M/np.std(M,axis=0)
self.M = M
self.N = N
self._eig = None
def __init__(self,data,names=None):
"""
p X N matrix input
"""
A = np.array(data).T
n,p = A.shape
self.n,self.p = n,p
if p > n:
from warnings import warn
warn('p > n - intentional?', RuntimeWarning)
self.A = A
self._origA=A.copy()
self.__calc()
self._colors= np.tile(self._colors,int((p-1)/len(self._colors))+1)[:p]
if names is not None and len(names) != p:
raise ValueError('names must match data dimension')
self.names = None if names is None else tuple([str(n) for n in names])
def getCovarianceMatrix(self):
"""
returns the covariance matrix for the dataset
"""
return np.cov(self.N.T)
def getEigensystem(self):
"""
returns a tuple of (eigenvalues,eigenvectors) for the data set.
"""
if self._eig is None:
res = np.linalg.eig(self.getCovarianceMatrix())
sorti=np.argsort(res[0])[::-1]
res=(res[0][sorti],res[1][:,sorti])
self._eig=res
return self._eig
def getEigenvalues(self):
return self.getEigensystem()[0]
def getEigenvectors(self):
return self.getEigensystem()[1]
def getEnergies(self):
"""
"energies" are just normalized eigenvectors
"""
v=self.getEigenvalues()
return v/np.sum(v)
def plot2d(self,ix=0,iy=1,clf=True):
"""
        Generates a 2-dimensional plot of the data set and principal components
using matplotlib.
ix specifies which p-dimension to put on the x-axis of the plot
and iy specifies which to put on the y-axis (0-indexed)
"""
import matplotlib.pyplot as plt
x,y=self.N[:,ix],self.N[:,iy]
if clf:
plt.clf()
plt.scatter(x,y)
vals,evs=self.getEigensystem()
#evx,evy=evs[:,ix],evs[:,iy]
xl,xu=plt.xlim()
yl,yu=plt.ylim()
dx,dy=(xu-xl),(yu-yl)
for val,vec,c in zip(vals,evs.T,self._colors):
plt.arrow(0,0,val*vec[ix],val*vec[iy],head_width=0.05*(dx*dy/4)**0.5,fc=c,ec=c)
#plt.arrow(0,0,vals[ix]*evs[ix,ix],vals[ix]*evs[iy,ix],head_width=0.05*(dx*dy/4)**0.5,fc='g',ec='g')
#plt.arrow(0,0,vals[iy]*evs[ix,iy],vals[iy]*evs[iy,iy],head_width=0.05*(dx*dy/4)**0.5,fc='r',ec='r')
if self.names is not None:
plt.xlabel('$'+self.names[ix]+'/\\sigma$')
plt.ylabel('$'+self.names[iy]+'/\\sigma$')
def plot3d(self,ix=0,iy=1,iz=2,clf=True):
"""
        Generates a 3-dimensional plot of the data set and principal components
using mayavi.
ix, iy, and iz specify which of the input p-dimensions to place on each of
the x,y,z axes, respectively (0-indexed).
"""
import enthought.mayavi.mlab as M
if clf:
M.clf()
z3=np.zeros(3)
v=(self.getEigenvectors()*self.getEigenvalues())
M.quiver3d(z3,z3,z3,v[ix],v[iy],v[iz],scale_factor=5)
M.points3d(self.N[:,ix],self.N[:,iy],self.N[:,iz],scale_factor=0.3)
if self.names:
M.axes(xlabel=self.names[ix]+'/sigma',ylabel=self.names[iy]+'/sigma',zlabel=self.names[iz]+'/sigma')
else:
M.axes()
def sigclip(self,sigs):
"""
clips out all data points that are more than a certain number
of standard deviations from the mean.
sigs can be either a single value or a length-p sequence that
specifies the number of standard deviations along each of the
p dimensions.
"""
if np.isscalar(sigs):
sigs=sigs*np.ones(self.N.shape[1])
sigs = sigs*np.std(self.N,axis=1)
n = self.N.shape[0]
m = np.all(np.abs(self.N) < sigs,axis=1)
self.A=self.A[m]
self.__calc()
return n-sum(m)
def reset(self):
self.A = self._origA.copy()
self.__calc()
def project(self,vals=None,enthresh=None,nPCs=None,cumen=None):
"""
projects the normalized values onto the components
enthresh, nPCs, and cumen determine how many PCs to use
if vals is None, the normalized data vectors are the values to project.
        Otherwise, it should be convertible to a p x N array
returns n,p(>threshold) dimension array
"""
        nonnones = sum([e is not None for e in (enthresh, nPCs, cumen)])
if nonnones == 0:
m = slice(None)
elif nonnones > 1:
raise ValueError("can't specify more than one threshold")
else:
if enthresh is not None:
                m = self.getEnergies() > enthresh
elif nPCs is not None:
m = slice(None,nPCs)
elif cumen is not None:
                m = np.cumsum(self.getEnergies()) < cumen
else:
raise RuntimeError('Should be unreachable')
if vals is None:
vals = self.N.T
else:
vals = np.array(vals,copy=False)
if self.N.T.shape[0] != vals.shape[0]:
raise ValueError("shape for vals doesn't match")
proj = np.matrix(self.getEigenvectors()).T*vals
return proj[m].T
def deproject(self,A,normed=True):
"""
input is an n X q array, where q <= p
output is p X n
"""
A=np.atleast_2d(A)
n,q = A.shape
p = self.A.shape[1]
if q > p :
raise ValueError("q > p")
evinv=np.linalg.inv(np.matrix(self.getEigenvectors()).T)
zs = np.zeros((n,p))
zs[:,:q]=A
proj = evinv*zs.T
if normed:
return np.array(proj.T).T
else:
mns=np.mean(self.A,axis=0)
sds=np.std(self.M,axis=0)
return (np.array(proj.T)*sds+mns).T
def subtractPC(self,pc,vals=None):
"""
        pc can be a scalar or any sequence of pc indices
if vals is None, the source data is self.A, else whatever is in vals
(which must be p x m)
"""
if vals is None:
vals = self.A
else:
vals = vals.T
if vals.shape[1]!= self.A.shape[1]:
raise ValueError("vals don't have the correct number of components")
pcs=self.project()
zpcs=np.zeros_like(pcs)
zpcs[:,pc]=pcs[:,pc]
upc=self.deproject(zpcs,False)
A = vals.T-upc
B = A.T*np.std(self.M,axis=0)
return B+np.mean(self.A,axis=0)
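# ----------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module).
# Assumes random p x N input as described in the class docstring; the block
# runs only when the module is executed directly.
if __name__ == "__main__":  # pragma: no cover
    rs = np.random.RandomState(0)
    demo_data = rs.standard_normal((3, 100))  # p=3 dimensions, N=100 samples
    pca_demo = Pca(demo_data, names=['x', 'y', 'z'])
    print(pca_demo.getEnergies())             # fraction of variance per component
    print(pca_demo.project(nPCs=2).shape)     # (100, 2): data projected on 2 PCs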
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/groupby/test_index_as_string.py | 3 | 2023 | import numpy as np
import pytest
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
@pytest.fixture(params=[['inner'], ['inner', 'outer']])
def frame(request):
levels = request.param
df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
'inner': [1, 2, 3, 1, 2, 3],
'A': np.arange(6),
'B': ['one', 'one', 'two', 'two', 'one', 'one']})
if levels:
df = df.set_index(levels)
return df
@pytest.fixture()
def series():
df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
'inner': [1, 2, 3, 1, 2, 3],
'A': np.arange(6),
'B': ['one', 'one', 'two', 'two', 'one', 'one']})
s = df.set_index(['outer', 'inner', 'B'])['A']
return s
@pytest.mark.parametrize('key_strs,groupers', [
('inner', # Index name
pd.Grouper(level='inner')
),
(['inner'], # List of index name
[pd.Grouper(level='inner')]
),
(['B', 'inner'], # Column and index
['B', pd.Grouper(level='inner')]
),
(['inner', 'B'], # Index and column
[pd.Grouper(level='inner'), 'B'])])
def test_grouper_index_level_as_string(frame, key_strs, groupers):
result = frame.groupby(key_strs).mean()
expected = frame.groupby(groupers).mean()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('levels', [
'inner', 'outer', 'B',
['inner'], ['outer'], ['B'],
['inner', 'outer'], ['outer', 'inner'],
['inner', 'outer', 'B'], ['B', 'outer', 'inner']
])
def test_grouper_index_level_as_string_series(series, levels):
# Compute expected result
if isinstance(levels, list):
groupers = [pd.Grouper(level=lv) for lv in levels]
else:
groupers = pd.Grouper(level=levels)
expected = series.groupby(groupers).mean()
# Compute and check result
result = series.groupby(levels).mean()
assert_series_equal(result, expected)
| bsd-3-clause |
dzbarsky/servo | tests/heartbeats/process_logs.py | 42 | 15777 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # set time in ms instead of ns
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
    Return: (profiler name, [start times], [end times], [start energies], [end energies])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
results -- the results from process_logs(...).
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
jniediek/mne-python | examples/simulation/plot_simulate_raw_data.py | 14 | 2711 | """
===========================
Generate simulated raw data
===========================
This example generates raw data by repeating a desired source
activation multiple times.
"""
# Authors: Yousra Bekhti <[email protected]>
# Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import read_source_spaces, find_events, Epochs, compute_covariance
from mne.datasets import sample
from mne.simulation import simulate_sparse_stc, simulate_raw
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
src_fname = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
bem_fname = (data_path +
'/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif')
# Load real data as the template
raw = mne.io.read_raw_fif(raw_fname)
raw = raw.crop(0., 30.) # 30 sec is enough
##############################################################################
# Generate dipole time series
n_dipoles = 4 # number of dipoles to create
epoch_duration = 2. # duration of each epoch/event
n = 0 # harmonic number
def data_fun(times):
"""Generate time-staggered sinusoids at harmonics of 10Hz"""
global n
n_samp = len(times)
window = np.zeros(n_samp)
start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
for ii in (2 * n, 2 * n + 1)]
window[start:stop] = 1.
n += 1
data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
data *= window
return data
times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
src = read_source_spaces(src_fname)
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
data_fun=data_fun, random_state=0)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
fig.show()
##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw, stc, trans_fname, src, bem_fname, cov='simple',
iir_filter=[0.2, -0.2, 0.04], ecg=True, blink=True,
n_jobs=1, verbose=True)
raw_sim.plot()
##############################################################################
# Plot evoked data
events = find_events(raw_sim) # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, -0.2, epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical') # quick calc
evoked = epochs.average()
evoked.plot_white(cov)
| bsd-3-clause |
SKA-ScienceDataProcessor/algorithm-reference-library | deprecated_code/workflows/mpi/reduction_test_mpi.py | 1 | 3444 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Testing MPI reductions - simple version (two reductions)
# In[1]:
import logging
import numpy
from mpi4py import MPI
import astropy.units as u
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from processing_library.image.operations import create_empty_image_like
from processing_library.image.operations import create_image_from_array
#from processing_components.image.operations import qa_image, show_image, export_image_to_fits
from processing_components.image.operations import qa_image
from processing_components.simulation.testing_support import create_test_image
from processing_components.image.gather_scatter import image_gather_facets, image_scatter_facets
from workflows.shared.imaging.imaging_shared import sum_invert_results_local, remove_sumwt
#from matplotlib import pyplot as plt
#from matplotlib import pylab
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'rainbow'
# In[2]:
# Define a simple function to take the square root of an image
def imagerooter(image_list) -> list():
new_image_list = []
for im in image_list:
newim = create_empty_image_like(im)
newim.data = numpy.sqrt(numpy.abs(im.data))
new_image_list.append(newim)
return new_image_list
# In[3]:
# Set up MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
#recvdata = numpy.zeros((2,size),dtype='i')
#recvdata = numpy.zeros((2,size),dtype=numpy.int)
#senddata = numpy.array([(rank+1)*numpy.arange(size,dtype='i'),(rank+1)*numpy.arange(size,dtype='i')])
#senddata = numpy.array([(rank+1)*numpy.arange(size,dtype=numpy.int),(rank+1)*numpy.arange(size,dtype=numpy.int)])
#print('%d:before Reduce: send data = '%rank)
#print(senddata)
#op_sum = MPI.Op.Create(fn_sum, commute=True)
# I have to create a datatype ???
#recvdata=comm.reduce(senddata,root=0,op=op_sum)
#print('%d:after Reduce: data = '%rank)
#print(recvdata)
# Create data
frequency = numpy.array([1e8])
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
model = create_test_image(frequency=frequency, phasecentre=phasecentre, cellsize=0.001,
polarisation_frame=PolarisationFrame('stokesI'))
#print(model)
nchan, npol, ny, nx = model.data.shape
sumwt = numpy.ones([nchan, npol])
print('%d:before Reduce: data = '%rank)
print(sumwt)
#f=show_image(model, title='Model image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(model, context='Model image'))
#plt.show()
# In[5]:
# Accum images into one with weights
result_image = create_empty_image_like(model)
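# MPI.SUM combines model.data element-wise across all ranks; only the root
# rank (0) receives the summed array in result_image.data.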
comm.Reduce(model.data,result_image.data,root=0,op=MPI.SUM)
#f=show_image(result_image, title='Result image', cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
if rank==0:
print('%d:after Reduce: data = '%rank)
print(qa_image(result_image,context='Result image'))
# test correctness
assert(result_image.data.shape==model.data.shape)
numpy.testing.assert_array_almost_equal_nulp(result_image.data,
(model.data)*size, 7)
# In[6]:
result_sumwt = numpy.zeros([nchan, npol])
comm.Reduce(sumwt,result_sumwt,root=0,op=MPI.SUM)
if rank==0:
print(result_sumwt)
numpy.testing.assert_array_almost_equal_nulp(result_sumwt,sumwt*size,7)
# In[ ]:
| apache-2.0 |
ColumbiaCMB/kid_readout | apps/data_taking_scripts/old_scripts/fast_noise_sweep_downstairs_ybco.py | 1 | 1994 | import matplotlib
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file,sweeps
ri = baseband.RoachBaseband()
#ri.initialize(use_config=False)
#f0s = np.load('/home/gjones/workspace/apps/f8_fit_resonances.npy')
#f0s = np.load('/home/gjones/workspace/apps/first_pass_sc3x3_0813f9.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f10_first_pass.npy')#[:4]
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f9_2014-02-11.npy')
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f5_2014-02-27.npy')
f0s = np.array([157.315, 201.49])
f0s.sort()
#f0s = f0s*(1-4e-5)
nf = len(f0s)
atonce = 2
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
offsets = np.linspace(-4882.8125,4638.671875,20)#[5:15]
#offsets = np.concatenate(([-40e3,-20e3],offsets,[20e3,40e3]))/1e6
offsets = np.concatenate(([-40e3],offsets,[40e3]))/1e6
offsets = offsets*4
print f0s
print len(f0s)
start = time.time()
nsamp = 2**18
step = 1
nstep = 100
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=2**18)
print "loaded waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
n =0
atten_list = [36]#np.linspace(27,34,8)#[30]#[35.5,33.5,46.5,43.5,40.5,37.5]
for atten in atten_list:
df = data_file.DataFile()
ri.set_dac_attenuator(atten)
while True:
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=2)
df.add_sweep(sweep_data)
df.sync()
df.log_hw_state(ri)
df.nc.sync()
n = n + 1
print "done measurement", n
time.sleep(60)
df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
| bsd-2-clause |
ishanic/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
szedlakmate/3D-truss-model-updating | truss.py | 1 | 227350 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 18:49:40 2016
3D truss model updater program created by Máté Szedlák (2016, 2017).
Copyright GNU GPL v3.0, Máté Szedlák 2016, 2017.
"""
### Code for "Processing" only
#size(640, 360)
#background(126)
import math
import itertools
from copy import deepcopy
# COMPATIBILITY MODES:
# 0: User defined
# 1: Processing3
# 2: Android
# 3: Most information (with numpy)
# 4: Maximum compatibility
_COMPATIBLE_MODE = 0
_SIMULATION = 1 # Simulating measurements based on input file
PORTS = ['COM1', 'COM2', 'COM3'] # List of possible communication ports
PORTNUMBER = 0 # Applied communication port
if _COMPATIBLE_MODE == 00:
### User defined ###
# Modify as needed #
_MODE_NAME = "User defined"
_LOG = 1 # Logging time
_GRAPHICS = 1 # Graphical features
_SOLVER = 1 # 0: Basic solver, 1: NumPy solver
_OSLIBRARYAVAILABLE = 1 # Basic OS file features (e.g. file size)
_UPDATING = 1 # Model Updating: On/ Off
_ARDUINO = 1 # Arduino input: On/Off
_DEBUG = 1 # Debugging mode
_REALISTICSIMULATION = 0 # Wait as long as it was originally. Only valid with _SIMULATION = 1
##############################################
### DO NOT MODIFY ### #
#
elif _COMPATIBLE_MODE == 01: #
### "Processing 3" mode ### #
_MODE_NAME = "Processing 3" #
_LOG = 1*1 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 0*0 #
_UPDATING = 1*1 #
_ARDUINO = 1*1 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
elif _COMPATIBLE_MODE == 02: #
### Android mode ### #
# DO NOT MODIFY #
_MODE_NAME = "Android" #
_LOG = 1*1 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 1*1 #
_UPDATING = 0*0 #
_ARDUINO = 0*0 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
elif _COMPATIBLE_MODE == 03: #
### Informative ### #
# DO NOT MODIFY #
_MODE_NAME = "Informative mode" #
_LOG = 1*1 #
_GRAPHICS = 1*1 #
_SOLVER = 1*1 #
_OSLIBRARYAVAILABLE = 1*1 #
_UPDATING = 1*1 #
_ARDUINO = 1*1 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
else: #
### Maximum compatibility ### #
# DO NOT MODIFY #
_MODE_NAME = "Maximum compatibility" #
_LOG = 0*0 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 0*0 #
_UPDATING = 0*0 #
_ARDUINO = 0*0 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
### DO NOT MODIFY ### #
##############################################
if _OSLIBRARYAVAILABLE:
import os
if _SIMULATION or not _UPDATING:
_ARDUINO = 0
if _COMPATIBLE_MODE == 2:
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if _LOG:
import time
import datetime
TIC = time.time()
print '------------------------------------'
print 'Truss calculational program'
print 'Created by Máté Szedlák (23/11/2016)'
print 'Compatibility mode: ' + _MODE_NAME
if _SOLVER == 0:
print '- Solver is set to default'
elif _SOLVER == 1:
print '- Solver is set to NumPy'
else:
raise Exception("Solver settings are invalid!")
if _UPDATING:
print '+ Model updating is turned ON'
if _SIMULATION:
print 'Input data is SIMULATED!'
else:
print '- Model updating is turned OFF'
print '------------------------------------'
if _ARDUINO:
SER = 0
try:
import serial
except ImportError:
print "You tried to import \'serial\' in Windows mode without installing \'pySerial\'."
print "Please first install pySerial: http://playground.arduino.cc/Interfacing/Python"
raise Exception('Android mode denied: pyserial not found')
PORTNUMBER -= 1
while SER == 0:
PORTNUMBER += 1
if PORTNUMBER >= len(PORTS):
PORTNUMBER = 0
time.sleep(0.6)
print 'Opening serial at port ' + str(PORTS[PORTNUMBER])
try:
SER.close()
except Exception:
pass
try:
SER = serial.Serial(PORTS[PORTNUMBER], 9600, timeout=0)
except serial.SerialException:
Exception(PORTS[PORTNUMBER] + ' port is busy. It might be occupied by this program or another one :/ Be careful or try resetting this program')
SER = 0
try:
SER.close()
except Exception:
pass
except Exception:
SER = 0
try:
SER.close()
except Exception:
pass
#if _ARDUINO or _SIMULATION:
# try:
# mappingfile = 'arduino_mapping.txt'
# with open(mappingfile, "r") as textfile:
# line = textfile.readline().strip()
# arduino_mapping = line.upper().split(',')
#
# except IOError:
# raise Exception('File not found: ' + mappingfile)
if _SOLVER:
# NumPy library for solving linear equations in another way
import numpy as np
if _GRAPHICS:
# libraries for drawing
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
# From here:
def mat_vec_mult(mat_a, vec_b):
"""
Multiplying matrix with a vector, giving the result as a vector
Source:
https://stackoverflow.com/questions/10508021/matrix-multiplication-in-python
"""
vec_c = [0.]*len(mat_a)
for i, row in enumerate(mat_a):
for j, elem in enumerate(vec_b):
vec_c[i] += row[j]*elem
return vec_c
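# Example: mat_vec_mult([[1, 2], [3, 4]], [1, 1]) returns [3.0, 7.0]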
def invert(mat_x):
"""
Invert a matrix X using Gauss-Jordan elimination
In Gauss-Jordan elimination, we perform basic row operations to turn a matrix into
reduced row-echelon form. If we concatenate an identity matrix to our input
matrix during this process, we will turn the identity matrix into our inverse.
X - input list of lists where each list is a matrix row
output - inverse of X
Source:
http://www.vikparuchuri.com/blog/inverting-your-very-own-matrix/
"""
#copy X to avoid altering input
mat_x = deepcopy(mat_x)
#Get dimensions of X
rows = len(mat_x)
cols = len(mat_x[0])
#Get the identity matrix and append it to the right of mat_x
#This is done because our row operations will make the identity into the inverse
identity = make_identity(rows, cols)
for i in xrange(0, rows):
mat_x[i] += identity[i]
i = 0
for j in xrange(0, cols):
#print("On col {0} and row {1}".format(j, i))
#Check to see if there are any nonzero values below the current row in the current column
zero_sum, first_non_zero = check_for_all_zeros(mat_x, i, j)
#If everything is zero, increment the columns
if zero_sum == 0:
if j == cols:
return mat_x
raise Exception("Matrix is singular")
#If mat_x[i][j] is 0, and there is a nonzero value below it, swap the two rows
if first_non_zero != i:
mat_x = swap_row(mat_x, i, first_non_zero)
#Divide mat_x[i] by mat_x[i][j] to make mat_x[i][j] equal 1
mat_x[i] = [m/mat_x[i][j] for m in mat_x[i]]
#Rescale all other rows to make their values 0 below mat_x[i][j]
for k in xrange(0, rows):
if k != i:
scaled_row = [mat_x[k][j] * m for m in mat_x[i]]
mat_x[k] = [mat_x[k][m] - scaled_row[m] for m in xrange(0, len(scaled_row))]
#If either of these is true, we have iterated through the matrix, and are done
if i == rows or j == cols:
break
i += 1
#Get just the right hand matrix, which is now our inverse
for i in xrange(0, rows):
mat_x[i] = mat_x[i][cols:len(mat_x[i])]
return mat_x
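# Illustrative example (hypothetical values, not executed here because the
# helpers used by invert() are defined further below): for the 2x2 matrix
# [[4., 7.], [2., 6.]] the determinant is 4*6 - 7*2 = 10, so
# invert([[4., 7.], [2., 6.]]) should return approximately
# [[0.6, -0.7], [-0.2, 0.4]].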
def check_for_all_zeros(mat_x, i, j):
"""
Check matrix mat_x to see if only zeros exist at or below row i in column j
mat_x - a list of lists
i - row index
j - column index
returns -
zero_sum - the count of nonzero entries at or below row i
first_non_zero - row index of the first nonzero value (-1 if there is none)
"""
non_zeros = []
first_non_zero = -1
for k in xrange(i, len(mat_x)):
non_zero = mat_x[k][j] != 0
non_zeros.append(non_zero)
if first_non_zero == -1 and non_zero:
first_non_zero = k
zero_sum = sum(non_zeros)
return zero_sum, first_non_zero
def swap_row(mat_x, i, j):
"""
Swap row i and row j in a list of lists
mat_x - list of lists
i - row index
j - row index
returns- modified matrix
"""
mat_x[j], mat_x[i] = mat_x[i], mat_x[j]
return mat_x
def swap_col(mat_x, i, j):
"""
Swap column i and column j in a list of lists
mat_x - list of lists
i - column index
j - column index
returns- modified matrix
"""
for item in mat_x:
item[i], item[j] = item[j], item[i]
return mat_x
def make_identity(row_num, col_num):
"""
Make an identity matrix with dimensions rxc
row_num - number of rows
col_num - number of columns
returns - list of lists corresponding to the identity matrix
"""
identity = []
for i in xrange(0, row_num):
row = []
for j in xrange(0, col_num):
elem = 0
if i == j:
elem = 1
row.append(elem)
identity.append(row)
return identity
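# Hedged sanity check (added for illustration, not part of the original program):
assert make_identity(3, 3) == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]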
if _GRAPHICS:
class Arrow3D(FancyArrowPatch):
"""
Vector drawer module from the internet
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
_xs3d, _ys3d, _zs3d = self._verts3d
_xs, _ys, _zs = proj3d.proj_transform(_xs3d, _ys3d, _zs3d, renderer.M)
self.set_positions((_xs[0], _ys[0]), (_xs[1], _ys[1]))
FancyArrowPatch.draw(self, renderer)
def plotstructure(struct, showorig, showresult, showsupports, \
showforces, showreactions, scaledisp, scale_f, z_corr, showvalues, saveplot):
"""
General plotting method for structures
scaledisp: Scale drawing of displacements
scale_f: Scale force sign
z_corr: Scale z-axis
"""
plotname = struct.name
plot_width = 18.0 # Plot width in inches
xframe = 0 # Frame width at X direction
yframe = 0 # Frame width at Y direction
zframe = 0 # Frame width at Z direction
scale_sup = scale_f*0.3 # Scale support sign # All the others are input parameters
# Stress coloring settings [R G B] - Examples considering pressure:
# 0: [1, 0, 0] Plain red
# 1: [x, 0, 0] Red to Black
# 2: [1, 1-x, 0-x] Red to White
# 3: [1, (1-x)/2, (1-x)/2] Red to MildRed - Distinguishes pressure and tension
# 4: [x, 1-x, 0] Red to Green
_coloring = 3 # in {0, 1, 2, 3, 4}
fig = plt.figure()
_ax = fig.add_subplot(111, projection='3d')
if struct.dof == 2:
_ax.view_init(elev=90., azim=-90.)
_ax.w_zaxis.line.set_lw(0.)
_ax.set_zticklabels([])
xmin = min(list(struct.nodalcoord[x][0] for x in range(struct.nodenum)))
xmax = max(list(struct.nodalcoord[x][0] for x in range(struct.nodenum)))
ymin = min(list(struct.nodalcoord[x][1] for x in range(struct.nodenum)))
ymax = max(list(struct.nodalcoord[x][1] for x in range(struct.nodenum)))
zmin = min(list(struct.nodalcoord[x][2] for x in range(struct.nodenum)))
zmax = max(list(struct.nodalcoord[x][2] for x in range(struct.nodenum)))
deltax = xmax - xmin
deltay = ymax - ymin
xframe = max(deltax * 0.05, 2)
yframe = max(deltay * 1.5, 2)
if struct.dof == 3:
plot_height = plot_width * ((deltay + yframe*2)/(deltax + xframe*2)) *0.3
else:
plot_height = plot_width * 0.5
fig.set_size_inches(plot_width, plot_height)
_ax.set_xlim3d(xmin - xframe, xmax + xframe)
_ax.set_ylim3d(ymin - yframe, ymax + yframe)
_ax.set_zlim3d(zmin - zframe, zmax + zframe)
if showorig == showresult:
_coloring = 0
# Giving plot names
if showorig == 1 and showresult == 0 and showsupports == 1 and showreactions == 0:
plotname += ' - Initial structure'
if showforces:
plotname += ' with forces'
elif showorig == 1 and showresult == 1:
plotname += ' - Deformation'
if showreactions == 0:
plotname += ' with reactions'
elif showorig == 0 and showresult == 1:
plotname += ' - Stresses'
if showreactions == 0:
plotname += ' with reactions'
else:
plotname += ' - Unnamed'
print plotname + ": "
if showresult:
dipslaydisplacement = deepcopy(struct.nodalcoord_def)
if scaledisp != 1.0:
if _LOG:
print 'Displacements are scaled with factor: ' + str(scaledisp)
for i in range(struct.nodenum):
for j in range(3):
dipslaydisplacement[i][j] = (struct.nodalcoord_def[i][j] -\
struct.nodalcoord[i][j]) * scaledisp + struct.nodalcoord[i][j]
for i in range(struct.elenum):
# Plot undeformed structure
if showorig:
_ax.plot([struct.nodalcoord[struct.node[i][1]][0], struct.nodalcoord[struct.node[i][0]][0]], \
[struct.nodalcoord[struct.node[i][1]][1], struct.nodalcoord[struct.node[i][0]][1]], \
zs=[struct.nodalcoord[struct.node[i][1]][2], struct.nodalcoord[struct.node[i][0]][2]], color='b')
# Plot deformed structure
if showresult:
if struct.postprocessed():
if struct.stresscolor[i] > 0:
if _coloring == 1:
rgb_col = [0, 0, abs(struct.stresscolor[i])]
elif _coloring == 2:
rgb_col = [1-abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i]), 1]
elif _coloring == 3:
rgb_col = [(1-abs(struct.stresscolor[i]))/2, \
(1-abs(struct.stresscolor[i]))/2, 1]
elif _coloring == 4:
rgb_col = [0, 1-abs(struct.stresscolor[i]), \
abs(struct.stresscolor[i])]
else:
rgb_col = [1, 0, 0]
else:
if _coloring == 1:
rgb_col = [abs(struct.stresscolor[i]), 0, 0]
elif _coloring == 2:
rgb_col = [1, 1-abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i])]
elif _coloring == 3:
rgb_col = [1, (1-abs(struct.stresscolor[i]))/2, \
(1-abs(struct.stresscolor[i]))/2]
elif _coloring == 4:
rgb_col = [abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i]), 0]
else:
rgb_col = [1, 0, 0]
else:
print 'Stresses are not calculated'
rgb_col = [1, 0, 0]
_ax.plot([dipslaydisplacement[struct.node[i][1]][0], dipslaydisplacement[struct.node[i][0]][0]], \
[dipslaydisplacement[struct.node[i][1]][1], dipslaydisplacement[struct.node[i][0]][1]], \
zs=[dipslaydisplacement[struct.node[i][1]][2], dipslaydisplacement[struct.node[i][0]][2]], color=rgb_col)
if showforces:
for i in struct.known_f_notzero:
if struct.force[i] < 0:
value = -1.0
else:
value = 1.0
if i % 3 == 0:
f_dir = [value*scale_f, 0., 0.]
elif i % 3 == 1:
f_dir = [0., value*scale_f, 0.]
else:
f_dir = [0., 0., value*scale_f*z_corr]
f_arrow = Arrow3D([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0] + f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1] + f_dir[1]], \
[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2] + f_dir[2]], \
mutation_scale=20, lw=1, arrowstyle="-|>", color="k")
_ax.add_artist(f_arrow)
if showreactions:
e_previous = -100
for i in struct.known_dis_a:
value = 0.0 # Maybe this is useless <XXX>
if struct.force[i] < 0:
value = -1.0
elif struct.force[i] > 0:
value = 1.0
if i % 3 == 0:
f_dir = [value*scale_f, 0., 0.]
elif i % 3 == 1:
f_dir = [0., value*scale_f, 0.]
else:
f_dir = [0., 0., value*scale_f*z_corr]
if abs(struct.force[i]) > 0:
f_arrow = Arrow3D([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0] + f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1] + f_dir[1]], \
[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2] + f_dir[2]], \
mutation_scale=20, lw=1, arrowstyle="-|>", color="darkolivegreen")
_ax.add_artist(f_arrow)
if showvalues:
_ax.set_xticklabels([])
_ax.set_yticklabels([])
_ax.set_zticklabels([])
if not i//3 == e_previous//3:
if struct.dof == 3:
_ax.text(struct.nodalcoord[i//3][0], \
struct.nodalcoord[i//3][1], \
struct.nodalcoord[i//3][2], \
"{:10.2f}".format(struct.force[(i//3)*3+0])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+1])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+2]),\
fontsize=12, horizontalalignment='right')
elif struct.dof == 2:
_ax.text(struct.nodalcoord[i//3][0], \
struct.nodalcoord[i//3][1], \
struct.nodalcoord[i//3][2], \
"{:10.2f}".format(struct.force[(i//3)*3+0])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+1]),\
fontsize=12, horizontalalignment='right')
e_previous = i
if showsupports:
for i in struct.known_dis_a:
if i % 3 == 0:
f_dir = [-1.0 * scale_sup, 0., 0.]
col = 'g'
elif i % 3 == 1:
f_dir = [0., -1.0 * scale_sup, 0.]
col = 'y'
else:
f_dir = [0., 0., -1.0 * scale_sup * z_corr]
col = 'brown'
if i % 3 != 2 or struct.dof == 3:
_ax.plot([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0]+f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1]+f_dir[1]], \
zs=[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2]+f_dir[2]], \
color=col, linewidth=4.0)
plt.show()
if saveplot:
fig.savefig(plotname + '.png')
print '\'' + plotname +'.png\' is saved.'
print '------------------------------------'
return
def endoffile(givenfile, line):
"""
Returns True as long as the end of the given file has not been reached. Implemented this way for compatibility reasons.
"""
if _OSLIBRARYAVAILABLE:
return givenfile.tell() < os.fstat(givenfile.fileno()).st_size
else:
return not line == "EOF"
def logtime(prev_time, title):
"""
Calculates and prints the time consumed by a task.
Should be called with the previously saved part-time and the name of the current task.
At the first call it should be given the TIC value. The input argument
should be overwritten by this function's return value.
"""
if _LOG:
new_time = time.time()
print title
print 'Time: ' + str("{:10.3f}".format(new_time - prev_time))
print '------------------------------------'
return new_time
else:
return 0
def error(delta):
"""
Error function using the least-squares method (Euclidean norm of delta)
"""
sumerr = 0
for deltaelement in delta:
sumerr += deltaelement**2
sumerr = math.sqrt(sumerr)
return sumerr
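# Illustrative example (hypothetical values, not part of the original program):
# error([3., 4.]) returns 5.0, the Euclidean norm sqrt(3**2 + 4**2) of the
# difference vector.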
class Truss(object):
"""
General structure class
"""
def __init__(self, name):
self.name = name # Name of structure
self.known_f_a = [] # Nodes without supports
self.known_f_notzero = [] # Nodes with loads
self.dof = 3 # Truss's degree of freedom
self.node = [] # Element's end nodes
self.constraint = [] # Supports
self.force = [] # Force
self.nodalcoord = [] # Coordinates of nodes
self.nodalcoord_def = [] # Coordinates after deformations
self.area = [] # Cross-sectional areas
self.el_mod = [] # Material data
self.nodenum = 0 # Number of nodes
self.elenum = 0 # Number of elements
self.eledof = [] # Mapping between DOF and node
self.stiffness = [] # Global stiffness matrix
self.mod_stiffnesses = [] # Modified stiffnesses in a hyper-matrix
self.ele_length = [] # Length of the elements
self._norm_stiff = [] # E/L
self._cx = []
self._cy = []
self._cz = []
self._s_loc = []
self._loc_stiff = [] # Local stiffness matrix
self.dis_new = []
self.force_new = []
self.stiff_new = []
self.displacement = [] # Relative displacements
self._stiffisfresh = 0
self._postprocessed = 0
self.init_disp = []
self.stresscolor = [] # Color mapping for stresses
self.known_dis_a = []
self.stress = [] # Element's stresses
self._io_origin = 0 # Array's first element number during IO. Default is 0.
self.analysis = {}
self._mod_stiffisfresh = 0
self.mod_displacements = []
self.keypoint = []
self.keypnum = 0
self.effect = []
self.toteffect = []
self.sortedeff = []
self.specdof_inputstring = ''
self.tresshold = 0.1
self.effectratio = []
self.processeddata = [] # To store last input line
self.modifications = [] # Storing modifications for model updating
self.readelements = [0]*9
self.arduino_mapping = []
self.errorlimit = 0.5
self.modificationlimit = 0.6
self.unitmodification = 0.05
self.measurement = [0.]
self.numofupdates = [0, 0, 0] # [#Successfully updated model, #Updates with overflow exit, #Updates where there were no more modification option]
self.iterationlimit = 20
def read(self, filename):
"""
Input file for TRUSS.py program
All commands must be written with uppercase characters
*** The values MUST be written in the line directly following the command
Only lines containing the command and nothing else count.
Everything else will be neglected. Even hashtags are useless :)
The order of the commands is indifferent.
Commands and their format (example):
DOF - Degree of freedom: 3
ELEMENTS - Elements given by end-nodes: 0, 1 | 0, 2 ...
COORDINATES - Nodal coordinates: 0., 0., 0., | 0., 3., 0. ...
CROSS-SECTIONS - This data will be evaluated in Python: 3.0*(10**(-4)), 5.0*(10**(-4)) ...
MATERIALS - This data will be evaluated in Python: 70.0*(10**9), 100.0*(10**9) ...
FORCES - Selected DOF + Force: 11, +1000000.0 | 12, +1000000.0 ...
SUPPORTS - Selected DOF + Prescribed displacement: 0, 0.0 | 1, 0.0 ...
SPECDOF - Selected node's DOF will be analysed during Model Updating: 1, xyz | 3 y | 10 xz ...
EOF - For compatibility reasons EOF should be placed after the commands
"""
self._io_origin = 0
readelementnames = ["Origin", "DOF", "Elements", "Coordinates", "Cross-sections", "Materials", "Forces", "Supports", "Measured DOFs"]
with open(filename, "r") as sourcefile:
sourceline = ""
while endoffile(sourcefile, sourceline):
sourceline = sourcefile.readline().strip()
if sourceline.upper() == "_ORIGIN":
sourceline = sourcefile.readline().strip()
self._io_origin = int(sourceline)
self.readelements[0] = 1
if sourceline.upper() == "DOF":
sourceline = sourcefile.readline().strip()
self.setdof(int(sourceline))
self.readelements[1] = 1
if sourceline.upper() == "ELEMENTS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, int(x[1]) - self._io_origin] for x in inpstr]
self.setelements(inpnum)
self.readelements[2] = 1
if sourceline.upper() == "COORDINATES":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
if self.dof == 3:
inpnum = [[float(x[0]), float(x[1]), float(x[2])] for x in inpstr]
elif self.dof == 2:
inpnum = [[float(x[0]), float(x[1]), 0.] for x in inpstr]
self.setcoordinates(inpnum)
self.readelements[3] = 1
if sourceline.upper() == "CROSS-SECTIONS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = sourceline.split(',')
if len(inpstr) == 1:
inpstr = sourceline.split(';')
if '' in inpstr:
inpstr.remove('')
inpnum = [float(eval(x)) for x in inpstr]
self.setcrosssections(inpnum)
self.readelements[4] = 1
if sourceline.upper() == "MATERIALS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = sourceline.split(',')
if len(inpstr) == 1:
inpstr = sourceline.split(';')
if '' in inpstr:
inpstr.remove('')
inpnum = [float(eval(x)) for x in inpstr]
self.setmaterials(inpnum)
self.readelements[5] = 1
if sourceline.upper() == "FORCES":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, float(x[1])] for x in inpstr]
self.setforces(sorted(inpnum))
self.readelements[6] = 1
if sourceline.upper() == "SUPPORTS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, float(x[1])] for x in inpstr]
self.setsupports(sorted(inpnum))
self.readelements[7] = 1
if sourceline.upper() == "MEASUREMENTS":
sourceline = sourcefile.readline().strip()
self.specdof_inputstring = sourceline
inpstr = []
self.arduino_mapping = sourceline.split(',')
self.setspecdofs(self.arduino_mapping)
self.readelements[8] = 1
terminate = False
for i, value in enumerate(self.readelements):
if i > 0 and (i < 8 or _UPDATING):
#if i > 0:
if value == 0:
print "The following was not found: " + readelementnames[i]
terminate = True
if terminate:
raise Exception
def plot(self, showorig, showresult, showsupports, showforces, \
showreactions, scaledisplacement, scaleforce, scalez, saveplot):
"""
Plot function of the Truss class
This method calls the more general plotstructure() method.
Plot settings:
O: Original D: Deformed S: Supports F: Forces R: Reactions
ScD: Scale displacements (Z-axis) (def:1.0) ScF: Scale forces (def:1.0)
ScS: Scale Support signs (Z-axis) (def:1.0)
Save: Save plot to file
plot(O, D, S, F, R, ScD, ScF, ScS, Save)
"""
_showvalues = 1 # Show values of forces
if self._postprocessed == 0:
print 'Postprocess is needed before plotting structure!'
else:
if scaledisplacement == 0:
scaledisplacement = 1.0 # Scale drawing of displacements
if scaleforce == 0:
scaleforce = 1.0 # Scale force sign
if scalez == 0:
scalez = 0.3 # Scale z-axis
plotstructure(self, showorig, showresult, showsupports, showforces, showreactions, \
scaledisplacement, scaleforce, scalez, _showvalues, saveplot)
def __checkcoordinates(self, ignorable):
"""
Checking coordinates for repeating elements.
ignorable: [True | False] If the warning is ignorable, only a message appears and the input is neglected.
If the warning is not ignorable, an exception is raised.
return: [0 | 1] 1 if no error is found, otherwise 0.
"""
if len(self.nodalcoord) != len(list(k for k, _ in itertools.groupby(sorted(self.nodalcoord)))):
if ignorable == 0:
raise Exception('Coordinate list has repeating items. Calculation is terminated')
else:
print "This node already exists. Input is ignored."
return 0
else:
return 1
def setdof(self, dof):
"""
Setting problem's degree of freedom
dof: [2 | 3] Model's Degree Of Freedom.
"""
self.dof = dof
if self.dof != 2 and self.dof != 3:
raise Exception('DOF must be 2 or 3.')
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setelements(self, node):
"""
Setting elements (nodal connections) in bulk mode
"""
self.node = node
self.nodenum = len(set(list(itertools.chain.from_iterable(sorted(self.node)))))
self.elenum = len(self.node)
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
# Creating mapping tool for elements
for node in self.node:
self.eledof.append([node[0]*3, node[0]*3+1, node[0]*3+2, \
node[1]*3, node[1]*3+1, node[1]*3+2])
# Initializing the displacement, force and stiffness containers
self.init_disp = [0.]*(3*self.nodenum)
self.force = [0.]*(3*self.nodenum)
self.stiffness = [0.]*(3*self.nodenum)
self.known_f_a = []
self.known_f_notzero = []
def setcoordinates(self, coordinates):
"""
Setting coordinates in bulk mode
"""
self.nodalcoord = coordinates
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
if self.nodenum > len(self.nodalcoord):
raise Exception('More coordinates are needed')
elif self.node == []:
raise Exception('Elements must be set before defining coordinates')
self.__checkcoordinates(False)
def modcoordinate(self, node, coordinate):
"""
Modify coordinate
"""
if self.__checkcoordinates(True):
self.nodalcoord[node] = coordinate
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
def setcrosssections(self, area):
"""
Setting cross-sections in bulk mode
"""
self.area = area
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def modcrosssection(self, element, area):
"""
Modifying cross-sections by elements
"""
self.area[element] = area
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setmaterials(self, el_mod):
"""
Setting material data in bulk mode
"""
self.el_mod = el_mod
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def modmaterial(self, element, el_mod):
"""
Modifying material data by elements
"""
self.el_mod[element] = el_mod
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setforces(self, forces):
"""
Set forces
"""
for fdof, force in forces:
if self.dof == 3:
self.force[fdof] = force
elif self.dof == 2:
self.force[fdof + (fdof//2)] = force
self._postprocessed = 0
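# Note on the 2D index mapping above (added explanation based on the code as
# written): internally every node owns 3 DOFs (x, y, z). A 2D input DOF 'fdof'
# counts 2 DOFs per node, so node = fdof // 2 and the internal index becomes
# 3*node + (fdof % 2) = fdof + fdof // 2; e.g. input DOF 5 (node 2, y) maps to
# internal DOF 7.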
def modforce(self, element, force):
"""
Modify a single force component
"""
self.force[element] = force
self._postprocessed = 0
def setsupports(self, constraints):
"""
Set supports
"""
for cdof, constraint in constraints:
if self.dof == 3:
self.constraint.append([cdof, constraint])
elif self.dof == 2:
self.constraint.append([cdof + (cdof // 2), constraint])
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setspecdofs(self, specdofs):
"""
Set special nodal DOFs
"""
self.analysis = {}
for dofname in specdofs:
node = int(dofname[:len(dofname)-1])-self._io_origin
if 'X' in dofname:
self.analysis[dofname] = node*3+0
self.keypoint.append(node*3+0)
if 'Y' in dofname:
self.analysis[dofname] = node*3+1
self.keypoint.append(node*3+1)
if 'Z' in dofname:
if self.dof == 3:
self.analysis[dofname] = node*3+2
self.keypoint.append(node*3+2)
else:
print "Z-direction is not allowed in 2D structures. Please check the \'MEASUREMENTS\' section in the input file."
raise Exception
self.keypnum = len(self.analysis)
if self.keypnum == 0 and _UPDATING:
print "There is no valid measured DOF. Please check the \'MEASUREMENTS\' section in the input file."
raise Exception
def calcstiffness(self):
"""
Stiffness matrix compilation
"""
self._postprocessed = 0
if self.dof == 2:
for zdof in range(self.nodenum):
self.constraint.append([int(zdof*3+2), 0.])
self.constraint = list(k for k, _ in itertools.groupby(sorted(self.constraint)))
#Setting known forces
for dofloc in range(3*self.nodenum):
self.known_f_a.append(dofloc)
if self.force[dofloc] != 0:
self.known_f_notzero.append(dofloc)
self.known_dis_a = []
for constr in self.constraint:
self.init_disp[constr[0]] = constr[1]
self.known_dis_a.append(constr[0])
try:
self.known_f_a.remove(constr[0])
self.known_f_notzero.remove(constr[0])
except ValueError:
pass
ele_length = [0.]*self.elenum
self._norm_stiff = [0.]*self.elenum
self._cx = [0.]*self.elenum
self._cy = [0.]*self.elenum
self._cz = [0.]*self.elenum
self._s_loc = [0.]*self.elenum
self._loc_stiff = [0.]*self.elenum
self.stress = [0.]*self.elenum
self.stiffness = [[0.]*(len(self.nodalcoord)*3)]*(len(self.nodalcoord)*3)
for i in range(self.elenum):
ele_length[i] = math.sqrt((self.nodalcoord[self.node[i][1]][0]-self.nodalcoord[self.node[i][0]][0])**2+ \
(self.nodalcoord[self.node[i][1]][1]-self.nodalcoord[self.node[i][0]][1])**2 + \
(self.nodalcoord[self.node[i][1]][2]-self.nodalcoord[self.node[i][0]][2])**2)
self._cx[i] = (self.nodalcoord[self.node[i][1]][0]-self.nodalcoord[self.node[i][0]][0])/ele_length[i]
self._cy[i] = (self.nodalcoord[self.node[i][1]][1]-self.nodalcoord[self.node[i][0]][1])/ele_length[i]
self._cz[i] = (self.nodalcoord[self.node[i][1]][2]-self.nodalcoord[self.node[i][0]][2])/ele_length[i]
self._norm_stiff[i] = self.el_mod[i]/ele_length[i]
self._s_loc[i] = [[self._cx[i]**2, self._cx[i]*self._cy[i], self._cx[i]*self._cz[i], -self._cx[i]**2, -self._cx[i]*self._cy[i], -self._cx[i]*self._cz[i]], \
[self._cx[i]*self._cy[i], self._cy[i]**2, self._cy[i]*self._cz[i], -self._cx[i]*self._cy[i], -self._cy[i]**2, -self._cy[i]*self._cz[i]], \
[self._cx[i]*self._cz[i], self._cy[i]*self._cz[i], self._cz[i]**2, -self._cx[i]*self._cz[i], -self._cy[i]*self._cz[i], -self._cz[i]**2], \
[-self._cx[i]**2, -self._cx[i]*self._cy[i], -self._cx[i]*self._cz[i], self._cx[i]**2, self._cx[i]*self._cy[i], self._cx[i]*self._cz[i]], \
[-self._cx[i]*self._cy[i], -self._cy[i]**2, -self._cy[i]*self._cz[i], self._cx[i]*self._cy[i], self._cy[i]**2, self._cy[i]*self._cz[i]], \
[-self._cx[i]*self._cz[i], -self._cy[i]*self._cz[i], -self._cz[i]**2, self._cx[i]*self._cz[i], self._cy[i]*self._cz[i], self._cz[i]**2]]
self._loc_stiff[i] = [[y* self.area[i]* self._norm_stiff[i] for y in x] for x in self._s_loc[i]]
ele_dof_vec = self.eledof[i]
stiffincrement = [0.]*(len(self.nodalcoord)*3)
for j in range(3*2):
for k in range(3*2):
stiffincrement[ele_dof_vec[k]] = self._loc_stiff[i][j][k]
self.stiffness[ele_dof_vec[j]] = [x + y for x, y in zip(self.stiffness[ele_dof_vec[j]], stiffincrement)]
self._stiffisfresh = 1
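# Background note (added explanation of the assembly above): for a truss bar the
# element stiffness in global coordinates is k_e = (E_i*A_i/L_i) * [[C, -C], [-C, C]]
# with C = c*c^T and c = [cx, cy, cz] the direction cosines of the bar axis.
# _s_loc[i] holds the 6x6 block [[C, -C], [-C, C]] and _loc_stiff[i] scales it by
# area*E/L before it is scattered into the global stiffness matrix via eledof.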
def calcmodstiffness(self, index, magnitude):
"""
Convergence step in stiffness matrix modification
"""
if self.mod_stiffnesses == []:
self.mod_stiffnesses = [0.]*(self.elenum+1)
#for loopindex in range(self.elenum):
_mod_stiffnesses_temp = [[0.]*(len(self.nodalcoord)*3)]*(len(self.nodalcoord)*3)
for i in range(self.elenum):
if i == index:
_mod_norm_stiff = self._norm_stiff[i] * (1.0 + self.modifications[i] + magnitude) #self.el_mod[i]/ele_length[i]
else:
_mod_norm_stiff = self._norm_stiff[i] * (1.0 + self.modifications[i]) #self.el_mod[i]/ele_length[i]
_mod_loc_stiff = [[y*self.area[i]*_mod_norm_stiff for y in x] for x in self._s_loc[i]]
ele_dof_vec = self.eledof[i]
stiffincrement = [0.]*(len(self.nodalcoord)*3)
for j in range(3*2):
for k in range(3*2):
stiffincrement[ele_dof_vec[k]] = _mod_loc_stiff[j][k]
_mod_stiffnesses_temp[ele_dof_vec[j]] = [x + y for x, y in zip(_mod_stiffnesses_temp[ele_dof_vec[j]], stiffincrement)]
self.mod_stiffnesses[index] = _mod_stiffnesses_temp
def solve(self):
"""
Main solver of the code
"""
if self._stiffisfresh == 0:
if _LOG:
print 'Stiffness matrix is recalculated'
self.calcstiffness()
self.dis_new = [0.]*(self.nodenum*3-len(self.constraint))
self.force_new = [0.]*(self.nodenum*3-len(self.constraint))
self.stiff_new = [[0.]*(self.nodenum*3-len(self.constraint))]*(self.nodenum*3-len(self.constraint))
# known force array
for i, known_f_a in enumerate(self.known_f_a):
self.force_new[i] = self.force[known_f_a]
stiffincrement = [0.]*(self.nodenum*3-len(self.constraint))
for i, kfai in enumerate(self.known_f_a):
for j, kfaj in enumerate(self.known_f_a):
stiffincrement[j] = self.stiffness[kfai][kfaj]
self.stiff_new[i] = [x + y for x, y in zip(self.stiff_new[i], stiffincrement)]
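# Added explanation: force_new and stiff_new are the load vector and stiffness
# matrix restricted to the free DOFs (known_f_a), i.e. the rows and columns not
# fixed by supports. Solving this reduced system yields the unknown
# displacements, which are written back into the full displacement vector below.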
# SOLVING THE STRUCTURE
if _SOLVER == 0:
if _LOG:
print 'Built-in solver'
self.dis_new = mat_vec_mult(invert(self.stiff_new), self.force_new)
else:
if _LOG:
print 'NumPy solver'
self.dis_new = np.linalg.solve(np.array(self.stiff_new), np.array(self.force_new))
self.displacement = deepcopy(self.init_disp)
for i, known_f_a in enumerate(self.known_f_a):
self.displacement[known_f_a] = self.dis_new[i]
# Deformed shape
self.nodalcoord_def = []
for i in range(self.nodenum):
self.nodalcoord_def.append([self.nodalcoord[i][0]+ self.displacement[i*3+0], \
self.nodalcoord[i][1]+ self.displacement[i*3+1], self.nodalcoord[i][2]+ self.displacement[i*3+2]])
# Postprocess
self.postprocess()
self.mod_displacements = [0.]*(self.elenum+1)
def solvemodstruct(self, index):
"""
Solver for the modified structures. 'Index' shows the actual modification number.
"""
self.mod_displacements[index] = [0.]*(self.nodenum*3)
dis_new = [0.]*(self.nodenum*3-len(self.constraint))
stiff_new = [[0.]*(self.nodenum*3-len(self.constraint))]*(self.nodenum*3-len(self.constraint))
stiffincrement = [0.]*(self.nodenum*3-len(self.constraint))
for i, kfai in enumerate(self.known_f_a):
for j, kfaj in enumerate(self.known_f_a):
stiffincrement[j] = self.mod_stiffnesses[index][kfai][kfaj]
stiff_new[i] = [x + y for x, y in zip(stiff_new[i], stiffincrement)]
# SOLVING THE MODIFIED STRUCTURE
if _SOLVER == 0:
dis_new = mat_vec_mult(invert(stiff_new), self.force_new)
else:
dis_new = np.linalg.solve(np.array(stiff_new), np.array(self.force_new))
mod_displacement_temp = deepcopy(self.init_disp)
for i, kfa in enumerate(self.known_f_a):
mod_displacement_temp[kfa] = dis_new[i] - self.dis_new[i]
self.mod_displacements[index] = [x + y for x, y in zip(self.mod_displacements[index], mod_displacement_temp)]
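# Added explanation: mod_displacements[index] stores the displacement change of
# the modified structure relative to the base solution (dis_new of the
# unmodified model), so evaluate() can rank how strongly each candidate
# modification influences the measured DOFs.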
def evaluate(self):
"""
Calculates the relative displacement of each individual available unit-modification
compared to the measured differences (delta).
delta: [DOF number, difference]
return effect: [effect on 1. point, effect on 2. point, ..., modification number]
where each line number shows the corresponding modification number
"""
self.effect = [[0.]*(self.keypnum + 2)]*self.elenum
self.toteffect = [0.]*self.keypnum
self.sortedeff = [[[0.]*(self.keypnum + 2)]*self.elenum]*self.keypnum
effect_temp = [0.]*(self.keypnum + 2)
for modnum in range(self.elenum):
effect_temp[self.keypnum] = int(modnum)
for j, dofnum in enumerate(self.keypoint):
try:
effect_temp[j] = self.mod_displacements[modnum][dofnum]
self.effect[modnum] = [x for x in effect_temp]
self.toteffect[j] += abs(self.effect[modnum][j])
except IndexError:
print "Maybe the mapping data is invalid."
print "Please check the \'arduino_mapping.txt\' input to see whether the given DOFs are correct."
raise IndexError
self.effectratio = deepcopy(self.effect)
for i in range(self.elenum):
for j in range(self.keypnum):
if self.toteffect[j] > 0:
self.effectratio[i][j] = abs(self.effectratio[i][j]/self.toteffect[j])
else:
self.effectratio[i][j] = 0
#print " \'effectratio\' is not used yet"
# Sort by effectiveness
for i in range(self.keypnum):
self.sortedeff[i] = deepcopy(self.effect)
# Check sign of the effect
for ktemp in range(self.elenum):
if self.sortedeff[i][ktemp][i] < 0:
for jtemp in range(self.keypnum):
self.sortedeff[i][ktemp][jtemp] = abs(self.sortedeff[i][ktemp][jtemp])
self.sortedeff[i][ktemp][self.keypnum +1] = -1
else:
self.sortedeff[i][ktemp][self.keypnum +1] = +1
for j in range(self.keypnum):
if i != j and j != 0:
self.sortedeff[i] = swap_col(sorted(swap_col(self.sortedeff[i], 0, j), reverse=True), 0, j)
if i != 0:
self.sortedeff[i] = swap_col(sorted(swap_col(self.sortedeff[i], 0, i), reverse=True), 0, i)
else:
self.sortedeff[i] = sorted(self.sortedeff[i], reverse=True)
def difference(self, num_displ, measurement):
"""
Calculate the difference between the Numerical solution and Real-life measurement.
The Real-life measurement should be given the following format:
MEASUREMENT: [[13X, -2.154], [16Y, 5.256], ...]
"""
#Print nodenumber option should be added! <XXX>
delta = []
for loc, measured in measurement:
try:
dof = self.analysis[loc.upper()]
except KeyError:
print 'The given measurement location cannot be matched with the input data.'
print 'The available nodes are: {\'NAMES\': mapping addresses}'
print self.analysis
SER.close()
raise Exception('Watchpoint name error')
delta.append(measured - num_displ[dof])
return delta
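# Illustrative example (hypothetical values, not part of the original program):
# with measurement = [('13X', -2.154)] and self.analysis['13X'] pointing at the
# matching DOF, difference() returns [-2.154 - num_displ[dof]], i.e. measured
# minus numerically predicted displacement for every watched DOF.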
def optimize(self, delta):
"""
Model updating - core function
"""
#modnum = min(10, self.elenum)
modnum = self.elenum
self.modifications = [0.0]*self.elenum
if not _SIMULATION:
appendix = ""
else:
appendix = " - SIMULATED"
newdelta = delta
j = 0
print "-----"
print "Step: 0/"+ str(self.iterationlimit)
while (error(newdelta) > self.errorlimit and j <= self.iterationlimit and (self.capable() or j <= 1)): # Optimization loop
j += 1
print "Error: " + str(error(newdelta))
print "-----"
print "Step: " + str(j) + "/"+ str(self.iterationlimit)
ratio = [0.]*modnum
unit = 0
prevmodifications = self.modifications
for index in range(self.elenum):
self.modifications[index] = min(abs(self.modifications[index] - self.unitmodification), self.modificationlimit) *math.copysign(1, self.modifications[index]- self.unitmodification)
self.calcmodstiffness(index, self.modifications[index])
self.solvemodstruct(index)
self.evaluate()
self.calcmodstiffness(self.elenum, 0)
self.solvemodstruct(self.elenum)
newdelta = self.difference(self.mod_displacements[self.elenum], self.measurement)
for i, effect in enumerate(self.toteffect):
if effect == 0.0:
print "None of the variables has effect on " + str(self.arduino_mapping[i])
print "Model updating has no solution."
raise Exception
for i in range(self.elenum):
modificationnumber = self.sortedeff[0][i][1]
ratio[modificationnumber] = abs(self.sortedeff[0][i][0] / self.toteffect[0])*math.copysign(1, self.sortedeff[0][i][2])
unit += abs(ratio[modificationnumber]*self.sortedeff[0][i][0])
scale = newdelta[0]/unit
for i in range(self.elenum):
modificationnumber = self.sortedeff[0][i][1]
self.modifications[modificationnumber] = min(abs(prevmodifications[modificationnumber] - self.unitmodification*ratio[modificationnumber]), self.modificationlimit)\
*math.copysign(1, prevmodifications[modificationnumber] - self.unitmodification*ratio[modificationnumber])
# the last part is already the sign itself without the sign function
print "Ratio: " + str(scale)
print "Final error: " + str(error(newdelta))
if not self.capable() and j > 1:
print "Optimization could not be finished successfully."
print "The remaining error is: " + str(error(newdelta))
with open(self.name + ' - UpdateResults'+ appendix +'.txt', 'a') as outfile:
if j > 1:
if j <= self.iterationlimit and self.capable():
self.numofupdates[0] += 1
outfile.write("Update state: SUCCESSFUL\n")
if not j <= self.iterationlimit:
self.numofupdates[1] += 1
outfile.write("Update state: Run out of iteration limit\n")
if not self.capable() and j > 1:
self.numofupdates[2] += 1
outfile.write("Update state: No more possible modification\n")
else:
outfile.write("Update state: Optimization was skipped\n")
outfile.write("Required iterations: " + str(j) + "\n")
outfile.write("Measurement: " + str(self.measurement) + "\n")
outfile.write("Original delta: " + str(delta) + "\n")
outfile.write("New delta: " + str(newdelta) + " (limit: " + str(self.errorlimit) +")\n")
outfile.write("Final error: " + str(error(newdelta)) + "\n")
outfile.write("Modifications [%]: \n")
outfile.write(str(self.modifications) + "\n")
outfile.write("Original displacements: \n")
outfile.write(str(self.displacement) + "\n")
if j > 1:
outfile.write("New displacements: \n")
outfile.write(str(self.mod_displacements[self.elenum]) + "\n")
outfile.write("----------------------\n")
def capable(self):
"""
Function telling whether there are more options to modify
"""
capable = False
for variable in self.modifications:
if abs(variable) <= 0.95*self.modificationlimit and abs(variable) > 0.01:
capable = True
return capable
def seterrorlimit(self, errorlimit):
"""
Setting general stop parameter for model updating
"""
if errorlimit > 0.0:
self.errorlimit = errorlimit
else:
print "The error limit must be a positive number"
raise Exception
def setmodificationlimit(self, modificationlimit):
"""
Setting modification limit for members (model updating)
"""
if modificationlimit > 0.0 and modificationlimit < 1.0:
self.modificationlimit = modificationlimit
else:
print "The modification limit must be higher than 0.0 and lower than 1.0"
raise Exception
def setunitmodification(self, unitmodification):
"""
Setting modification step (model updating)
"""
if abs(unitmodification) >= 0.01 and abs(unitmodification) < 0.5:
self.unitmodification = unitmodification
else:
print "The absolute value of the unit modification must be at least 0.01 and at most 0.5"
raise Exception
def setiterationlimit(self, iterationlimit):
"""
Setting maximum number of iterations (model updating)
"""
if int(iterationlimit) > 1 and int(iterationlimit) <= math.pow(10, 4):
self.iterationlimit = int(iterationlimit)
else:
print "The iteration limit must be between 2 and 10,000"
raise Exception
def readarduino(self, base, saveinput):
"""
Read data from Arduino
"""
# Read data from Arduino
maxdifference = 0.8 # Maximum input difference threshold in mm
arduinovalues = []
data = [0.]*len(self.arduino_mapping)
newdata = False
bigdifference = False
readerror = False
try:
arduinoline = SER.readline()
if len(arduinoline) > 0:
arduinovalues = arduinoline.split(',')
try:
if arduinovalues[0][len(arduinovalues)-1] == '.':
arduinovalues[0] = arduinovalues[0][:len(arduinovalues[0])-2]
else:
del arduinovalues[len(arduinovalues)-1]
except IndexError:
print "Index Error... continuing"
if len(arduinovalues) == len(self.arduino_mapping):
try:
for i in range(len(self.arduino_mapping)):
data[i] = float(arduinovalues[i]) - float(base[i][1])
if abs(data[i] - self.processeddata[i]) > maxdifference:
bigdifference = True
if abs(float(arduinovalues[i])) < 2.0:
readerror = True
self.processeddata = data
newdata = True
except ValueError:
print "Value error... continuing"
SER.flushInput()
time.sleep(0.5)
except Exception:
print "Type error: " + str(arduinovalues) + "... continuing"
SER.flushInput()
time.sleep(0.5)
SER.flushInput()
except serial.SerialTimeoutException:
print "Data could not be read... continuing"
SER.flushInput()
time.sleep(0.5)
if newdata and not bigdifference and not readerror:
self.measurement = zip(self.arduino_mapping, data)
saveinput.write(str(data) +', '+ str(time.time()) + "\n")
# Calculate differences
delta = self.difference(self.displacement, self.measurement)
print "Delta: " + str(delta)
newdata = False
bigdifference = False
readerror = False
return delta
newdata = False
bigdifference = False
readerror = False
def simulatearduino(self, arduinoline, prevline):
"""
Simulate data, based on previous measurement
"""
arduinovalues = []
data = [0.]*len(self.arduino_mapping)
skip = 0
sleeptime = 0.
try:
try:
prevreadtime = float(str(prevline.split(']')[1]).split(',')[1])
nowreadtime = float(str(arduinoline.split(']')[1]).split(',')[1])
try:
if _REALISTICSIMULATION:
sleeptime = nowreadtime - prevreadtime
except Exception:
pass
except Exception:
skip = 1
sleeptime = 0.
if not skip:
if not sleeptime > 0:
sleeptime = 0.
arduinoline = str(arduinoline.split(']')[0])+"]"
arduinovalues = eval(arduinoline)
try:
for i in range(len(self.arduino_mapping)):
data[i] = float(arduinovalues[i])
self.processeddata = data
except Exception:
print "Type error: " + str(arduinovalues) + "... continuing"
self.measurement = zip(self.arduino_mapping, data)
# Calculate differences
delta = self.difference(self.displacement, self.measurement)
time.sleep(sleeptime)
print delta
return delta
except IndexError:
print "IndexError"
#pass
except Exception:
print "Exception in simulation data"
#pass
def updatemodel(self):
"""
General function to manage the model updating procedure.
"""
self.processeddata = [0.]*len(self.arduino_mapping)
if not _SIMULATION:
base = self.calibrate()
filemode = 'a'
else:
base = ['SIMULATION']
try:
os.remove(self.name + ' - UpdateResults - SIMULATED.txt')
except Exception:
pass
filemode = 'r'
with open(self.name + ' - Input Data.txt', filemode) as inputfile:
# Saving input data
if not _SIMULATION:
inputfile.write('Input data of \'' + self.name + '\':\n\n')
inputfile.write('Start Time: ' + str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) + "\n")
inputfile.write('Base: ' + str(base) + '\n')
inputline = "[],0.0"
for i in range(1000):
if not _SIMULATION:
delta = self.readarduino(base, inputfile)
self.optimize(delta)
else:
try:
delta = None
previnputline = inputline
inputline = inputfile.readline()
if not inputline == '':
delta = self.simulatearduino(inputline, previnputline)
except Exception:
pass
if not delta is None:
self.optimize(delta)
print "Update statistics:"
print "Total number of updated models: " + str(TRUSS.numofupdates[0] + TRUSS.numofupdates[1] + TRUSS.numofupdates[2])
print " Successfully updated models: " + str(TRUSS.numofupdates[0])
print " Updates with running out of possibilities: " + str(TRUSS.numofupdates[2])
print " Updates that did not finish: " + str(TRUSS.numofupdates[1])
def calibrate(self):
"""
Calibration for the Arduino measurement. All measurements describe the displacements relative to the calibration state.
"""
answer_1 = '0'
restart = '0'
accept = '0'
arduinovalues = []
print "Before starting the model updating, the measuring tools must be calibrated."
print "The calibration should be done in load-free state."
while (answer_1 not in ['Y', 'N']):
answer_1 = raw_input('Can we start the calibration? (y/n) ').upper()
if answer_1 == 'N':
SER.close()
raise Exception('Calibration is terminated')
else:
try:
SER.flushInput()
#time.sleep(0.2)
arduinoline = '' #SER.readline()
while len(arduinoline) == 0:
time.sleep(0.2)
arduinoline = SER.readline()
if len(arduinoline) > 0:
arduinovalues = arduinoline.split(',')
del arduinovalues[len(arduinovalues)-1] # if needed!!!
if len(arduinovalues) == len(self.arduino_mapping):
measurement = zip(self.arduino_mapping, arduinovalues)
print "Calibration result:"
print measurement
while (accept not in ['Y', 'N']):
accept = raw_input('Ok? Can we start the main part? Put on the loads! (y/n) ').upper()
if accept == 'N':
restart = 'Y'
else:
print "Data error. Calibration is restarting."
print "Arduino values:" + str(arduinovalues)
restart = 'Y'
else:
print 'The calibration cannot be done: no data'
while (restart not in ['Y', 'N']):
restart = raw_input('Do you want to restart calibration? (y/n) ').upper()
except Exception:
print 'The calibration cannot be done: exception was raised'
while (restart not in ['Y', 'N']):
restart = raw_input('Do you want to restart calibration? (y/n) ').upper()
if restart == 'Y':
print "Restarting calibration"
SER.flushInput()
return self.calibrate()
elif restart == 'N':
SER.close()
raise Exception('Calibration is terminated')
if accept == 'Y':
return measurement
def postprocess(self):
"""
Calculates reaction forces and stresses
"""
self._reactions()
self._stresses()
self._postprocessed = 1
def _reactions(self):
"""
Calculates reaction forces
"""
for i in self.known_dis_a:
self.force[i] = 0
for j, displ in enumerate(self.displacement):
self.force[i] += self.stiffness[i][j]*displ
def _stresses(self):
"""
Calculates stress in elements
Last part: Coloring elements for graphical output
"""
self.stress = [0.]*self.elenum
for element in range(self.elenum):
locstiff = [-self._cx[element], -self._cy[element], -self._cz[element], \
self._cx[element], self._cy[element], self._cz[element]]
for i in range(3*2):
self.stress[element] += locstiff[i]*self.displacement[self.eledof[element][i]]
self.stress[element] = self.stress[element]*self._norm_stiff[element]
smax = max([abs(min(self.stress)), max(self.stress), 0.000000001])
self.stresscolor = [float(x)/float(smax) for x in self.stress]
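# Added explanation: the loop above projects the relative end displacements of
# each bar onto its axis via the direction cosines, which gives the elongation;
# multiplying by E/L (_norm_stiff) turns that into the axial stress. stresscolor
# then normalizes by the largest absolute stress so the plot colouring stays
# within [-1, 1].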
def postprocessed(self):
"""
Tells whether the structure's postprocess part has already been calculated
"""
return self._postprocessed
def writeresults(self, fname):
"""
Writing results to file.
"""
out_element = ''
for i in self.node:
out_element += str(i[0] + self._io_origin) + ', ' + str(i[1] + self._io_origin) + ' | '
out_coords = ''
for i in self.nodalcoord:
out_coords += str(i[0]) + ', ' + str(i[1]) + ', ' + str(i[2]) + ' | '
out_crsect = ''
for i in self.area:
out_crsect += str(i) + ', '
out_materials = ''
for i in self.el_mod:
out_materials += str(i) + ', '
out_forces = ''
for forcedof in self.known_f_notzero:
if self.dof == 3:
out_forces += str(forcedof + self._io_origin) + ', ' + str(self.force[forcedof]) + ' | '
elif self.dof == 2 and i % 3 != 2:
out_forces += str(forcedof - forcedof//3 + self._io_origin) + ', ' + str(self.force[forcedof]) + ' | '
out_supports = ''
for i in self.constraint:
if self.dof == 3:
out_supports += str(i[0] + self._io_origin) + ', ' + str(i[1]) + ' | '
elif i[0] % 3 != 2:
out_supports += str(i[0] - i[0]//3 + self._io_origin) + ', ' + str(i[1]) + ' | '
# Not an elegant solution
out_specdofs = self.specdof_inputstring
with open(fname, 'w') as outfile:
# Writing data
outfile.write('Calculation of \'' + self.name + '\':\n\n')
outfile.write('Reactions\n')
#for i in range(len(self.force)//3):
prev = -1
for i in self.known_dis_a:
if self.dof == 3 or i%3 != 2:
if i//3 != prev:
if i < 100:
outfile.write(' ')
if i < 9:
outfile.write(' ')
nodalforce = ''
if (i//3)*3+0 in self.known_dis_a:
nodalforce += "{:10.2f}".format(self.force[(i//3)*3+0]) + ', '
else:
nodalforce += ' '
if (i//3)*3+1 in self.known_dis_a:
nodalforce += "{:10.2f}".format(self.force[(i//3)*3+1]) + ', '
else:
nodalforce += ' '
if self.dof != 2 and (i//3)*3+2 in self.known_dis_a:
nodalforce += "{:10.2f}".format(self.force[(i//3)*3+2]) + '\n'
else:
nodalforce += ' \n'
if nodalforce != ' \n':
outfile.write(str(i//3 + self._io_origin) + ', ' + nodalforce)
prev = i//3
outfile.write('\n')
outfile.write('Displacements\n')
for i in range(len(self.displacement)//3):
if i < 100:
outfile.write(' ')
if i < 9:
outfile.write(' ')
outfile.write(str(i + self._io_origin) + ', ' + "{:10.3f}".format(self.displacement[i*3 +0]) + ', ' \
+ "{:10.3f}".format(self.displacement[i*3 +1]) + ', ' + "{:10.3f}".format(self.displacement[i*3 +2]) + ', ' + '\n')
outfile.write('\n')
outfile.write('Stresses\n')
for i, stress in enumerate(self.stress):
if i < 100:
outfile.write(' ')
if i < 9:
outfile.write(' ')
outfile.write(str(i + self._io_origin) + ', ' + "{:10.3f}".format(stress) + '\n')
outfile.write('\n')
# Saving original input
outfile.write('----- Original input: -----\n\n')
outfile.write('_ORIGIN\n')
outfile.write(str(self._io_origin) + '\n\n')
outfile.write('DOF\n')
outfile.write(str(self.dof) + '\n\n')
outfile.write('ELEMENTS\n')
outfile.write(out_element + '\n\n')
outfile.write('COORDINATES\n')
outfile.write(out_coords + '\n\n')
outfile.write('CROSS-SECTIONS\n')
outfile.write(out_crsect + '\n\n')
outfile.write('MATERIALS\n')
outfile.write(out_materials + '\n\n')
outfile.write('FORCES\n')
outfile.write(out_forces + '\n\n')
outfile.write('SUPPORTS\n')
outfile.write(out_supports + '\n\n')
outfile.write('SPECDOF\n')
outfile.write(out_specdofs + '\n\n')
outfile.write('EOF\n')
##################################
# BEGINNING OF THE MAIN PART #
##################################
PARTTIME = logtime(TIC, "Initialization")
# Define new truss
TRUSS = Truss('bridge')
if not _DEBUG:
TRUSS.name = raw_input('Test name: ')
else:
print "*** Debug mode ***"
print "*** The following file will be opened: " + TRUSS.name + ".str"
# Read input file
#TRUSS.read('lab_01.txt')
try:
TRUSS.read(TRUSS.name + ".str")
except IOError:
print "The following file could not be opened: " + TRUSS.name + ".str"
print "Please make sure that the structural data is available for the program in the running directory."
raise IOError
#if _ARDUINO or _SIMULATION: # deprecated
# TRUSS.setspecdofs(arduino_mapping)
PARTTIME = logtime(PARTTIME, "Setting up structure")
# Calculate stiffness-matrix
TRUSS.calcstiffness()
#TRUSS.calcstiffness_plate()
PARTTIME = logtime(PARTTIME, "Calculating Stiffness Matrix")
#Solve structure
TRUSS.solve()
#TRUSS.solve_plate()
PARTTIME = logtime(PARTTIME, "Solving")
if _UPDATING:
TRUSS.setunitmodification(0.05)
TRUSS.seterrorlimit(1.2)
TRUSS.setmodificationlimit(0.7)
TRUSS.setiterationlimit(100)
TRUSS.updatemodel()
PARTTIME = logtime(PARTTIME, "Updating numerical model")
if _GRAPHICS:
# Plot settings:
# O: Original D: Deformed S: Supports F: Forces R: Reactions
# ScD: Scale displacements (Z-axis) (def:1.0) ScF: Scale forces (def:1.0)
# ScS: Scale Support signs (Z-axis) (def:1.0)
# Save: Save plot to file
# plot(O, D, S, F, R, ScD, ScF, ScS, Save)
TRUSS.plot(1, 0, 1, 1, 0, 1.0, 0.0, 0.0, True)
TRUSS.plot(1, 1, 1, 0, 0, 1.0, 0.0, 0.0, True)
TRUSS.plot(0, 1, 1, 1, 1, 2.0, 0.0, 0.0, True)
#pass
PARTTIME = logtime(PARTTIME, "Plotting")
# Write results to file
TRUSS.writeresults(TRUSS.name + ' - Results.txt')
PARTTIME = logtime(PARTTIME, "Writing results to the output file")
if _ARDUINO:
# Closing Arduino port
SER.close()
if _LOG:
TAC = time.time()
TOTALTIME = TAC-TIC
if _UPDATING:
print "Update statistics:"
print "Total number of updated models: " + str(TRUSS.numofupdates[0] + TRUSS.numofupdates[1] + TRUSS.numofupdates[2])
print " Successfully updated models: " + str(TRUSS.numofupdates[0])
print " Updates with running out of possibilities: " + str(TRUSS.numofupdates[2])
print " Updates that did not finish: " + str(TRUSS.numofupdates[1])
print 'Total time: ' + str("{:10.3f}".format(TOTALTIME))
_UPDATING = 1 # Model Updating: On/ Off
_ARDUINO = 1 # Arduino input: On/Off
_DEBUG = 1 # Debugging mode
_REALISTICSIMULATION = 0 # Wait as long as it was originally. Only valid with _SIMULATION = 1
##############################################
### DO NOT MODIFY ### #
#
elif _COMPATIBLE_MODE == 01: #
### "Processing 3" mode ### #
_MODE_NAME = "Processing 3" #
_LOG = 1*1 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 0*0 #
_UPDATING = 1*1 #
_ARDUINO = 1*1 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
elif _COMPATIBLE_MODE == 02: #
### Android mode ### #
# DO NOT MODIFY #
_MODE_NAME = "Android" #
_LOG = 1*1 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 1*1 #
_UPDATING = 0*0 #
_ARDUINO = 0*0 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
elif _COMPATIBLE_MODE == 03: #
### Informative ### #
# DO NOT MODIFY #
_MODE_NAME = "Informative mode" #
_LOG = 1*1 #
_GRAPHICS = 1*1 #
_SOLVER = 1*1 #
_OSLIBRARYAVAILABLE = 1*1 #
_UPDATING = 1*1 #
_ARDUINO = 1*1 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
else: #
### Maximum compatibility ### #
# DO NOT MODIFY #
_MODE_NAME = "Maximum compatibility" #
_LOG = 0*0 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 0*0 #
_UPDATING = 0*0 #
_ARDUINO = 0*0 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
### DO NOT MODIFY ### #
##############################################
if _OSLIBRARYAVAILABLE:
import os
if _SIMULATION or not _UPDATING:
_ARDUINO = 0
if _COMPATIBLE_MODE == 2:
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if _LOG:
import time
import datetime
TIC = time.time()
print '------------------------------------'
print 'Truss calculational program'
print 'Created by Máté Szedlák (23/11/2016)'
print 'Compatibility mode: ' + _MODE_NAME
if _SOLVER == 0:
print '- Solver is set to default'
elif _SOLVER == 1:
print '- Solver is set to NumPy'
else:
raise Exception("Solver settings are invalid!")
if _UPDATING:
print '+ Model updating is turned ON'
if _SIMULATION:
print 'Input data is SIMULATED!'
else:
print '- Model updating is turned OFF'
print '------------------------------------'
if _ARDUINO:
SER = 0
try:
import serial
except ImportError:
print "You tried to import \'serial\' in Windows mode without installing \'pySerial\'."
print "Please first install pySerial: http://playground.arduino.cc/Interfacing/Python"
raise Exception('Android mode denied: pyserial not found')
PORTNUMBER -= 1
while SER == 0:
PORTNUMBER += 1
if PORTNUMBER >= len(PORTS):
PORTNUMBER = 0
time.sleep(0.6)
print 'Opening serial at port ' + str(PORTS[PORTNUMBER])
try:
SER.close()
except Exception:
pass
try:
SER = serial.Serial(PORTS[PORTNUMBER], 9600, timeout=0)
except serial.SerialException:
Exception(PORTS[PORTNUMBER] + ' port is busy. It might be occupied by this program or another one :/ Be careful or try resetting this program')
SER = 0
try:
SER.close()
except Exception:
pass
except Exception:
SER = 0
try:
SER.close()
except Exception:
pass
#if _ARDUINO or _SIMULATION:
# try:
# mappingfile = 'arduino_mapping.txt'
# with open(mappingfile, "r") as textfile:
# line = textfile.readline().strip()
# arduino_mapping = line.upper().split(',')
#
# except IOError:
# raise Exception('File not found: ' + mappingfile)
if _SOLVER:
# NumPy library for solving linear equations in another way
import numpy as np
if _GRAPHICS:
# libraries for drawing
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
# From here:
def mat_vec_mult(mat_a, vec_b):
"""
Multiplying matrix with a vector, giving the result as a vector
Source:
https://stackoverflow.com/questions/10508021/matrix-multiplication-in-python
"""
vec_c = [0.]*len(mat_a)
for i, row in enumerate(mat_a):
for j, elem in enumerate(vec_b):
vec_c[i] += row[j]*elem
return vec_c
def invert(mat_x):
"""
Invert a matrix X according to gauss-jordan elimination
In gauss-jordan elimination, we perform basic row operations to turn a matrix into
row-echelon form. If we concatenate an identity matrix to our input
matrix during this process, we will turn the identity matrix into our inverse.
X - input list of lists where each list is a matrix row
output - inverse of X
Source:
http://www.vikparuchuri.com/blog/inverting-your-very-own-matrix/
"""
#copy X to avoid altering input
mat_x = deepcopy(mat_x)
#Get dimensions of X
rows = len(mat_x)
cols = len(mat_x[0])
#Get the identity matrix and append it to the right of mat_x
#This is done because our row operations will make the identity into the inverse
identity = make_identity(rows, cols)
for i in xrange(0, rows):
mat_x[i] += identity[i]
i = 0
for j in xrange(0, cols):
#print("On col {0} and row {1}".format(j, i))
#Check to see if there are any nonzero values below the current row in the current column
zero_sum, first_non_zero = check_for_all_zeros(mat_x, i, j)
#If everything is zero, increment the columns
if zero_sum == 0:
if j == cols:
return mat_x
raise Exception("Matrix is singular")
#If mat_x[i][j] is 0, and there is a nonzero value below it, swap the two rows
if first_non_zero != i:
mat_x = swap_row(mat_x, i, first_non_zero)
#Divide mat_x[i] by mat_x[i][j] to make mat_x[i][j] equal 1
mat_x[i] = [m/mat_x[i][j] for m in mat_x[i]]
#Rescale all other rows to make their values 0 below mat_x[i][j]
for k in xrange(0, rows):
if k != i:
scaled_row = [mat_x[k][j] * m for m in mat_x[i]]
mat_x[k] = [mat_x[k][m] - scaled_row[m] for m in xrange(0, len(scaled_row))]
#If either of these is true, we have iterated through the matrix, and are done
if i == rows or j == cols:
break
i += 1
#Get just the right hand matrix, which is now our inverse
for i in xrange(0, rows):
mat_x[i] = mat_x[i][cols:len(mat_x[i])]
return mat_x
def check_for_all_zeros(mat_x, i, j):
"""
Check matrix mat_x to see if only zeros exist at or below row i in column j
mat_x - a list of lists
i - row index
j - column index
returns -
zero_sum - the count of non zero entries
first_non_zero - index of the first non value
"""
non_zeros = []
first_non_zero = -1
for k in xrange(i, len(mat_x)):
non_zero = mat_x[k][j] != 0
non_zeros.append(non_zero)
if first_non_zero == -1 and non_zero:
first_non_zero = k
zero_sum = sum(non_zeros)
return zero_sum, first_non_zero
def swap_row(mat_x, i, j):
"""
Swap row i and row j in a list of lists
mat_x - list of lists
i - row index
j - row index
returns- modified matrix
"""
mat_x[j], mat_x[i] = mat_x[i], mat_x[j]
return mat_x
def swap_col(mat_x, i, j):
"""
Swap colum i and column j in a list of lists
mat_x - list of lists
i - column index
j - column index
returns- modified matrix
"""
for item in mat_x:
item[i], item[j] = item[j], item[i]
return mat_x
def make_identity(row_num, col_num):
"""
Make an identity matrix with dimensions rxc
row_num - number of rows
col_num - number of columns
returns - list of lists corresponding to the identity matrix
"""
identity = []
for i in xrange(0, row_num):
row = []
for j in xrange(0, col_num):
elem = 0
if i == j:
elem = 1
row.append(elem)
identity.append(row)
return identity
if _GRAPHICS:
class Arrow3D(FancyArrowPatch):
"""
Vector drawer module from the internet
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
_xs3d, _ys3d, _zs3d = self._verts3d
_xs, _ys, _zs = proj3d.proj_transform(_xs3d, _ys3d, _zs3d, renderer.M)
self.set_positions((_xs[0], _ys[0]), (_xs[1], _ys[1]))
FancyArrowPatch.draw(self, renderer)
def plotstructure(struct, showorig, showresult, showsupports, \
showforces, showreactions, scaledisp, scale_f, z_corr, showvalues, saveplot):
"""
General plotting method for structures
scaledisp: Scale drwaing of displacements
scale_f: Scale force sign
z_corr: Scale z-axis
"""
plotname = struct.name
plot_width = 18.0 # Plot width in inches
xframe = 0 # Frame width at X direction
yframe = 0 # Frame width at Y direction
zframe = 0 # Frame width at Z direction
scale_sup = scale_f*0.3 # Scale support sign # All the others are input parameters
# Stress coloring settings [R G B] - Examples considering pressure:
# 0: [1, 0, 0] Plain red
# 1: [x, 0, 0] Red to Black
# 2: [1, 1-x, 0-x] Red to White
# 3: [1, (1-x)/2, (1-x)/2] Red to MildRed - Distinguishes pressure and tension
# 4: [x, 1-x, 0] Red to Green
_coloring = 3 # ∈ [0, 1, 2, 3, 4]
fig = plt.figure()
_ax = fig.add_subplot(111, projection='3d')
if struct.dof == 2:
_ax.view_init(elev=90., azim=-90.)
_ax.w_zaxis.line.set_lw(0.)
_ax.set_zticklabels([])
xmin = min(list(struct.nodalcoord[x][0] for x in range(struct.nodenum)))
xmax = max(list(struct.nodalcoord[x][0] for x in range(struct.nodenum)))
ymin = min(list(struct.nodalcoord[x][1] for x in range(struct.nodenum)))
ymax = max(list(struct.nodalcoord[x][1] for x in range(struct.nodenum)))
zmin = min(list(struct.nodalcoord[x][2] for x in range(struct.nodenum)))
zmax = max(list(struct.nodalcoord[x][2] for x in range(struct.nodenum)))
deltax = xmax - xmin
deltay = ymax - ymin
xframe = max(deltax * 0.05, 2)
yframe = max(deltay * 1.5, 2)
if struct.dof == 3:
plot_height = plot_width * ((deltay + yframe*2)/(deltax + xframe*2)) *0.3
else:
plot_height = plot_width * 0.5
fig.set_size_inches(plot_width, plot_height)
_ax.set_xlim3d(xmin - xframe, xmax + xframe)
_ax.set_ylim3d(ymin - yframe, ymax + yframe)
_ax.set_zlim3d(zmin - zframe, zmax + zframe)
if showorig == showresult:
_coloring = 0
# Giving plot names
if showorig == 1 and showresult == 0 and showsupports == 1 and showreactions == 0:
plotname += ' - Initial structure'
if showforces:
plotname += ' with forces'
elif showorig == 1 and showresult == 1:
plotname += ' - Deformation'
if showreactions == 0:
plotname += ' with reactions'
elif showorig == 0 and showresult == 1:
plotname += ' - Stresses'
if showreactions == 0:
plotname += ' with reactions'
else:
plotname += ' - Unnamed'
print plotname + ": "
if showresult:
dipslaydisplacement = deepcopy(struct.nodalcoord_def)
if scaledisp != 1.0:
if _LOG:
print 'Displacements are scaled with factor: ' + str(scaledisp)
for i in range(struct.nodenum):
for j in range(3):
dipslaydisplacement[i][j] = (struct.nodalcoord_def[i][j] -\
struct.nodalcoord[i][j]) * scaledisp + struct.nodalcoord[i][j]
for i in range(struct.elenum):
# Plot undeformed structure
if showorig:
_ax.plot([struct.nodalcoord[struct.node[i][1]][0], struct.nodalcoord[struct.node[i][0]][0]], \
[struct.nodalcoord[struct.node[i][1]][1], struct.nodalcoord[struct.node[i][0]][1]], \
zs=[struct.nodalcoord[struct.node[i][1]][2], struct.nodalcoord[struct.node[i][0]][2]], color='b')
# Plot deformed structure
if showresult:
if struct.postprocessed():
if struct.stresscolor[i] > 0:
if _coloring == 1:
rgb_col = [0, 0, abs(struct.stresscolor[i])]
elif _coloring == 2:
rgb_col = [1-abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i]), 1]
elif _coloring == 3:
rgb_col = [(1-abs(struct.stresscolor[i]))/2, \
(1-abs(struct.stresscolor[i]))/2, 1]
elif _coloring == 4:
rgb_col = [0, 1-abs(struct.stresscolor[i]), \
abs(struct.stresscolor[i])]
else:
rgb_col = [1, 0, 0]
else:
if _coloring == 1:
rgb_col = [abs(struct.stresscolor[i]), 0, 0]
elif _coloring == 2:
rgb_col = [1, 1-abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i])]
elif _coloring == 3:
rgb_col = [1, (1-abs(struct.stresscolor[i]))/2, \
(1-abs(struct.stresscolor[i]))/2]
elif _coloring == 4:
rgb_col = [abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i]), 0]
else:
rgb_col = [1, 0, 0]
else:
print 'Stresses are not calculated'
rgb_col = [1, 0, 0]
_ax.plot([dipslaydisplacement[struct.node[i][1]][0], dipslaydisplacement[struct.node[i][0]][0]], \
[dipslaydisplacement[struct.node[i][1]][1], dipslaydisplacement[struct.node[i][0]][1]], \
zs=[dipslaydisplacement[struct.node[i][1]][2], dipslaydisplacement[struct.node[i][0]][2]], color=rgb_col)
if showforces:
for i in struct.known_f_notzero:
if struct.force[i] < 0:
value = -1.0
else:
value = 1.0
if i % 3 == 0:
f_dir = [value*scale_f, 0., 0.]
elif i % 3 == 1:
f_dir = [0., value*scale_f, 0.]
else:
f_dir = [0., 0., value*scale_f*z_corr]
f_arrow = Arrow3D([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0] + f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1] + f_dir[1]], \
[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2] + f_dir[2]], \
mutation_scale=20, lw=1, arrowstyle="-|>", color="k")
_ax.add_artist(f_arrow)
if showreactions:
e_previous = -100
for i in struct.known_dis_a:
value = 0.0 # Maybe this is useless <XXX>
if struct.force[i] < 0:
value = -1.0
elif struct.force[i] > 0:
value = 1.0
if i % 3 == 0:
f_dir = [value*scale_f, 0., 0.]
elif i % 3 == 1:
f_dir = [0., value*scale_f, 0.]
else:
f_dir = [0., 0., value*scale_f*z_corr]
if abs(struct.force[i]) > 0:
f_arrow = Arrow3D([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0] + f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1] + f_dir[1]], \
[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2] + f_dir[2]], \
mutation_scale=20, lw=1, arrowstyle="-|>", color="darkolivegreen")
_ax.add_artist(f_arrow)
if showvalues:
_ax.set_xticklabels([])
_ax.set_yticklabels([])
_ax.set_zticklabels([])
if not i//3 == e_previous//3:
if struct.dof == 3:
_ax.text(struct.nodalcoord[i//3][0], \
struct.nodalcoord[i//3][1], \
struct.nodalcoord[i//3][2], \
"{:10.2f}".format(struct.force[(i//3)*3+0])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+1])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+2]),\
fontsize=12, horizontalalignment='right')
elif struct.dof == 2:
_ax.text(struct.nodalcoord[i//3][0], \
struct.nodalcoord[i//3][1], \
struct.nodalcoord[i//3][2], \
"{:10.2f}".format(struct.force[(i//3)*3+0])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+1]),\
fontsize=12, horizontalalignment='right')
e_previous = i
if showsupports:
for i in struct.known_dis_a:
if i % 3 == 0:
f_dir = [-1.0 * scale_sup, 0., 0.]
col = 'g'
elif i % 3 == 1:
f_dir = [0., -1.0 * scale_sup, 0.]
col = 'y'
else:
f_dir = [0., 0., -1.0 * scale_sup * z_corr]
col = 'brown'
if i % 3 != 2 or struct.dof == 3:
_ax.plot([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0]+f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1]+f_dir[1]], \
zs=[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2]+f_dir[2]], \
color=col, linewidth=4.0)
plt.show()
if saveplot:
fig.savefig(plotname + '.png')
print '\'' + plotname +'.png\' is saved.'
print '------------------------------------'
return
def endoffile(givenfile, line):
"""
Returns True while the end of the file has not been reached. Implemented this way for compatibility reasons.
"""
if _OSLIBRARYAVAILABLE:
return givenfile.tell() < os.fstat(givenfile.fileno()).st_size
else:
return not line == "EOF"
def logtime(prev_time, title):
"""
Calculates and prints the time consumed by a task.
Call it with the previously saved timestamp and the name of the current task;
at the first call, pass the TIC value. The saved timestamp should be
overwritten by this function's return value.
"""
if _LOG:
new_time = time.time()
print title
print 'Time: ' + str("{:10.3f}".format(new_time - prev_time))
print '------------------------------------'
return new_time
else:
return 0
def error(delta):
"""
Error function: Euclidean norm (square root of the sum of squares) of the residual vector
"""
sumerr = 0
for deltaelement in delta:
sumerr += deltaelement**2
sumerr = math.sqrt(sumerr)
return sumerr
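# Illustrative check (not part of the original file): for a residual vector
# delta = [3., 4.] the error is sqrt(3**2 + 4**2) = 5.0, i.e. error([3., 4.])
# returns 5.0.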
class Truss(object):
"""
General structure class
"""
def __init__(self, name):
self.name = name # Name of structure
self.known_f_a = [] # Nodes without supports
self.known_f_notzero = [] # Nodes with loads
self.dof = 3 # Truss's degree of freedom
self.node = [] # Element's end nodes
self.constraint = [] # Supports
self.force = [] # Force
self.nodalcoord = [] # Coordinates of nodes
self.nodalcoord_def = [] # Coordinates after deformations
self.area = [] # Cross-sectional areas
self.el_mod = [] # Material data
self.nodenum = 0 # Number of nodes
self.elenum = 0 # Number of elements
self.eledof = [] # Mapping between DOF and node
self.stiffness = [] # Global stiffness matrix
self.mod_stiffnesses = [] # Modified stiffnesses in a hyper-matrix
self.ele_length = [] # Length of the elements
self._norm_stiff = [] # E/L
self._cx = []
self._cy = []
self._cz = []
self._s_loc = []
self._loc_stiff = [] # Local stiffness matrix
self.dis_new = []
self.force_new = []
self.stiff_new = []
self.displacement = [] # Relative displacements
self._stiffisfresh = 0
self._postprocessed = 0
self.init_disp = []
self.stresscolor = [] # Color mapping for stresses
self.known_dis_a = []
self.stress = [] # Element's stresses
self._io_origin = 0 # Array's first element number during IO. Default is 0.
self.analysis = {}
self._mod_stiffisfresh = 0
self.mod_displacements = []
self.keypoint = []
self.keypnum = 0
self.effect = []
self.toteffect = []
self.sortedeff = []
self.specdof_inputstring = ''
self.tresshold = 0.1
self.effectratio = []
self.processeddata = [] # To store last input line
self.modifications = [] # Storing modifications for model updating
self.readelements = [0]*9
self.arduino_mapping = []
self.errorlimit = 0.5
self.modificationlimit = 0.6
self.unitmodification = 0.05
self.measurement = [0.]
self.numofupdates = [0, 0, 0] # [#Successfully updated model, #Updates with overflow exit, #Updates where there were no more modification option]
self.iterationlimit = 20
def read(self, filename):
"""
Input file for TRUSS.py program
All commands must be written in uppercase characters
*** The values MUST be written on the line immediately following a command
Only lines containing the command and nothing else count.
Everything else is ignored - even hashtags are useless :)
The order of the commands is arbitrary.
Commands and their format (example):
DOF - Degree of freedom: 3
ELEMENTS - Elements given by end-nodes: 0, 1 | 0, 2 ...
COORDINATES - Nodal coordinates: 0., 0., 0., | 0., 3., 0. ...
CROSS-SECTIONS - This data will be evaluated in Python: 3.0*(10**(-4)), 5.0*(10**(-4)) ...
MATERIALS - This data will be evaluated in Python: 70.0*(10**9), 100.0*(10**9) ...
FORCES - Selected DOF + Force: 11, +1000000.0 | 12, +1000000.0 ...
SUPPORTS - Selected DOF + Prescribed displacement: 0, 0.0 | 1, 0.0 ...
MEASUREMENTS - Selected node DOFs to be analysed during Model Updating, e.g.: 13X,16Y ...
EOF - For compatibility reasons EOF should be placed after the commands
"""
self._io_origin = 0
readelementnames = ["Origin", "DOF", "Elements", "Coordinates", "Cross-sections", "Materials", "Forces", "Supports", "Measured DOFs"]
with open(filename, "r") as sourcefile:
sourceline = ""
while endoffile(sourcefile, sourceline):
sourceline = sourcefile.readline().strip()
if sourceline.upper() == "_ORIGIN":
sourceline = sourcefile.readline().strip()
self._io_origin = int(sourceline)
self.readelements[0] = 1
if sourceline.upper() == "DOF":
sourceline = sourcefile.readline().strip()
self.setdof(int(sourceline))
self.readelements[1] = 1
if sourceline.upper() == "ELEMENTS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, int(x[1]) - self._io_origin] for x in inpstr]
self.setelements(inpnum)
self.readelements[2] = 1
if sourceline.upper() == "COORDINATES":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
if self.dof == 3:
inpnum = [[float(x[0]), float(x[1]), float(x[2])] for x in inpstr]
elif self.dof == 2:
inpnum = [[float(x[0]), float(x[1]), 0.] for x in inpstr]
self.setcoordinates(inpnum)
self.readelements[3] = 1
if sourceline.upper() == "CROSS-SECTIONS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = sourceline.split(',')
if len(inpstr) == 1:
inpstr = sourceline.split(';')
if '' in inpstr:
inpstr.remove('')
inpnum = [float(eval(x)) for x in inpstr]
self.setcrosssections(inpnum)
self.readelements[4] = 1
if sourceline.upper() == "MATERIALS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = sourceline.split(',')
if len(inpstr) == 1:
inpstr = sourceline.split(';')
if '' in inpstr:
inpstr.remove('')
inpnum = [float(eval(x)) for x in inpstr]
self.setmaterials(inpnum)
self.readelements[5] = 1
if sourceline.upper() == "FORCES":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, float(x[1])] for x in inpstr]
self.setforces(sorted(inpnum))
self.readelements[6] = 1
if sourceline.upper() == "SUPPORTS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, float(x[1])] for x in inpstr]
self.setsupports(sorted(inpnum))
self.readelements[7] = 1
if sourceline.upper() == "MEASUREMENTS":
sourceline = sourcefile.readline().strip()
self.specdof_inputstring = sourceline
inpstr = []
self.arduino_mapping = sourceline.split(',')
self.setspecdofs(self.arduino_mapping)
self.readelements[8] = 1
terminate = False
for i, value in enumerate(self.readelements):
if i > 0 and (i < 8 or _UPDATING):
#if i > 0:
if value == 0:
print "The following was not found: " + readelementnames[i]
terminate = True
if terminate:
raise Exception
def plot(self, showorig, showresult, showsupports, showforces, \
showreactions, scaledisplacement, scaleforce, scalez, saveplot):
"""
Plot function of the Truss class
This method calls the more general plotstructure() method.
Plot settings:
O: Original D: Deformed S: Supports F: Forces R: Reactions
ScD: Scale displacements (Z-axis) (def:1.0) ScF: Scale forces (def:1.0)
ScS: Scale Support signs (Z-axis) (def:1.0)
Save: Save plot to file
plot(O, D, S, F, R, ScD, ScF, ScS, Save)
"""
_showvalues = 1 # Show values of forces
if self._postprocessed == 0:
print 'Postprocess is needed before plotting structure!'
else:
if scaledisplacement == 0:
scaledisplacement = 1.0 # Scale drawing of displacements
if scaleforce == 0:
scaleforce = 1.0 # Scale force sign
if scalez == 0:
scalez = 0.3 # Scale z-axis
plotstructure(self, showorig, showresult, showsupports, showforces, showreactions, \
scaledisplacement, scaleforce, scalez, _showvalues, saveplot)
def __checkcoordinates(self, ignorable):
"""
Checking coordinates for repeating elements.
ignorable: [True | False] If the warning is ignorable, only a message appears and the input is ignored.
If the warning is not ignorable, an exception is raised.
return: [0 | 1] 1 if no error was found, otherwise 0.
"""
if len(self.nodalcoord) != len(list(k for k, _ in itertools.groupby(sorted(self.nodalcoord)))):
if ignorable == 0:
raise Exception('Coordinate list has repeating items. Calculation is terminated')
else:
print "This node already exists. Input is ignored."
return 0
else:
return 1
def setdof(self, dof):
"""
Setting problem's degree of freedom
dof: [2 | 3] Model's Degree Of Freedom.
"""
self.dof = dof
if self.dof != 2 and self.dof != 3:
raise Exception('DOF must be 2 or 3.')
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setelements(self, node):
"""
Setting elements (nodal connections) in bulk mode
"""
self.node = node
self.nodenum = len(set(list(itertools.chain.from_iterable(sorted(self.node)))))
self.elenum = len(self.node)
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
# Creating mapping tool for elements
for node in self.node:
self.eledof.append([node[0]*3, node[0]*3+1, node[0]*3+2, \
node[1]*3, node[1]*3+1, node[1]*3+2])
# Initializing the global displacement, force and stiffness containers
self.init_disp = [0.]*(3*self.nodenum)
self.force = [0.]*(3*self.nodenum)
self.stiffness = [0.]*(3*self.nodenum)
self.known_f_a = []
self.known_f_notzero = []
def setcoordinates(self, coordinates):
"""
Setting coordinates in bulk mode
"""
self.nodalcoord = coordinates
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
if self.nodenum > len(self.nodalcoord):
raise Exception('More coordinates are needed')
elif self.node == []:
raise Exception('Nodes must be set before defining elements')
self.__checkcoordinates(False)
def modcoordinate(self, node, coordinate):
"""
Modify coordinate
"""
if self.__checkcoordinates(True):
self.nodalcoord[node] = coordinate
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
def setcrosssections(self, area):
"""
Setting cross-sections in bulk mode
"""
self.area = area
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def modcrosssection(self, element, area):
"""
Modifying cross-sections by elements
"""
self.area[element] = area
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setmaterials(self, el_mod):
"""
Setting material data in bulk mode
"""
self.el_mod = el_mod
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def modmaterial(self, element, el_mod):
"""
Modifying material data by elements
"""
self.el_mod[element] = el_mod
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setforces(self, forces):
"""
Set forces
"""
for fdof, force in forces:
if self.dof == 3:
self.force[fdof] = force
elif self.dof == 2:
self.force[fdof + (fdof//2)] = force
self._postprocessed = 0
def modforce(self, element, force):
"""
Modifying a single force component
"""
self.force[element] = force
self._postprocessed = 0
def setsupports(self, constraints):
"""
Set supports
"""
for cdof, constraint in constraints:
if self.dof == 3:
self.constraint.append([cdof, constraint])
elif self.dof == 2:
self.constraint.append([cdof + (cdof // 2), constraint])
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setspecdofs(self, specdofs):
"""
Set special nodal DOFs
"""
self.analysis = {}
for dofname in specdofs:
node = int(dofname[:len(dofname)-1])-self._io_origin
if 'X' in dofname:
self.analysis[dofname] = node*3+0
self.keypoint.append(node*3+0)
if 'Y' in dofname:
self.analysis[dofname] = node*3+1
self.keypoint.append(node*3+1)
if 'Z' in dofname:
if self.dof == 3:
self.analysis[dofname] = node*3+2
self.keypoint.append(node*3+2)
else:
print "Z-direction is not allowed in 2D structures. Please check the \'MEASUREMENTS\' section in the input file."
raise Exception
self.keypnum = len(self.analysis)
if self.keypnum == 0 and _UPDATING:
print "There is no valid measured DOF. Please check the \'MEASUREMENTS\' section in the input file."
raise Exception
def calcstiffness(self):
"""
Stiffness matrix compilation
"""
self._postprocessed = 0
if self.dof == 2:
for zdof in range(self.nodenum):
self.constraint.append([int(zdof*3+2), 0.])
self.constraint = list(k for k, _ in itertools.groupby(sorted(self.constraint)))
#Setting known forces
for dofloc in range(3*self.nodenum):
self.known_f_a.append(dofloc)
if self.force[dofloc] != 0:
self.known_f_notzero.append(dofloc)
self.known_dis_a = []
for constr in self.constraint:
self.init_disp[constr[0]] = constr[1]
self.known_dis_a.append(constr[0])
try:
self.known_f_a.remove(constr[0])
self.known_f_notzero.remove(constr[0])
except ValueError:
pass
ele_length = [0.]*self.elenum
self._norm_stiff = [0.]*self.elenum
self._cx = [0.]*self.elenum
self._cy = [0.]*self.elenum
self._cz = [0.]*self.elenum
self._s_loc = [0.]*self.elenum
self._loc_stiff = [0.]*self.elenum
self.stress = [0.]*self.elenum
self.stiffness = [[0.]*(len(self.nodalcoord)*3)]*(len(self.nodalcoord)*3)
for i in range(self.elenum):
ele_length[i] = math.sqrt((self.nodalcoord[self.node[i][1]][0]-self.nodalcoord[self.node[i][0]][0])**2+ \
(self.nodalcoord[self.node[i][1]][1]-self.nodalcoord[self.node[i][0]][1])**2 + \
(self.nodalcoord[self.node[i][1]][2]-self.nodalcoord[self.node[i][0]][2])**2)
self._cx[i] = (self.nodalcoord[self.node[i][1]][0]-self.nodalcoord[self.node[i][0]][0])/ele_length[i]
self._cy[i] = (self.nodalcoord[self.node[i][1]][1]-self.nodalcoord[self.node[i][0]][1])/ele_length[i]
self._cz[i] = (self.nodalcoord[self.node[i][1]][2]-self.nodalcoord[self.node[i][0]][2])/ele_length[i]
self._norm_stiff[i] = self.el_mod[i]/ele_length[i]
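# Added note: _s_loc below is the geometric part of the 6x6 truss-element
# stiffness matrix, [[c*c^T, -c*c^T], [-c*c^T, c*c^T]] with the direction
# cosines c = [cx, cy, cz]; scaling it by E*A/L (area * _norm_stiff) gives
# _loc_stiff, which is then scattered into the global matrix via eledof.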
self._s_loc[i] = [[self._cx[i]**2, self._cx[i]*self._cy[i], self._cx[i]*self._cz[i], -self._cx[i]**2, -self._cx[i]*self._cy[i], -self._cx[i]*self._cz[i]], \
[self._cx[i]*self._cy[i], self._cy[i]**2, self._cy[i]*self._cz[i], -self._cx[i]*self._cy[i], -self._cy[i]**2, -self._cy[i]*self._cz[i]], \
[self._cx[i]*self._cz[i], self._cy[i]*self._cz[i], self._cz[i]**2, -self._cx[i]*self._cz[i], -self._cy[i]*self._cz[i], -self._cz[i]**2], \
[-self._cx[i]**2, -self._cx[i]*self._cy[i], -self._cx[i]*self._cz[i], self._cx[i]**2, self._cx[i]*self._cy[i], self._cx[i]*self._cz[i]], \
[-self._cx[i]*self._cy[i], -self._cy[i]**2, -self._cy[i]*self._cz[i], self._cx[i]*self._cy[i], self._cy[i]**2, self._cy[i]*self._cz[i]], \
[-self._cx[i]*self._cz[i], -self._cy[i]*self._cz[i], -self._cz[i]**2, self._cx[i]*self._cz[i], self._cy[i]*self._cz[i], self._cz[i]**2]]
self._loc_stiff[i] = [[y* self.area[i]* self._norm_stiff[i] for y in x] for x in self._s_loc[i]]
ele_dof_vec = self.eledof[i]
stiffincrement = [0.]*(len(self.nodalcoord)*3)
for j in range(3*2):
for k in range(3*2):
stiffincrement[ele_dof_vec[k]] = self._loc_stiff[i][j][k]
self.stiffness[ele_dof_vec[j]] = [x + y for x, y in zip(self.stiffness[ele_dof_vec[j]], stiffincrement)]
self._stiffisfresh = 1
def calcmodstiffness(self, index, magnitude):
"""
Convergence step in stiffness matrix modification
"""
if self.mod_stiffnesses == []:
self.mod_stiffnesses = [0.]*(self.elenum+1)
#for loopindex in range(self.elenum):
_mod_stiffnesses_temp = [[0.]*(len(self.nodalcoord)*3)]*(len(self.nodalcoord)*3)
for i in range(self.elenum):
if i == index:
_mod_norm_stiff = self._norm_stiff[i] * (1.0 + self.modifications[i] + magnitude) #self.el_mod[i]/ele_length[i]
else:
_mod_norm_stiff = self._norm_stiff[i] * (1.0 + self.modifications[i]) #self.el_mod[i]/ele_length[i]
_mod_loc_stiff = [[y*self.area[i]*_mod_norm_stiff for y in x] for x in self._s_loc[i]]
ele_dof_vec = self.eledof[i]
stiffincrement = [0.]*(len(self.nodalcoord)*3)
for j in range(3*2):
for k in range(3*2):
stiffincrement[ele_dof_vec[k]] = _mod_loc_stiff[j][k]
_mod_stiffnesses_temp[ele_dof_vec[j]] = [x + y for x, y in zip(_mod_stiffnesses_temp[ele_dof_vec[j]], stiffincrement)]
self.mod_stiffnesses[index] = _mod_stiffnesses_temp
def solve(self):
"""
Main solver of the code
"""
if self._stiffisfresh == 0:
if _LOG:
print 'Stiffness matrix is recalculated'
self.calcstiffness()
self.dis_new = [0.]*(self.nodenum*3-len(self.constraint))
self.force_new = [0.]*(self.nodenum*3-len(self.constraint))
self.stiff_new = [[0.]*(self.nodenum*3-len(self.constraint))]*(self.nodenum*3-len(self.constraint))
# known force array
for i, known_f_a in enumerate(self.known_f_a):
self.force_new[i] = self.force[known_f_a]
stiffincrement = [0.]*(self.nodenum*3-len(self.constraint))
for i, kfai in enumerate(self.known_f_a):
for j, kfaj in enumerate(self.known_f_a):
stiffincrement[j] = self.stiffness[kfai][kfaj]
self.stiff_new[i] = [x + y for x, y in zip(self.stiff_new[i], stiffincrement)]
# SOLVING THE STRUCTURE
if _SOLVER == 0:
if _LOG:
print 'Built-in solver'
self.dis_new = mat_vec_mult(invert(self.stiff_new), self.force_new)
else:
if _LOG:
print 'NumPy solver'
self.dis_new = np.linalg.solve(np.array(self.stiff_new), np.array(self.force_new))
self.displacement = deepcopy(self.init_disp)
for i, known_f_a in enumerate(self.known_f_a):
self.displacement[known_f_a] = self.dis_new[i]
# Deformed shape
self.nodalcoord_def = []
for i in range(self.nodenum):
self.nodalcoord_def.append([self.nodalcoord[i][0]+ self.displacement[i*3+0], \
self.nodalcoord[i][1]+ self.displacement[i*3+1], self.nodalcoord[i][2]+ self.displacement[i*3+2]])
# Postprocess
self.postprocess()
self.mod_displacements = [0.]*(self.elenum+1)
def solvemodstruct(self, index):
"""
Solver for the modified structures. 'Index' shows the actual modification number.
"""
self.mod_displacements[index] = [0.]*(self.nodenum*3)
dis_new = [0.]*(self.nodenum*3-len(self.constraint))
stiff_new = [[0.]*(self.nodenum*3-len(self.constraint))]*(self.nodenum*3-len(self.constraint))
stiffincrement = [0.]*(self.nodenum*3-len(self.constraint))
for i, kfai in enumerate(self.known_f_a):
for j, kfaj in enumerate(self.known_f_a):
stiffincrement[j] = self.mod_stiffnesses[index][kfai][kfaj]
stiff_new[i] = [x + y for x, y in zip(stiff_new[i], stiffincrement)]
# SOLVING THE MODIFIED STRUCTURE
if _SOLVER == 0:
dis_new = mat_vec_mult(invert(stiff_new), self.force_new)
else:
dis_new = np.linalg.solve(np.array(stiff_new), np.array(self.force_new))
mod_displacement_temp = deepcopy(self.init_disp)
for i, kfa in enumerate(self.known_f_a):
mod_displacement_temp[kfa] = dis_new[i] - self.dis_new[i]
self.mod_displacements[index] = [x + y for x, y in zip(self.mod_displacements[index], mod_displacement_temp)]
def evaluate(self):
"""
Calculates the relative displacement of each individual available unit-modification
compared to the measured differences (delta).
delta: [DOF number, difference]
return effect: [effect on 1st point, effect on 2nd point, ..., modification number]
where each row index corresponds to the modification number
"""
self.effect = [[0.]*(self.keypnum + 2)]*self.elenum
self.toteffect = [0.]*self.keypnum
self.sortedeff = [[[0.]*(self.keypnum + 2)]*self.elenum]*self.keypnum
effect_temp = [0.]*(self.keypnum + 2)
for modnum in range(self.elenum):
effect_temp[self.keypnum] = int(modnum)
for j, dofnum in enumerate(self.keypoint):
try:
effect_temp[j] = self.mod_displacements[modnum][dofnum]
self.effect[modnum] = [x for x in effect_temp]
self.toteffect[j] += abs(self.effect[modnum][j])
except IndexError:
print "Maybe the mapping data is invalid."
print "Please check the \'arduino_mapping.txt\' input whether the given DOFs are correct or not."
raise IndexError
self.effectratio = deepcopy(self.effect)
for i in range(self.elenum):
for j in range(self.keypnum):
if self.toteffect[j] > 0:
self.effectratio[i][j] = abs(self.effectratio[i][j]/self.toteffect[j])
else:
self.effectratio[i][j] = 0
#print " \'effectratio\' is not used yet"
# Sort by effectiveness
for i in range(self.keypnum):
self.sortedeff[i] = deepcopy(self.effect)
# Check sign of the effect
for ktemp in range(self.elenum):
if self.sortedeff[i][ktemp][i] < 0:
for jtemp in range(self.keypnum):
self.sortedeff[i][ktemp][jtemp] = abs(self.sortedeff[i][ktemp][jtemp])
self.sortedeff[i][ktemp][self.keypnum +1] = -1
else:
self.sortedeff[i][ktemp][self.keypnum +1] = +1
for j in range(self.keypnum):
if i != j and j != 0:
self.sortedeff[i] = swap_col(sorted(swap_col(self.sortedeff[i], 0, j), reverse=True), 0, j)
if i != 0:
self.sortedeff[i] = swap_col(sorted(swap_col(self.sortedeff[i], 0, i), reverse=True), 0, i)
else:
self.sortedeff[i] = sorted(self.sortedeff[i], reverse=True)
def difference(self, num_displ, measurement):
"""
Calculate the difference between the Numerical solution and Real-life measurement.
The Real-life measurement should be given the following format:
MEASUREMENT: [[13X, -2.154], [16Y, 5.256], ...]
"""
#Print nodenumber option should be added! <XXX>
delta = []
for loc, measured in measurement:
try:
dof = self.analysis[loc.upper()]
except KeyError:
print 'The given measurement location cannot be matched with the input data.'
print 'The available nodes are: {\'NAMES\': mapping addresses}'
print self.analysis
SER.close()
raise Exception('Watchpoint name error')
delta.append(measured - num_displ[dof])
return delta
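# Illustrative call (watchpoint name assumed): truss.difference(truss.displacement,
# [('13X', -2.154)]) returns the list of (measured - computed) values for each
# watched DOF.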
def optimize(self, delta):
"""
Model updating - core function
"""
#modnum = min(10, self.elenum)
modnum = self.elenum
self.modifications = [0.0]*self.elenum
if not _SIMULATION:
appendix = ""
else:
appendix = " - SIMULATED"
newdelta = delta
j = 0
print "-----"
print "Step: 0/"+ str(self.iterationlimit)
while (error(newdelta) > self.errorlimit and j <= self.iterationlimit and (self.capable() or j <= 1)): # Optimization loop
j += 1
print "Error: " + str(error(newdelta))
print "-----"
print "Step: " + str(j) + "/"+ str(self.iterationlimit)
ratio = [0.]*modnum
unit = 0
prevmodifications = self.modifications
for index in range(self.elenum):
self.modifications[index] = min(abs(self.modifications[index] - self.unitmodification), self.modificationlimit) *math.copysign(1, self.modifications[index]- self.unitmodification)
self.calcmodstiffness(index, self.modifications[index])
self.solvemodstruct(index)
self.evaluate()
self.calcmodstiffness(self.elenum, 0)
self.solvemodstruct(self.elenum)
newdelta = self.difference(self.mod_displacements[self.elenum], self.measurement)
for i, effect in enumerate(self.toteffect):
if effect == 0.0:
print "None of the variables has effect on " + str(self.arduino_mapping[i])
print "Model updating has no solution."
raise Exception
for i in range(self.elenum):
modificationnumber = self.sortedeff[0][i][1]
ratio[modificationnumber] = abs(self.sortedeff[0][i][0] / self.toteffect[0])*math.copysign(1, self.sortedeff[0][i][2])
unit += abs(ratio[modificationnumber]*self.sortedeff[0][i][0])
scale = newdelta[0]/unit
for i in range(self.elenum):
modificationnumber = self.sortedeff[0][i][1]
self.modifications[modificationnumber] = min(abs(prevmodifications[modificationnumber] - self.unitmodification*ratio[modificationnumber]), self.modificationlimit)\
*math.copysign(1, prevmodifications[modificationnumber] - self.unitmodification*ratio[modificationnumber])
# the last part is already the sign itself without the sign function
print "Ratio: " + str(scale)
print "Final error: " + str(error(newdelta))
if not self.capable() and j > 1:
print "Optimization could not be finished successfully."
print "The remaining error is: " + str(error(newdelta))
with open(self.name + ' - UpdateResults'+ appendix +'.txt', 'a') as outfile:
if j > 1:
if j <= self.iterationlimit and self.capable():
self.numofupdates[0] += 1
outfile.write("Update state: SUCCESSFUL\n")
if not j <= self.iterationlimit:
self.numofupdates[1] += 1
outfile.write("Update state: Run out of iteration limit\n")
if not self.capable() and j > 1:
self.numofupdates[2] += 1
outfile.write("Update state: No more possible modification\n")
else:
outfile.write("Update state: Optimization was skipped\n")
outfile.write("Requiered iterations: " + str(j) + "\n")
outfile.write("Measurement: " + str(self.measurement) + "\n")
outfile.write("Original delta: " + str(delta) + "\n")
outfile.write("New delta: " + str(newdelta) + " (limit: " + str(self.errorlimit) +")\n")
outfile.write("Final error: " + str(error(newdelta)) + "\n")
outfile.write("Modifications [%]: \n")
outfile.write(str(self.modifications) + "\n")
outfile.write("Original displacements: \n")
outfile.write(str(self.displacement) + "\n")
if j > 1:
outfile.write("New displacements: \n")
outfile.write(str(self.mod_displacements[self.elenum]) + "\n")
outfile.write("----------------------\n")
def capable(self):
"""
Function telling whether there are more options to modify
"""
capable = False
for variable in self.modifications:
if abs(variable) <= 0.95*self.modificationlimit and abs(variable) > 0.01:
capable = True
return capable
def seterrorlimit(self, errorlimit):
"""
Setting general stop parameter for model updating
"""
if errorlimit > 0.0:
self.errorlimit = errorlimit
else:
print "The error limit must be a positive number"
raise Exception
def setmodificationlimit(self, modificationlimit):
"""
Setting modification limit for members (model updating)
"""
if modificationlimit > 0.0 and modificationlimit < 1.0:
self.modificationlimit = modificationlimit
else:
print "The modification limit must be higher than 0.0 and lower than 1.0"
raise Exception
def setunitmodification(self, unitmodification):
"""
Setting modification step (model updating)
"""
if abs(unitmodification) >= 0.01 and abs(unitmodification) < 0.5:
self.unitmodification = unitmodification
else:
print "The absolut value of the unit modification must be minimum 0.01 and maximum 0.5"
raise Exception
def setiterationlimit(self, iterationlimit):
"""
Setting maximum number of iterations (model updating)
"""
if int(iterationlimit) > 1 and int(iterationlimit) <= math.pow(10, 4):
self.iterationlimit = int(iterationlimit)
else:
print "The iterationlimit must be between 2 and 10.000"
raise Exception
def readarduino(self, base, saveinput):
"""
Read data from Arduino
"""
# Read data from Arduino
maxdifference = 0.8 # Maximum input difference threshold in mm
arduinovalues = []
data = [0.]*len(self.arduino_mapping)
newdata = False
bigdifference = False
readerror = False
try:
arduinoline = SER.readline()
if len(arduinoline) > 0:
arduinovalues = arduinoline.split(',')
try:
if arduinovalues[0][len(arduinovalues)-1] == '.':
arduinovalues[0] = arduinovalues[0][:len(arduinovalues[0])-2]
else:
del arduinovalues[len(arduinovalues)-1]
except IndexError:
print "Index Error... continuing"
if len(arduinovalues) == len(self.arduino_mapping):
try:
for i in range(len(self.arduino_mapping)):
data[i] = float(arduinovalues[i]) - float(base[i][1])
if abs(data[i] - self.processeddata[i]) > maxdifference:
bigdifference = True
if abs(float(arduinovalues[i])) < 2.0:
readerror = True
self.processeddata = data
newdata = True
except ValueError:
print "Value error... continuing"
SER.flushInput()
time.sleep(0.5)
except Exception:
print "Type error: " + str(arduinovalues) + "... continuing"
SER.flushInput()
time.sleep(0.5)
SER.flushInput()
except serial.SerialTimeoutException:
print "Data could not be read... continuing"
SER.flushInput()
time.sleep(0.5)
if newdata and not bigdifference and not readerror:
self.measurement = zip(self.arduino_mapping, data)
saveinput.write(str(data) +', '+ str(time.time()) + "\n")
# Calculate differences
delta = self.difference(self.displacement, self.measurement)
print "Delta: " + str(delta)
newdata = False
bigdifference = False
readerror = False
return delta
newdata = False
bigdifference = False
readerror = False
def simulatearduino(self, arduinoline, prevline):
"""
Simulate data, based on previous measurement
"""
arduinovalues = []
data = [0.]*len(self.arduino_mapping)
skip = 0
sleeptime = 0.
try:
try:
prevreadtime = float(str(prevline.split(']')[1]).split(',')[1])
nowreadtime = float(str(arduinoline.split(']')[1]).split(',')[1])
try:
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 18:49:40 2016
!!! ORIGINAL COPYRIGHT NOTES
3D truss program
# In case of special conditions, compatible mode can help get rid of all the unnecessary features
# COMPATIBILITY SETTINGS # OVERWRITES ALL OTHER SETTINGS HERE BELOW
# 0-Basic solver 1-NumPy solver
# Importing modules
# Mathematical library | Vector drawing library | Deepcopy for matrix inversion
Other libraries might be imported later due to compatibility reasons.
"""
### Code for "Processing" only
#size(640, 360)
#background(126)
import math
import itertools
from copy import deepcopy
# COMPATIBILITY MODES:
# 0: User defined
# 1: Processing3
# 2: Android
# 3: Most information (with numpy)
# 4: Maximum compatibility
_COMPATIBLE_MODE = 0
_SIMULATION = 1 # Simulating measurements based on input file
PORTS = ['COM1', 'COM2', 'COM3'] # List of possible communication ports
PORTNUMBER = 0 # Applied communication port
if _COMPATIBLE_MODE == 00:
### User defined ###
# Modify as needed #
_MODE_NAME = "User defined"
_LOG = 1 # Logging time
_GRAPHICS = 1 # Graphical features
_SOLVER = 1 # 0: Basic solver, 1: NumPy solver
_OSLIBRARYAVAILABLE = 1 # Basic OS file features (e.g. file size)
_UPDATING = 1 # Model Updating: On/ Off
_ARDUINO = 1 # Arduino input: On/Off
_DEBUG = 1 # Debugging mode
_REALISTICSIMULATION = 0 # Wait as long as it was originally. Only valid with _SIMULATION = 1
##############################################
### DO NOT MODIFY ### #
#
elif _COMPATIBLE_MODE == 01: #
### "Processing 3" mode ### #
_MODE_NAME = "Processing 3" #
_LOG = 1*1 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 0*0 #
_UPDATING = 1*1 #
_ARDUINO = 1*1 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
elif _COMPATIBLE_MODE == 02: #
### Android mode ### #
# DO NOT MODIFY #
_MODE_NAME = "Android" #
_LOG = 1*1 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 1*1 #
_UPDATING = 0*0 #
_ARDUINO = 0*0 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
elif _COMPATIBLE_MODE == 03: #
### Informative ### #
# DO NOT MODIFY #
_MODE_NAME = "Informative mode" #
_LOG = 1*1 #
_GRAPHICS = 1*1 #
_SOLVER = 1*1 #
_OSLIBRARYAVAILABLE = 1*1 #
_UPDATING = 1*1 #
_ARDUINO = 1*1 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
else: #
### Maximum compatibility ### #
# DO NOT MODIFY #
_MODE_NAME = "Maximum compatibility" #
_LOG = 0*0 #
_GRAPHICS = 0*0 #
_SOLVER = 0*0 #
_OSLIBRARYAVAILABLE = 0*0 #
_UPDATING = 0*0 #
_ARDUINO = 0*0 #
_DEBUG = 0*0 #
_REALISTICSIMULATION = 1*1 #
#
### DO NOT MODIFY ### #
##############################################
if _OSLIBRARYAVAILABLE:
import os
if _SIMULATION or not _UPDATING:
_ARDUINO = 0
if _COMPATIBLE_MODE == 2:
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if _LOG:
import time
import datetime
TIC = time.time()
print '------------------------------------'
print 'Truss calculational program'
print 'Created by Máté Szedlák (23/11/2016)'
print 'Compatibility mode: ' + _MODE_NAME
if _SOLVER == 0:
print '- Solver is set to default'
elif _SOLVER == 1:
print '- Solver is set to NumPy'
else:
raise Exception("Solver settings are invalid!")
if _UPDATING:
print '+ Model updating is turned ON'
if _SIMULATION:
print 'Input data is SIMULATED!'
else:
print '- Model updating is turned OFF'
print '------------------------------------'
if _ARDUINO:
SER = 0
try:
import serial
except ImportError:
print "You tried to import \'serial\' in Windows mode without installing \'pySerial\'."
print "Please first install pySerial: http://playground.arduino.cc/Interfacing/Python"
raise Exception('Android mode denied: pyserial not found')
PORTNUMBER -= 1
while SER == 0:
PORTNUMBER += 1
if PORTNUMBER >= len(PORTS):
PORTNUMBER = 0
time.sleep(0.6)
print 'Opening serial at port ' + str(PORTS[PORTNUMBER])
try:
SER.close()
except Exception:
pass
try:
SER = serial.Serial(PORTS[PORTNUMBER], 9600, timeout=0)
except serial.SerialException:
print PORTS[PORTNUMBER] + ' port is busy. It might be occupied by this program or another one :/ Be careful or try resetting this program'
SER = 0
try:
SER.close()
except Exception:
pass
except Exception:
SER = 0
try:
SER.close()
except Exception:
pass
#if _ARDUINO or _SIMULATION:
# try:
# mappingfile = 'arduino_mapping.txt'
# with open(mappingfile, "r") as textfile:
# line = textfile.readline().strip()
# arduino_mapping = line.upper().split(',')
#
# except IOError:
# raise Exception('File not found: ' + mappingfile)
if _SOLVER:
# NumPy library for solving linear equations in another way
import numpy as np
if _GRAPHICS:
# libraries for drawing
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
# From here:
def mat_vec_mult(mat_a, vec_b):
"""
Multiplying matrix with a vector, giving the result as a vector
Source:
https://stackoverflow.com/questions/10508021/matrix-multiplication-in-python
"""
vec_c = [0.]*len(mat_a)
for i, row in enumerate(mat_a):
for j, elem in enumerate(vec_b):
vec_c[i] += row[j]*elem
return vec_c
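# Quick sanity example (not part of the original file):
# mat_vec_mult([[1., 2.], [3., 4.]], [1., 1.]) returns [3.0, 7.0].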
def invert(mat_x):
"""
Invert a matrix X using Gauss-Jordan elimination
In Gauss-Jordan elimination, we perform basic row operations to turn a matrix into
row-echelon form. If we concatenate an identity matrix to our input
matrix during this process, we will turn the identity matrix into our inverse.
X - input list of lists where each list is a matrix row
output - inverse of X
Source:
http://www.vikparuchuri.com/blog/inverting-your-very-own-matrix/
"""
#copy X to avoid altering input
mat_x = deepcopy(mat_x)
#Get dimensions of X
rows = len(mat_x)
cols = len(mat_x[0])
#Get the identity matrix and append it to the right of mat_x
#This is done because our row operations will make the identity into the inverse
identity = make_identity(rows, cols)
for i in xrange(0, rows):
mat_x[i] += identity[i]
i = 0
for j in xrange(0, cols):
#print("On col {0} and row {1}".format(j, i))
#Check to see if there are any nonzero values below the current row in the current column
zero_sum, first_non_zero = check_for_all_zeros(mat_x, i, j)
#If everything is zero, increment the columns
if zero_sum == 0:
if j == cols:
return mat_x
raise Exception("Matrix is singular")
#If mat_x[i][j] is 0, and there is a nonzero value below it, swap the two rows
if first_non_zero != i:
mat_x = swap_row(mat_x, i, first_non_zero)
#Divide mat_x[i] by mat_x[i][j] to make mat_x[i][j] equal 1
mat_x[i] = [m/mat_x[i][j] for m in mat_x[i]]
#Rescale all other rows to make their values 0 below mat_x[i][j]
for k in xrange(0, rows):
if k != i:
scaled_row = [mat_x[k][j] * m for m in mat_x[i]]
mat_x[k] = [mat_x[k][m] - scaled_row[m] for m in xrange(0, len(scaled_row))]
#If either of these is true, we have iterated through the matrix, and are done
if i == rows or j == cols:
break
i += 1
#Get just the right hand matrix, which is now our inverse
for i in xrange(0, rows):
mat_x[i] = mat_x[i][cols:len(mat_x[i])]
return mat_x
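# Quick sanity example (not part of the original file), values shown rounded:
# invert([[4., 7.], [2., 6.]]) ~ [[0.6, -0.7], [-0.2, 0.4]] (determinant is 10).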
def check_for_all_zeros(mat_x, i, j):
"""
Check matrix mat_x to see if only zeros exist at or below row i in column j
mat_x - a list of lists
i - row index
j - column index
returns -
zero_sum - the number of nonzero entries at or below row i (despite the name)
first_non_zero - row index of the first nonzero value, or -1 if there is none
"""
non_zeros = []
first_non_zero = -1
for k in xrange(i, len(mat_x)):
non_zero = mat_x[k][j] != 0
non_zeros.append(non_zero)
if first_non_zero == -1 and non_zero:
first_non_zero = k
zero_sum = sum(non_zeros)
return zero_sum, first_non_zero
def swap_row(mat_x, i, j):
"""
Swap row i and row j in a list of lists
mat_x - list of lists
i - row index
j - row index
returns- modified matrix
"""
mat_x[j], mat_x[i] = mat_x[i], mat_x[j]
return mat_x
def swap_col(mat_x, i, j):
"""
Swap column i and column j in a list of lists
mat_x - list of lists
i - column index
j - column index
returns- modified matrix
"""
for item in mat_x:
item[i], item[j] = item[j], item[i]
return mat_x
def make_identity(row_num, col_num):
"""
Make an identity matrix with dimensions rxc
row_num - number of rows
col_num - number of columns
returns - list of lists corresponding to the identity matrix
"""
identity = []
for i in xrange(0, row_num):
row = []
for j in xrange(0, col_num):
elem = 0
if i == j:
elem = 1
row.append(elem)
identity.append(row)
return identity
if _GRAPHICS:
class Arrow3D(FancyArrowPatch):
"""
Vector drawer module from the internet
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
_xs3d, _ys3d, _zs3d = self._verts3d
_xs, _ys, _zs = proj3d.proj_transform(_xs3d, _ys3d, _zs3d, renderer.M)
self.set_positions((_xs[0], _ys[0]), (_xs[1], _ys[1]))
FancyArrowPatch.draw(self, renderer)
def plotstructure(struct, showorig, showresult, showsupports, \
showforces, showreactions, scaledisp, scale_f, z_corr, showvalues, saveplot):
"""
General plotting method for structures
scaledisp: Scale drawing of displacements
scale_f: Scale force sign
z_corr: Scale z-axis
"""
plotname = struct.name
plot_width = 18.0 # Plot width in inches
xframe = 0 # Frame width at X direction
yframe = 0 # Frame width at Y direction
zframe = 0 # Frame width at Z direction
scale_sup = scale_f*0.3 # Scale support sign # All the others are input parameters
# Stress coloring settings [R G B] - Examples considering pressure:
# 0: [1, 0, 0] Plain red
# 1: [x, 0, 0] Red to Black
# 2: [1, 1-x, 0-x] Red to White
# 3: [1, (1-x)/2, (1-x)/2] Red to MildRed - Distinguishes pressure and tension
# 4: [x, 1-x, 0] Red to Green
_coloring = 3 # ∈ [0, 1, 2, 3, 4]
fig = plt.figure()
_ax = fig.add_subplot(111, projection='3d')
if struct.dof == 2:
_ax.view_init(elev=90., azim=-90.)
_ax.w_zaxis.line.set_lw(0.)
_ax.set_zticklabels([])
xmin = min(list(struct.nodalcoord[x][0] for x in range(struct.nodenum)))
xmax = max(list(struct.nodalcoord[x][0] for x in range(struct.nodenum)))
ymin = min(list(struct.nodalcoord[x][1] for x in range(struct.nodenum)))
ymax = max(list(struct.nodalcoord[x][1] for x in range(struct.nodenum)))
zmin = min(list(struct.nodalcoord[x][2] for x in range(struct.nodenum)))
zmax = max(list(struct.nodalcoord[x][2] for x in range(struct.nodenum)))
deltax = xmax - xmin
deltay = ymax - ymin
xframe = max(deltax * 0.05, 2)
yframe = max(deltay * 1.5, 2)
if struct.dof == 3:
plot_height = plot_width * ((deltay + yframe*2)/(deltax + xframe*2)) *0.3
else:
plot_height = plot_width * 0.5
fig.set_size_inches(plot_width, plot_height)
_ax.set_xlim3d(xmin - xframe, xmax + xframe)
_ax.set_ylim3d(ymin - yframe, ymax + yframe)
_ax.set_zlim3d(zmin - zframe, zmax + zframe)
if showorig == showresult:
_coloring = 0
# Giving plot names
if showorig == 1 and showresult == 0 and showsupports == 1 and showreactions == 0:
plotname += ' - Initial structure'
if showforces:
plotname += ' with forces'
elif showorig == 1 and showresult == 1:
plotname += ' - Deformation'
if showreactions == 0:
plotname += ' with reactions'
elif showorig == 0 and showresult == 1:
plotname += ' - Stresses'
if showreactions == 0:
plotname += ' with reactions'
else:
plotname += ' - Unnamed'
print plotname + ": "
if showresult:
dipslaydisplacement = deepcopy(struct.nodalcoord_def)
if scaledisp != 1.0:
if _LOG:
print 'Displacements are scaled with factor: ' + str(scaledisp)
for i in range(struct.nodenum):
for j in range(3):
dipslaydisplacement[i][j] = (struct.nodalcoord_def[i][j] -\
struct.nodalcoord[i][j]) * scaledisp + struct.nodalcoord[i][j]
for i in range(struct.elenum):
# Plot undeformed structure
if showorig:
_ax.plot([struct.nodalcoord[struct.node[i][1]][0], struct.nodalcoord[struct.node[i][0]][0]], \
[struct.nodalcoord[struct.node[i][1]][1], struct.nodalcoord[struct.node[i][0]][1]], \
zs=[struct.nodalcoord[struct.node[i][1]][2], struct.nodalcoord[struct.node[i][0]][2]], color='b')
# Plot deformed structure
if showresult:
if struct.postprocessed():
if struct.stresscolor[i] > 0:
if _coloring == 1:
rgb_col = [0, 0, abs(struct.stresscolor[i])]
elif _coloring == 2:
rgb_col = [1-abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i]), 1]
elif _coloring == 3:
rgb_col = [(1-abs(struct.stresscolor[i]))/2, \
(1-abs(struct.stresscolor[i]))/2, 1]
elif _coloring == 4:
rgb_col = [0, 1-abs(struct.stresscolor[i]), \
abs(struct.stresscolor[i])]
else:
rgb_col = [1, 0, 0]
else:
if _coloring == 1:
rgb_col = [abs(struct.stresscolor[i]), 0, 0]
elif _coloring == 2:
rgb_col = [1, 1-abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i])]
elif _coloring == 3:
rgb_col = [1, (1-abs(struct.stresscolor[i]))/2, \
(1-abs(struct.stresscolor[i]))/2]
elif _coloring == 4:
rgb_col = [abs(struct.stresscolor[i]), \
1-abs(struct.stresscolor[i]), 0]
else:
rgb_col = [1, 0, 0]
else:
print 'Stresses are not calculated'
rgb_col = [1, 0, 0]
_ax.plot([dipslaydisplacement[struct.node[i][1]][0], dipslaydisplacement[struct.node[i][0]][0]], \
[dipslaydisplacement[struct.node[i][1]][1], dipslaydisplacement[struct.node[i][0]][1]], \
zs=[dipslaydisplacement[struct.node[i][1]][2], dipslaydisplacement[struct.node[i][0]][2]], color=rgb_col)
if showforces:
for i in struct.known_f_notzero:
if struct.force[i] < 0:
value = -1.0
else:
value = 1.0
if i % 3 == 0:
f_dir = [value*scale_f, 0., 0.]
elif i % 3 == 1:
f_dir = [0., value*scale_f, 0.]
else:
f_dir = [0., 0., value*scale_f*z_corr]
f_arrow = Arrow3D([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0] + f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1] + f_dir[1]], \
[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2] + f_dir[2]], \
mutation_scale=20, lw=1, arrowstyle="-|>", color="k")
_ax.add_artist(f_arrow)
if showreactions:
e_previous = -100
for i in struct.known_dis_a:
value = 0.0 # Maybe this is useless <XXX>
if struct.force[i] < 0:
value = -1.0
elif struct.force[i] > 0:
value = 1.0
if i % 3 == 0:
f_dir = [value*scale_f, 0., 0.]
elif i % 3 == 1:
f_dir = [0., value*scale_f, 0.]
else:
f_dir = [0., 0., value*scale_f*z_corr]
if abs(struct.force[i]) > 0:
f_arrow = Arrow3D([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0] + f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1] + f_dir[1]], \
[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2] + f_dir[2]], \
mutation_scale=20, lw=1, arrowstyle="-|>", color="darkolivegreen")
_ax.add_artist(f_arrow)
if showvalues:
_ax.set_xticklabels([])
_ax.set_yticklabels([])
_ax.set_zticklabels([])
if not i//3 == e_previous//3:
if struct.dof == 3:
_ax.text(struct.nodalcoord[i//3][0], \
struct.nodalcoord[i//3][1], \
struct.nodalcoord[i//3][2], \
"{:10.2f}".format(struct.force[(i//3)*3+0])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+1])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+2]),\
fontsize=12, horizontalalignment='right')
elif struct.dof == 2:
_ax.text(struct.nodalcoord[i//3][0], \
struct.nodalcoord[i//3][1], \
struct.nodalcoord[i//3][2], \
"{:10.2f}".format(struct.force[(i//3)*3+0])+'\n'+\
"{:10.2f}".format(struct.force[(i//3)*3+1]),\
fontsize=12, horizontalalignment='right')
e_previous = i
if showsupports:
for i in struct.known_dis_a:
if i % 3 == 0:
f_dir = [-1.0 * scale_sup, 0., 0.]
col = 'g'
elif i % 3 == 1:
f_dir = [0., -1.0 * scale_sup, 0.]
col = 'y'
else:
f_dir = [0., 0., -1.0 * scale_sup * z_corr]
col = 'brown'
if i % 3 != 2 or struct.dof == 3:
_ax.plot([struct.nodalcoord[i//3][0], struct.nodalcoord[i//3][0]+f_dir[0]], \
[struct.nodalcoord[i//3][1], struct.nodalcoord[i//3][1]+f_dir[1]], \
zs=[struct.nodalcoord[i//3][2], struct.nodalcoord[i//3][2]+f_dir[2]], \
color=col, linewidth=4.0)
plt.show()
if saveplot:
fig.savefig(plotname + '.png')
print '\'' + plotname +'.png\' is saved.'
print '------------------------------------'
return
def endoffile(givenfile, line):
"""
Returns True while the end of the file has not been reached. Implemented this way for compatibility reasons.
"""
if _OSLIBRARYAVAILABLE:
return givenfile.tell() < os.fstat(givenfile.fileno()).st_size
else:
return not line == "EOF"
def logtime(prev_time, title):
"""
Calculates and prints the time consumed by a task.
Call it with the previously saved timestamp and the name of the current task;
at the first call, pass the TIC value. The saved timestamp should be
overwritten by this function's return value.
"""
if _LOG:
new_time = time.time()
print title
print 'Time: ' + str("{:10.3f}".format(new_time - prev_time))
print '------------------------------------'
return new_time
else:
return 0
def error(delta):
"""
Error function: Euclidean norm (square root of the sum of squares) of the residual vector
"""
sumerr = 0
for deltaelement in delta:
sumerr += deltaelement**2
sumerr = math.sqrt(sumerr)
return sumerr
class Truss(object):
"""
General structure class
"""
def __init__(self, name):
self.name = name # Name of structure
self.known_f_a = [] # Nodes without supports
self.known_f_notzero = [] # Nodes with loads
self.dof = 3 # Truss's degree of freedom
self.node = [] # Element's end nodes
self.constraint = [] # Supports
self.force = [] # Force
self.nodalcoord = [] # Coordinates of nodes
self.nodalcoord_def = [] # Coordinates after deformations
self.area = [] # Cross-sectional areas
self.el_mod = [] # Material data
self.nodenum = 0 # Number of nodes
self.elenum = 0 # Number of elements
self.eledof = [] # Mapping between DOF and node
self.stiffness = [] # Global stiffness matrix
self.mod_stiffnesses = [] # Modified stiffnesses in a hyper-matrix
self.ele_length = [] # Length of the elements
self._norm_stiff = [] # E/L
self._cx = []
self._cy = []
self._cz = []
self._s_loc = []
self._loc_stiff = [] # Local stiffness matrix
self.dis_new = []
self.force_new = []
self.stiff_new = []
self.displacement = [] # Relative displacements
self._stiffisfresh = 0
self._postprocessed = 0
self.init_disp = []
self.stresscolor = [] # Color mapping for stresses
self.known_dis_a = []
self.stress = [] # Element's stresses
self._io_origin = 0 # Array's first element number during IO. Default is 0.
self.analysis = {}
self._mod_stiffisfresh = 0
self.mod_displacements = []
self.keypoint = []
self.keypnum = 0
self.effect = []
self.toteffect = []
self.sortedeff = []
self.specdof_inputstring = ''
self.tresshold = 0.1
self.effectratio = []
self.processeddata = [] # To store last input line
self.modifications = [] # Storing modifications for model updating
self.readelements = [0]*9
self.arduino_mapping = []
self.errorlimit = 0.5
self.modificationlimit = 0.6
self.unitmodification = 0.05
self.measurement = [0.]
self.numofupdates = [0, 0, 0] # [#Successfully updated model, #Updates with overflow exit, #Updates where there were no more modification option]
self.iterationlimit = 20
def read(self, filename):
"""
Input file for TRUSS.py program
All commands must be written in uppercase characters
*** The values MUST be written on the line immediately following a command
Only lines containing the command and nothing else count.
Everything else is ignored - even hashtags are useless :)
The order of the commands is arbitrary.
Commands and their format (example):
DOF - Degree of freedom: 3
ELEMENTS - Elements given by end-nodes: 0, 1 | 0, 2 ...
COORDINATES - Nodal coordinates: 0., 0., 0., | 0., 3., 0. ...
CROSS-SECTIONS - This data will be evaluated in Python: 3.0*(10**(-4)), 5.0*(10**(-4)) ...
MATERIALS - This data will be evaluated in Python: 70.0*(10**9), 100.0*(10**9) ...
FORCES - Selected DOF + Force: 11, +1000000.0 | 12, +1000000.0 ...
SUPPORTS - Selected DOF + Prescribed displacement: 0, 0.0 | 1, 0.0 ...
MEASUREMENTS - Selected node DOFs to be analysed during Model Updating, e.g.: 13X,16Y ...
EOF - For compatibility reasons EOF should be placed after the commands
"""
self._io_origin = 0
readelementnames = ["Origin", "DOF", "Elements", "Coordinates", "Cross-sections", "Materials", "Forces", "Supports", "Measured DOFs"]
with open(filename, "r") as sourcefile:
sourceline = ""
while endoffile(sourcefile, sourceline):
sourceline = sourcefile.readline().strip()
if sourceline.upper() == "_ORIGIN":
sourceline = sourcefile.readline().strip()
self._io_origin = int(sourceline)
self.readelements[0] = 1
if sourceline.upper() == "DOF":
sourceline = sourcefile.readline().strip()
self.setdof(int(sourceline))
self.readelements[1] = 1
if sourceline.upper() == "ELEMENTS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, int(x[1]) - self._io_origin] for x in inpstr]
self.setelements(inpnum)
self.readelements[2] = 1
if sourceline.upper() == "COORDINATES":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
if self.dof == 3:
inpnum = [[float(x[0]), float(x[1]), float(x[2])] for x in inpstr]
elif self.dof == 2:
inpnum = [[float(x[0]), float(x[1]), 0.] for x in inpstr]
self.setcoordinates(inpnum)
self.readelements[3] = 1
if sourceline.upper() == "CROSS-SECTIONS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = sourceline.split(',')
if len(inpstr) == 1:
inpstr = sourceline.split(';')
if '' in inpstr:
inpstr.remove('')
inpnum = [float(eval(x)) for x in inpstr]
self.setcrosssections(inpnum)
self.readelements[4] = 1
if sourceline.upper() == "MATERIALS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = sourceline.split(',')
if len(inpstr) == 1:
inpstr = sourceline.split(';')
if '' in inpstr:
inpstr.remove('')
inpnum = [float(eval(x)) for x in inpstr]
self.setmaterials(inpnum)
self.readelements[5] = 1
if sourceline.upper() == "FORCES":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, float(x[1])] for x in inpstr]
self.setforces(sorted(inpnum))
self.readelements[6] = 1
if sourceline.upper() == "SUPPORTS":
sourceline = sourcefile.readline().strip()
inpstr = []
inpnum = []
inpstr = [x.split(',') for x in sourceline.split('|')]
if len(inpstr[0]) == 1:
inpstr = [x.split(';') for x in sourceline.split('|')]
if [''] in inpstr:
inpstr.remove([''])
inpnum = [[int(x[0]) - self._io_origin, float(x[1])] for x in inpstr]
self.setsupports(sorted(inpnum))
self.readelements[7] = 1
if sourceline.upper() == "MEASUREMENTS":
sourceline = sourcefile.readline().strip()
self.specdof_inputstring = sourceline
inpstr = []
self.arduino_mapping = sourceline.split(',')
self.setspecdofs(self.arduino_mapping)
self.readelements[8] = 1
terminate = False
for i, value in enumerate(self.readelements):
if i > 0 and (i < 8 or _UPDATING):
#if i > 0:
if value == 0:
print "The following was not found: " + readelementnames[i]
terminate = True
if terminate:
raise Exception
def plot(self, showorig, showresult, showsupports, showforces, \
showreactions, scaledisplacement, scaleforce, scalez, saveplot):
"""
Plot function of the Truss class
This method calls the more general plotstructure() method.
Plot settings:
O: Original D: Deformed S: Supports F: Forces R: Reactions
ScD: Scale displacements (Z-axis) (def:1.0) ScF: Scale forces (def:1.0)
ScS: Scale Support signs (Z-axis) (def:1.0)
Save: Save plot to file
plot(O, D, S, F, R, ScD, ScF, ScS, Save)
"""
_showvalues = 1 # Show values of forces
if self._postprocessed == 0:
print 'Postprocess is needed before plotting structure!'
else:
if scaledisplacement == 0:
scaledisplacement = 1.0 # Scale drawing of displacements
if scaleforce == 0:
scaleforce = 1.0 # Scale force sign
if scalez == 0:
scalez = 0.3 # Scale z-axis
plotstructure(self, showorig, showresult, showsupports, showforces, showreactions, \
scaledisplacement, scaleforce, scalez, _showvalues, saveplot)
def __checkcoordinates(self, ignorable):
"""
Checking coordinates for repeating elements.
ignorable: [True | False] If the warning is ignorable, only a message appears and the input is ignored.
If the warning is not ignorable, an exception is raised.
return: [0 | 1] 1 if no error is found, otherwise 0.
"""
if len(self.nodalcoord) != len(list(k for k, _ in itertools.groupby(sorted(self.nodalcoord)))):
if ignorable == 0:
raise Exception('Coordinate list has repeating items. Calculation is terminated')
else:
print "This node already exists. Input is ignored."
return 0
else:
return 1
def setdof(self, dof):
"""
Setting problem's degree of freedom
dof: [2 | 3] Model's Degree Of Freedom.
"""
self.dof = dof
if self.dof != 2 and self.dof != 3:
raise Exception('DOF must be 2 or 3.')
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setelements(self, node):
"""
Setting elements (nodal connections) in bulk mode
"""
self.node = node
self.nodenum = len(set(list(itertools.chain.from_iterable(sorted(self.node)))))
self.elenum = len(self.node)
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
# Creating mapping tool for elements
for node in self.node:
self.eledof.append([node[0]*3, node[0]*3+1, node[0]*3+2, \
node[1]*3, node[1]*3+1, node[1]*3+2])
# Initializing displacement, force and stiffness containers
self.init_disp = [0.]*(3*self.nodenum)
self.force = [0.]*(3*self.nodenum)
self.stiffness = [0.]*(3*self.nodenum)
self.known_f_a = []
self.known_f_notzero = []
def setcoordinates(self, coordinates):
"""
Setting coordinates in bulk mode
"""
self.nodalcoord = coordinates
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
if self.nodenum > len(self.nodalcoord):
raise Exception('More coordinates are needed')
elif self.node == []:
raise Exception('Elements (nodal connections) must be set before defining coordinates')
self.__checkcoordinates(False)
def modcoordinate(self, node, coordinate):
"""
Modify coordinate
"""
if self.__checkcoordinates(True):
self.nodalcoord[node] = coordinate
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
def setcrosssections(self, area):
"""
Setting cross-sections in bulk mode
"""
self.area = area
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def modcrosssection(self, element, area):
"""
Modifying cross-sections by elements
"""
self.area[element] = area
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setmaterials(self, el_mod):
"""
Setting material data in bulk mode
"""
self.el_mod = el_mod
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def modmaterial(self, element, el_mod):
"""
Modifying material data by elements
"""
self.el_mod[element] = el_mod
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setforces(self, forces):
"""
Set forces
"""
for fdof, force in forces:
if self.dof == 3:
self.force[fdof] = force
elif self.dof == 2:
self.force[fdof + (fdof//2)] = force
self._postprocessed = 0
def modforce(self, element, force):
"""
Modifying a single force component
"""
self.force[element] = force
self._postprocessed = 0
def setsupports(self, constraints):
"""
Set supports
"""
for cdof, constraint in constraints:
if self.dof == 3:
self.constraint.append([cdof, constraint])
elif self.dof == 2:
self.constraint.append([cdof + (cdof // 2), constraint])
self._stiffisfresh = 0
self._mod_stiffisfresh = 0
self._postprocessed = 0
def setspecdofs(self, specdofs):
"""
Set special nodal DOFs
"""
self.analysis = {}
for dofname in specdofs:
node = int(dofname[:len(dofname)-1])-self._io_origin
if 'X' in dofname:
self.analysis[dofname] = node*3+0
self.keypoint.append(node*3+0)
if 'Y' in dofname:
self.analysis[dofname] = node*3+1
self.keypoint.append(node*3+1)
if 'Z' in dofname:
if self.dof == 3:
self.analysis[dofname] = node*3+2
self.keypoint.append(node*3+2)
else:
print "Z-direction is not allowed in 2D structures. Please check the \'MEASUREMENTS\' section in the input file."
raise Exception
self.keypnum = len(self.analysis)
if self.keypnum == 0 and _UPDATING:
print "There is no valid measured DOF. Please check the \'MEASUREMENTS\' section in the input file."
raise Exception
def calcstiffness(self):
"""
Stiffness matrix compilation
"""
self._postprocessed = 0
if self.dof == 2:
for zdof in range(self.nodenum):
self.constraint.append([int(zdof*3+2), 0.])
self.constraint = list(k for k, _ in itertools.groupby(sorted(self.constraint)))
#Setting known forces
for dofloc in range(3*self.nodenum):
self.known_f_a.append(dofloc)
if self.force[dofloc] != 0:
self.known_f_notzero.append(dofloc)
self.known_dis_a = []
for constr in self.constraint:
self.init_disp[constr[0]] = constr[1]
self.known_dis_a.append(constr[0])
try:
self.known_f_a.remove(constr[0])
self.known_f_notzero.remove(constr[0])
except ValueError:
pass
ele_length = [0.]*self.elenum
self._norm_stiff = [0.]*self.elenum
self._cx = [0.]*self.elenum
self._cy = [0.]*self.elenum
self._cz = [0.]*self.elenum
self._s_loc = [0.]*self.elenum
self._loc_stiff = [0.]*self.elenum
self.stress = [0.]*self.elenum
self.stiffness = [[0.]*(len(self.nodalcoord)*3)]*(len(self.nodalcoord)*3)
for i in range(self.elenum):
ele_length[i] = math.sqrt((self.nodalcoord[self.node[i][1]][0]-self.nodalcoord[self.node[i][0]][0])**2+ \
(self.nodalcoord[self.node[i][1]][1]-self.nodalcoord[self.node[i][0]][1])**2 + \
(self.nodalcoord[self.node[i][1]][2]-self.nodalcoord[self.node[i][0]][2])**2)
self._cx[i] = (self.nodalcoord[self.node[i][1]][0]-self.nodalcoord[self.node[i][0]][0])/ele_length[i]
self._cy[i] = (self.nodalcoord[self.node[i][1]][1]-self.nodalcoord[self.node[i][0]][1])/ele_length[i]
self._cz[i] = (self.nodalcoord[self.node[i][1]][2]-self.nodalcoord[self.node[i][0]][2])/ele_length[i]
self._norm_stiff[i] = self.el_mod[i]/ele_length[i]
self._s_loc[i] = [[self._cx[i]**2, self._cx[i]*self._cy[i], self._cx[i]*self._cz[i], -self._cx[i]**2, -self._cx[i]*self._cy[i], -self._cx[i]*self._cz[i]], \
[self._cx[i]*self._cy[i], self._cy[i]**2, self._cy[i]*self._cz[i], -self._cx[i]*self._cy[i], -self._cy[i]**2, -self._cy[i]*self._cz[i]], \
[self._cx[i]*self._cz[i], self._cy[i]*self._cz[i], self._cz[i]**2, -self._cx[i]*self._cz[i], -self._cy[i]*self._cz[i], -self._cz[i]**2], \
[-self._cx[i]**2, -self._cx[i]*self._cy[i], -self._cx[i]*self._cz[i], self._cx[i]**2, self._cx[i]*self._cy[i], self._cx[i]*self._cz[i]], \
[-self._cx[i]*self._cy[i], -self._cy[i]**2, -self._cy[i]*self._cz[i], self._cx[i]*self._cy[i], self._cy[i]**2, self._cy[i]*self._cz[i]], \
[-self._cx[i]*self._cz[i], -self._cy[i]*self._cz[i], -self._cz[i]**2, self._cx[i]*self._cz[i], self._cy[i]*self._cz[i], self._cz[i]**2]]
self._loc_stiff[i] = [[y* self.area[i]* self._norm_stiff[i] for y in x] for x in self._s_loc[i]]
ele_dof_vec = self.eledof[i]
stiffincrement = [0.]*(len(self.nodalcoord)*3)
for j in range(3*2):
for k in range(3*2):
stiffincrement[ele_dof_vec[k]] = self._loc_stiff[i][j][k]
self.stiffness[ele_dof_vec[j]] = [x + y for x, y in zip(self.stiffness[ele_dof_vec[j]], stiffincrement)]
self._stiffisfresh = 1
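# For reference, each 6x6 block assembled above is the standard 3D truss
# element stiffness k_e = (E*A/L) * g * g^T with g = (-cx, -cy, -cz, cx, cy, cz).
# A compact NumPy sketch of the same computation (illustrative only, not used
# by this class; the helper name is hypothetical and numpy may already be
# imported at the top of the file):
import numpy as np

def truss_element_stiffness(p1, p2, area, e_mod):
    """Return the 6x6 stiffness matrix of a single 3D truss element."""
    p1, p2 = np.asarray(p1, dtype=float), np.asarray(p2, dtype=float)
    delta = p2 - p1
    length = np.linalg.norm(delta)           # element length L
    c = delta / length                       # direction cosines (cx, cy, cz)
    g = np.concatenate((-c, c))              # geometry vector for both ends
    return (e_mod * area / length) * np.outer(g, g)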
def calcmodstiffness(self, index, magnitude):
"""
Convergence step in stiffness matrix modification
"""
if self.mod_stiffnesses == []:
self.mod_stiffnesses = [0.]*(self.elenum+1)
#for loopindex in range(self.elenum):
_mod_stiffnesses_temp = [[0.]*(len(self.nodalcoord)*3)]*(len(self.nodalcoord)*3)
for i in range(self.elenum):
if i == index:
_mod_norm_stiff = self._norm_stiff[i] * (1.0 + self.modifications[i] + magnitude) #self.el_mod[i]/ele_length[i]
else:
_mod_norm_stiff = self._norm_stiff[i] * (1.0 + self.modifications[i]) #self.el_mod[i]/ele_length[i]
_mod_loc_stiff = [[y*self.area[i]*_mod_norm_stiff for y in x] for x in self._s_loc[i]]
ele_dof_vec = self.eledof[i]
stiffincrement = [0.]*(len(self.nodalcoord)*3)
for j in range(3*2):
for k in range(3*2):
stiffincrement[ele_dof_vec[k]] = _mod_loc_stiff[j][k]
_mod_stiffnesses_temp[ele_dof_vec[j]] = [x + y for x, y in zip(_mod_stiffnesses_temp[ele_dof_vec[j]], stiffincrement)]
self.mod_stiffnesses[index] = _mod_stiffnesses_temp
def solve(self):
"""
Main solver of the code
"""
if self._stiffisfresh == 0:
if _LOG:
print 'Stiffness matrix is recalculated'
self.calcstiffness()
self.dis_new = [0.]*(self.nodenum*3-len(self.constraint))
self.force_new = [0.]*(self.nodenum*3-len(self.constraint))
self.stiff_new = [[0.]*(self.nodenum*3-len(self.constraint))]*(self.nodenum*3-len(self.constraint))
# known force array
for i, known_f_a in enumerate(self.known_f_a):
self.force_new[i] = self.force[known_f_a]
stiffincrement = [0.]*(self.nodenum*3-len(self.constraint))
for i, kfai in enumerate(self.known_f_a):
for j, kfaj in enumerate(self.known_f_a):
stiffincrement[j] = self.stiffness[kfai][kfaj]
self.stiff_new[i] = [x + y for x, y in zip(self.stiff_new[i], stiffincrement)]
# SOLVING THE STRUCTURE
if _SOLVER == 0:
if _LOG:
print 'Built-in solver'
self.dis_new = mat_vec_mult(invert(self.stiff_new), self.force_new)
else:
if _LOG:
print 'NumPy solver'
self.dis_new = np.linalg.solve(np.array(self.stiff_new), np.array(self.force_new))
self.displacement = deepcopy(self.init_disp)
for i, known_f_a in enumerate(self.known_f_a):
self.displacement[known_f_a] = self.dis_new[i]
# Deformed shape
self.nodalcoord_def = []
for i in range(self.nodenum):
self.nodalcoord_def.append([self.nodalcoord[i][0]+ self.displacement[i*3+0], \
self.nodalcoord[i][1]+ self.displacement[i*3+1], self.nodalcoord[i][2]+ self.displacement[i*3+2]])
# Postprocess
self.postprocess()
self.mod_displacements = [0.]*(self.elenum+1)
def solvemodstruct(self, index):
"""
Solver for the modified structures. 'index' identifies the current modification.
"""
self.mod_displacements[index] = [0.]*(self.nodenum*3)
dis_new = [0.]*(self.nodenum*3-len(self.constraint))
stiff_new = [[0.]*(self.nodenum*3-len(self.constraint))]*(self.nodenum*3-len(self.constraint))
stiffincrement = [0.]*(self.nodenum*3-len(self.constraint))
for i, kfai in enumerate(self.known_f_a):
for j, kfaj in enumerate(self.known_f_a):
stiffincrement[j] = self.mod_stiffnesses[index][kfai][kfaj]
stiff_new[i] = [x + y for x, y in zip(stiff_new[i], stiffincrement)]
# SOLVING THE MODIFIED STRUCTURE
if _SOLVER == 0:
dis_new = mat_vec_mult(invert(stiff_new), self.force_new)
else:
dis_new = np.linalg.solve(np.array(stiff_new), np.array(self.force_new))
mod_displacement_temp = deepcopy(self.init_disp)
for i, kfa in enumerate(self.known_f_a):
mod_displacement_temp[kfa] = dis_new[i] - self.dis_new[i]
self.mod_displacements[index] = [x + y for x, y in zip(self.mod_displacements[index], mod_displacement_temp)]
def evaluate(self):
"""
Calculates the relative displacement of each individual available unit-modification
compared to the measured differences (delta).
delta: [DOF number, difference]
return effect: [effect on 1st point, effect on 2nd point, ..., modification number]
where each row corresponds to one modification number
"""
self.effect = [[0.]*(self.keypnum + 2)]*self.elenum
self.toteffect = [0.]*self.keypnum
self.sortedeff = [[[0.]*(self.keypnum + 2)]*self.elenum]*self.keypnum
effect_temp = [0.]*(self.keypnum + 2)
for modnum in range(self.elenum):
effect_temp[self.keypnum] = int(modnum)
for j, dofnum in enumerate(self.keypoint):
try:
effect_temp[j] = self.mod_displacements[modnum][dofnum]
self.effect[modnum] = [x for x in effect_temp]
self.toteffect[j] += abs(self.effect[modnum][j])
except IndexError:
print "Maybe the mapping data is invalid."
print "Please check the \'arduino_mapping.txt\' input whether the given DOFs are correct or not."
raise IndexError
self.effectratio = deepcopy(self.effect)
for i in range(self.elenum):
for j in range(self.keypnum):
if self.toteffect[j] > 0:
self.effectratio[i][j] = abs(self.effectratio[i][j]/self.toteffect[j])
else:
self.effectratio[i][j] = 0
#print " \'effectratio\' is not used yet"
# Sort by effectiveness
for i in range(self.keypnum):
self.sortedeff[i] = deepcopy(self.effect)
# Check sign of the effect
for ktemp in range(self.elenum):
if self.sortedeff[i][ktemp][i] < 0:
for jtemp in range(self.keypnum):
self.sortedeff[i][ktemp][jtemp] = abs(self.sortedeff[i][ktemp][jtemp])
self.sortedeff[i][ktemp][self.keypnum +1] = -1
else:
self.sortedeff[i][ktemp][self.keypnum +1] = +1
for j in range(self.keypnum):
if i != j and j != 0:
self.sortedeff[i] = swap_col(sorted(swap_col(self.sortedeff[i], 0, j), reverse=True), 0, j)
if i != 0:
self.sortedeff[i] = swap_col(sorted(swap_col(self.sortedeff[i], 0, i), reverse=True), 0, i)
else:
self.sortedeff[i] = sorted(self.sortedeff[i], reverse=True)
def difference(self, num_displ, measurement):
"""
Calculate the difference between the numerical solution and the real-life measurement.
The real-life measurement should be given in the following format:
MEASUREMENT: [[13X, -2.154], [16Y, 5.256], ...]
"""
#Print nodenumber option should be added! <XXX>
delta = []
for loc, measured in measurement:
try:
dof = self.analysis[loc.upper()]
except KeyError:
print 'The given measurement location cannot be matched with the input data.'
print 'The available nodes are: {\'NAMES\': mapping addresses}'
print self.analysis
SER.close()
raise Exception('Watchpoint name error')
delta.append(measured - num_displ[dof])
return delta
def optimize(self, delta):
"""
Model updating - core function
"""
#modnum = min(10, self.elenum)
modnum = self.elenum
self.modifications = [0.0]*self.elenum
if not _SIMULATION:
appendix = ""
else:
appendix = " - SIMULATED"
newdelta = delta
j = 0
print "-----"
print "Step: 0/"+ str(self.iterationlimit)
while (error(newdelta) > self.errorlimit and j <= self.iterationlimit and (self.capable() or j <= 1)): # Optimization loop
j += 1
print "Error: " + str(error(newdelta))
print "-----"
print "Step: " + str(j) + "/"+ str(self.iterationlimit)
ratio = [0.]*modnum
unit = 0
prevmodifications = self.modifications
for index in range(self.elenum):
self.modifications[index] = min(abs(self.modifications[index] - self.unitmodification), self.modificationlimit) *math.copysign(1, self.modifications[index]- self.unitmodification)
self.calcmodstiffness(index, self.modifications[index])
self.solvemodstruct(index)
self.evaluate()
self.calcmodstiffness(self.elenum, 0)
self.solvemodstruct(self.elenum)
newdelta = self.difference(self.mod_displacements[self.elenum], self.measurement)
for i, effect in enumerate(self.toteffect):
if effect == 0.0:
print "None of the variables has effect on " + str(self.arduino_mapping[i])
print "Model updating has no solution."
raise Exception
for i in range(self.elenum):
modificationnumber = self.sortedeff[0][i][1]
ratio[modificationnumber] = abs(self.sortedeff[0][i][0] / self.toteffect[0])*math.copysign(1, self.sortedeff[0][i][2])
unit += abs(ratio[modificationnumber]*self.sortedeff[0][i][0])
scale = newdelta[0]/unit
for i in range(self.elenum):
modificationnumber = self.sortedeff[0][i][1]
self.modifications[modificationnumber] = min(abs(prevmodifications[modificationnumber] - self.unitmodification*ratio[modificationnumber]), self.modificationlimit)\
*math.copysign(1, prevmodifications[modificationnumber] - self.unitmodification*ratio[modificationnumber])
# the last part is already the sign itself without the sign function
print "Ratio: " + str(scale)
print "Final error: " + str(error(newdelta))
if not self.capable() and j > 1:
print "Optimization could not be finished successfully."
print "The remaining error is: " + str(error(newdelta))
with open(self.name + ' - UpdateResults'+ appendix +'.txt', 'a') as outfile:
if j > 1:
if j <= self.iterationlimit and self.capable():
self.numofupdates[0] += 1
outfile.write("Update state: SUCCESSFUL\n")
if not j <= self.iterationlimit:
self.numofupdates[1] += 1
outfile.write("Update state: Run out of iteration limit\n")
if not self.capable() and j > 1:
self.numofupdates[2] += 1
outfile.write("Update state: No more possible modification\n")
else:
outfile.write("Update state: Optimization was skipped\n")
outfile.write("Requiered iterations: " + str(j) + "\n")
outfile.write("Measurement: " + str(self.measurement) + "\n")
outfile.write("Original delta: " + str(delta) + "\n")
outfile.write("New delta: " + str(newdelta) + " (limit: " + str(self.errorlimit) +")\n")
outfile.write("Final error: " + str(error(newdelta)) + "\n")
outfile.write("Modifications [%]: \n")
outfile.write(str(self.modifications) + "\n")
outfile.write("Original displacements: \n")
outfile.write(str(self.displacement) + "\n")
if j > 1:
outfile.write("New displacements: \n")
outfile.write(str(self.mod_displacements[self.elenum]) + "\n")
outfile.write("----------------------\n")
def capable(self):
"""
Function telling whether there are more options to modify
"""
capable = False
for variable in self.modifications:
if abs(variable) <= 0.95*self.modificationlimit and abs(variable) > 0.01:
capable = True
return capable
def seterrorlimit(self, errorlimit):
"""
Setting general stop parameter for model updating
"""
if errorlimit > 0.0:
self.errorlimit = errorlimit
else:
print "The error limit must be a positive number"
raise Exception
def setmodificationlimit(self, modificationlimit):
"""
Setting modification limit for members (model updating)
"""
if modificationlimit > 0.0 and modificationlimit < 1.0:
self.modificationlimit = modificationlimit
else:
print "The modification limit must be higher than 0.0 and lower than 1.0"
raise Exception
def setunitmodification(self, unitmodification):
"""
Setting modification step (model updating)
"""
if abs(unitmodification) >= 0.01 and abs(unitmodification) < 0.5:
self.unitmodification = unitmodification
else:
print "The absolut value of the unit modification must be minimum 0.01 and maximum 0.5"
raise Exception
def setiterationlimit(self, iterationlimit):
"""
Setting maximum number of iterations (model updating)
"""
if int(iterationlimit) > 1 and int(iterationlimit) <= math.pow(10, 4):
self.iterationlimit = int(iterationlimit)
else:
print "The iterationlimit must be between 2 and 10.000"
raise Exception
def readarduino(self, base, saveinput):
"""
Read data from Arduino
"""
# Read data from Arduino
maxdifference = 0.8 # Maximum input difference threshold in mm
arduinovalues = []
data = [0.]*len(self.arduino_mapping)
newdata = False
bigdifference = False
readerror = False
try:
arduinoline = SER.readline()
if len(arduinoline) > 0:
arduinovalues = arduinoline.split(',')
try:
if arduinovalues[0][len(arduinovalues)-1] == '.':
arduinovalues[0] = arduinovalues[0][:len(arduinovalues[0])-2]
else:
del arduinovalues[len(arduinovalues)-1]
except IndexError:
print "Index Error... continuing"
if len(arduinovalues) == len(self.arduino_mapping):
try:
for i in range(len(self.arduino_mapping)):
data[i] = float(arduinovalues[i]) - float(base[i][1])
if abs(data[i] - self.processeddata[i]) > maxdifference:
bigdifference = True
if abs(float(arduinovalues[i])) < 2.0:
readerror = True
self.processeddata = data
newdata = True
except ValueError:
print "Value error... continuing"
SER.flushInput()
time.sleep(0.5)
except Exception:
print "Type error: " + str(arduinovalues) + "... continuing"
SER.flushInput()
time.sleep(0.5)
SER.flushInput()
except serial.SerialTimeoutException:
print "Data could not be read... continuing"
SER.flushInput()
time.sleep(0.5)
if newdata and not bigdifference and not readerror:
self.measurement = zip(self.arduino_mapping, data)
saveinput.write(str(data) +', '+ str(time.time()) + "\n")
# Calculate differences
delta = self.difference(self.displacement, self.measurement)
print "Delta: " + str(delta)
newdata = False
bigdifference = False
readerror = False
return delta
newdata = False
bigdifference = False
readerror = False
def simulatearduino(self, arduinoline, prevline):
"""
Simulate data, based on previous measurement
"""
arduinovalues = []
data = [0.]*len(self.arduino_mapping)
skip = 0
sleeptime = 0.
try:
try:
prevreadtime = float(str(prevline.split(']')[1]).split(',')[1])
nowreadtime = float(str(arduinoline.split(']')[1]).split(',')[1])
try:
if _REALISTICSIMULATION:
sleeptime = nowreadtime - prevreadtime
except Exception:
pass
except Exception:
skip = 1
sleeptime = 0.
if not skip:
if not sleeptime > 0:
sleeptime = 0.
arduinoline = str(arduinoline.split(']')[0])+"]"
arduinovalues = eval(arduinoline)
try:
for i in range(len(self.arduino_mapping)):
data[i] = float(arduinovalues[i])
self.processeddata = data
except Exception:
print "Type error: " + str(arduinovalues) + "... continuing"
self.measurement = zip(self.arduino_mapping, data)
# Calculate differences
delta = self.difference(self.displacement, self.measurement)
time.sleep(sleeptime)
print delta
return delta
except IndexError:
print "IndexError"
#pass
except Exception:
print "Exception in simulation data"
#pass
def updatemodel(self):
"""
General function to manage the model updating procedure.
"""
self.processeddata = [0.]*len(self.arduino_mapping)
if not _SIMULATION:
base = self.calibrate()
filemode = 'a'
else:
base = ['SIMULATION']
try:
os.remove(self.name + ' - UpdateResults - SIMULATED.txt')
except Exception:
pass
filemode = 'r'
with open(self.name + ' - Input Data.txt', filemode) as inputfile:
# Saving input data
if not _SIMULATION:
inputfile.write('Input data of \'' + self.name + '\':\n\n')
inputfile.write('Start Time: ' + str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) + "\n")
inputfile.write('Base: ' + str(base) + '\n')
inputline = "[],0.0"
for i in range(1000):
if not _SIMULATION:
delta = self.readarduino(base, inputfile)
self.optimize(delta)
else:
try:
delta = None
previnputline = inputline
inputline = inputfile.readline()
if not inputline == '':
delta = self.simulatearduino(inputline, previnputline)
except Exception:
pass
if not delta is None:
self.optimize(delta)
print "Update statistics:"
print "Totally updated models: " + str(TRUSS.numofupdates[0] + TRUSS.numofupdates[1]++ TRUSS.numofupdates[2])
print " Successfully updated models: " + str(TRUSS.numofupdates[0])
print " Updates with running out of possibilities: " + str(TRUSS.numofupdates[2])
print " Updates did not finshed: " + str(TRUSS.numofupdates[1])
def calibrate(self):
"""
Calibration for Arduino measurement. All measurements describe displacements relative to the calibration state.
"""
answer_1 = '0'
restart = '0'
accept = '0'
arduinovalues = []
print "Before starting the model updating, the measuring tools must be calibrated."
print "The calibration should be done in load-free state."
while (answer_1 not in ['Y', 'N']):
answer_1 = raw_input('Can we start the calibration? (y/n) ').upper()
if answer_1 == 'N':
SER.close()
raise Exception('Calibration is terminated')
else:
try:
SER.flushInput()
#time.sleep(0.2)
arduinoline = '' #SER.readline()
while len(arduinoline) == 0:
time.sleep(0.2)
arduinoline = SER.readline()
if len(arduinoline) > 0:
arduinovalues = arduinoline.split(',')
del arduinovalues[len(arduinovalues)-1] # if needed!!!
if len(arduinovalues) == len(self.arduino_mapping):
measurement = zip(self.arduino_mapping, arduinovalues)
print "Calibration result:"
print measurement
while (accept not in ['Y', 'N']):
accept = raw_input('Ok? Can we start the main part? Put on the loads! (y/n) ').upper()
if accept == 'N':
restart = 'Y'
else:
print "Data error. Calibartion is restarting."
print "Arduino values:" + str(arduinovalues)
restart = 'Y'
else:
print 'The calibration cannot be done: no data'
while (restart not in ['Y', 'N']):
restart = raw_input('Do you want to restart calibration? (y/n) ').upper()
except Exception:
print 'The calibration cannot be done: exception was raised'
while (restart not in ['Y', 'N']):
restart = raw_input('Do you want to restart calibration? (y/n) ').upper()
if restart == 'Y':
print "Restarting calibration"
SER.flushInput()
return self.calibrate()
elif restart == 'N':
SER.close()
raise Exception('Calibration is terminated')
if accept == 'Y':
return measurement
def postprocess(self):
"""
Calculates reaction forces and stresses
"""
self._reactions()
self._stresses()
self._postprocessed = 1
def _reactions(self):
"""
Calculates reaction forces
"""
for i in self.known_dis_a:
self.force[i] = 0
for j, displ in enumerate(self.displacement):
self.force[i] += self.stiffness[i][j]*displ
def _stresses(self):
"""
Calculates stress in elements
Last part: Coloring elements for graphical output
"""
self.stress = [0.]*self.elenum
for element in range(self.elenum):
locstiff = [-self._cx[element], -self._cy[element], -self._cz[element], \
self._cx[element], self._cy[element], self._cz[element]]
for i in range(3*2):
self.stress[element] += locstiff[i]*self.displacement[self.eledof[element][i]]
self.stress[element] = self.stress[element]*self._norm_stiff[element]
smax = max([abs(min(self.stress)), max(self.stress), 0.000000001])
self.stresscolor = [float(x)/float(smax) for x in self.stress]
def postprocessed(self):
"""
Tells whether the structure's postprocess step has already been calculated
"""
return self._postprocessed
def writeresults(self, fname):
"""
Writing results to file.
"""
out_element = ''
for i in self.node:
out_element += str(i[0] + self._io_origin) + ', ' + str(i[1] + self._io_origin) + ' | '
out_coords = ''
for i in self.nodalcoord:
out_coords += str(i[0]) + ', ' + str(i[1]) + ', ' + str(i[2]) + ' | '
out_crsect = ''
for i in self.area:
out_crsect += str(i) + ', '
out_materials = ''
for i in self.el_mod:
out_materials += str(i) + ', '
out_forces = ''
for forcedof in self.known_f_notzero:
if self.dof == 3:
out_forces += str(forcedof + self._io_origin) + ', ' + str(self.force[forcedof]) + ' | '
elif self.dof == 2 and forcedof % 3 != 2:
out_forces += str(forcedof - forcedof//3 + self._io_origin) + ', ' + str(self.force[forcedof]) + ' | '
out_supports = ''
for i in self.constraint:
if self.dof == 3:
out_supports += str(i[0] + self._io_origin) + ', ' + str(i[1]) + ' | '
elif i[0] % 3 != 2:
out_supports += str(i[0] - i[0]//3 + self._io_origin) + ', ' + str(i[1]) + ' | '
# Not elegant solution
out_specdofs = self.specdof_inputstring
with open(fname, 'w') as outfile:
# Writing data
outfile.write('Calculation of \'' + self.name + '\':\n\n')
outfile.write('Reactions\n')
#for i in range(len(self.force)//3):
prev = -1
for i in self.known_dis_a:
if self.dof == 3 or i%3 != 2:
if i//3 != prev:
if i < 100:
outfile.write(' ')
if i < 9:
outfile.write(' ')
nodalforce = ''
if (i//3)*3+0 in self.known_dis_a:
nodalforce += "{:10.2f}".format(self.force[(i//3)*3+0]) + ', '
else:
nodalforce += ' '
if (i//3)*3+1 in self.known_dis_a:
nodalforce += "{:10.2f}".format(self.force[(i//3)*3+1]) + ', '
else:
nodalforce += ' '
if self.dof != 2 and (i//3)*3+2 in self.known_dis_a:
nodalforce += "{:10.2f}".format(self.force[(i//3)*3+2]) + '\n'
else:
nodalforce += ' \n'
if nodalforce != ' \n':
outfile.write(str(i//3 + self._io_origin) + ', ' + nodalforce)
prev = i//3
outfile.write('\n')
outfile.write('Displacements\n')
for i in range(len(self.displacement)//3):
if i < 100:
outfile.write(' ')
if i < 9:
outfile.write(' ')
outfile.write(str(i + self._io_origin) + ', ' + "{:10.3f}".format(self.displacement[i*3 +0]) + ', ' \
+ "{:10.3f}".format(self.displacement[i*3 +1]) + ', ' + "{:10.3f}".format(self.displacement[i*3 +2]) + ', ' + '\n')
outfile.write('\n')
outfile.write('Stresses\n')
for i, stress in enumerate(self.stress):
if i < 100:
outfile.write(' ')
if i < 9:
outfile.write(' ')
outfile.write(str(i + self._io_origin) + ', ' + "{:10.3f}".format(stress) + '\n')
outfile.write('\n')
# Saving original input
outfile.write('----- Original input: -----\n\n')
outfile.write('_ORIGIN\n')
outfile.write(str(self._io_origin) + '\n\n')
outfile.write('DOF\n')
outfile.write(str(self.dof) + '\n\n')
outfile.write('ELEMENTS\n')
outfile.write(out_element + '\n\n')
outfile.write('COORDINATES\n')
outfile.write(out_coords + '\n\n')
outfile.write('CROSS-SECTIONS\n')
outfile.write(out_crsect + '\n\n')
outfile.write('MATERIALS\n')
outfile.write(out_materials + '\n\n')
outfile.write('FORCES\n')
outfile.write(out_forces + '\n\n')
outfile.write('SUPPORTS\n')
outfile.write(out_supports + '\n\n')
outfile.write('SPECDOF\n')
outfile.write(out_specdofs + '\n\n')
outfile.write('EOF\n')
##################################
# BEGINNING OF THE MAIN PART #
##################################
PARTTIME = logtime(TIC, "Initialization")
# Define new truss
TRUSS = Truss('bridge')
if not _DEBUG:
TRUSS.name = raw_input('Test name: ')
else:
print "*** Debug mode ***"
print "*** The following file will be opened: " + TRUSS.name + ".str"
# Read input file
#TRUSS.read('lab_01.txt')
try:
TRUSS.read(TRUSS.name + ".str")
except IOError:
print "The following file could not be opened: " + TRUSS.name + ".str"
print "Please make sure that the structural data is available for the program in the running directory."
raise IOError
#if _ARDUINO or _SIMULATION: # deprecated
# TRUSS.setspecdofs(arduino_mapping)
PARTTIME = logtime(PARTTIME, "Setting up structure")
# Calculate stiffness-matrix
TRUSS.calcstiffness()
#TRUSS.calcstiffness_plate()
PARTTIME = logtime(PARTTIME, "Calculating Stiffness Matrix")
#Solve structure
TRUSS.solve()
#TRUSS.solve_plate()
PARTTIME = logtime(PARTTIME, "Solving")
if _UPDATING:
TRUSS.setunitmodification(0.05)
TRUSS.seterrorlimit(1.2)
TRUSS.setmodificationlimit(0.7)
TRUSS.setiterationlimit(100)
TRUSS.updatemodel()
PARTTIME = logtime(PARTTIME, "Updating numerical model")
if _GRAPHICS:
# Plot settings:
# O: Original D: Deformed S: Supports F: Forces R: Reactions
# ScD: Scale displacements (Z-axis) (def:1.0) ScF: Scale forces (def:1.0)
# ScS: Scale Support signs (Z-axis) (def:1.0)
# Save: Save plot to file
# plot(O, D, S, F, R, ScD, ScF, ScS, Save)
TRUSS.plot(1, 0, 1, 1, 0, 1.0, 0.0, 0.0, True)
TRUSS.plot(1, 1, 1, 0, 0, 1.0, 0.0, 0.0, True)
TRUSS.plot(0, 1, 1, 1, 1, 2.0, 0.0, 0.0, True)
#pass
PARTTIME = logtime(PARTTIME, "Plotting")
# Write results to file
TRUSS.writeresults(TRUSS.name + ' - Results.txt')
PARTTIME = logtime(PARTTIME, "Writing results to the output file")
if _ARDUINO:
# Closing Arduino port
SER.close()
if _LOG:
TAC = time.time()
TOTALTIME = TAC-TIC
if _UPDATING:
print "Update statistics:"
print "Totacully updated models: " + str(TRUSS.numofupdates[0])
print " Updates with running out of possibilities: " + str(TRUSS.numofupdates[2])
print " Updates did not finshed: " + str(TRUSS.numofupdates[1])
print 'Total time: ' + str("{:10.3f}".format(TOTALTIME))
| gpl-3.0 |
meduz/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
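# Typical usage of the estimators exported above (an illustrative sketch kept
# as a comment so that nothing is executed on import; the random data below is
# made up):
#
# import numpy as np
# from sklearn.covariance import EmpiricalCovariance, LedoitWolf
#
# X = np.random.RandomState(0).randn(60, 5) # 60 samples, 5 features
# emp = EmpiricalCovariance().fit(X) # plain sample covariance
# lw = LedoitWolf().fit(X) # shrunk, better-conditioned estimate
# print(emp.covariance_)
# print(lw.covariance_)
# print(lw.shrinkage_) # estimated shrinkage coefficient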
| bsd-3-clause |
xiaoh/sediFoam | cases/development-testing/multiParticles/particlePosition.py | 2 | 1084 | #!/usr/bin/python
import sys, os, os.path
import matplotlib.pyplot as plt
p = [];
for i in [1,2,3]:
cmd = 'grep "^' + str(i) +' 0.00" snapshot.bubblemd > data/p' + str(i) + '.dat';
os.system(cmd);
x_p = [];
y_p = [];
x_p_bench = [];
y_p_bench = [];
pData='data/p' + str(i) + '.dat';
fData = open(pData, 'r');
for line in fData.readlines():
data = [x.strip() for x in line.split(None)]
if (not data):
continue
x_p.append(data[4]);
y_p.append(data[5]);
p.append(plt.plot(x_p,y_p,'k-o',markersize = 5))
pData='data/origin/p' + str(i) + '.dat';
fData = open(pData, 'r');
for line in fData.readlines():
data = [x.strip() for x in line.split(None)]
if (not data):
continue
x_p_bench.append(data[4]);
y_p_bench.append(data[5]);
p.append(plt.plot(x_p_bench,y_p_bench,'r-o',markersize = 3))
lg = plt.legend([p[0],p[1]],["current result","benchmark"],loc=4)
lg.draw_frame(False)
plt.savefig('data/multiParticlesPositionDia.pdf');
| gpl-2.0 |
IndraVikas/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/pylab_examples/barchart_demo2.py | 6 | 4284 | """
Thanks Josh Hemann for the example
This examples comes from an application in which grade school gym
teachers wanted to be able to show parents how their child did across
a handful of fitness tests, and importantly, relative to how other
children did. To extract the plotting code for demo purposes, we'll
just make up some data for little Johnny Doe...
"""
import numpy as np
import matplotlib.pyplot as plt
import pylab
from matplotlib.ticker import MaxNLocator
student = 'Johnny Doe'
grade = 2
gender = 'boy'
cohortSize = 62 # The number of other 2nd grade boys
numTests = 5
testNames = ['Pacer Test', 'Flexed Arm\n Hang', 'Mile Run', 'Agility',
'Push Ups']
testMeta = ['laps', 'sec', 'min:sec', 'sec', '']
scores = ['7', '48', '12:52', '17', '14']
rankings = np.round(np.random.uniform(0, 1, numTests)*100, 0)
fig, ax1 = plt.subplots(figsize=(9, 7))
plt.subplots_adjust(left=0.115, right=0.88)
fig.canvas.set_window_title('Eldorado K-8 Fitness Chart')
pos = np.arange(numTests)+0.5 # Center bars on the Y-axis ticks
rects = ax1.barh(pos, rankings, align='center', height=0.5, color='m')
ax1.axis([0, 100, 0, 5])
pylab.yticks(pos, testNames)
ax1.set_title('Johnny Doe')
plt.text(50, -0.5, 'Cohort Size: ' + str(cohortSize),
horizontalalignment='center', size='small')
# Set the right-hand Y-axis ticks and labels and set X-axis tick marks at the
# deciles
ax2 = ax1.twinx()
ax2.plot([100, 100], [0, 5], 'white', alpha=0.1)
ax2.xaxis.set_major_locator(MaxNLocator(11))
xticks = pylab.setp(ax2, xticklabels=['0', '10', '20', '30', '40', '50', '60',
'70', '80', '90', '100'])
ax2.xaxis.grid(True, linestyle='--', which='major', color='grey',
alpha=0.25)
#Plot a solid vertical gridline to highlight the median position
plt.plot([50, 50], [0, 5], 'grey', alpha=0.25)
# Build up the score labels for the right Y-axis by first appending a carriage
# return to each string and then tacking on the appropriate meta information
# (i.e., 'laps' vs 'seconds'). We want the labels centered on the ticks, so if
# there is no meta info (like for pushups) then don't add the carriage return to
# the string
def withnew(i, scr):
if testMeta[i] != '':
return '%s\n' % scr
else:
return scr
scoreLabels = [withnew(i, scr) for i, scr in enumerate(scores)]
scoreLabels = [i+j for i, j in zip(scoreLabels, testMeta)]
# set the tick locations
ax2.set_yticks(pos)
# set the tick labels
ax2.set_yticklabels(scoreLabels)
# make sure that the limits are set equally on both yaxis so the ticks line up
ax2.set_ylim(ax1.get_ylim())
ax2.set_ylabel('Test Scores')
#Make list of numerical suffixes corresponding to position in a list
# 0 1 2 3 4 5 6 7 8 9
suffixes = ['th', 'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th']
ax2.set_xlabel('Percentile Ranking Across ' + str(grade) + suffixes[grade]
+ ' Grade ' + gender.title() + 's')
# Lastly, write in the ranking inside each bar to aid in interpretation
for rect in rects:
# Rectangle widths are already integer-valued but are floating
# type, so it helps to remove the trailing decimal point and 0 by
# converting width to int type
width = int(rect.get_width())
# Figure out what the last digit (width modulo 10) so we can add
# the appropriate numerical suffix (e.g., 1st, 2nd, 3rd, etc)
lastDigit = width % 10
# Note that 11, 12, and 13 are special cases
if (width == 11) or (width == 12) or (width == 13):
suffix = 'th'
else:
suffix = suffixes[lastDigit]
rankStr = str(width) + suffix
if (width < 5): # The bars aren't wide enough to print the ranking inside
xloc = width + 1 # Shift the text to the right side of the right edge
clr = 'black' # Black against white background
align = 'left'
else:
xloc = 0.98*width # Shift the text to the left side of the right edge
clr = 'white' # White on magenta
align = 'right'
# Center the text vertically in the bar
yloc = rect.get_y()+rect.get_height()/2.0
ax1.text(xloc, yloc, rankStr, horizontalalignment=align,
verticalalignment='center', color=clr, weight='bold')
plt.show()
| mit |
cavestruz/L500analysis | fitting/ICM_profiles/training_io/training_steps.py | 1 | 4868 | from sklearn import linear_model
from sklearn.cross_validation import train_test_split
import numpy as np
class TrainModel :
'''
| Parameters
| ----------
| data_X : numpy array or sparse matrix of shape [n_samples,n_features]
| ( Feature data )
| data_y : numpy array of shape [n_samples, n_targets]
| ( Target values )
|
| train_size : Fraction of sample for training vs. testing. Default is 0.9.
|
| Attributes
| ----------
| trained_model : Trained linear regression model that will output a target
| value on other values that are not necessarily in the
| training data
| coefficients : Coefficients of the regression
| residual_sum_of_sq : Mean squared error of the fit on the test split
| variance_score : Variance (R^2) score of the regression on the test split
| train_test_samples : dictionary of the train/test split features and targets
|
'''
def __init__( self, features, targets, train_size=0.9 ) :
'''This expects data_X, data_y, optional kw for train_size'''
self.features = features
self.targets = targets
self.train_size = train_size
self._test_split()
self._train_model()
def _test_split( self ) :
''' Saves a subset for testing.'''
assert( self.train_size > 0.0 and self.train_size < 1.0 )
self.train_test_samples = dict(zip(['data_X_train', 'data_X_test',
'data_y_train', 'data_y_test'],
train_test_split(self.features,
self.targets,
train_size=self.train_size)))
def _train_model( self ) :
# Create the linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training set
self.trained_model = regr.fit(self.train_test_samples['data_X_train'],
self.train_test_samples['data_y_train'])
# The coefficients
self.coefficients = regr.coef_
# The mean square error
self.residual_sum_of_sq = \
np.mean((regr.predict(self.train_test_samples['data_X_test']) - \
self.train_test_samples['data_y_test']) ** 2)
# Explained variance score: 1 is perfect prediction
self.variance_score = regr.score(self.train_test_samples['data_X_test'],
self.train_test_samples['data_y_test'])
def get_trained_model(collected_samples, ibin_radial=None) :
'''
Return a TrainModel instance that has been trained on collected samples. This
can be a model for integrated quantities or radial quantities.
'''
cs = collected_samples
if ibin_radial is not None:
cs.set_radial_bin(ibin_radial)
cs.get_targets()
cs.get_features()
return TrainModel(features=cs.features, targets=cs.targets)
def get_trained_model_profile( collected_samples, rbins ) :
'''
Return a list of trained models for every radial point in the profiles
'''
return [ get_trained_model(collected_samples, ibin_radial=i).trained_model for i in range(len(rbins)) ]
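# Example usage (a minimal sketch; the synthetic feature/target arrays below
# are made up for illustration and are not part of the L500analysis data):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3) # 200 samples, 3 features
    y = X.dot(np.array([1.5, -2.0, 0.5])) + 0.01 * rng.randn(200) # noisy linear targets
    model = TrainModel(features=X, targets=y, train_size=0.9)
    print(model.coefficients) # fitted regression coefficients
    print(model.variance_score) # R^2 score on the held-out test split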
| mit |
lewismc/climate | RCMES/run_RCMES.py | 3 | 13922 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import ssl
import yaml
import operator
from datetime import datetime
from glob import glob
from getpass import getpass
import numpy as np
# Need these lines to run RCMES through SSH without X11
import matplotlib
matplotlib.use('Agg')
import ocw.utils as utils
import ocw.dataset_processor as dsp
from ocw.dataset import Bounds
from ocw.dataset_loader import DatasetLoader
from metrics_and_plots import *
def load_datasets_from_config(extra_opts, *loader_opts):
'''
Generic dataset loading function.
'''
for opt in loader_opts:
loader_name = opt['loader_name']
if loader_name == 'esgf':
if extra_opts['password'] is None:
extra_opts['username'] = raw_input('Enter your ESGF OpenID:\n')
extra_opts['password'] = getpass(
prompt='Enter your ESGF password:\n')
opt['esgf_username'] = extra_opts['username']
opt['esgf_password'] = extra_opts['password']
elif loader_name == 'rcmed':
opt['min_lat'] = extra_opts['min_lat']
opt['max_lat'] = extra_opts['max_lat']
opt['min_lon'] = extra_opts['min_lon']
opt['max_lon'] = extra_opts['max_lon']
opt['start_time'] = extra_opts['start_time']
opt['end_time'] = extra_opts['end_time']
loader = DatasetLoader(*loader_opts)
loader.load_datasets()
return loader.datasets
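# For orientation, a minimal configuration file for this script might look
# roughly like the sketch below (hypothetical values; only keys actually read
# in this script are shown, and each entry under 'datasets' also needs the
# loader-specific options expected by ocw.dataset_loader.DatasetLoader):
#
#   time:
#     maximum_overlap_period: False
#     start_time: 1998-01-01
#     end_time: 2007-12-31
#     temporal_resolution: monthly
#     month_start: 1
#     month_end: 12
#     average_each_year: False
#   space:
#     min_lat: 25.0
#     max_lat: 45.0
#     min_lon: -110.0
#     max_lon: -90.0
#   datasets:
#     - loader_name: local # hypothetical loader name
#       # ... loader-specific options ...
#   regrid:
#     regrid_on_reference: True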
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
config_file = str(sys.argv[1])
print('Reading the configuration file {}'.format(config_file))
config = yaml.load(open(config_file))
time_info = config['time']
temporal_resolution = time_info['temporal_resolution']
# Read time info
maximum_overlap_period = time_info.get('maximum_overlap_period', False)
if not maximum_overlap_period:
start_time = datetime.strptime(time_info['start_time'].strftime('%Y%m%d'),'%Y%m%d')
end_time = datetime.strptime(time_info['end_time'].strftime('%Y%m%d'),'%Y%m%d')
else:
# These values will be determined after datasets are loaded
start_time, end_time = None, None
# Read space info
space_info = config['space']
if not 'boundary_type' in space_info:
min_lat = space_info['min_lat']
max_lat = space_info['max_lat']
min_lon = space_info['min_lon']
max_lon = space_info['max_lon']
else:
domain = space_info['boundary_type']
if 'CORDEX' in domain:
domain = domain.replace('CORDEX', '').lower()
domain = domain.strip()
min_lat, max_lat, min_lon, max_lon = utils.CORDEX_boundary(domain)
# Additional arguments for the DatasetLoader
extra_opts = {'min_lat': min_lat, 'max_lat': max_lat, 'min_lon': min_lon,
'max_lon': max_lon, 'start_time': start_time,
'end_time': end_time, 'username': None, 'password': None}
# Get the dataset loader options
data_info = config['datasets']
# Extract info we don't want to put into the loader config
# Multiplying Factor to scale obs by. Currently only supported for reference
# (first) dataset. We should instead make this a parameter for each
# loader and Dataset objects.
fact = data_info[0].pop('multiplying_factor', 1)
""" Step 1: Load the datasets """
print('Loading datasets:\n{}'.format(data_info))
datasets = load_datasets_from_config(extra_opts, *data_info)
multiplying_factor = np.ones(len(datasets))
multiplying_factor[0] = fact
names = [dataset.name for dataset in datasets]
for i, dataset in enumerate(datasets):
res = dataset.temporal_resolution()
if res == 'daily' or res == 'monthly':
datasets[i] = dsp.normalize_dataset_datetimes(dataset, res)
if multiplying_factor[i] != 1:
datasets[i].values *= multiplying_factor[i]
""" Step 2: Subset the data for temporal and spatial domain """
# Create a Bounds object to use for subsetting
if maximum_overlap_period:
start_time, end_time = utils.get_temporal_overlap(datasets)
print('Maximum overlap period')
print('start_time: {}'.format(start_time))
print('end_time: {}'.format(end_time))
if temporal_resolution == 'monthly' and end_time.day !=1:
end_time = end_time.replace(day=1)
for i, dataset in enumerate(datasets):
min_lat = np.max([min_lat, dataset.lats.min()])
max_lat = np.min([max_lat, dataset.lats.max()])
min_lon = np.max([min_lon, dataset.lons.min()])
max_lon = np.min([max_lon, dataset.lons.max()])
if 'boundary_type' not in space_info:
bounds = Bounds(lat_min=min_lat,
lat_max=max_lat,
lon_min=min_lon,
lon_max=max_lon,
start=start_time,
end=end_time)
else:
bounds = Bounds(boundary_type=space_info['boundary_type'],
start=start_time,
end=end_time)
for i, dataset in enumerate(datasets):
datasets[i] = dsp.subset(dataset, bounds)
if dataset.temporal_resolution() != temporal_resolution:
datasets[i] = dsp.temporal_rebin(datasets[i], temporal_resolution)
# Temporally subset both observation and model datasets
# for the user specified season
month_start = time_info['month_start']
month_end = time_info['month_end']
average_each_year = time_info['average_each_year']
# For now we will treat the first listed dataset as the reference dataset for
# evaluation purposes.
for i, dataset in enumerate(datasets):
datasets[i] = dsp.temporal_subset(dataset, month_start, month_end,
average_each_year)
reference_dataset = datasets[0]
target_datasets = datasets[1:]
reference_name = names[0]
target_names = names[1:]
# generate grid points for regridding
if config['regrid']['regrid_on_reference']:
new_lat = reference_dataset.lats
new_lon = reference_dataset.lons
else:
delta_lat = config['regrid']['regrid_dlat']
delta_lon = config['regrid']['regrid_dlon']
    nlat = int(round((max_lat - min_lat) / delta_lat)) + 1
    nlon = int(round((max_lon - min_lon) / delta_lon)) + 1
new_lat = np.linspace(min_lat, max_lat, nlat)
new_lon = np.linspace(min_lon, max_lon, nlon)
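# Worked example with illustrative numbers: min_lat=-45, max_lat=45 and regrid_dlat=0.5
# give nlat = (45 - (-45))/0.5 + 1 = 181, so np.linspace(-45, 45, 181) reproduces the
# requested 0.5-degree grid spacing.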
# Get flag for boundary checking for regridding. By default, this is set to True
# since the main intent of this program is to evaluate RCMs. However, it can be
# used for GCMs in which case it should be set to False to save time.
boundary_check = config['regrid'].get('boundary_check', True)
# number of target datasets (usually models, but can also be obs / reanalysis)
ntarget = len(target_datasets)
print('Dataset loading completed')
print('Reference data: {}'.format(reference_name))
print('Number of target datasets: {}'.format(ntarget))
for target_name in target_names:
print(target_name)
""" Step 3: Spatial regriding of the datasets """
print('Regridding datasets: {}'.format(config['regrid']))
if not config['regrid']['regrid_on_reference']:
reference_dataset = dsp.spatial_regrid(reference_dataset, new_lat, new_lon)
print('Reference dataset has been regridded')
for i, dataset in enumerate(target_datasets):
target_datasets[i] = dsp.spatial_regrid(dataset, new_lat, new_lon,
boundary_check=boundary_check)
print('{} has been regridded'.format(target_names[i]))
print('Propagating missing data information')
datasets = dsp.mask_missing_data([reference_dataset]+target_datasets)
reference_dataset = datasets[0]
target_datasets = datasets[1:]
""" Step 4: Checking and converting variable units """
print('Checking and converting variable units')
reference_dataset = dsp.variable_unit_conversion(reference_dataset)
for i, dataset in enumerate(target_datasets):
target_datasets[i] = dsp.variable_unit_conversion(dataset)
print('Generating multi-model ensemble')
if len(target_datasets) >= 2:
target_datasets.append(dsp.ensemble(target_datasets))
target_names.append('ENS')
""" Step 5: Generate subregion average and standard deviation """
if config['use_subregions']:
# sort the subregion by region names and make a list
subregions= sorted(config['subregions'].items(),key=operator.itemgetter(0))
# number of subregions
nsubregion = len(subregions)
print('Calculating spatial averages and standard deviations of {} subregions'
.format(nsubregion))
reference_subregion_mean, reference_subregion_std, subregion_array = (
utils.calc_subregion_area_mean_and_std([reference_dataset], subregions))
target_subregion_mean, target_subregion_std, subregion_array = (
utils.calc_subregion_area_mean_and_std(target_datasets, subregions))
""" Step 6: Write a netCDF file """
workdir = config['workdir']
if workdir[-1] != '/':
workdir = workdir+'/'
print('Writing a netcdf file: ',workdir+config['output_netcdf_filename'])
if not os.path.exists(workdir):
    os.makedirs(workdir)
if config['use_subregions']:
dsp.write_netcdf_multiple_datasets_with_subregions(
reference_dataset, reference_name, target_datasets, target_names,
path=workdir+config['output_netcdf_filename'],
subregions=subregions, subregion_array=subregion_array,
ref_subregion_mean=reference_subregion_mean,
ref_subregion_std=reference_subregion_std,
model_subregion_mean=target_subregion_mean,
model_subregion_std=target_subregion_std)
else:
dsp.write_netcdf_multiple_datasets_with_subregions(
reference_dataset, reference_name, target_datasets,
target_names,
path=workdir+config['output_netcdf_filename'])
""" Step 7: Calculate metrics and draw plots """
nmetrics = config['number_of_metrics_and_plots']
if config['use_subregions']:
Map_plot_subregion(subregions, reference_dataset, workdir)
if nmetrics > 0:
print('Calculating metrics and generating plots')
for imetric in np.arange(nmetrics)+1:
metrics_name = config['metrics'+'%1d' %imetric]
plot_info = config['plots'+'%1d' %imetric]
file_name = workdir+plot_info['file_name']
print('metrics {0}/{1}: {2}'.format(imetric, nmetrics, metrics_name))
default_shape = (int(np.ceil(np.sqrt(ntarget + 2))),
int(np.ceil(np.sqrt(ntarget + 2))))
if metrics_name == 'Map_plot_bias_of_multiyear_climatology':
row, column = plot_info.get('subplots_array', default_shape)
if 'map_projection' in plot_info.keys():
Map_plot_bias_of_multiyear_climatology(
reference_dataset, reference_name, target_datasets, target_names,
file_name, row, column,
map_projection=plot_info['map_projection'])
else:
Map_plot_bias_of_multiyear_climatology(
reference_dataset, reference_name, target_datasets, target_names,
file_name, row, column)
elif metrics_name == 'Taylor_diagram_spatial_pattern_of_multiyear_climatology':
Taylor_diagram_spatial_pattern_of_multiyear_climatology(
reference_dataset, reference_name, target_datasets, target_names,
file_name)
elif config['use_subregions']:
if (metrics_name == 'Timeseries_plot_subregion_interannual_variability'
and average_each_year):
row, column = plot_info.get('subplots_array', default_shape)
Time_series_subregion(
reference_subregion_mean, reference_name, target_subregion_mean,
target_names, False, file_name, row, column,
x_tick=['Y'+str(i+1)
for i in np.arange(target_subregion_mean.shape[1])])
if (metrics_name == 'Timeseries_plot_subregion_annual_cycle'
and not average_each_year and month_start==1 and month_end==12):
row, column = plot_info.get('subplots_array', (1, 1))
Time_series_subregion(
reference_subregion_mean, reference_name,
target_subregion_mean, target_names, True,
file_name, row, column,
x_tick=['J','F','M','A','M','J','J','A','S','O','N','D'])
if (metrics_name == 'Portrait_diagram_subregion_interannual_variability'
and average_each_year):
Portrait_diagram_subregion(reference_subregion_mean, reference_name,
target_subregion_mean, target_names,
False, file_name)
if (metrics_name == 'Portrait_diagram_subregion_annual_cycle'
and not average_each_year and month_start==1 and month_end==12):
Portrait_diagram_subregion(reference_subregion_mean, reference_name,
target_subregion_mean, target_names,
True, file_name)
else:
print('please check the currently supported metrics')
| apache-2.0 |
TomAugspurger/pandas | pandas/tests/series/test_combine_concat.py | 1 | 4030 | import numpy as np
import pytest
import pandas as pd
from pandas import Series
class TestSeriesConcat:
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = pd.concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = pd.concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
assert result.kind == expected
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = pd.concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
def test_concat_empty_series_dtypes_triple(self):
assert (
pd.concat(
[Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
).dtype
== np.object_
)
def test_concat_empty_series_dtype_category_with_array(self):
# GH 18515
assert (
pd.concat(
[Series(np.array([]), dtype="category"), Series(dtype="float64")]
).dtype
== "float64"
)
def test_concat_empty_series_dtypes_sparse(self):
result = pd.concat(
[
Series(dtype="float64").astype("Sparse"),
Series(dtype="float64").astype("Sparse"),
]
)
assert result.dtype == "Sparse[float64]"
result = pd.concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
# TODO: release-note: concat sparse dtype
expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
result = pd.concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
)
# TODO: release-note: concat sparse dtype
expected = pd.SparseDtype("object")
assert result.dtype == expected
| bsd-3-clause |
lkilcommons/atmodweb | atmodweb/atmodweb.py | 1 | 86988 | import cherrypy #Python web server
#Main imports
import numpy as np
import sys, pdb, textwrap, datetime, os, time, glob, traceback, shutil, subprocess, gc
import socket #to figure out our hostname
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as pp
from mpl_toolkits.basemap import Basemap
from matplotlib import ticker
from matplotlib.colors import LogNorm, Normalize
from collections import OrderedDict
import random
import logging
logging.basicConfig(level=logging.DEBUG)
from cherrypy.lib import auth_digest
from cherrypy._cpdispatch import Dispatcher
import copy #Dicts must be deepcopied.
#Import the model running code
from atmodbackend import ModelRunner, MsisRun, ModelRun, PlotDataHandler
# create logger
log = logging.getLogger('atmodweb_root')
log.setLevel(logging.DEBUG)
# create file handler which logs everything except debug messages
#fh = logging.FileHandler('atmodweb_root_%s.log' % (datetime.datetime.now().strftime("%c")))
#fh.setLevel(logging.INFO)
# create console handler with a lower log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
#log.addHandler(fh)
log.addHandler(ch)
def strongly_expire(func):
"""Decorator that sends headers that instruct browsers and proxies not to cache.
"""
def newfunc(*args, **kwargs):
cherrypy.response.headers['Expires'] = 'Sun, 19 Nov 1978 05:00:00 GMT'
cherrypy.response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'
cherrypy.response.headers['Pragma'] = 'no-cache'
return func(*args, **kwargs)
return newfunc
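# Illustrative usage sketch (the handler class and method below are hypothetical): the
# decorator is stacked on any CherryPy-exposed callable whose response should never be
# cached by the browser or an intermediate proxy.
#
# class ExampleHandler(object):
#     @cherrypy.expose
#     @strongly_expire
#     def latest_plot(self):
#         return "always re-fetched"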
class ansicolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class ControlState(dict):
"""
Subclass dict to make a dictionary that handles synchronizing with the cherrypy session before getting and after setting
TODO Document Controlstate
"""
def __init__(self, *args):
self._sync = False #Add a flag to determine whether or not we sync with the session on set/get
self.log = logging.getLogger(self.__class__.__name__)
        dict.__init__(self, *args)
@property
def sync(self):
return self._sync
@sync.setter
def sync(self, value):
oldval = self._sync
self._sync = value
if value and not oldval: #If we've switched to syncing
self.push() #Force an update of the session
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if self.sync:
cherrypy.session[key]=value
def __getitem__(self, key):
if self.sync:
value = cherrypy.session.get(key)
if value is not None:
dict.__setitem__(self,key,value) #Make sure local copy is up to date
else:
return dict.__getitem__(self,key)
else:
value = dict.__getitem__(self, key)
return value
def push(self):
"""
Update the session with the dictionary contents
"""
self.log.debug("Now pushing controlstate contents to cherrypy.session")
for key in self:
if key in cherrypy.session:
                self.log.warning('Overwriting already-existing key %s in session.' % (key))
cherrypy.session[key] = dict.__getitem__(self,key)
    def sanized_set(self,key,new_val,subkey=None):
        """
        Examines a new value that is going to be put into the controlstate to see if it passes
        basic sanity checks, like being the same type as the old value, and, if it's a list,
        having the same number of elements, or if a dict, having the same keys
        """
        if subkey is None:
            oldval = dict.__getitem__(self,key)
        else:
            olddict = dict.__getitem__(self,key)
            oldval = olddict[subkey]
        if isinstance(oldval,list):
            if not isinstance(new_val,list):
                self.log.debug("At key %s, attempted to replace %s with %s, which is not a list!" % (str(key),str(oldval),str(new_val)))
                return False
        return True #Passes the checks implemented so far
def ashtml(self,key):
"""
        Returns value at key as html to the best of its ability
"""
htmllines = []
typestr = lambda x: str(type(x)).replace('<','(').replace('>',')')
if key in self:
item = self[key]
if isinstance(item,dict):
#It's a dictionary, render it as a ul
htmllines.append('<ul ID="controlstate_%s">' % (key))
for ikey in item:
htmllines.append('<li ID="controlstate_%s_%s"> %s (%s) </li>' % (str(key),str(ikey),str(item[ikey]),typestr(item[ikey])))
htmllines.append('</ul>')
elif isinstance(item,list):
#Array or similar, render as ol
htmllines.append('<ol ID="controlstate_%s">' % (key))
for iind in range(len(item)):
htmllines.append('<li ID="controlstate_%s_%s"> %s (%s) </li>' % (str(key),str(iind),str(item[iind]),typestr(item[iind])))
htmllines.append('</ol>')
else:
#Render as a p
htmllines.append('<p ID="controlstate_%s">%s %s </p>' % (str(key),str(item),typestr(item)))
else:
raise ValueError('No such key in controlstate %s' % (key))
return "".join(htmllines)
def copyasdict(self):
"""
        Returns a copy of the ControlState as a normal dictionary
"""
newdict = dict()
for key in self:
item = dict.__getitem__(self,key)
if hasattr(item,'__deepcopy__'): #Make sure that we're not getting references
newitem = item.deepcopy()
elif hasattr(item,'__copy__') or isinstance(item,dict):
newitem = copy.deepcopy(item)
else:
newitem = item
newdict[key] = newitem
return newdict
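# Minimal usage sketch for ControlState (illustrative; assumes it runs inside a CherryPy
# request so cherrypy.session is available): until .sync is set True it behaves like a
# plain dict, and enabling sync push()es every existing key into the session, after which
# sets and gets are mirrored there.
#
# cs = ControlState()
# cs['plottype'] = 'map'  # stored locally only
# cs.sync = True          # triggers push(); 'plottype' now also lives in cherrypy.session
# cs['alt'] = 200.        # subsequent sets go to both places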
class ControlStateManager(object):
"""
Contains and manages controlstate objects
Since controlstates and the model runs they create are intrinsically linked,
this class is a 'peer' of ModelRunner. What that means in practice is that
ControlStateManager.states and ModelRunner.runs should be 1 to 1, i.e. the n-th
index of ModelRunner.runs should be the model run created by the n-th index of ControlStateManager.states
"""
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
self.states = [] #List of controlstates
self.default_controlstate = {'model_index':-1,'datetime':{'year':2000,'month':6,'day':21,'hour':12,'minute':0,'second':0},\
'lat':40.0274,'lon':105.2519,'alt':200.,\
'plottype':'map',\
'descstr':'intial plot','gif_mode':False,\
'xvar':'Longitude','xbounds':[-180.,180.],'xnpts':50,'xlog':False,'xmulti':False,'xunits':'deg','xdesc':'Longitude',\
'yvar':'Latitude','ybounds':[-90.,90.],'ynpts':50,'ylog':False,'ymulti':False,'yunits':'deg','ydesc':'Geodetic Latitude',\
'zvar':'Temperature','zbounds':[0.,1000.],'zlog':False,'zmulti':False,'zunits':'K','zdesc':'Atmospheric Temperature',\
'modelname':'msis','differencemode':False,'run_model_on_refresh':True,'controlstate_is_sane':None,
'thisplot':None,'thiscaption':None,'mapproj':'moll',
'drivers':{'dt':datetime.datetime(2000,6,21,12,0,0)},
'drivers_units':{'dt':None},
'drivers_ranges':{'dt':[datetime.datetime(1970,1,1),datetime.datetime(2012,12,31,23,59,59)]},
'drivers_descriptions':{'dt':'date and time of model run'},
'driver_lookup':True}
self._bound_meth = dict() # Methods which are bound to certain controlstate keys, such that when those keys are changed,
#the methods are called. Sort of an ad-hoc slots and signals a'la QT
self.errors = [] #If the Synchronizer produces an error during refresh, it will pass a message to controlstatemangager
#to tell it the controlstate is bad
self.controlstate = None
self.special_keys = ['lasterror'] #Keys that can be 'get'ed or 'set' which aren't in the current controlstate
#They all get some individual ControlState instance independant variable
#'lasterror' gets self.errors[-1], and is a way for the front end to get an
#error message that was created with self.error
        self.n_max_states = 10
        self.n_total_states = 0
self._lastind = -1
#Add our first controlstate
self.set_default_state()
@property
def lastind(self):
return self._lastind
@lastind.setter
def lastind(self, value):
value = int(value)
if value >= 0:
self.log.warn("Attempted to set control state history index to %s, Only negative indicies allowed!" % ( str(value) ))
value = -1
elif value < -1*len(self.states):
self.log.warn("Attempted to set control state history index to %s, but only %s plots in history!" % ( str(value),str(len(self.states))))
value = -1*len(self.states)
self._lastind = value
self.restore(self.lastind) #Restore that controlstate
def __contains__(self, key):
"""Make sure we can use 'in' with this, by passing all in calls through to the current controlstate, or,
if it's an exception key which has special behaviour when used in getting and setting, it will be in self.special_keys"""
if key not in self.special_keys:
return key in self.controlstate
else:
return True
def __call__(self):
#If we've gotten here, it's because we successfully refreshed, so this is an okay set of controlstate settings
self.controlstate['controlstate_is_sane']=True
self._lastind = -1 # Fix it so that the previous and next are always referenced to the plot that is displaying (i.e
#If we've gotten here then we've just plotted something, and this controlstate is getting appended to self.states,
#so the previous plot should be at -2, one before this one)
#Add to the history on call
self.n_total_states += 1
self.log.debug("Now adding controlstate %d to history." %(self.n_total_states))
self.log.debug("--Drivers dictionary is %s" %(str(self.controlstate['drivers'])))
if len(self.states) > 0:
changeddict = self.changed()
for key in changeddict:
self.log.debug("--Key %s changed from self.states[-1]\n %s \nto current\n %s" % (key,str(self.states[-1][key]),str(self.controlstate[key])))
self.states.append(self.controlstate.copyasdict())
if len(self.states)>self.n_max_states:
del self.states[0]
self.log.info( "Exceeded total number of stored controlstates %d. Removed %dth controlstate." %(self.n_max_states,self.n_total_states))
def __setitem__(self, key, value):
"""Setting on the ControlStateManager sets on the current ControlState and triggers bound methods"""
if key not in self.special_keys:
self.controlstate[key] = value
if key in self._bound_meth:
for meth in self._bound_meth[key]:
self.log.debug("ControlStateManager TRIGGERING BOUND METHOD %s of key %s new value %s" % (meth.__name__,str(key),str(self[key])))
meth()
def __getitem__(self, key):
"""Getting on the ControlStateManager gets on the current ControlState, unless the key is 'lasterror' in which case it gets the last bad controlstate"""
if key not in self.special_keys:
return self.controlstate[key]
elif key == 'lasterror': #Extra behaviors
if len(self.errors) > 0:
self.log.info("Returning error %s to controlstate caller" % (self.errors[-1]))
return self.errors[-1]
else:
return "No Error"
def set_default_state(self):
# change the working ControlState to the default one
self.controlstate = ControlState() # Overwrite anything already there
#Fill up the controlstate instance
self.log.debug("Overwriting working controlstate with default values.")
for key in self.default_controlstate:
self.controlstate[key]=self.default_controlstate[key]
self.controlstate.sync = True
def changed(self,key=None):
"""
        Has the value assigned to key changed since the last stored controlstate?
"""
if key is None and len(self.states)>=1:
ch = dict()
for key in self.states[-1]:
if self.changed(key):
ch[key]=self.controlstate[key]
return ch
elif len(self.states)>=1:
if key in self.states[-1]:
#Typecheck
if isinstance(self.controlstate[key],list) and isinstance(self.states[-1][key],list):
if len(self.controlstate[key]) != len(self.states[-1][key]):
return False
if isinstance(self.controlstate[key],dict) != isinstance(self.states[-1][key],dict):
return False
try:
oldeqnew = self.states[-1][key] != self.controlstate[key]
except:
self.log.error("Unable to compare this controlstate key %s with last, because compare statement errored" % (key))
self.log.error("self.states[-1][%s]=i\n%s\nself.controlstate[%s]=\n%s" % (key,str(self.states[-1][key]),key,str(self.controlstate[key])))
return False
return oldeqnew
else:
self.log.warn("Key %s not in controlstate memory\n" % (key))
return True
else:
self.log.warn("No historical controlstates are available (key %s) \n" % (key))
return True
def bind_changed(self,key,meth):
"""
        Binds a method with no inputs to call when the controlstate item corresponding to key is changed
"""
if key in self._bound_meth:
self._bound_meth[key].append(meth)
self.log.debug("Method %s added to bound methods for key %s" % (meth.__name__,key))
else:
self._bound_meth[key] = [meth]
self.log.debug("Key %s got it's first bound method %s" % (key,meth.__name__))
def trigger_changed(self,key,**kwargs):
"""
        Triggers any bound on-changed methods corresponding to key.
        Allows specification of keyword arguments (as opposed to setting the value, which doesn't).
        Specifically this gets used when setting subelements of dictionaries in this dictionary,
        because that doesn't trigger a '__set__'
"""
self.log.debug("MANUALLY triggering bound methods for key %s" % (key))
for meth in self._bound_meth[key]:
meth(**kwargs)
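    # Usage sketch for the ad-hoc signal/slot mechanism above (illustrative; the
    # Synchronizer below binds the real keys this way, e.g. 'xvar' -> xvar_changed):
    #
    # csm = ControlStateManager()
    # csm.bind_changed('xvar', some_callable)         # some_callable() fires on csm['xvar'] = ...
    # csm['xvar'] = 'Altitude'                        # triggers the bound method
    # csm.trigger_changed('drivers', subfield='dt')   # manual fire, used after editing a sub-dict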
def restore(self,ind):
"""Copies all values from controlstate at states[ind] to current controlstate"""
self.log.debug("Restoring controlstate from history at index[%d]" % (ind))
for key in self.states[ind]:
self.controlstate[key]=self.states[ind][key]
try:
if self.controlstate[key] != self.states[ind][key]:
self.log.debug("--On restore: differing values:\n--current controlstate %s value:\n ---%s \n with self.states[%s][%s], value:\n ---%s" % (key,
                    str(self.controlstate[key]),str(ind),str(key),str(self.states[ind][key])))
except: #This should not be a breaking error
pass
def restore_last_good(self):
"""
        Tries to find a stored controlstate for which the model run succeeded (controlstate_is_sane == True) and then restores those settings
to the current controlstate.
"""
found = False
tobedeleted = []
for i in range(len(self.states)):
#Work backwards
ind = -1-i
if self.states[ind]['controlstate_is_sane']:
found = True
self.restore(ind)
self.log.info("Restoring last controlstate from history for which model run succeeded. Index %d is known good" % (ind))
break
# else:
# tobedeleted.append(ind)
# self.log.info("Controlstate at index %d was unsuccessful in running model and will be removed from the history" % (ind) )
#for ind in tobedeleted:
# del self.states[ind]
if not found:
self.set_default_state()
def error(self,message,roll_back=True):
"""This method is called when we want to throw an error back to the front end because something in the control state is incorrect,
or badly formatted. Stores a custom error message which can be recalled with a GET from the UiHandler/Frontend. The
roll_back keyword argument, when true, will restore the controlstate to the last known good value"""
#If we got here, the controlstate settings are bad. We will set the appropriate flag
self.controlstate['controlstate_is_sane']=False
#Add the message to the errors list
self.errors.append(message)
if roll_back:
self.restore_last_good()
class Synchronizer(object):
"""
Reacts to UiHandler PUT changes in the controlstate via the bound methods
Also has methods which are called by UiHandler POST
Prepares Model Runs and Refreshes the FakeCanvas figure via PlotDataHandler
"""
def __init__(self,canvas,uihand):
#mr is the ModelRunner instance
#csm is the ControlStateManager instance
self.log = logging.getLogger(self.__class__.__name__)
self.mr = None #Placeholder for ModelRunner instance
self.canvas = canvas #The FakeCanvas instance we will plot on
self.uihand = uihand
self.controlstate = self.uihand.controlstate #The UI Handler has to spin up the controlstate b
self.pdh = PlotDataHandler(self.canvas,controlstate=self.controlstate) #PDH needs canvas, obviously since it needs to plot
self.initModelRunner()
#Bind on changed methods
self.log.info("Binding execute on change controlstate methods")
self.controlstate.bind_changed('plottype',self.refreshSelectOptions)
self.controlstate.bind_changed('xbounds',self.xbounds_changed)
self.controlstate.bind_changed('ybounds',self.ybounds_changed)
self.controlstate.bind_changed('xvar',self.xvar_changed)
self.controlstate.bind_changed('yvar',self.yvar_changed)
self.controlstate.bind_changed('zvar',self.zvar_changed)
self.controlstate.bind_changed('datetime',self.datetime_changed)
self.controlstate.bind_changed('drivers',self.drivers_changed)
self.controlstate.bind_changed('mapproj',self.mapproj_changed)
self.controlstate.bind_changed('modelname',self.modelname_changed)
def initModelRunner(self):
"""Does a first run of the model specified in the controlstate to make sure that there's a reference run.
A lot of code looks to the previously run model (i.e. to populate the selects for x, y and z)"""
if self.mr is None:
self.mr = ModelRunner(firstmodel=self.controlstate['modelname'])
else:
#Don't create a new modelrunner if we changed models, just
#use the same one...a heterogeneous collection of ModelRun
            #objects in mr.runs doesn't really matter much unless
#we're trying to use peering, which AtModWeb doesn't
self.mr.model = self.controlstate['modelname']
self.mr.init_nextrun()
#Make sure the default selection is sane and set the appropriate things in the next model run instance
self.prepare_model_run()
#Now run the model
self.mr.nextrun.populate()
#Append the model run to the runs database
self.mr()
def refreshModelRunOptions(self):
"""To be called whenever a new model run is instantiated. Updates all controlstate options which change with model run"""
        #Update the drivers dictionary in the controlstate from the most recent model run
self.log.info("refreshModelRunOptions called...copying drivers dictionary and clearing plot data handler data")
self.pdh.clear_data()
self.log.debug(self.mr.runs[-1].drivers.__class__.__name__+':'+str(self.mr.runs[-1].drivers))
self.controlstate['drivers']=self.mr.runs[-1].drivers.copyasdict()
for key in self.controlstate['datetime']:
self.controlstate['datetime'][key] = getattr(self.mr.runs[-1].drivers['dt'],key)
self.controlstate['drivers_units']=copy.deepcopy(self.mr.runs[-1].drivers.units)
self.controlstate['drivers_ranges']=copy.deepcopy(self.mr.runs[-1].drivers.allowed_range)
self.controlstate['drivers_descriptions']=copy.deepcopy(self.mr.runs[-1].drivers.descriptions)
def autoscale(self):
"""Updates the xbounds,ybounds and zbounds in the controlstate from the lims dictionary in last model run"""
self.mr.runs[-1].autoscale_all_lims() #Sets all lims to their min and max in the model data
xdata,xlims,xunits,xdesc = self.mr[self.controlstate['xvar']] #returns data,lims
ydata,ylims,yunits,ydesc = self.mr[self.controlstate['yvar']] #returns data,lims
zdata,zlims,zunits,zdesc = self.mr[self.controlstate['zvar']] #returns data,lims
self.controlstate['xbounds']=xlims
self.controlstate['ybounds']=ylims
self.controlstate['zbounds']=zlims
def drivers_changed(self,subfield=None):
"""On changed to controlstate['drivers'], update the next model run drivers"""
        #Currently done in refresh?
if subfield is None:
#Nothing calls this without a subfield right now
#Do Nothing
pass
else:
old_driver_val = str(self.mr.nextrun.drivers[subfield]) if subfield in self.mr.nextrun.drivers else "no previous value"
self.log.debug("drivers_changed: next model run driver %s changed from %s to %s" % (subfield,old_driver_val,
str(self.controlstate['drivers'][subfield])))
self.mr.nextrun.drivers[subfield] = self.controlstate['drivers'][subfield]
if subfield == 'dt':
for f in self.controlstate['datetime']:
self.controlstate['datetime'][f] = getattr(self.controlstate['drivers']['dt'],f)
def datetime_changed(self,subfield=None):
"""Process a change in controlstate['datetime'] dict by setting controlstate['drivers']['dt']"""
#Needs subfield kwarg
#Convert the dict to an actual datetime
dt = datetime.datetime(**self.controlstate['datetime'])
self.controlstate['drivers']['dt'] = dt
#Have to explicitly trigger changed since we aren't explicitly
#setting controlstate['drivers'] TODO find a better way
self.controlstate.trigger_changed('drivers',subfield='dt')
def xvar_changed(self):
"""Updates the xbounds in the controlstate when a new xvar is selected"""
xdata,xlims,xunits,xdesc = self.mr[self.controlstate['xvar']] #returns data,lims, works for multi
self.controlstate['xbounds']=xlims
self.controlstate['xunits']=xunits
self.controlstate['xdesc']=xdesc
def yvar_changed(self):
"""Updates the ybounds in the controlstate when a new yvar is selected"""
ydata,ylims,yunits,ydesc = self.mr[self.controlstate['yvar']] #returns data,lims, works for multi
self.controlstate['ybounds']=ylims
self.controlstate['yunits']=yunits
self.controlstate['ydesc']=ydesc
def zvar_changed(self):
"""Updates the zbounds in the controlstate when a new zvar is selected"""
zdata,zlims,zunits,zdesc = self.mr[self.controlstate['zvar']] #returns data,lims, works for multi
self.controlstate['zbounds']=zlims
self.controlstate['zunits']=zunits
self.controlstate['zdesc']=zdesc
def xbounds_changed(self):
"""Function which is called whenever the xbounds are changed in the controlstate. Changes limits for next model run"""
if not self.is_multi('x') and self.is_position('x'):
self.mr.nextrun.vars.lims[self.controlstate['xvar']] = self.controlstate['xbounds']
elif self.is_multi('x') and not isinstance(self.controlstate['xbounds'][0],list):
nvars = len(self.controlstate['xvar'])
newlst = []
for k in range(nvars):
newlst.append(self.controlstate['xbounds'])
self.controlstate['xbounds'] = newlst
def ybounds_changed(self):
"""Function which is called whenever the ybounds are changed in the controlstate. Changes limits for next model run"""
if not self.is_multi('y') and self.is_position('y'):
self.mr.nextrun.vars.lims[self.controlstate['yvar']] = self.controlstate['ybounds']
elif self.is_multi('y') and not isinstance(self.controlstate['ybounds'][0],list):
nvars = len(self.controlstate['yvar'])
newlst = []
for k in range(nvars):
newlst.append(self.controlstate['ybounds'])
self.controlstate['ybounds'] = newlst
def mapproj_changed(self):
"""Map projection type changed"""
if self.controlstate['mapproj'] in self.pdh.supported_projections:
self.pdh.mapproj = self.controlstate['mapproj']
def modelname_changed(self):
"""Model name is changed, big reinit"""
if self.controlstate['modelname'] in ['msis','iri']:
self.initModelRunner()
self.refreshSelectOptions()
self.refreshModelRunOptions()
#Set what we are allowed to plot
def refreshSelectOptions(self):
"""
Creates the (x,y,z)var_options JSON-style dictionaries used to populate the variable select elements.
Relies on the PlotDataHandler plot properies settings to figure out what are allowed variables
for x, y and z axes for a particular type of plot (i.e. a 'map' type plot can only have 'Longitude' as 'xvar'
'Latitude' as 'yvar' and anything as 'zvar').
Get the current plottype from the controlstate, instead of using the PlotDataHandler setting, because
the plotDataHandler setting is only updated when the 'refresh' method is called, and front end needs to know
what the available variables are to populate the HTML select element for picking variables.
"""
allowed = dict()
allowed['x'] = self.plotProperty('x_allowed')
allowed['y'] = self.plotProperty('y_allowed')
allowed['z'] = self.plotProperty('z_allowed')
all_options_dict = dict()
for k in self.mr.runs[-1].vars:
all_options_dict[k]=k
for var in ['x','y','z']:
options_dict = dict()
            #Short-circuiting options
if 'all' in allowed[var]:
options_dict=copy.deepcopy(all_options_dict)
elif 'none' in allowed[var]:
pass #Return an empty
#Iterate through list of allowed values
else:
for value in allowed[var]:
if value == 'position':
for k in all_options_dict:
if all_options_dict[k] in self.mr.nextrun.vars: #Only position (input) variables are in a run before execution
options_dict[k] = all_options_dict[k]
elif value == 'notposition':
for k in all_options_dict:
if all_options_dict[k] not in self.mr.nextrun.vars: #Only position (input) variables are in a run before execution
options_dict[k] = all_options_dict[k]
elif value in all_options_dict:
options_dict[value]=value
self.controlstate[var+'var_options']=options_dict
def plotProperty(self,prop):
"""Simple convenience function to retrieve a property of the current type of plot"""
#Current plottype
cpt = self.controlstate['plottype']
return self.pdh.plottypes[cpt][prop]
def is_multi(self,coord):
"""
Convenience function for testing whether the currently selected x or y variables (controlstate['xvar'],etc) are multiple vars
i.e. stored as lists internally.
"""
return isinstance(self.controlstate[coord+'var'],(list,tuple)) #just tests if the controlstate is a list/tuple
def is_position(self,coord):
"""
Convenience function for testing whether the currently selected x or y variables are positions.
i.e do they have names 'Latitude','Longitude' or 'Altitude'.
Handles the possibility of the variables being multiple variables on the same axes, so this is
preferred way of checking instead of
        .. code-block:: python
self.controlstate['xvar'] in ['Latitude','Longitude','Altitude']
"""
if not self.is_multi(coord):
return self.controlstate[coord+'var'] in self.mr.nextrun.vars
else:
return any(v in self.mr.nextrun.vars for v in self.controlstate[coord+'var'])
def prepare_model_run(self):
"""
Determines which position variables (lat,lon, or alt) are constant,
given the current settings of the xvar, yvar and zvar.
Tells the ModelRun instance that is about to be populated with data,
i.e. the model is going to run, what shape that data will be.
If it's a line we are plotting we only need 1-d data in the model and can save some time,
but if we will be plotting a pcolor or map, we'll need 2-d data.
Also tells that ModelRun which position variables will be constant,
and which independant: i.e. if we are plotting a 'Temperature'
vs. 'Altitude' plot, then we want to determine which latitude
and longitude values the user wants the model to calculate the altitude profile
for from the controlstate (and lat and lon will be constant for the model run).
"""
#Begin by assigning all of the position variables their approprate output
#from the controls structure. These are all single (scalar) values set by
#the frontend HTML elements for Lat, Lon and Alt.
#Everything is GEODETIC, not GEOCENTRIC, because that's what MSIS expects.
#Some of these position variables will be ignored
#since one must be on a plot axes if line plot,
#or two if a colored plot (pcolor or map)
#These values will be the values for their associated
#position variable if it is not one of the axes of the plot
self.mr.nextrun.vars['Latitude'] = self.controlstate['lat']
self.mr.nextrun.vars['Longitude'] = self.controlstate['lon']
self.mr.nextrun.vars['Altitude'] = self.controlstate['alt']
#Handle the case of two variables being set to the same thing:
if self.controlstate['xvar'] == self.controlstate['yvar']:
raise RuntimeError('X and Y both are %s, cannot make sensible plot!' % (self.controlstate['xvar']))
#Make sure all position variables have their limits set correctly
#before model run so that we end up with the right generated
#grid
if not self.is_multi('x') and self.is_position('x'):
self.mr.nextrun.vars.lims[self.controlstate['xvar']] = self.controlstate['xbounds']
if not self.is_multi('y') and self.is_position('y'):
self.mr.nextrun.vars.lims[self.controlstate['yvar']] = self.controlstate['ybounds']
#Copy out the drivers from the controlstate (only copy those that are exposed via the model's __init__)
self.mr.nextrun.drivers['dt'] = self.controlstate['drivers']['dt']
#Now we determine from the plottype if we need to grid x and y
if self.plotProperty('gridxy'):
#Fault checks
if not self.is_position('x'): #vars dict starts only with position and time
raise RuntimeError('xvar %s is not a valid position variable!' % (self.controlstate['xvar']))
else:
#self.mr.nextrun.lims[self.controlstate['xvar']] = self.controlstate['xbounds']
self.mr.nextrun.vars.npts[self.controlstate['xvar']] = self.controlstate['xnpts']
self.mr.nextrun.set_x(self.controlstate['xvar'])
if not self.is_position('y'):
raise RuntimeError('yvar %s is not a valid position variable!' % (self.controlstate['yvar']))
else:
#self.mr.nextrun.lims[self.controlstate['yvar']] = self.controlstate['ybounds']
self.mr.nextrun.vars.npts[self.controlstate['yvar']] = self.controlstate['ynpts']
self.mr.nextrun.set_y(self.controlstate['yvar'])
else: #We do not need to grid data
#Check that at least one selected variable is a location
#Handle multiple variables on an axis
if self.is_multi('x') and self.is_position('x'):
self.controlstate['xvar']=self.controlstate['xvar'][0]
raise RuntimeError('Multiple plotting of position variables is not allowed!')
elif self.is_multi('y') and self.is_position('y'):
self.controlstate['yvar']=self.controlstate['yvar'][0]
raise RuntimeError('Multiple plotting of position variables is not allowed!')
elif not self.is_position('x') and not self.is_position('y'):
raise RuntimeError('%s and %s are both not valid position variables!' % (str(self.controlstate['xvar']),str(self.controlstate['yvar'])))
elif not self.is_multi('x') and self.is_position('x'): #It's scalar, so check if it's a position
#self.mr.nextrun.lims[self.controlstate['xvar']] = self.controlstate['xbounds']
self.mr.nextrun.vars.npts[self.controlstate['xvar']] = self.controlstate['xnpts']
self.mr.nextrun.set_x(self.controlstate['xvar'])
elif not self.is_multi('y') and self.is_position('y'): #It's scalar, so check if it's a position
#self.mr.nextrun.lims[self.controlstate['yvar']] = self.controlstate['ybounds']
self.mr.nextrun.vars.npts[self.controlstate['yvar']] = self.controlstate['ynpts']
self.mr.nextrun.set_y(self.controlstate['yvar'])
else:
raise RuntimeError('Nonsensical variables: xvar:%s\n yvar:%s\n' % (repr(self.controlstate['xvar']),repr(self.controlstate['yvar'])))
def refresh(self,force_full_refresh=False, force_autoscale=False):
"""
        Redraws what is on the plot. Triggered by the 'refreshnow' POST request.
        This is the big method of this class.
        The basic outline is that it checks which controlstate values have changed
        since the last time it was called, and determines based on that
        how to tell the PlotDataHandler to plot the user's desired image.
        Does not necessarily create a new model run. Tries to determine if
        one is needed from the controlstate differences since the last refresh.
"""
ffr = force_full_refresh
fauto = force_autoscale
        if self.controlstate['plottype'] == 'pcolor' and (self.controlstate.changed("xvar") or self.controlstate.changed("yvar")):
self.controlstate['run_model_on_refresh']=True
self.log.info("We are plotting a pcolor type plot, and x or y was changed, so we will need to re-run the model, since what is held constant has changed")
if self.controlstate.changed('plottype') or ffr:
#Determine if we need to rerun the model
oldplottype = self.pdh.plottypes[self.pdh.plottype]
newplottype = self.pdh.plottypes[self.controlstate['plottype']]
self.log.info("Plottype was changed since last refresh, from %s to %s." % (oldplottype,newplottype))
if oldplottype['gridxy'] != newplottype['gridxy']: #we are going from vectors to grids or visa-versa
self.controlstate['run_model_on_refresh']=True #Must force re-run
self.log.info("This change requires a %s re-run, since gridding scheme has changed" % (self.controlstate['modelname']))
self.pdh.plottype=self.controlstate['plottype']
self.refreshSelectOptions()
if self.controlstate.changed('datetime') or self.controlstate.changed('drivers') or ffr:
#Force model rerun
self.controlstate['run_model_on_refresh']=True
try:
self.mr.nextrun.drivers['dt'] = datetime.datetime(**self.controlstate['datetime'])
except:
#Capture the error for retrieval by the frontend failure callback
self.log.error("Badly formed time! Calling controlstate error function")
self.log.error( traceback.format_exc() )
self.controlstate.error("Badly formed time!: "+str(sys.exc_info()))
self.controlstate.restore_last_good()
raise #continue erroring
self.log.info("Datetime was changed since last refresh. Will rerun %s with datetime %s" % (self.controlstate['modelname'],
self.mr.nextrun.drivers['dt'].strftime('%c')))
if self.controlstate.changed('lat') or ffr:
if 'Latitude' not in [self.controlstate['xvar'],self.controlstate['yvar']]:
self.log.info("Now holding Latiude constant")
self.mr.nextrun.hold_constant('Latitude')
#We are holding latitude constant, so we will have to rerun the model
self.controlstate['run_model_on_refresh'] = True
if self.controlstate.changed('lon') or ffr:
if 'Longitude' not in [self.controlstate['xvar'],self.controlstate['yvar']]:
self.log.info("Now holding Longitude constant")
self.mr.nextrun.hold_constant('Longitude')
#We are holding longitude constant, so we will have to rerun the model
self.controlstate['run_model_on_refresh'] = True
if self.controlstate.changed('alt') or ffr:
if 'Altitude' not in [self.controlstate['xvar'],self.controlstate['yvar']]:
self.log.info("Now holding Altitude constant")
self.mr.nextrun.hold_constant('Altitude')
#We are holding altitude constant, so we will have to rerun the model
self.controlstate['run_model_on_refresh'] = True
#If position boundaries were changed
if any([self.is_position(coord) and self.controlstate.changed(coord+'bounds') for coord in ['x','y']]):
self.log.info("Position boundaries changed, will rerun model")
self.controlstate['run_model_on_refresh'] = True
if self.controlstate.changed('differencemode'):
self.controlstate['run_model_on_refresh'] = True
#---------------------------------------------------------------------------------------------------------------------------------------
#Actually prepare for a new run of the model
if self.controlstate['run_model_on_refresh'] or ffr:
self.log.info("Now preparing next model run, because controlstate variable run_model_on_refresh==True")
try: #Attempt to run the model
self.prepare_model_run()
except RuntimeError as e: #Prepare model run can throw quite a few possible runtime errors based on incorrect variables selection
self.log.error("Model preperation FAILED. Calling controlstate error function")
self.log.error( traceback.format_exc() )
self.controlstate.error("Prep for model call FAILED: "+str(e))
#Continue erroring
raise
#If we succeeded in preparing the model try to actually run it
try:
self.mr.nextrun.populate() #Trigger next model run
except:
#Capture the error for retrieval by the frontend failure callback
self.log.error("Model call FAILED. Calling controlstate error function")
self.log.error( traceback.format_exc() )
self.controlstate.error("Model Call FAILED: "+str(sys.exc_info()))
self.controlstate.restore_last_good()
#Continue erroring
raise
            #If driver lookup is disabled, propagate the existing drivers to the new run instead of looking up new ones
            propagate = not self.controlstate['driver_lookup']
self.mr(propagate_drivers=propagate) #Trigger storing just created model run as mr.runs[-1]
self.refreshModelRunOptions() #Reset the plotDataHandler, make sure all controlstate options that change with model run are set
self.refreshSelectOptions()
#---------------------------------------------------------------------------------------------------------------------------------------
if fauto:
self.log.info("Autoscaling because forced")
self.mr.runs[-1].autoscale_all_lims()
#Always grab the most current data
self.log.info("Now getting data for X=%s Y=%s and Z=%s via ModelRunner __getitem__" % (self.controlstate['xvar'],
self.controlstate['yvar'],self.controlstate['zvar']))
latlims = self.mr.runs[-1].vars.lims['Latitude']
lonlims = self.mr.runs[-1].vars.lims['Longitude']
altlims = self.mr.runs[-1].vars.lims['Altitude']
xdata,xlims,xunits,xdesc = self.mr[self.controlstate['xvar']] #returns data,lims, correctly handles list xvar
ydata,ylims,yunits,ydesc = self.mr[self.controlstate['yvar']] #returns data,lims, correctly handles list yvar
zdata,zlims,zunits,zdesc = self.mr[self.controlstate['zvar']] #returns data,lims, correctly handles list zvar
        #Reset the bounds and multiplotting, and turn off log scaling, if we have changed any variables or switched difference mode on or off
if self.controlstate.changed('xvar') or self.controlstate.changed('yvar') or \
self.controlstate.changed('zvar') or self.controlstate.changed('differencemode') or \
self.controlstate.changed('alt') or self.controlstate.changed('lat') or self.controlstate.changed('lon') or ffr:
self.log.info("A variable or position was changed since last refresh")
self.controlstate['xbounds'] = xlims
self.controlstate['ybounds'] = ylims
self.controlstate['zbounds'] = zlims
self.controlstate['xunits'] = xunits
self.controlstate['yunits'] = yunits
self.controlstate['zunits'] = zunits
self.controlstate['xdesc'] = xdesc
self.controlstate['ydesc'] = ydesc
self.controlstate['zdesc'] = zdesc
#Associate data in the data handler based on what variables are desired
if self.controlstate.changed('modelname') or self.controlstate.changed('xvar') or self.controlstate.changed('xbounds') or self.controlstate.changed('xlog') or self.controlstate['run_model_on_refresh'] or ffr:
xname = self.controlstate['xvar']
self.log.info("Associating x variable %s with plot data handler bounds %s, log %s" % (str(xname),
str(self.controlstate['xbounds']),str(self.controlstate['xlog'])))
self.pdh.associate_data('x',xdata,xname,self.controlstate['xbounds'],self.controlstate['xlog'],
multi=self.controlstate['xmulti'],units=xunits,description=xdesc)
if self.controlstate.changed('modelname') or self.controlstate.changed('yvar') or self.controlstate.changed('ybounds') or self.controlstate.changed('ylog') or self.controlstate['run_model_on_refresh'] or ffr:
yname = self.controlstate['yvar']
self.log.info("Associating y variable %s with plot data handler bounds %s, log %s" % (str(yname),
str(self.controlstate['ybounds']),str(self.controlstate['ylog'])))
self.pdh.associate_data('y',ydata,yname,self.controlstate['ybounds'],self.controlstate['ylog'],
multi=self.controlstate['ymulti'],units=yunits,description=ydesc)
if self.controlstate.changed('modelname') or self.controlstate.changed('zvar') or self.controlstate.changed('zbounds') or self.controlstate.changed('zlog') or self.controlstate['run_model_on_refresh'] or ffr:
zname = self.controlstate['zvar']
self.log.info("Associating z variable %s with plot data handler bounds %s, log %s" % (str(zname),
str(self.controlstate['zbounds']),str(self.controlstate['zlog'])))
self.pdh.associate_data('z',zdata,zname,self.controlstate['zbounds'],self.controlstate['zlog'],units=zunits,description=zdesc)
self.controlstate['descstr']=self.make_descstr()
#Actually make the plot
try:
self.pdh.plot()
except:
#Capture the error for retrieval by the frontend failure callback
self.log.error("PlotDataHandler Plotting FAILED. Calling controlstate error function")
self.log.error( traceback.format_exc() )
self.controlstate.error("Data plotting FAILED: "+str(sys.exc_info()))
#Continue erroring
raise
self.caption = self.make_caption()
#Make the description string showing what changed since the last plot
#Reinitialize the run_on_refresh setting
self.controlstate['run_model_on_refresh'] = False
def make_descstr(self):
"""Makes a string which shows how the this controlstate differs from the previous one"""
thestr = ''
thestr += datetime.datetime(**self.controlstate['datetime']).strftime('%m-%d-%Y %H:%M UT')
thestr += '\n'
for driver in self.controlstate['drivers']:
val = self.controlstate['drivers'][driver]
            if not isinstance(val,list) and not isinstance(val,dict) and driver != 'dt':
thestr += "%s: %s\n" % (driver,str(self.controlstate['drivers'][driver]))
held_positionvars = ['Altitude','Latitude','Longitude']
controlstate_positions = ['alt','lat','lon']
for coord in ['x','y']:
if self.is_position(coord):
i = held_positionvars.index(self.controlstate[coord+'var'])
held_positionvars.pop(i)
controlstate_positions.pop(i)
for i in range(len(held_positionvars)):
thestr+='%s: %.2f\n' % (held_positionvars[i],self.controlstate[controlstate_positions[i]])
thestr = thestr[:-1] # remove trailing newline
return thestr
def make_caption(self):
"""
Writes a caption fully describing the latest graph
"""
#Build a description of the plot
return self.pdh.caption()+'|'+str(self.mr.runs[-1])
def data_as_csv(self):
"""
Render the current plot's data as a CSV string
"""
coords = ['x','y'] if self.controlstate['plottype']=='line' else ['x','y','z']
vs = [self.controlstate[coord+'var'] for coord in coords]
data,header = self.mr.runs[-1].as_csv(vs)
return data,header
class UiHandler(object):
"""
A class to hold the state of the UI controls and settings.
The UiHandler processes requests from the browser (i.e. GET, SET, POST)
"""
#exposed = True
def __init__(self,amwo):
self.log = logging.getLogger(self.__class__.__name__)
self.amwo = amwo #Parent atmodweb object
self.controlstate = ControlStateManager()
def output_sanitize(self,indata):
"""
Turns output into something serializable
Cherrypy is pretty good about doing this itself. Basically all I handle right now is datetime
"""
if isinstance(indata,dict):
outdata = copy.deepcopy(indata)
for k in outdata:
outdata[k] = self.output_sanitize(outdata[k])
elif isinstance(indata,list):
outdata = copy.deepcopy(indata)
for k in range(len(outdata)):
outdata[k] = self.output_sanitize(outdata[k])
elif isinstance(indata,datetime.datetime):
outdata = indata.strftime('%Y-%m-%d %H:%M:%S')
else:
outdata = indata
return outdata
#@cherrypy.tools.accept(media='text/plain')
#@cherrypy.tools.json_out()
def GET(self, statevar, subfield=None):
"""
ReST GET request handler (i.e. browser asks backend for information and backend returns informations)
Returns JSONified dictionary.
The RESTful API here is all based on setting and getting values from the ControlState dictionary subclass.
INPUTS
------
statevar - string
Which key of self.controlstate will be retrieved with this request
subfield - string,optional
If self.controlstate[statevar] is a dictionary, then the value at self.controlstate[statevar][subfield] will
be retrieved if subfield is not None
RETURNS
-------
retjson - dict
A dictionary response to the request. Has a key of the input statevar, the value of which is the desired data.
Does NOT ever have a key of subfield.
"""
retjson = dict()
#Direct getting from ControlState
if statevar in self.controlstate and subfield is None:
retval = self.controlstate[statevar]
self.log.info('GET for statevar:%s returning %s' % (statevar,str(retval)))
elif statevar in self.controlstate and subfield is not None:
if hasattr(self.controlstate[statevar],'__getitem__'): #Must be some kind of dict like thing
retval = self.controlstate[statevar][subfield]
self.log.info('GET for statevar:%s, key:%s returning %s' % (statevar,subfield,str(retval)))
else: #Just try to do the thing UNSAFE
if hasattr(self.controlstate[statevar],subfield):
mymeth = getattr(self.controlstate[statevar],subfield)
retval = mymeth
self.log.warn("UNSAFE GET Eval %s = self.controlstate[%s].%s()" % (str(retval),statevar,str(subfield)))
elif statevar == 'modeldesc':
#Get the description of the last run model
            retval = {'modeldesc':self.amwo.syncher.mr.runs[-1].modeldesc} #UiHandler has no .mr of its own; reach through the parent object to the Synchronizer's ModelRunner
elif statevar == 'controlstate':
#Get the controlstate as an html table
retval=dict()
for key in self.controlstate:
if self.controlstate.changed(key):
retval[key]=self.controlstate.ashtml(key)
elif statevar == 'vars':
#Get all of the information about the variables as one package
retval = dict()
for prefix in ['x','y','z']:
for suffix in ['var','bounds','units','desc','log']:
retval[prefix+suffix] = dict.__getitem__(self.controlstate,prefix+suffix)
elif statevar == 'chartdata':
#Get all the data about the drivers needed for the d3 chart
retval = dict()
for driver in self.controlstate['drivers']:
retval[driver] = dict()
retval[driver]['data'] = self.controlstate['drivers'][driver]
#Now loop on all
for metadata_key in ['drivers_descriptions','drivers_units','drivers_ranges']:
if driver in self.controlstate[metadata_key]:
retval[driver][metadata_key.split('_')[-1]]=self.controlstate[metadata_key][driver]
else:
retval[driver][metadata_key.split('_')[-1]]=None
self.log.info("Chartdata is %s" % (str(retval)))
retjson[statevar]=self.output_sanitize(retval)
return retjson
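    # Illustrative request/response sketch for GET (how this handler is mounted, and
    # therefore the exact URL, is outside this excerpt, so treat the paths as hypothetical):
    #   GET <handler-url>?statevar=zvar                -> {"zvar": "Temperature"} (default controlstate)
    #   GET <handler-url>?statevar=drivers&subfield=dt -> {"drivers": "2000-06-21 12:00:00"}
    # Note that the response is always keyed by statevar, never by subfield.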
#@cherrypy.tools.accept(media='text/plain')
#@cherrypy.tools.json_out()
def POST(self, posttype=None):
"""
ReST POST request handler (i.e. browser tells backend information and backend does something based on that information)
Returns JSONified dictionary.
INPUTS
------
posttype - string
Which POST request to evaluate
RETURNS
-------
retjson - dict
A dictionary response to the request. Gets converted to json by cherrypy. For POST requests this
            is mostly an acknowledgement, because jQuery assumes that a request with no response has failed.
            At minimum it is a dictionary with one field, keyed to the input posttype, with a value of True
'uiready' - Starts controlstate syncing with CherryPy session
'refreshnow' - Calls the FakeCanvas method 'refresh' to process any enqued changes to the plot
'replotnow' - Calls the AtModWebObj method 'replot' to write the FakeCanvas matplotlib figure to a file
and update the 'lastplot' key in controlstate
'refreshselect' - Calls the FakeCanvas method refreshSelectOptions, which checks the controlstate x, y and z vars, and the plottype and sets
the appropriate possible choices of variables to plot for x, y and z
'refreshmodeloptions' - Updates the controlstate 'drivers' key from the last model run (ModelRun instance)
'debugreinit' - A 'panic'. Reinits the controlstate to its default values, and does all of the above as well.
"""
if posttype in self.controlstate:
retval = self.controlstate[posttype]
self.log.info(ansicolors.HEADER+'POST for posttype:%s returning %s' % (posttype,str(retval))+ansicolors.ENDC)
return {posttype:retval}
elif posttype == 'restart':
self.log.info(ansicolors.HEADER+"POST for posttype: restart THE BACKEND WILL NOW RESTART" +ansicolors.ENDC)
self.amwo.restart()
return {'restart':True}
elif posttype == 'nextplot':
self.controlstate.lastind = self.controlstate.lastind + 1 #lastind is a property. the setter will cause the controlstate to re-sync
self.log.info(ansicolors.HEADER+"POST for posttype: nextplot get plot at index %d" % (self.controlstate.lastind)+ansicolors.ENDC)
return {'nextplot':True,'plot':self.controlstate['thisplot'],'caption':self.controlstate['thiscaption'],
'ind':self.controlstate.lastind+len(self.controlstate.states)+1,'maxind':len(self.controlstate.states)}
elif posttype == 'prevplot':
self.controlstate.lastind = self.controlstate.lastind - 1 #lastind is a property. the setter will cause the controlstate to re-sync
self.log.info(ansicolors.HEADER+"POST for posttype: prevplot get plot at index %d" % (self.controlstate.lastind)+ansicolors.ENDC)
return {'prevplot':True,'plot':self.controlstate['thisplot'],'caption':self.controlstate['thiscaption'],
'ind':self.controlstate.lastind+len(self.controlstate.states)+1,'maxind':len(self.controlstate.states)}
elif posttype == 'uiready':
#Begin syncing the local controlstate with the session
self.log.info(ansicolors.HEADER+'POST for posttype:%s beginning controlstate sync' % (posttype)+ansicolors.ENDC)
self.controlstate.controlstate.sync = True
return {"uiready":True}
elif posttype == 'refreshnow':
self.amwo.syncher.refresh()
self.log.info(ansicolors.HEADER+'POST for posttype:%s successful refresh' % (posttype)+ansicolors.ENDC)
return {"refresh":True}
elif posttype == 'replotnow':
newfn,newcap = self.amwo.replot()
self.log.info(ansicolors.HEADER+'POST for posttype:%s successful replot: new file=%s ' % (posttype,newfn)+ansicolors.ENDC)
return {"src":newfn,"cap":newcap}
elif posttype == 'refreshselect':
self.amwo.syncher.refreshSelectOptions()
self.log.info(ansicolors.HEADER+'POST for posttype:%s successful refresh select options' % (posttype)+ansicolors.ENDC)
return {"refreshselect":True}
elif posttype == 'autoscale':
self.amwo.syncher.autoscale()
self.log.info(ansicolors.HEADER+'POST for posttype:%s successful refresh variable limits' % (posttype)+ansicolors.ENDC)
return {"autoscale":True}
elif posttype == 'refreshmodeloptions':
newfn = self.amwo.canvas.refreshModelRunOptions()
self.log.info(ansicolors.HEADER+'POST for posttype:%s successful refresh model run options' % (posttype)+ansicolors.ENDC)
return {"refreshmodeloptions":True}
elif posttype == 'gifmode':
self.controlstate['gif_mode'] = not self.controlstate['gif_mode']
f = None
if not self.controlstate['gif_mode']:
f = self.amwo.make_gif()
return {"gifmode":self.controlstate['gif_mode'],"file":f}
elif posttype == 'debugreinit':
#This is extreme measures. Expires the CherryPy session and tries to resync it with a good version of the controlstate
self.log.info("Reinitializing controlstate")
cherrypy.lib.sessions.expire()
self.controlstate.restore_last_good()
self.controlstate.controlstate.sync = True
self.log.info(ansicolors.HEADER+'POST for posttype:%s successfully reintialized local controlstate' % (posttype)+ansicolors.ENDC)
return {"debugreinit":True}
def input_sanitize(self,inval):
"""
Sanitize data that came from a browser PUT request. This method handles inputs that are lists (i.e. passed as JS arrays)
and inputs that are dicts (i.e. passed as JS objects).
It calls input_sanitize_single to process individual values.
"""
self.log.debug("Sanitizing %s" % (str(inval)))
#Sanitize input
if isinstance(inval,list):
outval = []
for v in inval:
#Recursion FTW
outval.append(self.input_sanitize(v))
elif isinstance(inval,dict):
outval = dict()
for key in inval:
#More recursion
outval[key] = self.input_sanitize(inval[key])
elif ',' in inval: #Maybe it's a string trying to represent a list
inval = inval.split(',')
#Now recurse with the list of strings (no commas) and keep the result
outval = self.input_sanitize(inval)
else:
outval = self.input_sanitize_single(inval)
return outval
def input_sanitize_single(self,val):
"""
Turns request data from PUT requests into something python knows what to do with. First it converts the strings from unicode
to ASCII, and then tries to guess what the string is intended to be by trying to turn it into an int, float, bool, or datetime.datetime
using the format: Y-m-d H:M:S. If all of these fail, it just returns the string.
"""
#Convert a unicode string returned with a put into a python
#datatype in a sensible way
#Check if it's a bool
#First try to turn the unicode into a normal string
# try:
# val = val.encode('ascii','ignore')
# except:
# pass
val = val.strip() #Remove any leading or trailing whitespace
#Make sure there's no spaces, parens, brackets or other nonsense
nonsense = ['(',')','[',']','{','}',';','/']
if any([ns in val for ns in nonsense]):
for char in nonsense:
val = val.replace(char,'')
#Check if it's just an integer
if val.isdigit(): #isdigit returns false if theres a .
val = int(val)
#Check if it's able to be turned into a float
elif '.' in val:
try:
val = float(val)
except ValueError:
pass #not a float; leave it as a string
#Check for bool
if val in ['true','True','on']:
val=True
elif val in ['false','False','off']:
val=False
#Try to convert it to a datetime
try:
val = datetime.datetime.strptime(val,'%Y-%m-%d %H:%M:%S')
except:
self.log.debug("Sanitizing: %s failed to convert %s to datetime\n" % (val,val))
pass
return val
#@cherrypy.tools.json_out()
#@cherrypy.tools.accept(media='text/plain')
def PUT(self,statevar=None,newval=None,subfield=None):
"""
ReST PUT request handler. Returns JSONified dictionary.
The RESTful API here is all based on setting and getting values from the ControlState dictionary subclass.
INPUTS
------
statevar - string
Which key of self.controlstate will be set with this request
newval - anything
What will be stored at self.controlstate[statevar]
subfield - string,optional
If self.controlstate[statevar] is a dictionary, then newval will be stored at self.controlstate[statevar][subfield] if
subfield is not None
RETURNS
-------
retjson - dict
A dictionary response to the request. Gets converted to json by cherrypy. For PUT requests this
is kind of a dummy response, because jQuery assumes that a PUT that doesn't respond has failed.
It is simply a dictionary with one field, keyed to the input statevar, and valued to "True"
"""
if newval is not None:
newval = self.input_sanitize(newval)
if subfield is None: #Top level put
self.log.info(ansicolors.OKBLUE+'PUT request for statevar:%s old value: %s, new value %s, type: %s ' % (str(statevar),
str(self.controlstate[statevar]),str(newval),str(type(newval)))+ansicolors.ENDC)
if statevar in self.controlstate:
self.controlstate[statevar] = newval
else:
raise RuntimeError('PUT request with invalid controlstate addressee %s, data: %s' % (str(statevar),str(newval)))
else:
self.log.info(ansicolors.OKBLUE+'PUT request for statevar:%s, subfield:%s, old value %s, new value %s, type: %s ' % (str(statevar),
str(subfield),str(self.controlstate[statevar][subfield]),str(newval),str(type(newval)))+ansicolors.ENDC)
if hasattr(self.controlstate[statevar],'__setitem__'): #Must be some kind of dict like thing
self.controlstate[statevar][subfield] = newval
#Have to explicitly trigger changed since we aren't explicitly
#setting controlstate['drivers'] TODO find a better way
self.controlstate.trigger_changed(statevar,subfield=subfield)
elif hasattr(self.controlstate[statevar],subfield):
setattr(self.controlstate[statevar],subfield,newval)
self.log.warn("UNSAFE PUT setattr(self.controlstate[%s],%s,%s)" % (statevar,str(subfield),str(newval)))
else:
raise RuntimeError('PUT request with invalid controlstate addressee %s.%s, data: %s' % (str(statevar),str(subfield),str(newval)))
#self.amwo.canvas.refresh()
#Return something to make jquery happy (a request that returns nothing has state "rejected")
return {statevar:"True"}
#def DELETE(self):
# cherrypy.session.pop('mystring', None)
class FakeCanvas(object):
"""
The FakeCanvas is the workhorse of the backend of AtModWeb. It's called a FakeCanvas because of the project this
was based off of, the AtModExplorer. This takes the place of the matplotlib canvas subclass, that
performed a similar function in atmodexplorer.
It is a matplotlib canvas in the sense that it has one matplotlib figure at self.fig, and an axes at self.ax. But
otherwise it's just a convenient way of organizing the code and has nothing to do with Matplotlib.
Its important parameters are the PlotDataHandler instance as self.pdh, and the ControlState instance at self.controlstate
Its parent, the AtModWebObj that ties the application together, is at self.atmo. It shares its controlstate with the UiHandler,
which handles requests.
"""
def __init__(self,atmo):
self.atmo = atmo #"parent" atmodwebobject
self.fig = pp.figure(figsize=(6,4),dpi=200)
self.caption = 'caption' # Caption for the figure, created by make_caption
self.ax = self.fig.add_subplot(111)
self.textobj = None
def apply_lipstick(self):
"""Called on each replot, allows cosmetic adjustment"""
#self.fig.subplots_adjust(left=0.05,bottom=0.05,top=.95,right=.95)
fs = 9
w = .5
lw = .3
lp = 0
pd = .5
if self.atmo.syncher.pdh.plottype=='pcolor':
mpl.artist.setp(self.ax.get_xmajorticklabels(),size=fs,rotation=30)
mpl.artist.setp(self.ax.get_ymajorticklabels(),size=fs)
#mpl.artist.setp(self.atmo.syncher.pdh.cb.ax.get_xmajorticklabels(),size=fs,rotation=45)
#Label is a text object
self.ax.xaxis.label.set_fontsize(fs)
self.ax.yaxis.label.set_fontsize(fs)
self.ax.xaxis.labelpad=lp
self.ax.yaxis.labelpad=lp
#Adjust tick size
self.ax.xaxis.set_tick_params(width=w,pad=pd)
self.ax.yaxis.set_tick_params(width=w,pad=pd)
#Colorbar Ticks
self.atmo.syncher.pdh.cb.ax.xaxis.set_tick_params(width=w,pad=pd+.5)
self.atmo.syncher.pdh.cb.ax.xaxis.label.set_fontsize(fs)
self.atmo.syncher.pdh.cb.ax.yaxis.set_tick_params(width=w,pad=pd+.5)
self.atmo.syncher.pdh.cb.outline.set_linewidth(w)
self.ax.grid(True,linewidth=.1)
#Adjust axes border size
for axis in ['top','bottom','left','right']:
self.ax.spines[axis].set_linewidth(lw)
#self.pdh.cb.spines[axis].set_linewidth(lw)
self.ax.title.set_fontsize(fs)
self.ax.title.set_fontweight('bold')
elif self.atmo.syncher.pdh.plottype=='map':
#Colorbar Ticks
self.ax.title.set_fontsize(fs)
self.ax.title.set_fontweight('bold')
mpl.artist.setp(self.atmo.syncher.pdh.cb.ax.get_xmajorticklabels(),size=fs,rotation=35)
self.atmo.syncher.pdh.cb.ax.xaxis.set_tick_params(width=w,pad=pd)
self.atmo.syncher.pdh.cb.ax.yaxis.set_tick_params(width=w,pad=pd)
self.atmo.syncher.pdh.cb.ax.xaxis.label.set_fontsize(fs)
self.atmo.syncher.pdh.cb.outline.set_linewidth(w)
#Adjust axes border size
for axis in ['top','bottom','left','right']:
self.ax.spines[axis].set_linewidth(lw)
elif self.atmo.syncher.pdh.plottype=='line':
self.ax.title.set_fontsize(fs)
self.ax.title.set_fontweight('bold')
if self.atmo.syncher.pdh.ax.get_legend() is not None:
mpl.artist.setp(self.atmo.syncher.pdh.ax.get_legend().get_texts(),size=fs)
mpl.artist.setp(self.atmo.syncher.pdh.ax.get_xmajorticklabels(),size=fs,rotation=35)
#Adjust axes border size
for axis in ['top','bottom','left','right']:
self.ax.spines[axis].set_linewidth(lw)
def text(self,*args,**kwargs):
"""Displays text on the figure in figure coordinates (0,0) is bottom left, (1,1) is top right"""
if self.textobj is not None:
self.textobj.remove()
self.textobj = self.fig.text(*args,**kwargs)
class AtModWebObj(object):
"""
The AtModWebObj class is a representation of a single user session of AtModWeb.
It includes all of the pieces required for a user with the AtModWeb webpage open in their browser
to generate plots and interact with the data.
"""
def __init__(self,parent,userid=None):
self.log = logging.getLogger(self.__class__.__name__)
self.parent = parent
self.userid = userid
self.time_created = datetime.datetime.now()
self.last_accessed = datetime.datetime.now()
self.gif_frames = []
self.n_max_plots = 20
self.n_total_plots = 0
#Start up the rest of the application
self.uihandler = UiHandler(self)
self.controlstate = self.uihandler.controlstate
self.canvas = FakeCanvas(self)
self.syncher = Synchronizer(self.canvas,self.uihandler)
self.syncher.refresh(force_full_refresh=True)
plots = glob.glob(os.path.join(self.parent.rootdir,self.parent.imgreldir,'amwo_%s_*.png' % (str(self.userid))))
if len(plots) > self.n_max_plots:
for oldplot in plots:
os.remove(oldplot) #Clean up after yourself on restart
self.plots = [] #List of all plots in the img dir
self.replot()
def replot(self):
"""
The last step in the creation of a new plot to be displayed in the frontend.
This function, when called, writes the FakeCanvas' matplotlib figure
to a file on the disk, the name of which is dependent on the current unix time.
The URL for the file (as a relative path) is then recorded in the controlstate,
and returned to the caller (usually the UiHandler, which then relays it to the frontend in
a HTTP response).
Does several other incidental labeling tasks, and also returns a 'caption' for the
graphic.
"""
#self.canvas.refresh(force_full_refresh=True)
#Name file with unix epoch and userid
relfn = os.path.join(self.parent.imgreldir,'amwo_%s_%d.png' % (str(self.userid),
int(time.mktime(datetime.datetime.now().timetuple()))))
absfn = os.path.join(self.parent.rootdir,relfn)
if self.controlstate['gif_mode']:
self.gif_frames.append(absfn)
self.canvas.text(.01,.94,self.controlstate['descstr'],
fontsize=8,verticalalignment='top',color='blue',
bbox=dict(facecolor='white', edgecolor='blue', boxstyle='round',alpha=.8))
self.canvas.fig.savefig(absfn,dpi=200)
#self.canvas.fig.clf()
#self.canvas.ax = self.canvas.fig.add_subplot(111)
#Generate caption
cap = self.syncher.caption
self.controlstate['thiscaption']=cap
self.controlstate['thisplot']=relfn
#Deal with the plot and caption history
self.plots.append(relfn) #Register the new plot so the history can be pruned below
while len(self.plots) > self.n_max_plots:
tobedeleted = self.plots.pop(0)
os.remove(os.path.join(self.parent.rootdir,tobedeleted))
self.log.info("REMOVED old plot %s" % (tobedeleted))
self.log.info('Replotted to %s' % (absfn))
#Store the controlstate that was used to make the plot
self.controlstate() # store the last controlstate as states[-1]
self.n_total_plots += 1
return relfn, cap
def make_gif(self,gif='out.gif',delay=20,delete_imgs=False):
"""
Converts the png frames accumulated in self.gif_frames, in order, into an animated gif using ImageMagick
gif - gif file to write to (inside the image directory)
delay - time in milliseconds each frame will display for
delete_imgs - currently unused; the temporary frame copies are always removed
"""
imgs = self.gif_frames
tempdir = os.path.join(self.parent.rootdir,self.parent.imgreldir)
#if not is_ImageMagick_Installed():
# raise RuntimeError("ImageMagick appears to not be installed! If on Ubuntu, try sudo apt-get install imagemagick\n")
self.log.debug("Now beginning to copy files to temporary frames for gif. Temporary directory is %s...\n" % (tempdir))
paddingcode = '%.'+str(len(str(len(imgs))))+'d' #imagemagick needs zero padded frame numbering
imgext = os.path.splitext(imgs[0])[-1]
tmpimgs = []
for i,img in enumerate(imgs):
tmpimg = os.path.join(tempdir,"img2gif_frame_"+paddingcode % (i+1)+imgext)
shutil.copy(img,tmpimg)
tmpimgs.append(tmpimg)
imcall = "convert -delay %d -loop 0 %s %s" % (delay,os.path.join(tempdir,'img2gif_frame_*'+imgext),os.path.join(tempdir,gif))
self.log.debug("Calling ImageMagick to convert frames to gif...")
self.log.debug("Call is %s\n" % (imcall))
subprocess.check_call(imcall,shell=True)
self.log.debug("Cleaning up temp files...")
for tmpimg in tmpimgs:
os.remove(tmpimg)
self.gif_frames = []
return os.path.join(self.parent.imgreldir,gif)
#if open_gif:
# subprocess.check_call("xdg-open %s" % (gif))
def restart(self):
"""
A full 'hard' restart of the backend. Destroys and recreates all of
the instances of the AtModWeb components (FakeCanvas, UiHandler, Synchronizer)
for this single user. Called if UiHandler receives {posttype:'restart'} as a POST request
"""
#A full scale panic restart
#Just reinitialize all the things
self.log.warn("---RESTARTING THE BACKEND---")
#cherrypy.lib.sessions.expire()
self.uihandler = UiHandler(self)
self.controlstate = self.uihandler.controlstate
self.canvas = FakeCanvas(self)
self.syncher = Synchronizer(self.canvas,self.uihandler)
self.syncher.refresh(force_full_refresh=True)
self.replot()
class UiDispatcher(object):
"""
Makes sure that requests from a particular user's session
get dispatched to the proper instances of UiHandler and AtModWebObj
that contains their plot history. Helps make AtModWeb multiuser.
a.k.a. dispatches requests to the appropriate uihandler as
specified by the userid cookie.
"""
exposed = True
def __init__(self,muamwo):
#This needs to be done here (before the UiDispatcher is created) so that CherryPy
#knows that tools.auth is a thing before it's used in UiDispatcher
self.muamwo = muamwo
def get_uihandler(self):
return self.muamwo.get_user_amwo().uihandler
def get_amwo(self):
return self.muamwo.get_user_amwo()
@cherrypy.tools.accept(media='text/plain')
@cherrypy.tools.json_out()
def GET(self, statevar, subfield=None):
if statevar == 'username':
uid = self.muamwo.get_userid()
if uid is not None:
return {'username':self.muamwo._usernames[uid]}
else:
return {'username':''}
else:
return self.get_uihandler().GET(statevar=statevar,subfield=subfield)
@cherrypy.tools.json_out()
@cherrypy.tools.accept(media='text/plain')
def PUT(self,statevar=None,newval=None,subfield=None):
return self.get_uihandler().PUT(statevar=statevar,newval=newval,subfield=subfield)
@cherrypy.tools.accept(media='text/plain')
@cherrypy.tools.json_out()
def POST(self, posttype=None):
if 'authenticate_' in posttype:
un = posttype.split('authenticate_')[-1] # Anything after is username
userid = self.muamwo.newuserid()
self.muamwo._usernames[userid] = un
respcookie = cherrypy.response.cookie
respcookie['userid'] = userid
respcookie['userid']['max-age']=3630
return {posttype:'true'}
elif 'kill_' in posttype:
uid = posttype.split('kill_')[-1]
uid = str(uid)
if uid in self.muamwo._usernames:
self.muamwo.kill(userids=[uid])
else:
self.log.error("Recieved kill POST for nonexistant userid %d" % (uid))
elif posttype == 'logout':
self.muamwo.logout()
return {'logout':'true'}
else: #Push on to appropriate handler
return self.get_uihandler().POST(posttype=posttype)
class MultiUserAtModWebObj(object):
""" Thin class to spin up AtModWebObj instances when a request comes in from a new user"""
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
self._amwo = dict() # AtModWeb instances
self._usernames = dict()
#Application Location
self.uihandler = UiDispatcher(self)
self.rootdir = os.environ['ATMODWEB_ROOT_DIR']
self.imgreldir = 'www'
self.docreldir = 'docs'
@cherrypy.expose
def restart(self):
self.get_user_amwo().restart()
return """<html>Restarting done. </html>"""
@cherrypy.expose
def currentplot(self):
"""
A link to the currentplot with the proper response headers to trigger a download
"""
url = self.get_user_amwo().controlstate['thisplot']
return cherrypy.lib.static.serve_download(os.path.join(self.rootdir,url))
@cherrypy.expose
def console(self):
uid = self.get_userid()
#Organize the parameters for each AtModWeb as a table row
th = OrderedDict()
th['created'] = 'Time Created'
th['accessed'] = 'Last Accessed'
th['username'] = 'Username'
th['userid'] = 'User ID Number'
th['nplots'] = 'Number of Plots Created'
th['kill'] = 'Kill Session'
tr = copy.deepcopy(th) #OrderedDict for table rows
retstr = "<html><body>"
#Handle the kill buttons
retstr += """
<link href="www/atmodweb.css" rel="stylesheet" type="text/css">
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
<script>
$(document).ready(function(){
$('.killer').on("click", function (e) {
var id = $(e.target).attr('name');
console.log('Killing '+String(id))
var doingthething = $.ajax({url: "/uihandler",data: {"posttype":"kill_"+String(id)},type: "POST",dataType:"json",
success : function (json) {
$(e.target).text('KILLED')
console.log('Kill success')
}
})
});
});
</script>
"""
retstr += '<div ID="locationpanel" class="panel">'
retstr += '<h1>Admin Interface for AtModWeb</h1>'
retstr += '<p>If you are not the administrator of this application, you should not be here.</p>'
if uid is not None and self._usernames[uid].lower() == 'liam':
retstr += "<h1> Currently running amwo instances: </h1>"
retstr += "<table>"
#Add headers to the table
retstr += "<tr>"
for field in th:
retstr += "<th>%s</th>" % (str(th[field]))
retstr += "</tr>\n"
for key in self._amwo:
tr['username'] = str(self._usernames[key]) if key in self._usernames else 'NO-USERNAME'
tr['created'] = self._amwo[key].time_created.strftime('%c')
tr['accessed'] = self._amwo[key].last_accessed.strftime("%c")
tr['userid'] = '<strong>%s</strong>' % str(key) # Make the username stand out
tr['nplots'] = str(self._amwo[key].n_total_plots)
tr['kill'] = """<button ID='kill_%s' class='killer' name='%s' title="Kill this user's session">KILL</button>""" % (key,
key)
#Add a row to the table
retstr += "<tr>"
for field in th:
retstr += "<td class='%s'>%s</td>" % (str(key),str(tr[field]))
retstr += "</tr>\n"
retstr += '</table>'
retstr += '</div>'
retstr += "</body></html>"
return retstr
@cherrypy.expose
def data(self):
cherrypy.response.headers['Content-Type']= 'text/csv'
uid = self.get_userid()
data,header = self.get_user_amwo().syncher.data_as_csv()
return header+'\n'+data
def kill(self,userids=None):
"""Kill all specified instance or if userids is not kills any instances that haven't been touched in an hour"""
if userids is None:
userids = self._amwo.keys()
for userid in userids:
if (datetime.datetime.now() - self._amwo[userid].last_accessed).total_seconds() >= 3600:
self.log.warn('Session is too old and will be killed for %s: uid %s' % (self._usernames[userid],str(userid)))
del(self._amwo[userid])
del(self._usernames[userid])
gc.collect()
else:
for userid in userids:
self.log.warn('Forced to end session for %s: uid %s' % (self._usernames[userid],str(userid)))
del(self._amwo[userid])
del(self._usernames[userid])
gc.collect()
#Authorization tool
#This is called whenever a request comes in via a CherryPy Tool
def check_auth(self,*args, **kwargs):
"""A tool that looks for a userid cookie, and makes sure that the cookie has an entry in the _usernames"""
reqcookie = cherrypy.request.cookie
if 'userid' not in reqcookie:
#No userid set
userid = self.newuserid() #Create a new userid and assign it to the user
self._usernames[userid] = '--pending--'
respcookie = cherrypy.response.cookie
respcookie['userid'] = userid
respcookie['userid']['max-age']=3630
raise cherrypy.HTTPRedirect("/login")
else:
#No username registered to userid
userid = reqcookie['userid'].value
if userid not in self._usernames:
self._usernames[userid] = '--pending--'
raise cherrypy.HTTPRedirect("/login")
def logout(self):
uid = self.get_userid()
del(self._usernames[uid])
del(self._amwo[uid])
respcookie = cherrypy.response.cookie
respcookie['userid']['max-age']=0
respcookie['userid']['expires']=time.strftime("%a, %d-%b-%Y %T GMT", time.gmtime(time.time()))
gc.collect()
def newuserid(self):
return str(random.randint(0,2**31))
def get_userid(self):
reqcookie = cherrypy.request.cookie
#Safety checks
if 'userid' not in reqcookie:
return None
userid = reqcookie['userid'].value
if userid not in self._usernames or self._usernames[userid]=='--pending--':
return None
#self.log.info("Request sent to AMWO with userid cookie %s, method %s" % (str(userid),str(cherrypy.request.method)))
if userid not in self._amwo:
self._amwo[userid] = AtModWebObj(parent=self,userid=userid)
self.log.info("Spun up new AMWO instance with userid %s, there are now %d instances running" % (str(userid),len(self._amwo.keys())))
#self.log.debug("Username for id %s is %s" % (str(userid),str(self._usernames[userid])))
return userid
def get_user_amwo(self):
userid = self.get_userid()
if userid is not None:
#Update the last accessed time
self._amwo[userid].last_accessed = datetime.datetime.now()
return self._amwo[userid]
else:
raise cherrypy.HTTPRedirect('/login')
if __name__ == '__main__':
webapp = MultiUserAtModWebObj()
cherrypy.tools.auth = cherrypy.Tool('before_handler',webapp.check_auth)
conf = {
'/': {
'tools.sessions.on': True,
#'tools.sessions.storage_type':"memcached",
'tools.sessions.locking':'implicit',
'tools.auth.on': True
},
'/index': {
'tools.staticfile.on':True,
'tools.staticfile.filename': os.path.join(os.path.abspath(webapp.rootdir),'www','atmodweb.html')
},
'/login': {
'tools.staticfile.on':True,
'tools.staticfile.filename': os.path.join(os.path.abspath(webapp.rootdir),'www','login.html')
},
'/uihandler': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')],
},
'/www': {
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.path.abspath(webapp.rootdir),'www'),
'tools.expires.on' : True,
'tools.expires.secs' : 3600
},
'/docs': {
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.path.abspath(webapp.rootdir),'doc','build'),
'tools.expires.on' : True,
'tools.expires.secs' : 3600
},
'/favicon.ico': {
'tools.staticfile.on':True,
'tools.staticfile.filename':os.path.join(os.path.abspath(webapp.rootdir),"www","favicon.ico")
}
}
#Optional password protection by setting some environment variables
if 'CHERRYPY_USER' in os.environ and 'CHERRYPY_PWD' in os.environ:
USERS = {os.environ['CHERRYPY_USER']:os.environ['CHERRYPY_PWD']}
conf['/']['tools.auth_digest.on']=True
conf['/']['tools.auth_digest.realm']='localhost'
conf['/']['tools.auth_digest.get_ha1']=auth_digest.get_ha1_dict_plain(USERS)
conf['/']['tools.auth_digest.key']='b565d27146791cfc'
cherrypy.config.update({'server.socket_host':os.getenv('CHERRYPY_IP'),'server.socket_port': 8080})
cherrypy.config.update({'log.screen':False})
cherrypy.log.screen = False
cherrypy.log.access_log.propagate = False
cherrypy.tree.mount(webapp, '/',conf)
cherrypy.engine.start()
cherrypy.engine.block()
| gpl-3.0 |
iut-ibk/DynaMind-Sewer | scripts/Sewer/clustering.py | 1 | 2985 | # -*- coding: utf-8 -*-
"""
@file
@author Chrisitan Urich <[email protected]>
@version 1.0
@section LICENSE
This file is part of DynaMind
Copyright (C) 2012 Christian Urich
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from pydynamind import *
from pydmtoolbox import *
import networkx
import numpy, matplotlib
import matplotlib.pylab
import community #python-louvain package; provides best_partition used below
from scipy.cluster import hierarchy
from scipy.spatial import distance
class Clustering(Module):
def __init__(self):
Module.__init__(self)
self.conduits = View("CONDUIT", EDGE, READ)
viewvector = []
viewvector.append(self.conduits)
self.addData("Sewer", viewvector)
def run(self):
try:
g = networkx.Graph()
sewer = self.getData("Sewer")
CostsTotal = 0
LengthTot = 0
names = sewer.getNamesOfComponentsInView(self.conduits)
pointnamelist = []
for nc in names:
c = sewer.getEdge(nc)
startNode = c.getStartpointName()
endNode = c.getEndpointName()
if startNode not in pointnamelist:
pointnamelist.append(startNode)
if endNode not in pointnamelist:
pointnamelist.append(endNode)
g.add_edge(pointnamelist.index(startNode), pointnamelist.index(endNode))
path_length=networkx.all_pairs_shortest_path_length(g)
n = len(g.nodes())
distances=numpy.zeros((n,n))
for u,p in path_length.iteritems():
for v,d in p.iteritems():
distances[int(u)][int(v)] = d #node labels are already 0-based indices into pointnamelist
sd = distance.squareform(distances)
hier = hierarchy.average(sd)
hierarchy.dendrogram(hier)
matplotlib.pylab.savefig("tree.png",format="png")
partition = community.best_partition(g)
print partition
for i in set(partition.values()):
print "Community", i
members = [nodes for nodes in partition.keys() if partition[nodes] == i]
print members
except Exception, e:
print e
print "Unexpected error:"
| gpl-2.0 |
udp3f/gemini | docs/conf.py | 4 | 8633 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# from mpld3
def get_version():
"""Get the version info from the mpld3 package without importing it"""
import ast
with open(os.path.join(os.path.abspath('../'), "gemini", "version.py"), "r") as init_file:
module = ast.parse(init_file.read())
version = (ast.literal_eval(node.value) for node in ast.walk(module)
if isinstance(node, ast.Assign)
and node.targets[0].id == "__version__")
try:
return next(version)
except StopIteration:
raise ValueError("version could not be located")
version = get_version()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gemini'
copyright = u'2012,2013,2014,2015'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = version
# The full version, including alpha/beta/rc tags.
release = version
print version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = project + " v" + release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'gemini.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'gemini.png'
html_style = 'labibi.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebar-intro.html', 'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gemini-docs'
# Google analytics
# googleanalytics_id = "UA-24167610-15"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'gemini.tex', u'gemini Documentation', u'Quinlan lab @ UVa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gemini', u'gemini Documentation', [u'UVa'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
MOCK_MODULES = ['numpy', 'matplotlib', 'matplotlib.pyplot',
'inheritance',
'matplotlib.sphinxext', 'matplotlib.sphinxext.plot_directive']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
| mit |
gojira/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 46 | 6682 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
UN-DESA-Modelling/Electricity_Consumption_Surveys | ipc_microsim_tool/ipc_microsim_tool.py | 1 | 32419 | # -*- coding: utf-8 -*-
"""
------------------------
IPC microsimulation tool
for SDG-based planning
------------------------
Version 0
January 2016
Written by Rafael Guerreiro Osorio
Instituto de Pesquisa Econômica Aplicada - www.ipea.gov.br
International Policy Centre for Inclusive Growth - www.ipc-undp.org
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import patsy
import random
import statsmodels.formula.api as smf
class IPCmicrosimTool(object):
"""
#TODO: docstring
"""
class population(object):
"""
Population projection object
On this version, the population projection comes from
United Nations
Department of Economic and Social Affairs
Population Division (2015)
World Population Prospects: The 2015 Revision, DVD Edition.
(downloaded November 2015)
This was imported by read_WPP.py to two CSV files,
one with past estimates, and other with the projections
WPP-ESTIMATES-1950-2015.tab.txt
WPP-PROJECTIONS-2015-2100.tab.txt
"""
def __init__(self):
"""
projections is a pandas dataframe
indexed by year, with a column named year containing
years; second column is the total population, next
columns are projections for sex, age, regions, to
make simulations by groups (in the future...)
"""
self.projection = None
self.country = None
self.description = None
def get_WPP_countries(self, begins=None):
"""
get a list of country or region names
as in the WPP dataset
initial - first letters
"""
estimates = pd.read_csv('WPP-ESTIMATES-1950-2015.tab.txt',
sep='\t', index_col=None, na_values='')
countries = estimates.geoname.values
if not begins is None:
begins = begins.capitalize()
countries = [country for country in countries if
country[0:len(begins)] == begins]
return countries
def get_WPP_projection(self, country, variant):
"""
country or region name as in the WPP dataset
variants available in the WPP dataset:
0 'Low variant'
1 'Medium variant'
2 'High variant'
3 'Constant-fertility'
4 'Instant-replacement'
5 'Zero-migration'
6 'Constant-mortality'
7 'No change'
"""
variants = ('Low variant' , 'Medium variant',
'High variant' , 'Constant-fertility',
'Instant-replacement', 'Zero-migration',
'Constant-mortality', 'No change')
try:
int(variant)
variant = variants[variant]
except:
if str(variant).isalnum():
raise IndexError(variant)
if not variant in variants:
raise ValueError(variant)
estimates = pd.read_csv('WPP-ESTIMATES-1950-2015.tab.txt',
sep='\t', index_col=None, na_values='')
res1 = estimates[estimates.geoname == country]
if len(res1) == 0:
#boolean filtering never raises, so check for an empty result instead
raise ValueError(country)
self.country = country
self.description = 'WPP 2015 - {}'.format(variant)
projections = pd.read_csv('WPP-PROJECTIONS-2015-2100.tab.txt',
sep='\t', index_col=None, na_values='')
res2 = projections[(projections.geoname == country) &
(projections.variant == variant)]
del(estimates, projections)
todrop = ['index', 'variant', 'geoname', 'geocode']
# TODO: get period from data
res1.drop(todrop, axis=1, inplace=True)
res1 = res1.T[:-1]
res1.columns = [country]
res1.index = range(1950, 2015)
res2.drop(todrop, axis=1, inplace=True)
res2 = res2.T
res2.columns = [country]
res2.index = range(2015, 2101)
res = pd.concat((res1, res2))
self.projection = res
return '{} - {}'.format(country, variant)
class resource_access(object):
"""
at the moment this is very basic
"""
def __init__(self):
"""
RAM is a pandas dataframe
indexed by year, with a column named year containing
years; second column is the access rate, next
columns are projections for sex, age, regions, to
make simulations by groups (in the future...)
"""
self.RAM = None
self.period = None
self.description = None
class microsim(object):
"""
Microsimulation object
"""
def __init__(self, dataframe):
if len(dataframe) == 0 and not isinstance(dataframe, pd.DataFrame):
raise Exception("Needs a populated pandas dataframe")
self.dataset = dataframe[:]
self.results = {}
self.seedvars = list(dataframe.columns)
self.cursim = None
def add_results(self, data, name='newvar'):
"""
add results generated with microsim methods
to the results dic - such as:
pov = ms.poverty('inc', 'wgt', [1.9, 3.1, 5, 10])
ms.add_results(pov)
if result is a Series a name is needed, otherwise
will create column with name 'newvar' and overwrite
if already a column thus named
"""
if isinstance(data, pd.Series):
self.results[self.cursim]['dataset'][name] = data
elif isinstance(data, pd.DataFrame):
self.results[self.cursim]['dataset'] = pd.concat(
[self.results[self.cursim]['dataset'], data], axis=1)
else:
raise TypeError(data)
def elast_calc(self, key, Y, X, P, stub='', parts=100):
"""
Add elasticities using log-log quantile regressions
number of elasticities will be that of hypothetical delimiters
in parts i.e parts-1
key - household key
Y - dependent variable - resource consumption
X - independent variable - income
P - household population weights
stub - sufix to name variables containing quantiles
and elasticities
parts - number of parts
"""
dt = self.dataset
quantstub = 'quant' + stub
elaststub = 'elast' + stub
print '\nElasticity calculator started - please be patient'
# take the logs of Y and X
dt['__lnY'] = np.log(dt[Y])
dt['__lnX'] = np.log(dt[X])
# log of 0 is -infinite, replace with missing (NaN)
dt['__lnY'][dt[Y] == 0] = np.NaN
dt['__lnX'][dt[X] == 0] = np.NaN
# rescale and round weights to inform replication
dt['__' + P] = dt[P]/dt[P].min()
dt['__rdwgt'] = dt['__' + P].round()
# define quantiles based on parts and mark
dt.sort(Y, inplace=True)
dt[quantstub] = (dt['__' + P].cumsum() /
dt['__' + P].sum() *
parts).astype(int) / float(parts)
dt.sort(key, inplace=True)
# the quantile of the regression, can't be 0 or 1
# unique() is sorted as dt, get the smallest non zero quantile
# and the larger < 1
quantiles = dt[quantstub].unique()
quantiles.sort()
quantiles = quantiles[1:-1]
dt[quantstub][dt[quantstub] == 0] = quantiles[0]
dt[quantstub][dt[quantstub] == 1] = quantiles[-1]
# dataframe with replications
print 'Replicating observations, {} to {}...'.format(
dt['__rdwgt'].count(), int(dt['__rdwgt'].sum()))
lnY, lnX = pd.Series(), pd.Series()
for i in xrange(len(dt)):
lnY = lnY.append(pd.Series((dt['__lnY'][i],) *
int(dt['__rdwgt'][i])))
lnX = lnX.append(pd.Series((dt['__lnX'][i],) *
int(dt['__rdwgt'][i])))
estdt = pd.DataFrame()
estdt['lnY'] = lnY
estdt['lnX'] = lnX
del lnY, lnX
# calculate elasticities
print 'Fitting models...'
model = smf.quantreg('lnY ~ lnX', estdt)
elastseries = ()
#elasterrors = ()
print 'Quantile\telasticity\tse_elast\tintercept\tse_intercept'
for quantile in quantiles:
elast = model.fit(quantile)
elastseries += (elast.params[1],)
print '{}\t{:8.6f}\t{:8.6f}\t{:8.6f}\t{:8.6f}'.format(
quantile, elast.params[1], elast.bse[1], elast.params[0],
elast.bse[0],)
elastdt = pd.DataFrame()
elastdt[quantstub] = quantiles
elastdt[elaststub] = elastseries
# add elasticities and clean dataset
todrop = [var for var in dt.keys() if '__' in var]
self.dataset = pd.merge(dt, elastdt, on=quantstub)
self.dataset.sort(key, inplace=True)
self.dataset.reset_index(drop=True, inplace=True)
self.dataset.drop(todrop, axis=1, inplace=True)
self.seedvars += [quantstub, elaststub]
def __reset__(self):
"""
keep the seed variables and drop all others from the dataset
"""
todrop = [col for col in list(self.dataset.columns) if
col not in self.seedvars]
self.dataset.drop(todrop, axis=1, inplace=True)
def simulate(self, name, period, X, Y, P, key):
"""
name - a name for the simulation
period - tuple base year, end year eg. (2010,2030)
X - income tuple (variable name, stub,
'random'/'order', growth object)
Y - [(resourcevar, stub, elasticityvar)]
P - weight variable
"""
# resets the dataset, results of previous simulations will
# remain in results
self.__reset__()
self.cursim = name
if name in self.results.keys():
rettext = 'Simulation {} results overwritten'.format(name)
del(self.results[name])
else:
rettext = 'Simulation {} results written'.format(name)
self.results[name] = {'name': name}
self.results[name]['period'] = period
self.results[name]['income'] = (X[0], X[1])
self.results[name]['growth'] = X[3]
for y in range(len(Y)):
self.results[name]['resvar{}'.format(y)] = (Y[y][0], Y[y][1])
self.results[name]['reselast{}'.format(y)] = Y[y][2]
self.results[name]['dataset'] = pd.DataFrame({'year': range(
period[0], period[1] + 1)}, range(period[0], period[1] + 1))
# first year of the period is the base year
# a duplicate is generated as stubbaseyear
dt = self.dataset
dt[X[1] + str(period[0])] = dt[X[0]]
for y in Y:
dt[y[1] + str(period[0])] = dt[y[0]]
# a list to register the growth pattern
# 'none' for base year
grwtpatt = ['none']
# simulations begin
order = 0
for year in xrange(period[0] + 1, period[1] + 1):
# previous income distribution - base year
prvinc = X[1] + str(year - 1)
# sort by previous income distribution
# repeating allows income mobility
sorted_dt = dt.sort(prvinc)
# partition and tag cases by income quantiles
# each part will receive a growth rate
# TODO: make sure there is no variable named as growth.key
# in the microsim dataset
sorted_dt[X[3].key] = (sorted_dt[P].cumsum() /
sorted_dt[P].sum() *
X[3].parts).astype(int)
sorted_dt[X[3].key][sorted_dt[X[3].key]
== X[3].parts] = X[3].parts - 1
dt[X[3].key] = sorted_dt[X[3].key]
del sorted_dt
# get and distribute growth rates
if X[2] == 'random':
# choose a column from growth.dataset, excluding
# key from choice
grwtrates = random.choice([col for col in
X[3].dataset.columns if
col != X[3].key])
elif X[2] == 'order':
# choose a column as they appear on the dataset
# left to right after key; if there are less columns
# than periods, start over
orlst = [col for col in X[3].dataset.columns if
col != X[3].key]
grwtrates = orlst[order]
order += 1
if order == len(orlst):
order = 0
elif X[2] in X[3].dataset.columns:
# a column name was passed - this specific
# single pattern will be repeated - same as
# choosing ordered with just one column
grwtrates = X[2]
else:
raise ValueError(X[2])
# register the name of the growth pattern (column name)
# this goes to results.dataset
grwtpatt += [grwtrates]
# prepare a dataset with the growth pattern and the key
# and merge it with the simulation dataset distributing
# the growth rates by income quantiles
tomerge = X[3].dataset[[X[3].key, grwtrates]]
self.dataset = pd.merge(dt, tomerge, on=X[3].key)
self.dataset.sort(key, inplace=True)
self.dataset.reset_index(drop=True, inplace=True)
dt = self.dataset
# THIS IS IMPORTANT: growth rates are expected as Xt1/Xt0 - 1, so add 1.0 to get growth factors
dt[grwtrates] = dt[grwtrates] + 1.0
# new income variable
dt[X[1] + str(year)] = dt[prvinc] * dt[grwtrates]
# new resource variables
for y in Y:
dt[y[1] + str(year)] = dt[y[1] + str(year - 1)] * \
(dt[y[2]] * (dt[grwtrates] - 1.0) + 1.0)
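# e.g. with an elasticity of 0.5 and 10% income growth (grwtrates = 1.10),
# the factor is 0.5 * 0.10 + 1.0 = 1.05, so consumption grows by 5%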
self.dataset.drop([X[3].key, grwtrates], axis=1, inplace=True)
# simulation is over, besides information about the
# simulation, means are stored in results, and also
# the name of the growth pattern for income
self.results[name]['dataset']['grwtpatt'] = grwtpatt
col = self.results[name]['income'][1]
self.results[name]['dataset']['mean_{}'.format(col)] = self.mean(
X[1], P)
for y in range(len(Y)):
col = self.results[name]['resvar{}'.format(y)][1]
self.results[name]['dataset']['mean_{}'.format(col)] = \
self.mean(Y[y][1], P, nozero=True)
return rettext
def totaldemand(self, stub, pop, ram, correct=1, unit=1e6):
period = self.results[self.cursim]['period']
totdem = pd.Series(0.0, range(period[0], period[1] + 1))
resmean = 'mean_{}'.format(stub)
# TODO: improve this - it assumes the survey means are monthly and
# annualizes them (factor 12); the indexing of pop and ram below
# should also be revisited
cmp1 = pop.projection.icol(0).loc[period[0]:period[1]]
cmp2 = ram.RAM.icol(0)
cmp3 = self.results[self.cursim]['dataset'][resmean]
totdem = (cmp1 * cmp2 * cmp3 * 12 * correct) / unit
return totdem
def mean(self, stub, weight, nozero=False):
if nozero:
rescale = True
else:
rescale = False
period = self.results[self.cursim]['period']
mean = pd.Series(0.0, range(period[0], period[1] + 1))
for year in xrange(period[0], period[1] + 1):
mean[year] = ((self.dataset['{}{}'.format(stub, year)] *
self.dataset[weight]).sum() /
self.dataset[weight].sum())
if rescale:
zeropop = self.dataset[weight][self.dataset[
'{}{}'.format(stub, year)] == 0].sum()
poptot = self.dataset[weight].sum() - zeropop
mean[year] = (mean[year] * self.dataset[weight].sum() /
poptot)
return mean
def variance(self, stub, weight):
period = self.results[self.cursim]['period']
means = self.mean(stub, weight)
variance = pd.Series(0.0, range(period[0], period[1] + 1))
for year in xrange(period[0], period[1] + 1):
variance[year] = (((self.dataset['{}{}'.format(stub, year)] -
means[year]) ** 2
* self.dataset[weight]).sum() /
(self.dataset[weight].sum() - 1))
return variance
def inequality_ge(self, stub, weight, theta=1.0, nozero=False):
period = self.results[self.cursim]['period']
if theta <= 0 or theta == 1:
nozero = True
means = self.mean(stub, weight, nozero=nozero)
ge = pd.Series(0.0, range(period[0], period[1] + 1))
for year in xrange(period[0], period[1] + 1):
if nozero:
zeropop = self.dataset[weight][self.dataset[
'{}{}'.format(stub, year)] == 0].sum()
poptot = self.dataset[weight].sum() - zeropop
else:
poptot = self.dataset[weight].sum()
self.dataset['__ratio'] = (
self.dataset['{}{}'.format(stub, year)] / means[year])
if float(theta) == 0.0:
self.dataset['__ratio'] = np.log(
self.dataset['__ratio'] ** -1)
self.dataset['__ratio'][self.dataset['__ratio']
== np.inf] = 0
ge[year] = (self.dataset['__ratio'] *
self.dataset[weight]).sum() / poptot
elif float(theta) == 1.0:
self.dataset['__ratio'] = self.dataset['__ratio'] * np.log(
self.dataset['__ratio'])
self.dataset['__ratio'][self.dataset['__ratio']
== np.inf] = 0
ge[year] = (self.dataset['__ratio'] *
self.dataset[weight]).sum() / poptot
else:
self.dataset['__ratio'] = self.dataset['__ratio'] ** theta
self.dataset['__ratio'][self.dataset['__ratio']
== np.inf] = 0
ge[year] = (((self.dataset['__ratio'] *
self.dataset[weight]).sum() / poptot - 1) /
(theta ** 2 - theta))
if 0 in self.dataset['{}{}'.format(stub, year)]:
print '\nThere were zeroes in {}xxxx'.format(stub)
if nozero:
print 'Ge({}) did not consider those obs.'.format(theta)
return ge
def inequality_gini(self, stub, weight):
"""
stub - variable stub existent in microsim.dataset
weight - weight variable in microsim.dataset
"""
period = self.results[self.cursim]['period']
gini = pd.Series(0.0, range(period[0], period[1] + 1))
for year in xrange(period[0], period[1] + 1):
"""
the Gini index is calculated as twice the area between
the Lorenz Curve and the diagonal (equality line)
"""
curvar = '{}{}'.format(stub, year)
self.dataset.sort(curvar, inplace=True)
self.dataset['__cumpop'] = (self.dataset[weight].cumsum() /
self.dataset[weight].sum())
self.dataset['__cumren'] = ((self.dataset[curvar] *
self.dataset[weight]).cumsum() /
(self.dataset[curvar] *
self.dataset[weight]).sum())
            self.dataset['__polyarea'] = (self.dataset['__cumpop'] -
                self.dataset['__cumpop'].shift(1)) * (
                self.dataset['__cumren'] +
                self.dataset['__cumren'].shift(1))
gini[year] = 1 - self.dataset['__polyarea'].sum()
todrop = [c for c in self.dataset.columns if '__' in c]
self.dataset.drop(todrop, axis=1, inplace=True)
return gini
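    # poverty() below computes Foster-Greer-Thorbecke style indicators for
    # each poverty line z:
    #   p0   - headcount ratio (weighted share of observations below z)
    #   pgap - average relative shortfall (z - y) / z among the poor
    #   p1   - poverty gap index, p0 * pgap
    #   pge2 - GE(2) inequality among the poor (half the squared coefficient
    #          of variation)
    #   p2   - severity index, p0 * (pgap**2 + (1 - pgap)**2 * 2 * pge2)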
def poverty(self, stub, weight, plines):
"""
stub - variable stub existent in microsim.dataset
weight - weight variable in microsim.dataset
plines - list or tuple with poverty lines
"""
period = self.results[self.cursim]['period']
perang = range(period[0], period[1] + 1)
povind = pd.DataFrame({'__temp': pd.Series(0.0, perang)})
for pline in plines:
povind['p0({})'.format(pline)] = 0.0
povind['p1({})'.format(pline)] = 0.0
povind['p2({})'.format(pline)] = 0.0
povind['pgap({})'.format(pline)] = 0.0
povind['pge2({})'.format(pline)] = 0.0
povind.drop('__temp', axis=1, inplace=True)
for year in xrange(period[0], period[1] + 1):
curvar = '{}{}'.format(stub, year)
dt = self.dataset
# process a list of poverty lines
for pline in plines:
povind['p0({})'.format(pline)][year] = (
((dt[curvar] < pline) *
dt[weight]).sum() /
dt[weight].sum())
povind['pgap({})'.format(pline)][year] = (
(((dt[curvar] < pline) *
(pline - dt[curvar]) / pline) *
dt[weight]).sum() /
dt[weight][dt[curvar] < pline].sum())
povind['p1({})'.format(pline)][year] = (
povind['p0({})'.format(pline)][year] *
povind['pgap({})'.format(pline)][year])
mnge2 = (((dt[curvar] < pline) * dt[curvar] *
dt[weight]).sum() /
dt[weight][dt[curvar] < pline].sum())
stge2 = (((dt[curvar] < pline) * (dt[curvar] -
mnge2) ** 2 * dt[weight]).sum() /
dt[weight][dt[curvar] < pline].sum()) ** 0.5
ge2 = (stge2/mnge2) ** 2 / 2
povind['pge2({})'.format(pline)][year] = ge2
povind['p2({})'.format(pline)][year] = (
povind['p0({})'.format(pline)][year] *
(povind['pgap({})'.format(pline)][year] ** 2 +
(1 - povind['pgap({})'.format(pline)][year]) ** 2 *
ge2 * 2))
return povind
def visualize(self, data, xcol='year', cols='all', subplotcols=2):
if xcol not in data.columns:
raise Exception('{} not in dataset'.format(xcol))
if cols == 'all':
toplot = [col for col in data.columns if col != xcol
and type(data[col].iloc[0]) != str]
elif isinstance(cols, tuple) or isinstance(cols, list):
for col in cols:
if col not in data.columns:
raise Exception('{} not in dataset'.format(col))
elif type(data[col].iloc[0]) == str:
raise Exception('{} is a string variable'.format(col))
toplot = cols
elif isinstance(cols, str):
if cols not in data.columns:
raise Exception('{} not in dataset'.format(cols))
elif type(data[cols].iloc[0]) == str:
raise Exception('{} is a string variable'.format(cols))
toplot = [cols]
else:
raise TypeError()
plt.figure(1)
plt.clf()
if len(toplot) > subplotcols:
gridrows = len(toplot) // subplotcols
if len(toplot) % subplotcols > 0:
gridrows += 1
gridcols = subplotcols
elif len(toplot) < subplotcols:
gridrows = 1
gridcols = len(toplot)
else:
gridrows = 1
gridcols = subplotcols
for nplot in range(len(toplot)):
plot = toplot[nplot]
            plt.subplot(gridrows, gridcols, nplot + 1)
#plt.subplot.axes.get_xaxis().set_ticks([])
#plt.subplot.axes.get_yaxis().set_ticks([])
plt.title(plot)
plt.locator_params(axis='both', tight=True, nbins=7)
plt.plot(data[xcol],
data[plot])
plt.tight_layout(pad=1.5)
plt.show()
class growth(object):
"""
Growt object
dataset
key column name
# of parts
this is the growth rate for every partition
of the income distribution
simulate will partition the income distribution of
microsim.dataset based on the growth object # of parts
and the identifier is key column name
the dataset has the growth rates, if the dataset has
more than one growth rate, simulate will need an order
instruction (such as random.choice)
"""
def __init__(self, parts=100, key='key'):
self.dataset = pd.DataFrame({key: range(parts)})
self.parts = parts
self.key = key
self.__logn = 1
self.__pwrn = 1
self.__coln = 1
def add_columns(self, data, stub='newcol'):
"""
Will get columns from a pandas dataframe
same rules that apply to csv files
csv reads to pandas dataframe and calls add_columns
"""
if isinstance(data, pd.Series) or \
isinstance(data, tuple) or \
isinstance(data, list):
if not self.__invalid(data):
                name = stub
                while name in self.dataset.columns:
                    name = '{}{}'.format(stub, self.__coln)
                    self.__coln += 1
dataframe = pd.DataFrame({name: data})
else:
dataframe = data[:]
for col in dataframe.columns:
if self.__invalid(dataframe[col]):
raise TypeError('{} is invalid'.format(col))
if col in self.dataset.columns:
raise Exception('{} exists in dataset'.format(col))
self.dataset[col] = dataframe[col]
def add_log(self, stub='log',
average=0.01,
shift=0,
flip=False,
alpha=1):
"""
will add a log growth pattern with average = average
if flip the log function is mirrored and the
growth rate will correlate negatively with
income - FLIP for PRO-POOR GROWTH
"""
self.dataset['__x'] = self.dataset.key + alpha
if flip:
X = self.dataset['__x'][:]
X.sort(ascending=False)
X = X.reset_index()
self.dataset['__x'] = X['__x']
self.dataset['__lnx'] = np.log(self.dataset['__x'])
name = stub
while name in self.dataset.columns:
name = '{}{}'.format(stub, self.__logn)
self.__logn += 1
average = average * self.parts
self.dataset[name] = self.dataset['__lnx'] / \
self.dataset['__lnx'].sum() * average + shift
self.dataset.drop(['__x', '__lnx'], axis=1, inplace=True)
def add_power(self, stub='pwr',
power=0.2,
average=0.01,
shift=0,
flip=False,
alpha=1):
"""
will add a power growth pattern with average = average
if flip the power function is mirrored and the
growth rate will correlate negatively with
income - FLIP for PRO-POOR GROWTH
"""
self.dataset['__x'] = self.dataset.key + alpha
if flip:
X = self.dataset['__x'][:]
X.sort(ascending=False)
X = X.reset_index()
self.dataset['__x'] = X['__x']
self.dataset['__pwrx'] = self.dataset['__x'] ** float(power)
name = stub
while name in self.dataset.columns:
name = '{}{}'.format(stub, self.__pwrn)
self.__pwrn += 1
average = average * self.parts
self.dataset[name] = self.dataset['__pwrx'] / \
self.dataset['__pwrx'].sum() * average + shift
self.dataset.drop(['__x', '__pwrx'], axis=1, inplace=True)
def load_csv(self, csvfile, delimiter='\t'):
"""
reads to pandas dataframe and calls add_columns
csv file has to conform:
# lines should be equal to parts
column names in first row (valid for pandas)
growth rate columns only, in proportions, 0.1 = 10%
if one growth rate column per simulation year, first
column should be the growth rate from base year to
second year
"""
data = pd.read_csv(csvfile, sep=delimiter,
index_col=False, na_values='')
self.add_columns(data)
def __invalid(self, X):
if isinstance(X, pd.Series):
if tuple(X.index) != tuple(self.dataset.index):
raise Exception('Idxs differ - should be zero to self.parts')
return False
elif isinstance(X, tuple) or isinstance(X, list):
if len(X) != self.parts:
raise Exception('Sizes differ: len(X) != self.parts')
return False
else:
return True
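# A minimal usage sketch (hypothetical values and file name, not executed
# here):
#   g = growth(parts=100, key='key')
#   g.add_log(stub='log', average=0.02, flip=True)    # pro-poor log pattern
#   g.add_power(stub='pwr', power=0.2, average=0.02)
#   g.load_csv('growth_rates.csv', delimiter='\t')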
# if main, runs the example
if __name__ == '__main__':
pass
| gpl-3.0 |
UNR-AERIAL/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
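# The tests below rely on kernel ridge regression with a linear kernel being
# equivalent to ridge regression without an intercept on the raw features, so
# Ridge(fit_intercept=False) and KernelRidge(kernel="linear") should produce
# the same predictions for the same alpha.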
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
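# model() is the logistic (sigmoid) function; applying it to the fitted linear
# decision values clf.coef_ * X_test + clf.intercept_ gives the predicted
# probability of class 1, which is the blue curve plotted below.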
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
avicennax/jedi | tests/sin_test.py | 1 | 2751 | from __future__ import division
import jedi.jedi as jedi
from jedi.utils import plot, init_tools
import matplotlib.pylab as plt
import numpy as np
# Setting Seeds
seeds = np.random.uniform(0,10000,1).astype(int)
# sinusoidal target (a cosine with period 0.5)
target = lambda t0: np.cos(2 * np.pi * t0/.5)
#Simulation parameters for FORCE
dt = .01 # time step
tmax = 10 # simulation length
tstart = 0
tstop = 5 # learning stop time
rho = 1.25 # spectral radius of J
N = 300 # size of stochastic pool
lr = 1.0 # learning rate
pE = .8 # percent excitatory
sparsity = (.1,1,1) # sparsity
# Noise matrix
noise_mat = np.array([np.random.normal(0,.3,N) for i in range(int(tmax/dt+2))])
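# The two loops below train the same randomly initialized recurrent pool,
# first with jedi.force() and then with jedi.dforce() (labelled SFORCE in the
# final comparison plot), collecting the readouts (zs), the returned wu values
# and the absolute errors on the sinusoidal target for comparison.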
errors = []
zs = []
wus = []
for seedling in seeds:
J, Wz, _, x0, u, w = init_tools.set_simulation_parameters(seedling, N, 1, pE=pE, p=sparsity, rho=rho)
# inp & z are dummy variables
def model(t0, x, params):
index = params['index']
tanh_x = params['tanh_x']
z = params['z']
noise = params['noise'][index]
return (-x + np.dot(J, tanh_x) + Wz*z + noise)/dt
x, t, z, _, wu,_ = jedi.force(target, model, lr, dt, tmax, tstart, tstop, x0, w, noise=noise_mat)
zs.append(z)
wus.append(wu)
error = np.abs(z-target(t))
errors.append(error)
errors = np.array(errors)
# Visualizing activities of first 20 neurons
T = 300
plt.figure(figsize=(12,4))
plt.subplot(211)
plt.title("Neuron Dynamics");
for i in range(10):
plt.plot(t[:T], x[:T, i]);
plt.subplot(212)
for i in range(10):
plt.plot(t[-T:], x[-T:, i]);
plt.xlim(t[-T], t[-1]);
plt.show()
## -- DFORCE -- ##
derrors = []
zs = []
wus = []
for seedling in seeds:
J, Wz, _, x0, u, w = init_tools.set_simulation_parameters(seedling, N, 1, pE=pE, p=sparsity, rho=rho)
def model(t0, x, params):
index = params['index']
tanh_x = params['tanh_x']
z = params['z']
noise = params['noise'][index]
return (-x + np.dot(J, tanh_x) + Wz*z + noise)/dt
x, t, z, _, wu,_ = jedi.dforce(jedi.step_decode, target, model, lr, dt, tmax, tstart, tstop, x0, w,
noise=noise_mat, pE=pE)
zs.append(z)
wus.append(wu)
derror = np.abs(z-target(t))
derrors.append(derror)
derrors = np.array(derrors)
# Visualizing activities of first 20 neurons
T = 300
plt.figure(figsize=(12,4))
plt.subplot(211)
plt.title("Neuron Dynamics");
for i in range(10):
plt.plot(t[:T], x[:T, i]);
plt.subplot(212)
for i in range(10):
plt.plot(t[-T:], x[-T:, i]);
plt.xlim(t[-T], t[-1]);
plt.show()
plt.figure(figsize=(12,4))
plot.cross_signal_error(errors, derrors, t, tstart, tstop,
title="FORCE vs SFORCE (Sin Wave))", burn_in=100)
plt.show() | mit |
jdvelasq/pytimeseries | pytimeseries/pytimeseries (copia).py | 1 | 15891 | """
pytimeseries
A library for big data time series forecasting
"""
import pandas
import scipy
import numpy
import matplotlib
import statsmodels
import sklearn
import tabulate
import operator
import pyts_functions
from tabulate import tabulate
from scipy import stats
from statsmodels import api
from sklearn import linear_model
from sklearn import datasets
class pyforecast():
def __init__(self, model=None, ts=None):
self.model = model
self.ts = ts
"""Time series' and model's diagnostics"""
class series_viewer():
def __init__(self, ts=None):
self.ts = ts
def time_plot(self):
tp = self.ts.plot()
return tp
def ACF_plot(self):
acf = statsmodels.graphics.tsaplots.plot_acf(self.ts)
return acf
def PACF_plot(self):
pacf = statsmodels.graphics.tsaplots.plot_pacf(self.ts)
return pacf
def qq_plot(self):
qq_plot = statsmodels.graphics.gofplots.qqplot(self.ts)
return qq_plot
def density_plot(self):
den_plot = self.ts.plot(kind='kde')
return den_plot
""" Histogram of frequencies """
def histogram(self):
hist = self.ts.hist()
return hist
""" Jarque Bera normality test """
def normality(self):
jb = scipy.stats.jarque_bera(self.ts)
jb_value = jb[0]
p_value = jb[1]
return (tabulate([['Test statistics value', jb_value], ['p value', p_value]], headers = ['Jarque Bera normality test']))
class base_model():
def __init__(self, ts=None):
self.ts = ts
self.residuals = None
self.trending = None
self.seasonality = None
self.ts_seasonal = None
self.original = None
self.estimation = None
frq = pandas.infer_freq(self.ts.index)
intfrq = len(pandas.date_range(pandas.datetime(2017, 1, 1), pandas.datetime(2017, 12, 31), freq=frq))
self.intfrq = intfrq
def __repr__(self):
return self.ts.__repr__()
def specify(self, trans=None, trend=None, seasonal=None):
"""Return the transformed series
Args:
trans (log, log10, sqrt, cbrt, boxcox): Transformation to apply
trend (linear, cuadratic, cubic, diff1, diff2): Trend to apply
seasonal (poly2, diff): Seasonality to apply
>>> ts = pandas.Series.from_csv('champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = base_model(ts = ts). specify(trans = 'log').residuals
>>> transformed_ts
Month
1964-01-01 7.942718
1964-02-01 7.890583
1964-03-01 7.921173
1964-04-01 7.908755
1964-05-01 7.988204
1964-06-01 8.018296
1964-07-01 7.732808
1964-08-01 7.701652
1964-09-01 7.980024
1964-10-01 8.366603
1964-11-01 8.659387
1964-12-01 8.897272
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = base_model(ts = ts). specify(trans = 'log10').residuals
>>> transformed_ts
Month
1964-01-01 3.449478
1964-02-01 3.426836
1964-03-01 3.440122
1964-04-01 3.434729
1964-05-01 3.469233
1964-06-01 3.482302
1964-07-01 3.358316
1964-08-01 3.344785
1964-09-01 3.465680
1964-10-01 3.633569
1964-11-01 3.760724
1964-12-01 3.864036
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = base_model(ts = ts). specify(trans = 'sqrt').residuals
>>> transformed_ts
Month
1964-01-01 53.056574
1964-02-01 51.691392
1964-03-01 52.488094
1964-04-01 52.163205
1964-05-01 54.277067
1964-06-01 55.099909
1964-07-01 47.770284
1964-08-01 47.031904
1964-09-01 54.055527
1964-10-01 65.582010
1964-11-01 75.921012
1964-12-01 85.510233
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = base_model(ts = ts). specify(trans = 'cbrt').residuals
>>> transformed_ts
Month
1964-01-01 14.119722
1964-02-01 13.876464
1964-03-01 14.018683
1964-04-01 13.960775
1964-05-01 14.335436
1964-06-01 14.479956
1964-07-01 13.165536
1964-08-01 13.029519
1964-09-01 14.296402
1964-10-01 16.262594
1964-11-01 17.929767
1964-12-01 19.409398
Name: Perrin, dtype: float64
"""
""" Transform time series """
if (trans == 'log'):
ts_trans = self.ts.apply(numpy.log)
elif (trans == 'log10'):
ts_trans = self.ts.apply(numpy.log10)
elif (trans == 'sqrt'):
ts_trans = self.ts.apply(numpy.sqrt)
elif (trans == 'cbrt'):
ts_trans = self.ts.apply(numpy.cbrt)
elif (trans == 'boxcox'):
ts_trans = self.ts.apply(pyts_functions.ts_boxcox)
elif (trans == None):
ts_trans = self.ts
else:
message_trans = 'Invalid transformation value: ' + trans
raise ValueError(message_trans)
""" Removing trend """
if (trend == 'linear'):
X = ts_trans.index.factorize()[0].reshape(-1,1)
y = ts_trans
model = sklearn.linear_model.LinearRegression()
model.fit(X, y)
trend = model.predict(X)
detrended = [y[i]-trend[i] for i in range(0, len(ts_trans))]
ts_trend = detrended
elif (trend == 'cuadratic'):
X = ts_trans.index.factorize()[0].reshape(-1,1)
y = ts_trans
model = sklearn.preprocessing.PolynomialFeatures(degree=2)
X_ = model.fit_transform(X)
model = linear_model.LinearRegression()
fitting = model.fit(X_, y)
trend = fitting.predict(X_)
detrended = [y[i]-trend[i] for i in range(0, len(ts_trans))]
ts_trend = detrended
elif (trend == 'cubic'):
X = ts_trans.index.factorize()[0].reshape(-1,1)
y = ts_trans
model = sklearn.preprocessing.PolynomialFeatures(degree=3)
X_ = model.fit_transform(X)
model = linear_model.LinearRegression()
fitting = model.fit(X_, y)
trend = fitting.predict(X_)
detrended = [y[i]-trend[i] for i in range(0, len(ts_trans))]
ts_trend = detrended
elif (trend == 'diff1'):
y = ts_trans
diff = list()
diff.append(y[0])
for i in range(1, len(y)):
value = y[i] - y[i-1]
diff.append(value)
trend = diff
detrended = diff
ts_trend = detrended
elif (trend == 'diff2'):
y = ts_trans
diff = list()
diff.append(y[0])
diff.append(y[1])
for i in range(2, len(y)):
value = y[i] - y[i - 2]
diff.append(value)
trend = diff
detrended = diff
ts_trend = detrended
elif (trend == None):
ts_trend = ts_trans
trend = [0 for i in range(0, len(ts_trans))]
else:
message_trend = 'Invalid trending value: ' + trend
raise ValueError(message_trend)
""" Removing seasonality """
if (seasonal == 'poly2'):
X = ts_trans.index.factorize()[0].reshape(-1,1)
X = X%self.intfrq
y = ts_trend
model = sklearn.preprocessing.PolynomialFeatures(degree=2)
X_ = model.fit_transform(X)
model = linear_model.LinearRegression()
fitting = model.fit(X_, y)
seasonality = fitting.predict(X_)
deseasonal = [y[i]-seasonality[i] for i in range(0, len(ts_trans))]
ts_seasonal = deseasonal
elif (seasonal == 'diff'):
y = ts_trend
diff = list()
for j in range(self.intfrq):
diff.append(y[j])
for i in range(self.intfrq, len(y)):
value = y[i] - y[i - self.intfrq]
diff.append(value)
seasonality = diff
deseasonal = diff
ts_seasonal = deseasonal
elif (seasonal == None):
ts_seasonal = ts_trend
seasonality = [0 for i in range(0, len(ts_trend))]
else:
message_seasonal = 'Invalid seasonal value: ' + seasonal
raise ValueError(message_seasonal)
self.seasonality = seasonality
self.trending = trend
self.residuals = ts_seasonal
return self
""" Restore series to its original values """
def restore(self, trans=None, trend=None, seasonal=None):
if (seasonal == 'poly2'):
ts_deseasonal = [self.estimation[i] + self.seasonality[i] for i in range(len(self.estimation))]
elif (seasonal == 'diff'):
ts_deseasonal = list()
for j in range(self.intfrq):
ts_deseasonal.append(self.seasonality[j])
for i in range(self.intfrq,len(self.seasonality)):
value = self.seasonality[i] + ts_deseasonal[i-self.intfrq]
ts_deseasonal.append(value)
else:
ts_deseasonal = self.estimation
if (trend == 'linear' or trend == 'cuadratic' or trend == 'cubic'):
ts_detrend = [ts_deseasonal[i] + self.trending[i] for i in range(len(ts_deseasonal))]
elif (trend == 'diff1'):
ts_detrend = list()
ts_detrend.append(self.trending[0])
for i in range(1,len(self.trending)):
value = self.trending[i] + ts_detrend[i-1]
ts_detrend.append(value)
elif (trend == 'diff2'):
ts_detrend = list()
ts_detrend.append(self.trending[0])
ts_detrend.append(self.trending[1])
for i in range(2,len(self.trending)):
value = self.trending[i] + ts_detrend[i-2]
ts_detrend.append(value)
else:
ts_detrend = ts_deseasonal
if (trans == 'log'):
ts_detrans = numpy.exp(ts_detrend)
elif (trans == 'log10'):
ts_detrans = scipy.special.exp10(ts_detrend)
elif (trans == 'sqrt'):
ts_detrans = [ts_detrend[i]**2 for i in range(len(ts_detrend))]
elif (trans == 'cbrt'):
ts_detrans = [ts_detrend[i]**3 for i in range(len(ts_detrend))]
elif (trans == 'boxcox'):
lmbda = pyts_functions.ts_boxcox_lmbda(self.ts)
ts_detrans = scipy.special.inv_boxcox(ts_detrend, lmbda)
else:
ts_detrans = ts_detrend
self.original = ts_detrans
return self
def set_residuals(self, residuals):
self.residuals = series_viewer(residuals)
""" Residuals analysis """
def time_plot(self):
self.residuals.time_plot()
def ACF_plot(self):
self.residuals.ACF_plot()
def PACF_plot(self):
self.residuals.PACF_plot()
def qq_plot(self):
self.residuals.qq_plot()
def density_plot(self):
self.residuals.density_plot()
def histogram(self):
self.residuals.histogram()
def normality(self):
self.residuals.normality()
class AR_p(base_model):
def __init__(self, ts=None, p=None, trans=None, trend=None, seasonal=None):
self.p = p
self.ts = ts
self.trans = trans
self.trend = trend
self.seasonal = seasonal
#super().__init__()
self.X = None
def __repr__(self):
return self.ts.__repr__()
def estimate(self):
estimation = 0
X = base_model(self.ts).specify(trans = self.trans, trend = self.trend, seasonal = self.seasonal)
model = statsmodels.tsa.ar_model.AR(X.residuals)
model_fit = model.fit()
estimation = model_fit.predict()
X.estimation = estimation
X.restore(trans = self.trans, trend = self.trend, seasonal = self.seasonal)
super().set_residuals(X.residuals)
self.X = X
return self
class AR(base_model):
def __init__(self, ts=None, p=None):
self.p = p
self.ts = ts
self.phi0 = numpy.random.rand(1)
self.phi = numpy.random.rand(p)
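    # The AR(p) model below predicts
    #   y[t] = phi0 + phi[0] * y[t-p] + ... + phi[p-1] * y[t-1]
    # (a dot product of the last p observations with phi); fit() chooses phi0
    # and phi by minimizing the mean squared one-step-ahead error with
    # scipy.optimize.minimize.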
def params2vector(self):
params = list()
params.append(self.phi0)
for i in range(len(self.phi)):
params.append(self.phi[i])
return params
def vector2params(self, vector):
self.phi0 = vector[0]
self.phi = vector[1:]
return self
def predict(self):
y = self.ts.values
prediction = list()
for i in range(len(y)):
if i <= self.p:
if i == 0:
prediction.append(self.phi0)
else:
y_last = y[0:i]
result = self.phi0 + numpy.dot(y_last, self.phi[0:i])
prediction.append(result[0])
else:
y_last = y[i-self.p:i]
result = self.phi0 + numpy.dot(y_last, self.phi)
prediction.append(result[0])
prediction = pandas.Series((v for v in prediction), index = self.ts.index)
return prediction
def calc_error(self):
y_estimated = self.predict()
y_real = self.ts
error = sklearn.metrics.mean_squared_error(y_real, y_estimated)
return error
def fit(self):
def f(x):
self.vector2params(x)
return self.calc_error()
x0 = self.params2vector()
optim_params = scipy.optimize.minimize(f, x0)
self.vector2params(vector = optim_params.x)
return self
def forecast(self, periods):
def forward(y):
y = y.values
lon = len(y)
if lon <= self.p:
y_last = y[0:lon]
result = self.phi0 + numpy.dot(y_last, self.phi[0:lon])
else:
y_last = y[lon-self.p:lon]
result = self.phi0 + numpy.dot(y_last, self.phi)
return result
def add_next_date(y, value):
next_date = pandas.date_range(y.index[-1], periods=2, freq=pandas.infer_freq(y.index))
next_ts = pandas.Series(value, index = next_date)
next_ts = next_ts.drop(y.index[-1])
ts_forecast = y.append(next_ts)
return ts_forecast
for i in range(periods):
if i == 0:
y = self.ts
            value = forward(y)
y = add_next_date(y, value)
return y[-periods-1:]
#def filter_ts(ts, model_class):
# def __init__(self, cls):
# self.cls = cls
| mit |
davharris/bbs-forecasting | test/test_benchmarks.py | 1 | 1328 | """Tests of time-series prediction benchmarking functions"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from bbs_benchmarks import *
def test_benchmark_predictions():
time = [1, 2, 3]
value = [4, 5, 6]
preds = benchmark_predictions(time, value, lag=1)
assert preds == [6, 5, 4.5]
def test_filter_timeseries_contiguous():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2], 'date': [1, 2, 3, 4, 1, 2]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigtrue():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigfalse():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3, contiguous=False)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]}))
| mit |
roxyboy/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
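# Each entry in FORMATS converts a dense label-indicator matrix into one of
# the multilabel representations under test; benchmark() times every
# (metric, format) combination on the same randomly generated targets.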
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
poryfly/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
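# Every score in score_funcs returns 1.0 for a perfect (possibly relabeled)
# clustering; the tests below check that property, the error messages raised
# for malformed input, and known values for partially matching labelings.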
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
zycdragonball/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 47 | 5877 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
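  # The helper below flattens a feature dict into a single matrix by
  # concatenating its values in sorted key order, so the GMM sees a consistent
  # column ordering regardless of dict iteration order.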
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode):
"""Model function."""
assert labels is None, labels
(all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op)
return _model_fn
| apache-2.0 |
catalla/AYDABTU | visualization/legacy/model.py | 1 | 4094 | from collections import defaultdict
import urllib
from operator import itemgetter
def parseData(fname):
for l in urllib.urlopen(fname):
yield eval(l)
def feature(elem):
return [elem['date'],
elem['primary_type'],
elem['longitude'],
elem['latitude'],
elem['arrest'],
elem['location_description']]
def buildData(filename):
print "Exec buildData\n"
return [feature(d) for d in parseData(filename)]
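# aggragateData() builds a time key from components s..e of each parsed date
# (index 0=year, 1=month, 2=day, 3=hour, 4=minute, 5=second), counts crimes
# per (time key, crime type), and returns [sorted time keys, crime types
# sorted by total count, the count dict, a (lon, lat, location, crime) list,
# and the set of crime types].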
def aggragateData(inData,s,e):
print "Exec AggragateData\n"
timeList = []
for elem in inData:
year = (elem[0])[0:4]
month = (elem[0])[5:7]
day = (elem[0])[8:10]
hour = (elem[0])[11:13]
minute = (elem[0])[14:16]
second = (elem[0])[17:19]
crime = elem[1]
try:
longi = float(elem[2])
lati = float(elem[3])
except ValueError:
longi = 0
lati = 0
arrest = elem[4]
locat = elem[5]
time = [year,month,day,hour,minute,second,
crime,
longi,lati,locat,
arrest]
timeList.append(time)
outDict = defaultdict(lambda:defaultdict(int))
crimeSet = set()
timeSet = set()
locList = []
for time in timeList:
timeVar = str()
for i in range(s,e+1): timeVar += time[i]
outDict[str(int(timeVar))][time[6]] += 1
locList.append([time[7],time[8],time[9],time[6]])
crimeSet.add(time[6])
timeSet.add(int(timeVar))
crimeDict = defaultdict(int)
for c in crimeSet:
crimeDict[c]=sum([outDict[str(t)][c] for t in timeSet])
tL = sorted(list(timeSet),key=int)
cL = sorted( [[c,crimeDict[c]] for c in crimeSet],
key=itemgetter(1),
reverse=True)
cL = [c[0] for c in cL]
out = [tL,cL,outDict,locList,crimeSet ]
return out
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import matplotlib as mpl
##########################################################################
# Generate sample data
#colors_ = list(six.iteritems(colors.cnames))
inFile = "out_train.txt"
imdata = buildData(inFile)
data = aggragateData(imdata,3,3)
#locCrime = defaultdict(list)
#X = np.array(data[3]).T
#lon = X[0]
#lat = X[1]
#loc = X[2]
#crime = X[3]
#crimeColor = defaultdict(int)
#i = 0
#for c in data[4]:
#    crimeColor[c] = i
#    i += 1
#color = [ float(crimeColor[c])/255 for c in crime ]
#plt.scatter(lon, lat, c=color, s=500, cmap=mpl.cm.gray)
#plt.show()
dataScrub = []
prev = defaultdict(int)
for crime in data[1]:
sub = [data[2][str(time)][crime] for time in data[0]]
dataScrub.append(sub)
base = np.zeros(len(dataScrub[0]))  # accumulate counts element-wise across crime types
for i in range(0,len(dataScrub)):
base += dataScrub[len(dataScrub)-i-1]
plt.plot(base)
#X = (np.array(dataScrub[0])).T
#y = np.array(data[0])
#w,v = np.linalg.eig(np.cov(dataScrub))
#print v
#for scrub in dataScrub: plt.plot(scrub)
plt.ylabel('Crime Count')
#plt.legend(data[1], loc='upper left')
#l=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
#ax.set_xticklabels(l)
plt.show()
def svrRBF(cVar,gamVar):
print "Exec svrRBF\n"
svr_rbf = SVR(kernel='rbf', C=cVar, gamma=gamVar)
y_rbf = svr_rbf.fit(X, y).predict(X)
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
def svrPoly(cVar,degVar):
print "Exec svrPoly\n"
svr_poly = SVR(kernel='poly', C=cVar, degree=degVar)
y_poly = svr_poly.fit(X, y).predict(X)
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
def svrLin(cVar):
print "Exec svrLin\n"
svr_lin = SVR(kernel='linear', C=cVar)
y_lin = svr_lin.fit(X, y).predict(X)
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
#svrLin(1e3)
#svrPoly(1e3,2)
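# Note: the SVR helpers use the module-level X and y, which are currently
# commented out above; restore those definitions before running this call or
# it will raise a NameError.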
svrRBF(1e3,0.1)
| mit |
MrChristophRivera/jupyter-tips-and-tricks | notebooks/04-More_basics.py | 4 | 4958 |
# coding: utf-8
# # Jupyter Notebook Basics
# In[1]:
names = ['alice', 'jonathan', 'bobby']
ages = [24, 32, 45]
ranks = ['kinda cool', 'really cool', 'insanely cool']
# In[3]:
for (name, age, rank) in zip(names, ages, ranks):
print name, age, rank
# In[4]:
for index, (name, age, rank) in enumerate(zip(names, ages, ranks)):
print index, name, age, rank
# In[5]:
# return, esc, shift+enter, ctrl+enter
# text keyboard shortcuts -- cmd > (right), < left,
# option delete (deletes words)
# type "h" for help
# tab
# shift-tab
# keyboard shortcuts
# - a, b, y, m, dd, h, ctrl+shift+-
# In[14]:
get_ipython().magic(u'matplotlib inline')
get_ipython().magic(u"config InlineBackend.figure_format='retina'")
import matplotlib.pyplot as plt
# no pylab
import seaborn as sns
sns.set_context('talk')
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = 12, 8 # plotsize
import numpy as np
# don't do `from numpy import *`
import pandas as pd
# In[9]:
# If you have a specific function that you'd like to import
from numpy.random import randn
# In[10]:
x = np.arange(100)
y = np.sin(x)
plt.plot(x, y)#;
# In[12]:
get_ipython().magic(u'matplotlib notebook')
# In[13]:
x = np.arange(10)
y = np.sin(x)
plt.plot(x, y)#;
# ## Magics!
#
# - % and %% magics
# - interact
# - embed image
# - embed links, youtube
# - link notebooks
# Check out http://matplotlib.org/gallery.html select your favorite.
# In[15]:
get_ipython().run_cell_magic(u'bash', u'', u'for num in {1..5}\ndo\n for infile in *;\n do\n echo $num $infile\n done\n wc $infile\ndone')
# In[20]:
print "hi"
get_ipython().system(u'pwd')
# In[17]:
get_ipython().system(u'ping google.com')
# In[18]:
this_is_magic = "Can you believe you can pass variables and strings like this?"
# In[22]:
hey = get_ipython().getoutput(u'echo $this_is_magic')
# In[23]:
hey
# # Numpy
#
# If you have arrays of numbers, use `numpy` or `pandas` (built on `numpy`) to represent the data. Tons of very fast underlying code.
# In[24]:
x = np.arange(10000)
print x # smart printing
# In[25]:
print x[0] # first element
print x[-1] # last element
print x[0:5] # first 5 elements (also x[:5])
print x[:] # "Everything"
# In[26]:
print x[-5:] # last five elements
# In[27]:
print x[-5:-2]
# In[28]:
print x[-5:-1] # not final value -- not inclusive on right
# In[ ]:
# In[29]:
x = np.random.randint(5, 5000, (3, 5))
# In[30]:
x
# In[31]:
np.sum(x)
# In[32]:
x.sum()
# In[42]:
np.sum(x)
# In[41]:
np.sum(x, axis=0)
# In[43]:
np.sum(x, axis=1)
# In[44]:
x.sum(axis=1)
# In[45]:
# Multi dimension array slice with a comma
x[:, 2]
# In[ ]:
# In[46]:
y = np.linspace(10, 20, 11)
y
# In[47]:
get_ipython().magic(u'pinfo np.linspace')
# In[ ]:
# np.linspace()
# shift-tab; shift-tab-tab
# np.
# In[48]:
def does_it(first=x, second=y):
"""This is my doc"""
pass
# In[49]:
y[[3, 5, 7]]
# In[ ]:
does_it()
# In[51]:
num = 3000
x = np.linspace(1.0, 300.0, num)
y = np.random.rand(num)
z = np.sin(x)
np.savetxt("example.txt", np.transpose((x, y, z)))
# In[52]:
get_ipython().magic(u'less example.txt')
# In[53]:
get_ipython().system(u'wc example.txt')
# In[54]:
get_ipython().system(u'head example.txt')
# In[55]:
#Not a good idea
a = []
b = []
for line in open("example.txt", 'r'):
a.append(line[0])
b.append(line[2])
a[:10] # Whoops!
# In[56]:
a = []
b = []
for line in open("example.txt", 'r'):
line = line.split()
a.append(line[0])
b.append(line[2])
a[:10] # Strings!
# In[57]:
a = []
b = []
for line in open("example.txt", 'r'):
line = line.split()
a.append(float(line[0]))
b.append(float(line[2]))
a[:10] # Lists!
# In[58]:
# Do this!
a, b = np.loadtxt("example.txt", unpack=True, usecols=(0,2))
# In[59]:
a
# ## Matplotlib and Numpy
#
# In[60]:
from numpy.random import randn
# In[61]:
num = 50
x = np.linspace(2.5, 300, num)
y = randn(num)
plt.scatter(x, y)
# In[64]:
y > 1
# In[65]:
y[y > 1]
# In[66]:
y[(y < 1) & (y > -1)]
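# Boolean masks such as (y < 1) & (y > -1) can be combined with & and | and
# used directly as indices into a NumPy array; ~mask inverts the selection, as
# the next cells demonstrate.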
# In[67]:
plt.scatter(x, y, c='b', s=50)
plt.scatter(x[(y < 1) & (y > -1)], y[(y < 1) & (y > -1)], c='r', s=50)
# In[68]:
y[~((y < 1) & (y > -1))] = 1.0
plt.scatter(x, y, c='b')
plt.scatter(x, np.clip(y, -0.5, 0.5), color='red')
# In[71]:
num = 350
slope = 0.3
x = randn(num) * 50. + 150.0
y = randn(num) * 5 + x * slope
plt.scatter(x, y, c='b')
# In[72]:
# plt.scatter(x[(y < 1) & (y > -1)], y[(y < 1) & (y > -1)], c='r')
# np.argsort, np.sort, complicated index slicing
dframe = pd.DataFrame({'x': x, 'y': y})
g = sns.jointplot('x', 'y', data=dframe, kind="reg")
# ## Grab Python version of ggplot http://ggplot.yhathq.com/
# In[73]:
from ggplot import ggplot, aes, geom_line, stat_smooth, geom_dotplot, geom_point
# In[74]:
ggplot(aes(x='x', y='y'), data=dframe) + geom_point() + stat_smooth(colour='blue', span=0.2)
# In[ ]:
| mit |
CaptainBriot/prosperpy | ltc.py | 1 | 6882 | import sys
import logging
import argparse
import decimal
import collections
import prosperpy
import prosperpy.traders
LOGGER = logging.getLogger(__name__)
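# get_candles() re-buckets the raw rows loaded from a JSON dump into candles
# of the requested granularity: each bucket keeps its first timestamp, the
# first values of columns 1 and 5, the last value of column 2 and the min/max
# of columns 3 and 4, and each resulting row is mapped to
# (timestamp, low, high, open, close, volume) to build prosperpy.Candle
# objects, with NaN opens filled from the previous candle's close.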
def get_candles(granularity, filename='data.json', reverse=False):
import json
with open(filename, 'r') as data_file:
candles = []
raw = json.load(data_file)
data = []
aux = []
timestamp = raw[0][0]
for item in raw:
aux.append(item)
if aux[-1][0] > timestamp + granularity:
timestamp = timestamp + granularity
data.append([aux[0][0],
aux[0][1],
aux[-1][2],
min([i[3] for i in aux]),
max([i[4] for i in aux]),
aux[0][5]])
aux = []
if reverse:
data = reversed(data)
for index, item in enumerate(data):
kwargs = dict(
timestamp=item[0], low=decimal.Decimal(item[1]), high=decimal.Decimal(item[2]),
open=decimal.Decimal(item[3]), close=decimal.Decimal(item[4]), volume=decimal.Decimal(item[5]))
candle = prosperpy.Candle(**kwargs)
try:
candle.previous = candles[index - 1]
if candle.open.is_nan():
candle.open = candle.previous.close
except IndexError:
if candle.open.is_nan():
candle.open = candle.close
candles.append(candle)
return candles
def init_logging(options):
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
fmt = '%(asctime)s|%(levelname)s|%(name)s|%(message)s|%(filename)s|%(lineno)s'
handler.setFormatter(logging.Formatter(fmt=fmt))
if options.verbosity:
level = logging.DEBUG
prosperpy.engine.set_debug(True)
elif options.quiet:
level = logging.ERROR
else:
level = logging.INFO
logging.root.setLevel(level)
logging.root.addHandler(handler)
def plotme(curves, actions):
import matplotlib.pyplot
import matplotlib.ticker
fig = matplotlib.pyplot.figure()
plot = fig.add_subplot(111)
for x, y, color in curves:
plot.plot(x, y, color=color)
formatter = matplotlib.ticker.FormatStrFormatter('$%1.2f')
plot.yaxis.set_major_formatter(formatter)
for tick in plot.yaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
tick.label2.set_color('green')
for counter, side, price in actions:
bbox = dict(boxstyle="round", fc="0.8")
arrowprops = dict(arrowstyle="->", connectionstyle="angle,angleA=0,angleB=90,rad=10")
offset = 64
plot.annotate('{} ({:.2f})'.format(side, price), (counter, price), xytext=(-2 * offset, offset),
textcoords='offset points', bbox=bbox, arrowprops=arrowprops)
matplotlib.pyplot.show()
def the_past(period, granularity, feed, product, reverse):
# factor = 365 * (3600 * 24) / (granularity * period)
factor = 10
#candles = prosperpy.gdax.api.get_candles(period * factor, options.granularity, product)
ltc_candles = get_candles(granularity, filename='ltc-usd.json', reverse=reverse)
btc_candles = get_candles(granularity, filename='btc-usd.json', reverse=reverse)
feed.candles = collections.deque(iterable=ltc_candles[1:feed.period+1], maxlen=feed.period*factor)
ltc_x = [candle.timestamp for candle in ltc_candles[1:feed.period+1]]
ltc_y = [candle.close for candle in ltc_candles[1:feed.period + 1]]
btc_x = [candle.timestamp for candle in btc_candles[1:feed.period+1]]
btc_y = [candle.close * decimal.Decimal('0.01') for candle in btc_candles[1:feed.period+1]]
for candle in btc_candles[feed.period+1:]:
btc_x.append(candle.timestamp)
btc_y.append(candle.close * decimal.Decimal('0.01'))
for candle in ltc_candles[feed.period + 1:]:
ltc_x.append(candle.timestamp)
ltc_y.append(candle.close)
plotme([(ltc_x, ltc_y, 'blue'), (btc_x, btc_y, 'orange')], [])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--granularity', type=int, dest='granularity', required=True)
parser.add_argument('--period', type=int, dest='period', required=True)
parser.add_argument('--reverse', action='store_true', dest='reverse', default=False)
parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0)
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False)
options = parser.parse_args()
init_logging(options)
product = 'BTC-USD'
auth = prosperpy.gdax.auth.GDAXAuth(
'727677e8492b36cd13b3b9325d20a5b7',
'G/EGnZRm5MG+gxZCgw1CIOlLBQcViib78486kJhsvAYkiyojJTI5EsLTEVc0UGw/W1Ko5xhqwmFOUIQGzigJwQ==',
'hus9I7src8U2')
api = prosperpy.gdax.api.GDAXAPI(auth)
run(product, options.period, options.granularity, api, options.reverse)
return
feeds = {}
for period in [10, 50, 100, 200, 500, 1000, 2000]:
for granularity in [60, 120, 300, 600, 1800, 3600, 7200, 21400, 43200, 86400]:
feeds[str(period) + '-' + str(granularity)] = run(product, period, granularity, api, False)
for key, feed in feeds.items():
LOGGER.info('-' * 40 + key + '-' * 40)
for trader in feed.traders:
trader.summary()
def run(product, period, granularity, api, reverse):
feed = prosperpy.gdax.GDAXFeed(product, period, granularity)
#feed.traders.append(prosperpy.traders.ADXTrader(product, feed, api))
#feed.traders.append(prosperpy.traders.RSITrader(product, feed, api))
feed.traders.append(prosperpy.traders.HMATrader(product, feed, api))
#feed.traders.append(prosperpy.traders.SMATrader(product, feed, api))
#feed.traders.append(prosperpy.traders.PercentageTrader(decimal.Decimal('0.8'), product, feed, api))
#feed.traders.append(prosperpy.traders.RegressorTrader(sklearn.ensemble.RandomForestRegressor, product, feed, api))
#feed.traders.append(prosperpy.traders.RegressorTrader(sklearn.ensemble.ExtraTreesRegressor, product, feed, api))
#feed.traders.append(prosperpy.traders.RegressorTrader(sklearn.ensemble.AdaBoostRegressor, product, feed, api))
#feed.traders.append(prosperpy.traders.RegressorTrader(sklearn.ensemble.BaggingRegressor, product, feed, api))
#feed.traders.append(prosperpy.traders.RegressorTrader(sklearn.ensemble.GradientBoostingRegressor, product, feed, api))
feed.traders.append(prosperpy.traders.HODLTrader(product, feed, api))
feed.traders.append(prosperpy.traders.PerfectTrader(product, feed, api))
#real_time(feed)
the_past(period, granularity, feed, product, reverse)
return feed
if __name__ == '__main__':
main()
| mit |
franzpl/sweep | dut_test/dut_test.py | 2 | 1294 | #!/usr/bin/env python3
"""Software-Test with a known IR.
h[k] = dirac[k] + dirac[k-1050]
Please note: for the frequency response, scaling by 2 / len(signal)
before the FFT is not valid.
"""
import sys
sys.path.append('..')
import matplotlib.pyplot as plt
import numpy as np
import generation
import plotting
import calculation
import measurement_chain
import ir_imitation
# Parameters of the measuring system
fs = 44100
fstart = 1
fstop = 22050
pad = 7
# Excitation signal
excitation = np.zeros(44100)
excitation[0] = 1
excitation_zeropadded = generation.zero_padding(excitation, pad, fs)
system_response = measurement_chain.convolution(ir_imitation.diracs([0, 1050]))(excitation_zeropadded)
h = calculation.deconv_process(excitation,
system_response, fs)[:len(excitation)]
# Plot impulse response
plotting.plot_time(h)
plt.xlim(-500, 10000)
plt.xticks([0, 1050, 2000, 4000, 6000, 8000, 10000])
plt.savefig('impulse_response.png')
plt.close()
# Plot frequency response
plotting.plot_freq(h, fs, scale='db', title=' ')
plt.xscale('log')
plt.xlim(1, 23000)
plt.savefig('frequency_response.png')
plt.close()
# Plot phase response
plotting.plot_freq(h, fs, mode='phase', title=' ')
plt.xscale('log')
plt.savefig('phase_response.png')
plt.close()
| mit |
EachenKuang/PythonRepository | MedicineTool/Perplexity.py | 1 | 3242 | # -*- encoding: utf-8 -*-
"""
Author: Eachen Kuang
Date: 2017.7.11
Goal: Perplexity plotting and calculation
Other:
"""
import logging
from gensim import models
from gensim import corpora
from gensim.models.coherencemodel import CoherenceModel
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# calculating the log perplexity per word as obtained by gensim code
# https://radimrehurek.com/gensim/models/atmodel.html
# parameters: pass in trained corpus
# return: graph of perplexity per word for varying number of topics
# Initialize the data
# If the data has already been saved, this block does not need to be run
# data_path_in_folds = "D:\\Kuangyichen\\JavaRepository\\LDAGibbsSampling-master\\data\\LdaOriginalDocs\\"
# data_in_folds_filenames = os.listdir(data_path_in_folds)
# # data_in_folds_filenames.sort()
# texts = []
#
# for date_in_file in data_in_folds_filenames:
# with open(data_path_in_folds+date_in_file, 'r') as doc:
# text = []
# for line in doc:
# text.append(line.strip())
# texts.append(text)
#
# dictionary = corpora.Dictionary(texts)
# dictionary.save('./tmp/all_doucment.dict.txt')
# corpus = [dictionary.doc2bow(text) for text in texts]
# corpora.BleiCorpus.serialize('./tmp/corpus.blei', corpus)
# If the data already exists, just run the following statements
# dictionary = corpora.Dictionary.load('./tmp/all_doucment.dict.txt')
# corpus = corpora.BleiCorpus('./tmp/corpus.blei')
parameter_list = range(5, 40)
grid = {}
grid1 = {}
corpus = corpora.BleiCorpus("./timewindow_in3/corpus_2008-2009-2010.blei")
dictionary = corpora.Dictionary.load("./dictionary/dict.dict")
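# Illustrative helper (not called anywhere below): the comments above mention
# the per-word log perplexity reported by gensim.  LdaModel.log_perplexity()
# returns a per-word variational bound, so the corresponding perplexity is
# 2 ** (-bound).
def perplexity_per_word(lda_model, eval_corpus):
    bound = lda_model.log_perplexity(eval_corpus)  # per-word log2 bound
    return np.power(2.0, -bound)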
# lda1 = models.LdaModel.load('./timewindow_in3/_2000-2001-2002lda_model')
for parameter_value in parameter_list:
ldaModel = models.LdaModel(corpus=corpus, id2word=dictionary, alpha='auto', eta='auto', num_topics=parameter_value, iterations=10)
ldaModel1 = models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=parameter_value, iterations=10)
coherenceModel = CoherenceModel(model=ldaModel, corpus=corpus, dictionary=dictionary, coherence='u_mass')
coherenceModel1 = CoherenceModel(model=ldaModel1, corpus=corpus, dictionary=dictionary, coherence='u_mass')
grid[parameter_value] = []
coherence = coherenceModel.get_coherence()
grid[parameter_value].append(coherence)
grid1[parameter_value] = []
coherence1 = coherenceModel1.get_coherence()
grid1[parameter_value].append(coherence1)
print parameter_value, coherence, coherence1
df = pd.DataFrame(grid)
ax = plt.figure(figsize=(5, 3), dpi=300).add_subplot(111)
df.iloc[0].transpose().plot(ax=ax, color="#254F09")
plt.xlim(parameter_list[0], parameter_list[-1])
plt.ylabel('Perplexity')
plt.xlabel('topics')
plt.show()
df = pd.DataFrame(grid1)
ax = plt.figure(figsize=(5, 3), dpi=300).add_subplot(111)
df.iloc[0].transpose().plot(ax=ax, color="#254F09")
plt.xlim(parameter_list[0], parameter_list[-1])
plt.ylabel('Perplexity')
plt.xlabel('topics')
plt.show()
for topic_num, perplex_tn in grid.iteritems():
print topic_num, perplex_tn
for topic_num, perplex_tn in grid1.iteritems():
print topic_num, perplex_tn | apache-2.0 |
joshloyal/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 69 | 6473 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standard estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
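# Quick illustrative fit, separate from the benchmark below: MinCovDet exposes
# its robust estimates through ``location_`` and ``covariance_``.  The small
# Gaussian sample and the ``_demo`` names are for demonstration only.
X_demo = np.random.RandomState(42).randn(100, 5)
mcd_demo = MinCovDet().fit(X_demo)
robust_location_demo = mcd_demo.location_
robust_covariance_demo = mcd_demo.covariance_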
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(np.int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
FluVigilanciaBR/seasonality | methods/data_filter/sinan_convert2mem.py | 1 | 12750 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
import pandas as pd
import numpy as np
import argparse
import logging
from .episem import lastepiweek
from argparse import RawDescriptionHelpFormatter
module_logger = logging.getLogger('update_system.sinan_convert2mem')
age_cols = ['Idade desconhecida', '0-4 anos', '5-9 anos', '10-19 anos', '20-29 anos', '30-39 anos', '40-49 anos',
'50-59 anos', '60+ anos']
vir_cols = ['FLU_A',
'FLU_B',
'SARS2',
'VSR',
'PARA1',
'PARA2',
'PARA3',
'PARA4',
'ADNO',
'METAP',
'BOCA',
'RINO',
'OTHERS']
lab_cols_in = vir_cols + ['POSITIVE',
'NEGATIVE',
'INCONCLUSIVE',
'TESTING_IGNORED',
'NOTTESTED',
'DELAYED']
lab_cols_out = vir_cols + ['POSITIVE_CASES',
'NEGATIVE',
'INCONCLUSIVE',
'TESTING_IGNORED',
'NOTTESTED',
'DELAYED']
def readtable(fname, sep=','):
target_col = ['SG_UF_NOT', 'DT_SIN_PRI_epiyearweek', 'DT_SIN_PRI_epiyear', 'DT_SIN_PRI_epiweek', 'CS_SEXO',
'idade_em_anos'] + lab_cols_in
df = pd.read_csv(fname,
sep=sep,
low_memory=False,
encoding='utf-8')[target_col].rename(columns={'CS_SEXO': 'sexo',
'DT_SIN_PRI_epiyearweek': 'epiyearweek',
'DT_SIN_PRI_epiyear': 'epiyear',
'DT_SIN_PRI_epiweek': 'epiweek'})
df['Idade desconhecida'] = pd.isnull(df.idade_em_anos).astype(int)
df['< 2 anos'] = (df.idade_em_anos < 2).astype(int)
df['2-4 anos'] = ((df.idade_em_anos >= 2) & (df.idade_em_anos < 5)).astype(int)
df['0-4 anos'] = (df.idade_em_anos < 5).astype(int)
df['5-9 anos'] = ((df.idade_em_anos >= 5) & (df.idade_em_anos < 10)).astype(int)
df['10-19 anos'] = ((df.idade_em_anos >= 10) & (df.idade_em_anos < 20)).astype(int)
df['20-29 anos'] = ((df.idade_em_anos >= 20) & (df.idade_em_anos < 30)).astype(int)
df['30-39 anos'] = ((df.idade_em_anos >= 30) & (df.idade_em_anos < 40)).astype(int)
df['40-49 anos'] = ((df.idade_em_anos >= 40) & (df.idade_em_anos < 50)).astype(int)
df['50-59 anos'] = ((df.idade_em_anos >= 50) & (df.idade_em_anos < 60)).astype(int)
df['60+ anos'] = (df.idade_em_anos >= 60).astype(int)
tgt_cols = {'Agentes infecciosos detectados': vir_cols,
'Exames laboratoriais': ['POSITIVE_CASES', 'NEGATIVE', 'INCONCLUSIVE',
'TESTING_IGNORED', 'NOTTESTED', 'DELAYED']}
# df['POSITIVE_CASES'] = np.logical_not(df['NOTTESTED'] | df['TESTING_IGNORED'] | df['NEGATIVE'] |
# df['DELAYED'] | df['INCONCLUSIVE']).astype(int)
df['POSITIVE_CASES'] = (df.POSITIVE == 1).astype(int)
df.rename(columns={'SG_UF_NOT': 'UF'}, inplace=True)
grp_cols = ['UF', 'epiyearweek', 'epiyear', 'epiweek', '< 2 anos', '2-4 anos'] + age_cols + \
tgt_cols['Agentes infecciosos detectados'] + tgt_cols['Exames laboratoriais']
# Aggregate independent of sex:
dftmp = df[grp_cols].groupby(['UF', 'epiyearweek', 'epiyear', 'epiweek'], as_index=False).agg(sum)
dftmp['SRAG'] = dftmp[age_cols].apply(sum, axis=1)
dftmp['sexo'] = 'Total'
# Aggregate separating by sex:
grp_cols = ['UF', 'epiyearweek','epiyear', 'epiweek', 'sexo', '< 2 anos', '2-4 anos'] + age_cols + \
tgt_cols['Agentes infecciosos detectados'] + tgt_cols['Exames laboratoriais']
df = df[grp_cols].groupby(['UF', 'epiyearweek', 'epiyear', 'epiweek', 'sexo'], as_index=False).agg(sum)
df['SRAG'] = df[age_cols].apply(sum, axis=1)
df = df.append(dftmp, ignore_index=True, sort=True)
df.UF = df.UF.astype('int64')
yearlist = sorted(list(df.epiyear.unique()))
module_logger.info('Year list: %s', yearlist)
lastweek = df.epiweek[df.epiyear == max(yearlist)].max()
uflist = list(df.UF.unique())
sexlist = ['M', 'F', 'I', 'Total']
tmpdict = []
for year in yearlist[:-1]:
for week in range(1, (int(lastepiweek(year))+1)):
for uf in uflist:
tmpdict.extend([{'UF': uf, 'epiyearweek': '%sW%02d' % (year, week), 'epiyear': year, 'epiweek': week,
'sexo': sex} for sex in sexlist])
tmpdict.extend([{'UF': uf, 'epiyearweek': '%sW%02d' % (yearlist[-1], week), 'epiyear': yearlist[-1], 'epiweek': week,
'sexo': sex} for sex in sexlist for week in
range(1, (lastweek+1)) for uf in uflist])
dftmp = pd.DataFrame(tmpdict)
dffull = pd.merge(dftmp, df, how='left').fillna(0)
# Load Federal Units aggregation:
dfreg = pd.read_csv('../data/regioesclimaticas.csv', encoding='utf-8')
dffull = pd.merge(dffull, dfreg[['Código', 'Região', 'Região oficial']].rename(columns={'Código': 'UF'}),
how='left')
dffull_reg = dffull.drop(['UF', 'Região oficial'], axis=1).groupby(['Região', 'epiyearweek', 'epiyear', 'epiweek',
'sexo'],
as_index=False).sum()
dffull_reg_ofi = dffull.drop(['UF', 'Região'], axis=1).groupby(['Região oficial', 'epiyearweek', 'epiyear',
'epiweek', 'sexo'], as_index=False).sum()
dfBR = dffull.drop(['UF', 'Região', 'Região oficial'], axis=1).groupby(['epiyearweek', 'epiyear', 'epiweek',
'sexo'], as_index=False).sum()
dfBR['Região'] = 'BR'
dffull_reg.rename(columns={'Região': 'UF'}, inplace=True)
dfBR.rename(columns={'Região': 'UF'}, inplace=True)
dffull_reg_ofi.rename(columns={'Região oficial': 'UF'}, inplace=True)
dffull_reg = dffull_reg.append(dffull_reg_ofi, ignore_index=True, sort=True)
dffull_reg = dffull_reg.append(dfBR, ignore_index=True, sort=True)
dffull = dffull.drop(['Região', 'Região oficial'], axis=1).append(dffull_reg, ignore_index=True, sort=True)
dffull = dffull[['UF', 'epiyearweek', 'epiyear', 'epiweek', 'sexo', 'SRAG', '< 2 anos', '2-4 anos'] + age_cols +
tgt_cols['Agentes infecciosos detectados'] + tgt_cols['Exames laboratoriais']]
dffull = dffull.sort_values(by=['UF', 'epiyearweek', 'epiyear', 'epiweek', 'sexo'],
axis=0).reset_index().drop('index', axis=1)
return(dffull)
def uf4mem(dfin=pd.DataFrame()):
df = dfin.copy()
# Load Population file:
dfpop = pd.read_csv('../data/PROJECOES_2013_POPULACAO-simples_v3_agebracket.csv', encoding='utf-8')
dfpop.rename(columns={'UF': 'Unidade da Federação'}, inplace=True)
dfpop.rename(columns={'Código': 'UF'}, inplace=True)
# Calculate incidence:
yearlist = sorted(list(df.epiyear.unique()))
uflist = list(df.UF.unique())
dfinc = df[~(df.sexo == 'I')].rename(columns={'SRAG': 'Total'}).drop(['Idade desconhecida', '< 2 anos', '2-4 anos'],
axis=1)
tgt_cols = ['Total'] + age_cols
tgt_cols.remove('Idade desconhecida')
dfpop.set_index('Ano', inplace=True)
# Incidence from lab results:
lab_cols = lab_cols_out
for uf in uflist:
for year in yearlist:
for sex in ['M', 'F', 'Total']:
tgt_rows = (dfinc.UF == uf) & (dfinc.epiyear == year) & (dfinc.sexo == sex)
dfpop_tgt_rows = (dfpop.UF == str(uf)) & (dfpop.Sexo == sex) & (dfpop.index == year)
# Cases by age:
dfinc.loc[tgt_rows, tgt_cols] = 100000*dfinc.loc[tgt_rows, tgt_cols].\
div(dfpop.loc[dfpop_tgt_rows, tgt_cols].ix[year], axis='columns')
# Lab results:
dfinc.loc[tgt_rows, lab_cols] = (100000*dfinc.loc[tgt_rows, lab_cols] /
dfpop.loc[dfpop_tgt_rows, 'Total'].ix[year])
dfinc.rename(columns={'Total': 'SRAG'}, inplace=True)
# Structure data in the format accepted by MEM algorithm:
lastweek = df.epiweek[(df.epiyear == max(yearlist)) & (df.epiweek != 53)].max()
tmpdict = [{'UF': uf, 'epiweek': week} for week in range(1, 53) for uf in uflist]
dftmp = pd.DataFrame(tmpdict)
dftmpinc = dftmp.copy()
for year in yearlist:
lbl = 'SRAG' + str(year)
dftmp = pd.merge(dftmp, df[(df.epiyear == year) & (df.sexo == 'Total')][['UF', 'epiweek', 'SRAG']].rename(
columns={'SRAG': lbl}), on=['UF', 'epiweek'], how='left')
dftmpinc = pd.merge(dftmpinc, dfinc[(dfinc.epiyear == year) & (dfinc.sexo == 'Total')][['UF', 'epiweek',
'SRAG']].rename(columns={'SRAG': lbl}),
on=['UF', 'epiweek'], how='left')
if year != yearlist[-1]:
dftmp[lbl] = dftmp[lbl].fillna(0)
dftmpinc[lbl] = dftmpinc[lbl].fillna(0)
else:
dftmp.loc[dftmp.epiweek <= lastweek, lbl] = dftmp.loc[dftmp.epiweek <= lastweek, lbl].fillna(0)
dftmpinc.loc[dftmpinc.epiweek <= lastweek, lbl] = dftmpinc.loc[dftmpinc.epiweek <= lastweek, lbl].fillna(0)
df4mem = dftmp.sort_values(by=['UF', 'epiweek'], axis=0).reset_index().drop('index', axis=1)
dfinc4mem = dftmpinc.sort_values(by=['UF', 'epiweek'], axis=0).reset_index().drop('index', axis=1)
df.UF = df.UF.astype('str')
dfinc.UF = dfinc.UF.astype('str')
df = df.merge(dfpop[['UF', 'Unidade da Federação']].drop_duplicates(), how='left')
dfinc = dfinc.merge(dfpop[['UF', 'Unidade da Federação']].drop_duplicates(), how='left')
return df, dfinc, df4mem, dfinc4mem
def main(fname, sep=','):
# Reads and process data:
df = readtable(fname, sep)
# Structure data:
df, dfinc, df4mem, dfinc4mem = uf4mem(df)
# Write population table to be used for thresholds:
last_year = int(df.epiyear.max())
# Load population size time series:
dfpop = pd.read_csv('../data/PROJECOES_2013_POPULACAO-simples_agebracket.csv', encoding='utf-8')
dfpopcurrent = dfpop[dfpop.Ano == last_year]
dfpopcurrent.to_csv('../data/populacao_uf_regional_atual.csv', index=False, encoding='utf-8')
# Write output to file:
fnameout = '.'.join(fname.split('.')[:-1]) + '4mem-incidence.csv'
dfinc4mem.to_csv(fnameout, index=False, encoding='utf-8')
fnameout = '.'.join(fname.split('.')[:-1]) + '4mem.csv'
df4mem.to_csv(fnameout, index=False, encoding='utf-8')
fnameout = '.'.join(fname.split('.')[:-1]) + '-weekly-incidence.csv'
dfinc['Tipo'] = 'Estado'
dfinc.loc[dfinc['UF'].isin(['RegN', 'RegL', 'RegC', 'RegS']) ,'Tipo'] = 'Regional'
dfinc.loc[dfinc['UF'].isin(['N', 'S', 'NE', 'SE', 'CO']), 'Tipo'] = 'Região'
dfinc.loc[dfinc['UF'] == 'BR' ,'Tipo'] = 'País'
dfinc = dfinc.sort_values(by=['UF', 'epiyearweek', 'epiyear', 'epiweek', 'sexo'],
axis=0).reset_index().drop('index', axis=1)
dfinc.to_csv(fnameout, index=False, encoding='utf-8')
fnameout = '.'.join(fname.split('.')[:-1]) + '-weekly.csv'
df['Tipo'] = 'Estado'
df.loc[df['UF'].isin(['RegN', 'RegL', 'RegC', 'RegS']) ,'Tipo'] = 'Regional'
df.loc[df['UF'].isin(['N', 'S', 'NE', 'SE', 'CO']), 'Tipo'] = 'Região'
df.loc[df['UF'] == 'BR' ,'Tipo'] = 'País'
df = df.sort_values(by=['UF', 'epiyearweek', 'epiyear', 'epiweek', 'sexo'],
axis=0).reset_index().drop(['index', '0-4 anos'], axis=1)
df.to_csv(fnameout, index=False, encoding='utf-8')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Place each year in a corresponding column, with weeks in [1,52].\n" +
"Aggregate data by regional subdivision given in file regioesclimaticas.csv.\n" +
"Exemple usage:\n" +
"python3 sinan-convert2mem-fmt-regiao.py --path clean_data.csv",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--path', help='Path to data file')
parser.add_argument('--sep', help='Column separator', default=',')
args = parser.parse_args()
main(args.path, args.sep)
| gpl-3.0 |
liupfskygre/qiime | qiime/quality_scores_plot.py | 9 | 6918 | #!/usr/bin/env python
# File created Sept 29, 2010
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from matplotlib import use
use('Agg', warn=False)
from skbio.parse.sequences import parse_fasta
from numpy import arange, std, average
from pylab import plot, savefig, xlabel, ylabel, text, \
hist, figure, legend, title, show, xlim, ylim, xticks, yticks,\
scatter, subplot
from matplotlib.font_manager import fontManager, FontProperties
from qiime.util import gzip_open
from qiime.parse import parse_qual_score
def bin_qual_scores(qual_scores):
""" Bins qual score according to nucleotide position
qual_scores: Dict of label: numpy array of base scores
"""
qual_bins = []
qual_lens = []
for l in qual_scores.values():
qual_lens.append(len(l))
max_seq_size = max(qual_lens)
for base_position in range(max_seq_size):
qual_bins.append([])
for scores in qual_scores.values():
# Add score if exists in base position, otherwise skip
try:
qual_bins[base_position].append(scores[base_position])
except IndexError:
continue
return qual_bins
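# Minimal illustration, not part of the original QIIME module: with two
# hypothetical reads of unequal length, base position 2 collects only one
# score (plain lists stand in for the per-read numpy score arrays here).
def _bin_qual_scores_example():
    example_scores = {'seq1': [30, 28, 25], 'seq2': [31, 27]}
    # -> [[30, 31], [28, 27], [25]] (inner order follows dict iteration)
    return bin_qual_scores(example_scores)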
def get_qual_stats(qual_bins, score_min):
""" Generates bins of averages, std devs, total NT from quality bins"""
ave_bins = []
std_dev_bins = []
total_bases_bins = []
found_first_poor_qual_pos = False
suggested_trunc_pos = None
for base_position in qual_bins:
total_bases_bins.append(len(base_position))
std_dev_bins.append(std(base_position))
ave_bins.append(average(base_position))
if not found_first_poor_qual_pos:
if average(base_position) < score_min:
suggested_trunc_pos = qual_bins.index(base_position)
found_first_poor_qual_pos = True
return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos
def plot_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
score_min,
output_dir):
""" Plots, saves graph showing quality score averages, stddev.
Additionally, the total nucleotide count for each position is shown on
a second subplot
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
score_min: lowest value that a given base call can be and still be
acceptable. Used to generate a dotted line on the graph for easy assay
of the poor scoring positions.
output_dir: output directory
"""
t = arange(0, len(ave_bins), 1)
std_dev_plus = []
std_dev_minus = []
for n in range(len(ave_bins)):
std_dev_plus.append(ave_bins[n] + std_dev_bins[n])
std_dev_minus.append(ave_bins[n] - std_dev_bins[n])
figure_num = 0
f = figure(figure_num, figsize=(8, 10))
figure_title = "Quality Scores Report"
f.text(.5, .93, figure_title, horizontalalignment='center', size="large")
subplot(2, 1, 1)
plot(t, ave_bins, linewidth=2.0, color="black")
plot(t, std_dev_plus, linewidth=0.5, color="red")
dashed_line = [score_min] * len(ave_bins)
l, = plot(dashed_line, '--', color='gray')
plot(t, std_dev_minus, linewidth=0.5, color="red")
legend(
('Quality Score Average',
'Std Dev',
'Score Threshold'),
loc='lower left')
xlabel("Nucleotide Position")
ylabel("Quality Score")
subplot(2, 1, 2)
plot(t, total_bases_bins, linewidth=2.0, color="blue")
xlabel("Nucleotide Position")
ylabel("Nucleotide Counts")
outfile_name = output_dir + "/quality_scores_plot.pdf"
savefig(outfile_name)
def write_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
output_dir,
suggested_trunc_pos):
""" Writes data in bins to output text file
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
output_dir: output directory
suggested_trunc_pos: Position where average quality score dropped below
the score minimum (25 by default)
"""
outfile_name = output_dir + "/quality_bins.txt"
outfile = open(outfile_name, "w")
outfile.write("# Suggested nucleotide truncation position (None if " +
"quality score average did not drop below the score minimum threshold)" +
": %s\n" % suggested_trunc_pos)
outfile.write("# Average quality score bins\n")
outfile.write(",".join(str("%2.3f" % ave) for ave in ave_bins) + "\n")
outfile.write("# Standard deviation bins\n")
outfile.write(",".join(str("%2.3f" % std) for std in std_dev_bins) + "\n")
outfile.write("# Total bases per nucleotide position bins\n")
outfile.write(",".join(str("%d" %
total_bases) for total_bases in total_bases_bins))
def generate_histogram(qual_fp,
output_dir,
score_min=25,
verbose=True,
qual_parser=parse_qual_score):
""" Main program function for generating quality score histogram
qual_fp: quality score filepath
output_dir: output directory
score_min: minimum score to be considered a reliable base call, used
to generate dotted line on histogram for easy visualization of poor
quality scores.
qual_parser : function to apply to extract quality scores
"""
if qual_fp.endswith('.gz'):
qual_lines = gzip_open(qual_fp)
else:
qual_lines = open(qual_fp, "U")
qual_scores = qual_parser(qual_lines)
# Sort bins according to base position
qual_bins = bin_qual_scores(qual_scores)
# Get average, std dev, and total nucleotide counts for each base position
ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos =\
get_qual_stats(qual_bins, score_min)
plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,
output_dir)
# Save values to output text file
write_qual_report(ave_bins, std_dev_bins, total_bases_bins, output_dir,
suggested_trunc_pos)
if verbose:
print "Suggested nucleotide truncation position (None if quality " +\
"score average did not fall below the minimum score parameter): %s\n" %\
suggested_trunc_pos
| gpl-2.0 |
alvarofierroclavero/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
jereze/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
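# Illustrative check, kept commented out so that importing the module is
# unchanged: for the first component the NIPALS weights should match the
# singular vectors returned by _svd_cross_product below, up to sign and
# numerical tolerance.
# rng = np.random.RandomState(0)
# X_toy, Y_toy = rng.randn(20, 4), rng.randn(20, 3)
# u_nipals, v_nipals, _ = _nipals_twoblocks_inner_loop(X_toy, Y_toy)
# u_svd, v_svd = _svd_cross_product(X_toy, Y_toy)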
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples in the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples in the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, which is slightly different from CCA. It is mostly used
    for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
relies in the fact that mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
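    Examples
    --------
    A minimal usage sketch with the same toy data as in the PLSCanonical
    example above:
    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    >>> pls_svd = PLSSVD(n_components=2).fit(X, Y)
    >>> X_c, Y_c = pls_svd.transform(X, Y)
    >>> X_c.shape, Y_c.shape
    ((4, 2), (4, 2))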
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
simon-pepin/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
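# Hedged follow-up sketch: the loop above has already fitted every classifier in the dict, so we
# can print a compact summary of held-out accuracy per covariance type (values are illustrative
# and depend on the train/test fold chosen above).
for name, clf in classifiers.items():
    acc = np.mean(clf.predict(X_test) == y_test) * 100
    print('%s covariance: test accuracy %.1f%%' % (name, acc))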
| bsd-3-clause |
brodoll/sms-tools | lectures/03-Fourier-properties/plots-code/symmetry-real-even.py | 26 | 1150 | import matplotlib.pyplot as plt
import numpy as np
import sys
import math
from scipy.signal import triang
from scipy.fftpack import fft, fftshift
M = 127
N = 128
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
x = triang(M)
fftbuffer = np.zeros(N)
# zero-phase windowing: center the (real, even) triangle around n = 0 in the FFT buffer
fftbuffer[:hM1] = x[hM2:]
fftbuffer[N-hM2:] = x[:hM2]
X = fftshift(fft(fftbuffer))
mX = abs(X)
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 4))
plt.subplot(311)
plt.title('x[n]')
plt.plot(np.arange(-hM2, hM1, 1.0), x, 'b', lw=1.5)
plt.axis([-hM2, hM1, 0, 1])
plt.subplot(323)
plt.title('real(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.real(X), 'r', lw=1.5)
plt.axis([-N/2, N/2, min(np.real(X)), max(np.real(X))])
plt.subplot(324)
plt.title('im(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.imag(X), 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(325)
plt.title('abs(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,min(mX),max(mX)])
plt.subplot(326)
plt.title('angle(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), pX, 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.tight_layout()
plt.savefig('symmetry-real-even.png')
plt.show()
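# Hedged numerical check of the symmetry property plotted above: a real, even input should give a
# DFT that is (to numerical precision) purely real, so the imaginary part stays near zero.
print('max |imag(X)| = %e' % np.max(np.abs(np.imag(X))))
print('DFT numerically real: %s' % np.allclose(np.imag(X), 0, atol=1e-9))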
| agpl-3.0 |
yunfeilu/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print( model_details + " with features", pair, "has a score of", scores )
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
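# Hedged sketch reproducing the 10-fold cross-validation comparison quoted in the module docstring
# on all four iris features. It assumes the legacy ``sklearn.cross_validation`` module used by this
# sklearn version is available; exact scores will vary slightly from the quoted values.
from sklearn.cross_validation import cross_val_score
for model in models:
    name = str(type(model)).split(".")[-1][:-2]
    cv_scores = cross_val_score(clone(model), iris.data, iris.target, cv=10)
    print("%s: mean 10-fold CV score = %.2f" % (name, cv_scores.mean()))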
| bsd-3-clause |
barentsen/dave | lpp/calcLPPoctave.py | 1 | 2160 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 21:03:09 2015
@author: sthomp
"""
import numpy as np
import matplotlib.pyplot as plt
from oct2py import Oct2Py
import os
#import time as timer
#t0=timer.time()
#mapfile='/home/sthomp/DAVE/origLPP/maps/mapQ1Q17DR24-DVMed6084.mat'
def calcLPPone(time,flux,mapFile,period,duration,phase):
"""
Calculate the LPP transit metric given a time, flux (detrended)
inputs
----------
    time : Time array in days
        array
    flux : Detrended flux array
        array
    mapFile : Path to the LPP map file (.mat)
        string
    period : in days
        float
    duration : in hours
        float
    phase : in days
        float
This runs octave code.
outputs
-------
Tlpp : LPP transit metric value
binnedFlux : The sorted, folded, binned flux values input to LPP
"""
octave = Oct2Py()
octave.addpath('/home/sthomp/DAVE/dave/lpp/octave/transitLike')
octave.addpath('/home/sthomp/DAVE/dave/lpp/octave/createLightCurves/')
octave.addpath('/home/sthomp/DAVE/dave/lpp/octave/drtoolbox/')
octave.addpath('/home/sthomp/DAVE/dave/lpp/octave/drtoolbox/techniques/')
#octave.addpath('/home/sthomp/DAVE/dave/lpp/octave/drtoolbox')
Tlpp, Y, binnedFlux = octave.calcLPPMetricLCarray(time,flux,period,duration,phase,mapFile)
return Tlpp , binnedFlux
def fergalVersion(time, flux, mapFile, period, duration, phase):
path = getLppDir()
#Create a new instance for each time we run LPP. The
#oct2py.octave is not threadsafe and will crash when run in
#parallel. Oct2Py() won't
with Oct2Py() as octave:
octave.addpath(path)
octave.addpath(path + "/octave/transitLike")
octave.addpath(path + "/octave/createLightCurves/")
octave.addpath(path + "/octave/drtoolbox/")
octave.addpath(path + "/octave/drtoolbox/techniques")
Tlpp, Y, binnedFlux = octave.calcLPPMetricLCarray(\
time,flux,period,duration,phase,mapFile)
return Tlpp.copy(), Y.copy(), binnedFlux.copy()
def getLppDir():
"""Get the path where LPP stores its .m files"""
pathSep = "/"
path = os.path.realpath(__file__)
path = pathSep.join(path.split(pathSep)[:-1])
return path
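if __name__ == "__main__":
    # Hedged usage sketch: the light curve below is synthetic and the map-file path is a
    # placeholder (assumption) -- point it at a real LPP map .mat file, and note that running
    # this requires Octave plus the bundled .m files on the paths added above.
    mapFile = "maps/mapQ1Q17DR24-DVMed6084.mat"      # placeholder path
    time = np.arange(0.0, 80.0, 0.02)                # days
    flux = 1.0 + 1e-4 * np.random.randn(len(time))   # flat, noisy light curve
    period, duration, phase = 10.0, 3.0, 1.5         # days, hours, days
    Tlpp, Y, binnedFlux = fergalVersion(time, flux, mapFile, period, duration, phase)
    print("Tlpp = %s" % Tlpp)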
| mit |
openhealthalgorithms/openhealthalgorithms | OHA/SgFramingham.py | 1 | 9372 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import pandas as pd
from OHA.Defaults import Defaults
from OHA.helpers.converters.CholesterolConverter import CholesterolConverter
from OHA.helpers.formatters.ParamFormatter import ParamFormatter
from OHA.param_builders.framingham_param_builder import FraminghamParamsBuilder
__author__ = 'fredhersch'
__email__ = '[email protected]'
class SgFramingham(object):
"""
Modified FRE based on the SG MoH CVD Guidelines
"""
__default_cholesterol_unit = 'mmol/l'
# co-efficients used in the calculation. See relevant paper
@staticmethod
def __get_co_efficient(key, gender):
return Defaults.co_efficients[key][gender]
# TODO: needs refactoring
@staticmethod
def __find_age_index(age, age_brackets):
age_index = '20-34'
for age_range in age_brackets:
min, max = age_range.split('-')
if int(min) <= age <= int(max):
age_index = age_range
return age_index
@staticmethod
def age_modifier_fre_points(age, gender):
age_brackets = ['20-34', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75-79']
age_points = {
"m": [-9, -4, 0, 3, 6, 8, 10, 11, 12, 13],
"f": [-7, -3, 0, 3, 6, 8, 10, 12, 14, 16]
}
value = None
bracket = SgFramingham.__find_age_index(age, age_brackets)
index = age_brackets.index(bracket)
gender = gender.lower()
if index >= 0:
value = age_points[gender][index]
return value
@staticmethod
def calculate_smoking_points(age, gender):
age_brackets = ['20-39', '40-49', '50-59', '60-69', '70-79']
smoking_points = {
"m": [8, 5, 3, 1, 0],
"f": [9, 7, 4, 2, 1]
}
# Based on the age, look up the index
# For the given index in the points array return the value
index = 0
value = None
gender = gender.lower()
bracket = SgFramingham.__find_age_index(age, age_brackets)
index = age_brackets.index(bracket)
# should check we have a valid index
if index >= 0:
value = smoking_points[gender.lower()][index]
return value
@staticmethod
def calculate_cholesterol_points(age, gender, total_cholesterol, hdl_cholesterol):
age_brackets = ['20-39', '40-49', '50-59', '60-69', '70-79']
if gender.lower() == 'm':
chol_points = np.array(
[[0, 0, 0, 0, 0], [4, 3, 2, 1, 0], [7, 5, 3, 1, 0], [9, 6, 4, 2, 1], [11, 8, 5, 3, 1]])
elif gender.lower() == 'f':
chol_points = np.array(
[[0, 0, 0, 0, 0], [4, 3, 2, 1, 1], [8, 6, 4, 2, 1], [11, 8, 5, 3, 2], [13, 10, 7, 4, 2]])
row_names = ['<4.1', '4.1-5.1', '5.2-6.1', '6.2-7.2', '>=7.3']
tchol_points_df = pd.DataFrame(chol_points, index=row_names, columns=age_brackets)
# first check the cholesterol range and get the row_index
if total_cholesterol < 4.1:
chol_range = '<4.1'
elif total_cholesterol <= 5.1:
chol_range = '4.1-5.1'
elif total_cholesterol <= 6.1:
chol_range = '5.2-6.1'
elif total_cholesterol <= 7.2:
chol_range = '6.2-7.2'
elif total_cholesterol >= 7.3:
chol_range = '>=7.3'
else:
chol_range = '>=7.3'
# then return the column index based on age range
age_index = SgFramingham.__find_age_index(age, age_brackets)
# look up the value from the df
# looking up with keys
cholesterol_points = tchol_points_df[age_index][chol_range]
if hdl_cholesterol < 1.0:
hdl_points = +2
elif hdl_cholesterol <= 1.2:
hdl_points = 1
elif hdl_cholesterol <= 1.5:
hdl_points = 0
        else:  # hdl_cholesterol above 1.5; use else so hdl_points is always defined
            hdl_points = -1
cholesterol_points = cholesterol_points + hdl_points
return cholesterol_points
@staticmethod
def calculate_bp_points(gender, sbp, sbp_rx):
row_names = ['<120', '120-129', '130-139', '140-159', '>=160']
col_names = ['treated', 'untreated']
if gender == 'm':
sbp_points = np.array([[0, 0], [0, 1], [1, 2], [1, 2], [2, 3]])
elif gender == 'f':
sbp_points = np.array([[0, 0], [1, 3], [2, 4], [3, 5], [4, 6]])
else:
sbp_points = np.array([[0, 0], [1, 3], [2, 4], [3, 5], [4, 6]])
bp_df = pd.DataFrame(sbp_points, index=row_names, columns=col_names)
if sbp < 120:
sbp_index = '<120'
elif sbp < 130:
sbp_index = '120-129'
elif sbp < 140:
sbp_index = '130-139'
elif sbp < 160:
sbp_index = '140-159'
elif sbp >= 160:
sbp_index = '>=160'
else:
sbp_index = '>=160'
if sbp_rx:
col_index = 'treated'
else:
col_index = 'untreated'
bp_points = bp_df[col_index][sbp_index]
return bp_points
@staticmethod
def calculate_fre_score(params):
# Unpack the parameters
gender = params.get('gender')
age = params.get('age')
ethnicity = params.get('ethnicity')
total_cholesterol = CholesterolConverter(params.get('total_cholesterol')) \
.from_unit(params.get('cholesterol_unit')) \
.to_unit(SgFramingham.__default_cholesterol_unit) \
.converted
hdl_cholesterol = CholesterolConverter(params.get('hdl_cholesterol')) \
.from_unit(params.get('cholesterol_unit')) \
.to_unit(SgFramingham.__default_cholesterol_unit) \
.converted
on_bp_medication = params.get('bp_medication')
systolic = params.get('systolic')
is_smoker = params.get('is_smoker')
# has_diabetes = params.get('has_diabetes')
age_points = SgFramingham().age_modifier_fre_points(age, gender)
if is_smoker:
smoking_points = SgFramingham().calculate_smoking_points(age, gender)
else:
smoking_points = 0
cholesterol_points = SgFramingham().calculate_cholesterol_points(
age, gender, total_cholesterol, hdl_cholesterol,
)
sbp_points = SgFramingham().calculate_bp_points(gender, systolic, on_bp_medication)
fre_points = int(age_points) + int(smoking_points) + int(cholesterol_points) + int(sbp_points)
# convert the points to a score based
col_names = ['chinese', 'malay', 'indian']
if gender == 'm':
filename = ('%s/sg_risk/%s' % (
os.path.dirname(os.path.realpath(__file__)),
'sg_10year_risk_male.csv',
))
else:
filename = ('%s/sg_risk/%s' % (
os.path.dirname(os.path.realpath(__file__)),
'sg_10year_risk_female.csv',
))
fre_pd = pd.read_csv(filename, header=0, index_col=0)
fre_risk = fre_pd[col_names]
# look up the risk score based on the dataframe
fre_risk_score = fre_risk[ethnicity][fre_points]
return fre_risk_score
@staticmethod
def cvd_risk_level(cvd_risk):
if cvd_risk < 10:
return 'Low'
elif cvd_risk < 15:
return 'Medium'
elif cvd_risk >= 15:
return 'High'
@staticmethod
def calculate(params):
"""
Parameters
----------
params: dict
Dictionary includes 'gender', 'ethnicity', age', 'total_cholesterol',
'hdl_cholesterol', 'systolic', 'on_bp_medication',
'is_smoker', 'has_diabetes'.
Example
-------
>>> params = {
... 'gender': 'M',
... 'age': 40,
... 'ethnicity': 'malay',
... 'total_cholesterol': 180,
... 'cholesterol_unit': 'mg/dl',
... 'hdl_cholesterol': 45,
... 'systolic': 125,
... 'on_bp_medication': False,
... 'is_smoker': False,
... 'has_diabetes': False,
... }
>>> SgFramingham().calculate(params)
Returns
-------
dict
Framingham risk score and heart age and risk_range
"""
params = ParamFormatter(params).formatted
print(params)
cvd_risk = int(SgFramingham().calculate_fre_score(params))
heart_age = None
# heart_age = SgFramingham.__calculate_heart_age(cvd_risk, params['gender'])
risk_range = SgFramingham().cvd_risk_level(cvd_risk)
return {
'raw_risk': float('%.4f' % (round(cvd_risk, 4))),
'risk': cvd_risk,
'heart_age': heart_age,
'risk_range': risk_range,
}
@staticmethod
def get_sample_params():
return FraminghamParamsBuilder() \
.gender('F') \
.age(40) \
.ethnicity('malay') \
.t_chol(170, 'mg/dl') \
.hdl_chol(45, 'mg/dl') \
.sbp(125) \
.build()
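if __name__ == '__main__':
    # Minimal usage sketch, assuming the builder returned by get_sample_params() yields a
    # parameter dict compatible with calculate(); requires the bundled sg_risk CSV tables.
    sample_params = SgFramingham.get_sample_params()
    print(SgFramingham().calculate(sample_params))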
| apache-2.0 |
anirudhjayaraman/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
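if __name__ == '__main__':
    # Hedged convenience entry point (an assumption -- the canonical way to run this module is
    # through the project's test runner); executes a few quick sanity checks directly.
    test_correct_shapes()
    test_perfect_signal_recovery()
    test_omp_reaches_least_squares()
    print('selected OMP sanity checks passed')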
| bsd-3-clause |
navrasio/mxnet | example/ssd/dataset/pycocotools/coco.py | 21 | 18778 | __author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
# from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
# rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
raise NotImplementedError("maskUtils disabled!")
else:
rle = [ann['segmentation']]
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
# ann['area'] = maskUtils.area(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
if not 'bbox' in ann:
# ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: RLE (run-length encoded segmentation mask)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, h, w)
# rle = maskUtils.merge(rles)
raise NotImplementedError("maskUtils disabled!")
elif type(segm['counts']) == list:
# uncompressed RLE
# rle = maskUtils.frPyObjects(segm, h, w)
raise NotImplementedError("maskUtils disabled!")
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
return m
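if __name__ == '__main__':
    # Hedged usage sketch of the API described in the header comments; the annotation path is a
    # placeholder (assumption) and must point at a real COCO annotation file on disk.
    annFile = 'annotations/instances_val2014.json'  # placeholder path
    coco = COCO(annFile)
    catIds = coco.getCatIds(catNms=['person'])
    imgIds = coco.getImgIds(catIds=catIds)
    print('{} images contain the selected category'.format(len(imgIds)))
    anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds, iscrowd=None))
    print('{} annotations loaded for the first image'.format(len(anns)))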
| apache-2.0 |
shanqing-cai/MRI_analysis | gen_bips_aparc12_time_series.py | 1 | 16144 | #!/usr/bin/python
import os
import sys
import glob
import pickle
import argparse
import numpy as np
import scipy.stats as stats
from subprocess import Popen, PIPE
from scai_utils import read_ctab
#import matplotlib.pyplot as plt
DATA_dir = '/users/cais/STUT/DATA'
FSDATA_dir = '/users/cais/STUT/FSDATA'
ANALYSIS_DIR = '/users/cais/STUT/analysis'
bips_resting_dir = '/users/cais/STUT/analysis'
bips_resting_dir_2 = "/users/cais/STUT/analysis/resting_bips_2"
#ASAP_TABLE = '/software/atlas/ASAP_labels.txt'
APARC12_TABLE = '/users/cais/STUT/slFRS17.ctab'
ASAP_SC_TABLE = '/software/atlas/ASAP_subcortical_labels.txt'
HEMIS = ['lh', 'rh']
def saydo(cmd):
print('\n%s\n'%cmd)
os.system(cmd)
def get_roi_ids(asap_table, sc_table):
#=== Cortical ROIs ===#
tablef = open(asap_table, 'r')
txt = tablef.read()
tablef.close()
txt = txt.split('\n')
t_rois = []
t_ids = []
for t in txt:
if len(t) == 0:
continue
tt = t.split(' ')
while tt.count('') > 0:
tt.remove('')
if len(tt) == 0:
continue
if tt[1] == 'None' or tt[1] == 'White' or tt[1] == 'Gray' \
or tt[1] == 'CN' or tt[1].startswith('None') \
or tt[1] == 'Unknown':
continue
t_rois.append(tt[1])
t_ids.append(tt[0])
s_rois = t_rois
s_ids = t_ids
b_rois = []
b_ids = []
#=== Subcortical ROIs ===#
tablef = open(sc_table, 'r')
txt = tablef.read()
tablef.close()
txt = txt.split('\n')
t_rois = []
t_ids = []
for t in txt:
if len(t) == 0:
continue
t = t.replace('\t', ' ')
tt = t.split(' ')
while tt.count('') > 0:
tt.remove('')
if len(tt) == 0:
continue
if tt[1] == 'Unknown' or tt[1].count("Vent") == 1 \
or tt[1].count("White-Matter") == 1 \
or tt[1] == "Brain-Stem" \
or tt[1].count("Accumbens") == 1:
continue
if tt[1].count("Left-") == 1:
b_rois.append(tt[1].replace('Left-', 'lh_'))
elif tt[1].count("Right-") == 1:
b_rois.append(tt[1].replace('Right-', 'rh_'))
b_ids.append(int(tt[0]))
return (s_rois, s_ids, b_rois, b_ids)
if __name__ == '__main__':
'''
if len(sys.argv) < 3:
print('Usage: gen_bips_aparc12_time_series.py sID imgMode [opts]')
print(' imgMode = {fullspectrum | z_no_outliers_bandpassed}')
print(' opts = {-altaparc}')
print(' -altaparc: specify an alternative aparc file, other than DATA/aparc12.nii.gz')
sys.exit(0)
'''
parser = argparse.ArgumentParser(description= "Generate resting-state fMRI ROI time series based on the aparc12 parcellation")
parser.add_argument("sID", type=str, help="Subject ID")
parser.add_argument("imgMode", type=str, help="Image mode: {fullspectrum, z_no_outliers_bandpassed, z_no_outliers_bandpassed2, bpnrm, bpnrm2, bp2}")
parser.add_argument("--altaparc", dest="altaparc", default="", \
help="Alternative aparc12 file name")
parser.add_argument("--cuthead", dest="cuthead", default="", \
help="Remove the first specified number of runs")
parser.add_argument("--rebinarize", dest="rebinarize", action="store_true",\
help="Force re-generation of ROI masks with mri_binarize")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
sID = args.sID
imgMode = args.imgMode
altaparc = args.altaparc
cuthead = args.cuthead
rebinarize = args.rebinarize
if len(cuthead) == 0:
cuthead = 0
else:
cuthead = int(cuthead)
if cuthead < 0:
            raise ValueError, "cuthead must be a non-negative integer."
#sID = sys.argv[1]
#imgMode = sys.argv[2]
# ctabfn = os.path.join(FSDATA_dir, sID, 'label', 'aparc.annot.ctab')
ctabfn = APARC12_TABLE
'''
[ids, rois] = read_ctab(ctabfn)
sys.exit(0)
# Expand the cortical rois into both hemispheres
c_rois = []
c_ids = []
for (i0, hemi) in enumerate(HEMIS):
for (i1, roi) in enumerate(rois):
c_rois.append('%s_%s'%(hemi, roi))
c_ids.append(1000 + 1000 * i0 + ids[i1])
'''
#hemi = sys.argv[2]
print('sID = %s\n'%sID)
#print('hemi = %s'%hemi)
print("altaparc = %s\n"%altaparc)
if len(altaparc) == 0:
aparc_fn = os.path.join(DATA_dir, sID, 'aparc12.nii.gz')
else:
aparc_fn = altaparc
print("INFO: Using altaparc = %s"%aparc_fn)
if not os.path.isfile(aparc_fn):
raise IOError, 'aparc file not found: %s'%aparc_fn
print('aparc_fn = %s'%aparc_fn)
if imgMode == 'z_no_outliers_bandpassed':
resting_4d_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'output', 'zscored', 'fwhm_5.0', \
'%s_r00_z_no_outliers_bandpassed.nii.gz'%sID)
elif imgMode == "z_no_outliers_bandpassed2":
bips_resting_dir = bips_resting_dir_2
resting_4d_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'output', 'zscored', 'fwhm_0.0', \
'%s_r00_z_no_outliers_bandpassed.nii.gz'%sID)
elif imgMode == 'fullspectrum':
resting_4d_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'output', 'fullspectrum', 'fwhm_5.0', \
'%s_r00_fullspectrum.nii'%sID)
elif imgMode == "bpnrm":
resting_4d_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'output', 'bandpassed', \
'fwhm_0.0', "%s_r00_bandpassed.nii"%(sID))
elif imgMode == "bpnrm2":
bips_resting_dir = bips_resting_dir_2
resting_4d_fn = os.path.join(bips_resting_dir, sID, \
"preproc", "output", "bandpassed", \
"fwhm_0.0", "%s_r00_bandpassed.nii.gz"%(sID))
elif imgMode == "bp2":
bips_resting_dir = bips_resting_dir_2
resting_4d_fn = os.path.join(bips_resting_dir, sID, \
"preproc", "output", "bandpassed", \
"fwhm_0.0", "%s_r00_bandpassed.nii.gz"%(sID))
else:
raise ValueError, 'Invalid imgMode: %s'%imgMode
if not os.path.isfile(resting_4d_fn):
raise IOError, '4D resting func file not found: %s'%resting_4d_fn
print('resting_4d_fn = %s'%resting_4d_fn)
if imgMode.endswith('2'):
resting_mean_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'mean', '%s_mean.nii.gz'%sID)
else:
resting_mean_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'mean', '%s_mean.nii'%sID)
if not os.path.isfile(resting_mean_fn):
raise IOError, 'mean resting func file not found: %s'%resting_mean_fn
print('resting_mean_fn = %s'%resting_mean_fn)
bbreg_fsl_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'bbreg', '%s_register.mat'%sID)
if not os.path.isfile(bbreg_fsl_fn):
raise IOError, 'FSL-format bbreg file not found: %s'%bbreg_fsl_fn
print('bbreg_fsl_fn = %s'%bbreg_fsl_fn)
bbreg_inv_fsl_fn = os.path.join(bips_resting_dir, sID, \
'preproc', 'bbreg', '%s_register_struct2func.mat'%sID)
inv_xfm_cmd = 'convert_xfm -omat %s -inverse %s'%(bbreg_inv_fsl_fn, bbreg_fsl_fn)
saydo(inv_xfm_cmd)
#sys.exit(0)
# Transform the aparc file to the resting-func space
tmp_dir = os.path.join(ANALYSIS_DIR, sID, 'masks12')
if not os.path.isdir(tmp_dir):
os.system('mkdir -p %s'%tmp_dir)
print('Created directory %s'%tmp_dir)
else:
#os.system('rm -r %s/*'%tmp_dir)
#print('Cleaned directory %s'%tmp_dir)
print("Directory already exists: %s"%tmp_dir)
aparc_func_fn = os.path.join(tmp_dir, 'aparc_func.nii.gz')
xfm_cmd = 'flirt -in %s -ref %s -applyxfm -init %s -out %s -interp nearestneighbour'\
%(aparc_fn, resting_mean_fn, bbreg_inv_fsl_fn, aparc_func_fn)
saydo(xfm_cmd)
# Generate the list of cortical and subcortical ROIs
(rois, ids, sc_rois, sc_ids) = get_roi_ids(APARC12_TABLE, ASAP_SC_TABLE)
c_rois = []
c_ids = []
for (i0, hemi) in enumerate(HEMIS):
for (i1, roi) in enumerate(rois):
c_rois.append('%s_%s'%(hemi, roi))
c_ids.append(1000 + 1000 * i0 + int(ids[i1]))
#
b_rois = c_rois + sc_rois
b_ids = c_ids + sc_ids
nc = len(c_rois) # Number of cortical ROIs
# Determine the number of frames in the 4D resting fMRI file
(stdout, stderr) = Popen(['mri_info', resting_4d_fn, '-P', '100'], \
stdout=PIPE).communicate()
stdout = stdout.split('\n')
bFound = False
for t_line in stdout:
if t_line.count('dimensions:') == 1:
bFound = True
nFrames = int(t_line.split(' ')[-1])
if not bFound:
raise ValueError, 'Unable to get fMRI series number of frames.'
# Determine the outliers, if bpnrm mode is used
if imgMode == "bpnrm":
art_fn = os.path.join(bips_resting_dir, sID, 'preproc', 'art', \
'art._restingunwarped_outliers.txt')
else:
art_fn = os.path.join(bips_resting_dir, sID, 'preproc', 'art', \
'art._restingunwarped.nii_outliers.txt')
if not os.path.isfile(art_fn):
raise IOError, "Cannot find art outliers file: %s"%art_fn
print("art_fn = %s"%art_fn)
art_f = open(art_fn, 'r')
art_txt = art_f.read().split('\n')
art_f.close()
outliers = []
for t_line in art_txt:
if len(t_line) > 0:
outliers.append(int(t_line))
if imgMode == "bpnrm" or imgMode == "bpnrm2":
print("%d outliers found."%(len(outliers)))
nFrames = nFrames - len(outliers)
if len(outliers) > 0:
print("nFrames --> %s"%(nFrames))
# Process cut-head frames
if cuthead > 0:
if imgMode == "bpnrm" or imgMode == "bpnrm2":
            raise Exception, "Currently, cuthead mode is not supported under bpnrm or bpnrm2 mode"
chframes = []
if cuthead >= nFrames:
raise ValueError, "cuthead = %d >= nFrames = %d"%(cuthead, nFrames)
for i0 in range(cuthead):
chframes.append(i0)
# Remove outlier time points that have already been removed by bips
for olr in outliers:
if chframes.count(olr) == 1:
chframes.remove(olr)
print("Removing outlier %d from chframes"%(olr))
chframes0 = chframes
chframes = []
for i0 in range(len(chframes0)):
chframes.append(i0)
nFrames = nFrames - len(chframes)
print("cuthead = %d: nFreames: %d --> %d"%(cuthead, nFrames + len(chframes), nFrames))
else:
chframes = []
# Calculate the frame-by-frame in-brain mean intensity, for normalization
if imgMode == "bpnrm" or imgMode == "bpnrm2":
brainmean = np.zeros([nFrames])
brainmask = os.path.join(bips_resting_dir, sID, 'preproc', 'mask', \
"%s_brainmask.nii"%(sID))
if not os.path.isfile(brainmask):
            raise IOError, "Cannot find brain mask: %s"%(brainmask)
masked_mean_cmd = "fslstats -t %s -k %s -m"%(resting_4d_fn, brainmask)
(stdout, stderr) = Popen(masked_mean_cmd.split(' '), \
stdout=PIPE, stderr=PIPE).communicate()
meantxt = stdout.split('\n')
cnt = 0
for j0 in range(nFrames + len(outliers)):
if outliers.count(j0) == 1:
print("Skipping frame j0 = %d"%(j0))
continue
else:
brainmean[cnt] = float(meantxt[j0])
cnt = cnt + 1
if len(np.nonzero(brainmean == 0)[0]) > 0:
raise Exception, "Failed to calculate brain-wise intensity mean for all frames."
nROIs = len(b_rois)
bold_tab = np.array([[np.nan] * nFrames] * nROIs)
for (i1, t_roi) in enumerate(b_rois):
t_mask_fn = os.path.join(tmp_dir, 'mask_%s_rf.nii.gz'%t_roi)
t_id = b_ids[i1]
if os.path.isfile(t_mask_fn) and (not rebinarize):
print('INFO: mask file already exists: %s'%t_mask_fn)
else:
binarize_cmd = 'mri_binarize --i %s --min %d --max %d --o %s'\
%(aparc_func_fn, t_id, t_id, t_mask_fn)
saydo(binarize_cmd)
#if t_id < 100:
# sys.exit(0)
tmp_4d_fn = os.path.join(tmp_dir, 'tmp_4d_%s.nii.gz'%imgMode)
multiply_cmd = 'fslmaths %s -mul %s %s'\
%(resting_4d_fn, t_mask_fn, tmp_4d_fn)
saydo(multiply_cmd)
#mean_cmd = 'fslstats -t %s -M'
        print('Extracting ROI-mean time course from ROI %s (%d) (%d / %d = %f%%)... \n'
%(t_roi, t_id, i1, nROIs, float(i1) / float(nROIs) * 1e2))
(stdout, stderr) = Popen(['fslstats', '-t', tmp_4d_fn, '-M'],
stdout=PIPE).communicate()
t_bold_sig = stdout.split('\n')
if (imgMode == "bpnrm" or imgMode == "bpnrm2") and len(outliers) > 0:
t_bold_sig_0 = t_bold_sig
t_bold_sig = []
for (j0, t_val) in enumerate(t_bold_sig_0):
if outliers.count(j0) == 1:
continue
else:
t_bold_sig.append(t_val)
elif len(chframes) > 0:
t_bold_sig_0 = t_bold_sig
t_bold_sig = []
for (j0, t_val) in enumerate(t_bold_sig_0):
if chframes.count(j0) == 1:
print("Cut head: skipping frame %d"%(j0))
continue
else:
t_bold_sig.append(t_val)
if imgMode == "bpnrm" or imgMode == "bpnrm2" :
# Do intensity normalization
t_sig = []
for (j0, t_val) in enumerate(t_bold_sig):
if len(t_val) > 0:
t_sig.append(float(t_val))
t_sig = np.array(t_sig)
t_sig = t_sig / brainmean - 1
t_sig = t_sig - np.mean(t_sig)
for j1 in range(nFrames):
bold_tab[i1][j1] = t_sig[j1]
else:
for j1 in range(nFrames):
bold_tab[i1][j1] = float(t_bold_sig[j1])
# Write immediate result to disk
bold_tab_tmp_fn = os.path.join(tmp_dir, 'bold_tab_aparc12_tmp.pkl')
fout = open(bold_tab_tmp_fn, 'wb')
pickle.dump(bold_tab, fout)
fout.close()
print('bold_tab saved to (pickle) %s\n'%bold_tab_tmp_fn)
# Compute the pairwise correlations
corr_tab = np.array([[np.nan] * nROIs] * nROIs)
for i0 in range(nROIs):
for i1 in range(nROIs):
if (i1 <= i0):
continue
t_x = bold_tab[i0]
t_y = bold_tab[i1]
cc = np.corrcoef(t_x, t_y)
corr_tab[i0][i1] = cc[0][1]
#sys.exit(0)
# Save final results to disk
bips_resting_roi_corr = {'b_rois': b_rois, \
'b_ids': b_ids, \
'bold_tab': bold_tab, \
'corr_tab': corr_tab}
out_fn = os.path.join(ANALYSIS_DIR, sID, 'roi_corr_aparc12.csc.%s.pkl'%imgMode)
if cuthead > 0:
out_fn = out_fn.replace(".pkl", ".ch%d.pkl"%(cuthead))
fout = open(out_fn, 'wb')
pickle.dump(bips_resting_roi_corr, fout)
fout.close()
print('Final results saved to (pickle) %s\n'%out_fn)
| bsd-3-clause |
aerler/GeoPy | src/datasets/ERA5.py | 1 | 20429 | '''
Created on Nov. 07, 2020
A module to read ERA5 data; this includes converting GRIB files to NetCDF-4,
as well as functions to load the converted and aggregated data.
@author: Andre R. Erler, GPL v3
'''
# external imports
import os.path as osp
import pandas as pd
import numpy as np
import netCDF4 as nc # netCDF4-python module
import xarray as xr
from collections import namedtuple
# internal imports
from datasets.common import getRootFolder
from geodata.gdal import GridDefinition
from datasets.misc import loadXRDataset, getFolderFileName
from geospatial.netcdf_tools import autoChunk
## Meta-vardata
dataset_name = 'ERA5'
root_folder = getRootFolder(dataset_name=dataset_name, fallback_name='NRCan') # get dataset root folder based on environment variables
# SnoDAS grid definition
projdict = dict(proj='longlat',lon_0=0,lat_0=0,x_0=0,y_0=0) # wraps at dateline
proj4_string = '+proj=longlat +ellps=WGS84 +datum=WGS84 +lon_0=0 +lat_0=0 +x_0=0 +y_0=0 +name={} +no_defs'.format(dataset_name)
# ERA5-Land
ERA5Land_geotransform = (-180, 0.1, 0, -90, 0, 0.1)
ERA5Land_size = (3600,1800) # (x,y) map size of grid
ERA5Land_grid = GridDefinition(name=dataset_name, projection=None, geotransform=ERA5Land_geotransform, size=ERA5Land_size)
# southern Ontario
SON10_geotransform = (-85, 0.1, 0, 41, 0, 0.1)
SON10_size = (111,61) # (x,y) map size of grid
SON10_grid = GridDefinition(name=dataset_name, projection=None, geotransform=SON10_geotransform, size=SON10_size)
varatts_list = dict()
# attributes of variables in ERA5-Land
varatts_list['ERA5L'] = dict(# forcing/flux variables
tp = dict(name='precip', units='kg/m^2/s',scalefactor=1000./86400., long_name='Total Precipitation'), # units of meters water equiv. / day
pev = dict(name='pet_era5', units='kg/m^2/s',scalefactor=-1000./86400., long_name='Potential Evapotranspiration'), # units of meters water equiv. / day; negative values
# state variables
sd = dict(name='snow', units='kg/m^2', scalefactor=1.e3, long_name='Snow Water Equivalent'), # units of meters water equivalent
# axes (don't have their own file)
time_stamp = dict(name='time_stamp', units='', long_name='Time Stamp'), # readable time stamp (string)
time = dict(name='time', units='days', long_name='Days'), # time coordinate
lon = dict(name='lon', units='deg', long_name='Longitude'), # geographic longitude
lat = dict(name='lat', units='deg', long_name='Latitude'), # geographic latitude
# derived variables
dswe = dict(name='dswe',units='kg/m^2/s', long_name='SWE Changes'),
liqwatflx = dict(name='liqwatflx', units='kg/m^2/s', long_name='Liquid Water Flux'),
)
# list of variables to load
default_varlists = {name:[atts['name'] for atts in varatts.values()] for name,varatts in varatts_list.items()}
# list of sub-datasets/subsets with titles
DSNT = namedtuple(typename='Dataset', field_names=['name','interval','resolution','title',])
dataset_attributes = dict(ERA5L = DSNT(name='ERA5L',interval='1h', resolution=0.1, title='ERA5-Land',), # downscaled land reanalysis
ERA5S = DSNT(name='ERA5S',interval='1h', resolution=0.3, title='ERA5-Sfc',), # regular surface; not verified
ERA5A = DSNT(name='ERA5A',interval='6h', resolution=0.3, title='ERA5-Atm',),) # regular 3D; not verified
# settings for NetCDF-4 files
avgfolder = root_folder + dataset_name.lower()+'avg/'
avgfile = 'era5{0:s}_clim{1:s}.nc' # the filename needs to be extended: biascorrection, grid and period
tsfile = 'era5_{0:s}{1:s}{2:s}_monthly.nc' # extend with biascorrection, variable and grid type
daily_folder = root_folder + dataset_name.lower()+'_daily/'
netcdf_filename = 'era5_{:s}_daily.nc' # extend with variable name
netcdf_dtype = np.dtype('<f4') # little-endian 32-bit float
netcdf_settings = dict(chunksizes=(8,ERA5Land_size[0]//16,ERA5Land_size[1]//32))
## functions to load NetCDF datasets (using xarray)
def loadERA5_Daily(varname=None, varlist=None, dataset=None, subset=None, grid=None, resolution=None, shape=None, station=None,
resampling=None, varatts=None, varmap=None, lgeoref=True, geoargs=None, lfliplat=False, aggregation='daily',
mode='daily', chunks=True, multi_chunks=None, lxarray=True, lgeospatial=True, **kwargs):
''' function to load daily ERA5 data from NetCDF-4 files using xarray and add some projection information '''
if not ( lxarray and lgeospatial ):
raise NotImplementedError("Only loading via geospatial.xarray_tools is currently implemented.")
if dataset and subset:
if dataset != subset:
raise ValueError((dataset,subset))
elif dataset and not subset:
subset = dataset
if resolution is None:
if grid and grid[:3] in ('son','snw',): resolution = 'SON60'
else: resolution = 'NA10' # default
if varatts is None:
if grid is None and station is None and shape is None: varatts = varatts_list[subset] # original files
    default_varlist = default_varlists.get(subset, None)
xds = loadXRDataset(varname=varname, varlist=varlist, dataset='ERA5', subset=subset, grid=grid, resolution=resolution, shape=shape,
station=station, default_varlist=default_varlist, resampling=resampling, varatts=varatts, varmap=varmap, mode=mode,
aggregation=aggregation, lgeoref=lgeoref, geoargs=geoargs, chunks=chunks, multi_chunks=multi_chunks, **kwargs)
# flip latitude dimension
if lfliplat and 'latitude' in xds.coords:
xds = xds.reindex(latitude=xds.latitude[::-1])
# update name and title with sub-dataset
xds.attrs['name'] = subset
xds.attrs['title'] = dataset_attributes[subset].title + xds.attrs['title'][len(subset)-1:]
return xds
## Dataset API
dataset_name # dataset name
root_folder # root folder of the dataset
orig_file_pattern = netcdf_filename # filename pattern: variable name (daily)
ts_file_pattern = tsfile # filename pattern: variable name and grid
clim_file_pattern = avgfile # filename pattern: grid and period
data_folder = avgfolder # folder for user data
grid_def = {'':ERA5Land_grid} # no special name, since there is only one...
LTM_grids = [] # grids that have long-term mean data
TS_grids = ['',] # grids that have time-series data
grid_res = {res:0.25 for res in TS_grids} # no special name, since there is only one...
default_grid = ERA5Land_grid
# functions to access specific datasets
loadLongTermMean = None # climatology provided by publisher
loadDailyTimeSeries = loadERA5_Daily # daily time-series data
# monthly time-series data for batch processing
def loadTimeSeries(lxarray=False, **kwargs): raise NotImplementedError("monthly time-series loading is not implemented (lxarray={}, kwargs={})".format(lxarray, kwargs))
loadClimatology = None # pre-processed, standardized climatology
loadStationClimatology = None # climatologies without associated grid (e.g. stations)
loadStationTimeSeries = None # time-series without associated grid (e.g. stations)
loadShapeClimatology = None # climatologies without associated grid (e.g. provinces or basins)
loadShapeTimeSeries = None # time-series without associated grid (e.g. provinces or basins)
## abuse for testing
if __name__ == '__main__':
import time, gc, os
#print('xarray version: '+xr.__version__+'\n')
xr.set_options(keep_attrs=True)
# import dask
# from dask.distributed import Client, LocalCluster
# # force multiprocessing (4 cores)
# cluster = LocalCluster(n_workers=2, memory_limit='1GB')
# cluster = LocalCluster(n_workers=4, memory_limit='6GB')
# cluster = LocalCluster(n_workers=1)
# client = Client(cluster)
modes = []
# modes += ['load_Point_Climatology']
# modes += ['load_Point_Timeseries']
modes += ['derived_variables' ]
# modes += ['load_Daily' ]
# modes += ['monthly_mean' ]
# modes += ['load_TimeSeries' ]
# modes += ['monthly_normal' ]
# modes += ['load_Climatology' ]
grid = None; resampling = None
dataset = 'ERA5L'
# resolution = 'SON10'
resolution = 'NA10'
# resolution = 'AU10'
# variable list
# varlist = ['snow']
varlist = ['snow','dswe','precip','pet_era5','liqwatflx']
# period = (2010,2019)
# period = (1997,2018)
# period = (1980,2018)
# loop over modes
for mode in modes:
if mode == 'load_Climatology':
pass
# lxarray = False
# ds = loadERA5(varlist=varlist, period=period, grid=grid,
# lxarray=lxarray) # load regular GeoPy dataset
# print(ds)
# print('')
# varname = list(ds.variables.keys())[0]
# var = ds[varname]
# print(var)
#
# if lxarray:
# print(('Size in Memory: {:6.1f} MB'.format(var.nbytes/1024./1024.)))
elif mode == 'load_Point_Climatology':
pass
# # load point climatology
# print('')
# if pntset in ('shpavg','glbshp'): dataset = loadERA5_Shp(shape=pntset, period=(2009,2018))
# elif pntset in ('oncat'): dataset = loadERA5_Shp(shape=pntset, grid=grid, period=(2011,2019))
# else: raise NotImplementedError(pntset)
# print(dataset)
# print('')
# print((dataset.time))
# print((dataset.time.coord))
elif mode == 'load_Point_Timeseries':
pass
# # load point climatology
# print('')
# if pntset in ('oncat'): dataset = loadERA5_ShpTS(shape=pntset, grid=grid, )
# else: raise NotImplementedError(pntset)
# print(dataset)
# print('')
# print((dataset.time))
# print((dataset.time.coord))
elif mode == 'monthly_normal':
pass
elif mode == 'load_TimeSeries':
pass
# lxarray = False
# varname = varlist[0]
# xds = loadERA5_TS(varlist=varlist,
# grid=grid, lxarray=lxarray, geoargs=geoargs) # 32 time chunks may be possible
# print(xds)
# print('')
# xv = xds[varname]
# print(xv)
# if lxarray:
# print(('Size in Memory: {:6.1f} MB'.format(xv.nbytes/1024./1024.)))
elif mode == 'monthly_mean':
pass
elif mode == 'load_Daily':
varlist = ['snow','dswe']
xds = loadERA5_Daily(varlist=varlist, resolution=resolution, dataset=None, subset='ERA5L', grid=grid,
chunks=True, lgeoref=True)
print(xds)
# print('')
xv = xds.data_vars['snow']
# # xv = list(xds.data_vars.values())[0]
xv = xv.loc['2011-06-01':'2012-06-01',:,:]
# # xv = xv.loc['2011-01-01',:,:]
print(xv)
print(xv.mean())
print(('Size in Memory: {:6.1f} MB'.format(xv.nbytes/1024./1024.)))
elif mode == 'derived_variables':
start = time.time()
lexec = True
lappend_master = False
ts_name = 'time_stamp'
dataset = 'ERA5L'
load_chunks = True
# load variables
# derived_varlist = ['dswe',]; load_list = ['snow']
derived_varlist = ['liqwatflx',]; load_list = ['dswe', 'precip']
varatts = varatts_list[dataset]
xds = loadERA5_Daily(varlist=load_list, subset=dataset, resolution=resolution, grid=grid,
chunks=load_chunks, lfliplat=False)
# N.B.: need to avoid loading derived variables, because they may not have been extended yet (time length)
print(xds)
# optional slicing (time slicing completed below)
start_date = None; end_date = None # auto-detect available data
# start_date = '2011-01-01'; end_date = '2011-01-08'
# slice and load time coordinate
xds = xds.loc[{'time':slice(start_date,end_date),}]
if ts_name in xds:
tsvar = xds[ts_name].load()
else:
tax = xds.coords['time']
ts_data = [pd.to_datetime(dt).strftime('%Y-%m-%d_%H:%M:%S') for dt in tax.data]
tsvar = xr.DataArray(data=ts_data, coords=(tax,), name='time_stamp', attrs=varatts['time_stamp'])
# loop over variables
for varname in derived_varlist:
# target dataset
lskip = False
folder,filename = getFolderFileName(varname=varname, dataset='ERA5', subset=dataset, resolution=resolution, grid=grid,
resampling=resampling, mode='daily', lcreateFolder=True)
nc_filepath = '{}/{}'.format(folder,filename)
if lappend_master and osp.exists(nc_filepath):
ncds = nc.Dataset(nc_filepath, mode='a')
ncvar3 = ncds[varname]
ncts = ncds[ts_name]
nctc = ncds['time'] # time coordinate
# update start date for after present data
start_date = pd.to_datetime(ncts[-1]) + pd.to_timedelta(1,unit='D')
if end_date is None: end_date = tsvar.data[-1]
end_date = pd.to_datetime(end_date)
if start_date > end_date:
print(("\nNothing to do - timeseries complete:\n {} > {}".format(start_date,end_date)))
ncds.close()
lskip = True
else:
lappend = True
# update slicing (should not do anything if sliced before)
print(("\n Appending data from {} to {}.\n".format(start_date.strftime("%Y-%m-%d"),end_date.strftime("%Y-%m-%d"))))
xds = xds.loc[{'time':slice(start_date,end_date),}]
tsvar = tsvar.loc[{'time':slice(start_date,end_date),}]
else:
lappend = False
if not lskip:
print('\n')
default_varatts = varatts[varname] # need to ensure netCDF compatibility
## define actual computation
if varname == 'liqwatflx':
ref_var = xds['precip']; note = "masked/missing values have been replaced by zero"
xvar = ref_var.fillna(0) - xds['dswe'].fillna(0) # fill missing values with zero
# N.B.: missing values are NaN in xarray; we need to fill with 0, or masked/missing values
# in snowmelt will mask/invalidate valid values in precip
elif varname == 'dswe':
ref_var = xds['snow']; note = "Rate of Daily SWE Changes"
assert ref_var.attrs['units'] == 'kg/m^2', ref_var.attrs['units']
#xvar = ref_var.differentiate('time', datetime_unit='s')
xvar = ref_var.diff('time', n=1) / 86400 # per second
# shift time axis
time_axis = xvar.coords['time'].data - np.timedelta64(1,'D')
xvar = xvar.assign_coords(time=time_axis).broadcast_like(ref_var)
# define/copy metadata
xvar.attrs = ref_var.attrs.copy()
xvar = xvar.rename(varname)
for att in ('name','units','long_name',): # don't copy scale factors etc...
if att in default_varatts: xvar.attrs[att] = default_varatts[att]
assert xvar.attrs['name'] == xvar.name, xvar.attrs
for att in list(xvar.attrs.keys()):
if att.startswith('old_') or att in ('original_name','standard_name'):
del xvar.attrs[att] # does not apply anymore
xvar.attrs['note'] = note
# set chunking for operation
chunks = ref_var.encoding['chunksizes'] if load_chunks is True else load_chunks.copy()
if chunks:
if isinstance(chunks,dict):
chunks = tuple(chunks[dim] for dim in xvar.dims)
xvar = xvar.chunk(chunks=chunks)
print('Chunks:',xvar.chunks)
# # visualize task graph
# viz_file = daily_folder+'dask_sum.svg'
# xvar3.data.visualize(filename=viz_file)
# print(viz_file)
## now save data, according to destination/append mode
if lappend:
# append results to an existing file
print('\n')
# define chunking
offset = ncts.shape[0]; t_max = offset + tsvar.shape[0]
tc,yc,xc = xvar.chunks # starting points of all blocks...
tc = np.concatenate([[0],np.cumsum(tc[:-1], dtype=np.int)])
yc = np.concatenate([[0],np.cumsum(yc[:-1], dtype=np.int)])
xc = np.concatenate([[0],np.cumsum(xc[:-1], dtype=np.int)])
# xvar3 = xvar3.chunk(chunks=(tc,xvar3.shape[1],xvar3.shape[2]))
# function to save each block individually (not sure if this works in parallel)
dummy = np.zeros((1,1,1), dtype=np.int8)
def save_chunk(block, block_id=None):
ts = offset + tc[block_id[0]]; te = ts + block.shape[0]
ys = yc[block_id[1]]; ye = ys + block.shape[1]
xs = xc[block_id[2]]; xe = xs + block.shape[2]
#print(((ts,te),(ys,ye),(xs,xe)))
#print(block.shape)
ncvar3[ts:te,ys:ye,xs:xe] = block
return dummy
# append to NC variable
xvar.data.map_blocks(save_chunk, chunks=dummy.shape, dtype=dummy.dtype).compute() # drop_axis=(0,1,2),
# update time stamps and time axis
nctc[offset:t_max] = np.arange(offset,t_max)
for i in range(tsvar.shape[0]): ncts[i+offset] = tsvar.data[i]
ncds.sync()
print('\n')
print(ncds)
ncds.close()
del xvar, ncds
else:
# save results in new file
nds = xr.Dataset({ts_name:tsvar, varname:xvar,}, attrs=xds.attrs.copy())
nds.coords['time'].attrs.pop('units',None) # needs to be free for use by xarray
print('\n')
print(nds)
print(nc_filepath)
# write to NetCDF
tmp_filepath = nc_filepath + '.tmp' # use temporary file during creation
var_enc = dict(chunksizes=chunks, zlib=True, complevel=1, _FillValue=np.NaN, dtype=netcdf_dtype)
task = nds.to_netcdf(tmp_filepath, mode='w', format='NETCDF4', unlimited_dims=['time'], engine='netcdf4',
encoding={varname:var_enc,}, compute=False)
if lexec:
task.compute()
else:
print(var_enc)
print(task)
task.visualize(filename=folder+'netcdf.svg') # This file is never produced
del nds, xvar
# replace original file
if os.path.exists(nc_filepath): os.remove(nc_filepath)
os.rename(tmp_filepath, nc_filepath)
# clean up
gc.collect()
# print timing
end = time.time()
print(('\n Required time: {:.0f} seconds\n'.format(end-start)))
| gpl-3.0 |
tdhopper/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
ogaway/WillowProject | BinaryOption/binaryoption.py | 1 | 7138 | # coding: UTF-8
from willow.willow import *
from wifunc import *
import numpy as np
from pandas import *
import matplotlib.pyplot as plt
def session(me):
    # The number of exchange-rate updates (ts) can be changed here
ts = 50
if me == 0:
add("<h1>バイナリーオプション モニター</h1>")
numset()
take({"client": me})
plnum = get_num()
numset_end(plnum)
wiput(plnum, {"tag": "a"})
wait(1)
witake(plnum, {"tag": "b"})
waithide(1)
start()
take({"client": me})
hide("#start")
wiput(plnum, {"tag": "c"})
data = [100]
time = [0]
def ping():
put({"client": me})
wiput(plnum, {"tag": "d", "data": data[0], "time": time})
plt.plot(time, data)
plt.ylim( 90, 110)
plt.xlim(0, 50)
plt.savefig("img/image.png")
add("<p id='info%s'>%s回目 : %s円</p>" % (0, 1, data[0]))
add("<img id='img%s' src='img/image.png'>" % 0)
background(ping, 4)
take({"client": me})
for i in range(ts-1):
data.append(np.random.randn()+data[i])
time.append(i+1)
wiput(plnum, {"tag": "d", "data": data[i+1], "time": time})
plt.plot(time, data, color="k")
plt.savefig("img/image.png")
hide("#info%s" % (i))
hide("#img%s" % (i))
add("<p id='info%s'>%s回目 : %s円</p>" % (i+1, i+2, data[i+1]))
add("<img id='img%s' src='img/image.png'>" % (i+1))
background(ping, 4)
take({"client": me})
add("<p>これで実験を終了します。</p>")
else:
add("<h1>バイナリーオプション クライアントNo.%s</h1>" % me)
wait(1)
        # Experiment instructions begin (a)
take({"tag": "a"})
add(open("binary.html"))
add(ts, "#ts1")
add(ts-6, "#ts2")
waithide(1)
ready()
        # Participant confirms understanding of the experiment (b)
take({"client": me})
put({"tag": "b"})
hide("#ready")
wait(2)
        # Experiment start (c)
take({"tag": "c"})
waithide(2)
show("#start")
        # Prepare helper functions and variables
def set_input(a):
add("<p id='input%s'>賭金<input type='text' id='bet%s' >円<br />"
"<input type='submit' value='円安(上方向)' id='0'>"
"<input type='submit' value='円高(下方向)' id='1'></p>" % (a, a), "#info")
def hide_input(a):
hide("#input%s" % a)
def ping():
put({"id": 2, "client": me})
def win(i, bet, all, profit):
add("<p id='counter%s'>おめでとうございます!あなたは勝ちました。<br />"
"掛け金の2倍の額が手元に入りました。</p>" % i, "#info")
let(all, "#all")
let(profit, "#profit")
def lose(i, bet, all, profit):
add("<p id='counter%s'>残念でした。あなたの負けです。<br />"
"掛け金は全て没収されました。</p>" % i, "#info")
let(all, "#all")
let(profit, "#profit")
data = []
time = []
all = 100000
profit = 0
counter = 0
counter_input = 0
bet = 0
type = 0
        # Build the initial screen
set_input(0)
add(100000, "#all")
add(0, "#profit")
for i in range(ts):
            # Receive the rate update (d)
msg = take({"tag": "d"})
data.append(msg["data"])
time.append(msg["time"])
            # Refresh the displayed rate
let(data[i], "#rate")
let(i+1, "#number")
hide("#img%s" % (i-1))
add("<img id='img%s' src='img/image.png'>" % i)
            # Betting is closed once the number of updates reaches ts-5 (45).
if i == ts-5:
hide_input(counter_input)
add("<p>賭けの受付を終了しました。%s回目の推移が終わるまでお待ちください。</p>" % ts, "#info")
            # Wait for the participant's bet decision (just wait 4 seconds if a bet is already in progress)
background(ping, 4)
msg = take({"client": me})
            # A bet is already in progress (if1_1)
if counter >= 1 and counter <=5:
hide("#counter%s" % (i-1))
add("<p id='counter%s'>あと%s回目の推移で決定します。</p>" % (i, 6-counter), "#info")
counter += 1
            # Settle the bet: decide whether it was won or lost (if1_2)
elif counter == 6:
                # The participant bet on yen depreciation (rate going up)
if type == 0:
if data[i-6] <= data[i]:
all += bet
profit += bet
win(i, bet, all, profit)
else:
all -= bet
profit -= bet
lose(i, bet, all, profit)
                # The participant bet on yen appreciation (rate going down)
elif type == 1:
if data[i-6] >= data[i]:
all += bet
profit += bet
win(i, bet, all, profit)
else:
all -= bet
profit -= bet
lose(i, bet, all, profit)
counter = -1
counter_input += 1
hide("#counter%s" % (i-1))
if i <= ts -6:
set_input(counter_input)
            # On the update after settlement, remove the win/lose message from #info (if1_3)
elif counter == -1:
hide("#counter%s" % (i-1))
hide("#betinfo%s" % (counter_input-1))
counter = 0
            # The "yen depreciation (up)" button was pressed (if2_1)
if msg["id"] == "0":
hide_input(counter_input)
bet = int(peek("#bet%s" % counter_input))
add("<p id='betinfo%s'>あなたは6回の推移後に為替レートが%s円の状態よりも"
"円安(グラフで上方向)になるということに%s円賭けました。</p>" % (counter_input, data[i], bet), "#info")
add("<p id='counter%s'>あと%s回目の推移で決定します。</p>" % (i, 6-counter), "#info")
counter = 1
type = 0
            # The "yen appreciation (down)" button was pressed (if2_2)
elif msg["id"] == "1":
hide_input(counter_input)
bet = int(peek("#bet%s" % counter_input))
add("<p id='betinfo%s'>あなたは6回の推移後に為替レートが%s円の状態よりも"
"円高(グラフで下方向)になるということに%s円賭けました。</p>" % (counter_input, data[i], bet), "#info")
add("<p id='counter%s'>あと%s回目の推移で決定します。</p>" % (i, 6-counter), "#info")
counter = 1
type = 1
add("<p>これで実験を終了します。</p>", "#info")
run(session)
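# Settlement rule implemented in the loop above, summarised here for reference
# (comment only): a bet placed at update t is settled at update t+6; an "up"
# (yen-depreciation) bet wins if rate(t+6) >= rate(t), a "down" (yen-appreciation)
# bet wins if rate(t+6) <= rate(t); a win adds the stake to the balance, a loss
# subtracts it.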
| gpl-3.0 |
rbiswas4/Cadence | gedankenLSST/sninLSST.py | 1 | 4355 | import numpy as np
import pandas as pd
from lsst.sims.catUtils.supernovae import SNObject
from opsimsummary import summarize_opsim as oss
from astropy.table import Table
__all__ = ['SNObs']
class SNObs(oss.SummaryOpsim):
def __init__(self, t0, fieldID=None, raCol=None, decCol=None, ra=0.,
dec=0., peakabsmagBessellB=-19.3,
summarydf=None, snState={'z':0.5}, lsst_bp=None):
oss.SummaryOpsim.__init__(self, summarydf=summarydf)
self.fieldID = fieldID
self.raCol = raCol
self.decCol = decCol
self._ra = np.radians(ra)
self._dec = np.radians(dec)
self.summary = summarydf
self._peakabsmagBessellB = peakabsmagBessellB
self.t0 = t0
self._lc = None
self._numDropped = None
self._snState = snState
self.lsst_bp = lsst_bp
self.lowrange = -30.
self.highrange = 50.
@property
def radeg(self):
if self._ra != 0. and self._dec != 0.:
return np.degrees(self._ra)
if self.fieldID is not None:
ra = self.ra(self.fieldID)
elif self.raCol is not None:
ra = self.summary[self.raCol].iloc[0]
else:
ra = self._ra
return np.degrees(ra)
@property
def decdeg(self):
        if self._ra != 0. and self._dec != 0.:
return np.degrees(self._dec)
if self.fieldID is not None:
dec = self.dec(self.fieldID)
elif self.decCol is not None:
dec = self.summary[self.decCol].iloc[0]
else:
dec = self._dec
return np.degrees(dec)
@property
def snState(self):
if self.SN.SNstate is None:
SNstate = self._snState
else:
SNstate = self.SN.SNstate
return SNstate
@snState.setter
def snState(self, value):
self._snState = value
return self._snState
@property
def SN(self):
"""
`lsst.sims.catsim.SNObject` instance with peakMJD set to t0
"""
#if self.snState is not None:
# return SNObject.fromSNState(self.snState)
sn = SNObject(ra=self.radeg, dec=self.decdeg)
sn.set(t0=self.t0)
sn.set(**self._snState)
sn.set_source_peakabsmag(self._peakabsmagBessellB, 'bessellB', 'ab')
return sn
def SNCosmoLC(self, scattered=False, seed=0):
lc = self.lightcurve
lc['modelFlux'] = lc['flux']
# add scatter if desired
np.random.seed(seed)
lc['deviation'] = np.random.normal(size=len(lc['flux']))
if scattered:
lc['flux'] = lc['flux'] + lc['deviation'] * lc['fluxerr']
return Table(lc.to_records())
@property
def lightcurve(self, lowrange=-30., highrange=50. ):
sn = self.SN
# dataframe.set_index('obsHistID')
# timewindowlow
timelow = sn.get('t0') + lowrange
timehigh = sn.get('t0') + highrange
# Model range
modellow = sn.mintime()
modelhigh = sn.maxtime()
if modellow > timelow:
timelow = modellow
if modelhigh < timehigh:
timehigh = modelhigh
if self.fieldID is None:
dataframe = self.summary
else:
dataframe = self.simlib(fieldID=self.fieldID)
x = dataframe.query('expMJD > @timelow and expMJD < @timehigh')
df = x.copy(deep=True)
colnames = ['time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys', 'SNR',
'finSeeing', 'airmass', 'filtSkyBrightness','fiveSigmaDepth',
'propID', 'night', 'DetectionEfficiency']
df['band'] = df['filter'].apply(lambda x: x.lower())
df['flux'] = df.apply(lambda row: sn.catsimBandFlux(row['expMJD'],
self.lsst_bp[row['band']]), axis=1)
df['fluxerr'] = df.apply(lambda row: sn.catsimBandFluxError(row['expMJD'],
self.lsst_bp[row['band']],
m5=row['fiveSigmaDepth']), axis=1)
df['zp'] = 0.
df['zpsys'] = 'ab'
df.rename(columns={'expMJD':'time'}, inplace=True)
os = len(df)
df = df.query('flux > 0. and fluxerr > 0.')
s = len(df)
df['SNR'] = df['flux'] / df['fluxerr']
return df
| mit |
jimcunderwood/MissionPlanner | Lib/site-packages/numpy/core/function_base.py | 82 | 5474 | __all__ = ['logspace', 'linspace']
import numeric as _nx
from numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float (only if `retstep` is True)
Size of spacing between samples.
See Also
--------
    arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num <= 0:
return array([], float)
if endpoint:
if num == 1:
return array([float(start)])
step = (stop-start)/float((num-1))
y = _nx.arange(0, num) * step + start
y[-1] = stop
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num) * step + start
if retstep:
return y, step
else:
return y
def logspace(start,stop,num=50,endpoint=True,base=10.0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
    arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start,stop,num=num,endpoint=endpoint)
return _nx.power(base,y)
| gpl-3.0 |
zfrenchee/pandas | pandas/core/indexes/frozen.py | 20 | 4619 | """
frozen (immutable) data structures to support MultiIndexing
These are used for:
- .names (FrozenList)
- .levels & .labels (FrozenNDArray)
"""
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.io.formats.printing import pprint_thing
class FrozenList(PandasObject, list):
"""
Container that doesn't allow setting item *but*
    because it's technically hashable, will be used
for lookups, appropriately, etc.
"""
# Sidenote: This has to be of type list, otherwise it messes up PyTables
# typechecks
def __add__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(super(FrozenList, self).__add__(other))
__iadd__ = __add__
# Python 2 compat
def __getslice__(self, i, j):
return self.__class__(super(FrozenList, self).__getslice__(i, j))
def __getitem__(self, n):
# Python 3 compat
if isinstance(n, slice):
return self.__class__(super(FrozenList, self).__getitem__(n))
return super(FrozenList, self).__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(other + list(self))
def __eq__(self, other):
if isinstance(other, (tuple, FrozenList)):
other = list(other)
return super(FrozenList, self).__eq__(other)
__req__ = __eq__
def __mul__(self, other):
return self.__class__(super(FrozenList, self).__mul__(other))
__imul__ = __mul__
def __reduce__(self):
return self.__class__, (list(self),)
def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__.__name__)
def __unicode__(self):
return pprint_thing(self, quote_strings=True,
escape_chars=('\t', '\r', '\n'))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
str(self))
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
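# Commented usage sketch for FrozenList (illustrative only):
#
#   fl = FrozenList(['a', 'b'])
#   fl + ['c']       # -> FrozenList(['a', 'b', 'c']); concatenation returns a new object
#   hash(fl)         # works, since __hash__ is defined via tuple(self)
#   fl.append('c')   # raises TypeError: mutating operations are disabled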
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
return res
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
put = itemset = fill = _disabled
def _shallow_copy(self):
return self.view()
def values(self):
"""returns *copy* of underlying array"""
arr = self.view(np.ndarray).copy()
return arr
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
def searchsorted(self, v, side='left', sorter=None):
"""
Find indices where elements of v should be inserted
in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""
# we are much more performant if the searched
# indexer is the same type as the array
# this doesn't matter for int64, but DOES
# matter for smaller int dtypes
# https://github.com/numpy/numpy/issues/5370
try:
v = self.dtype.type(v)
except:
pass
return super(FrozenNDArray, self).searchsorted(
v, side=side, sorter=sorter)
def _ensure_frozen(array_like, categories, copy=False):
array_like = coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
| bsd-3-clause |
AzamYahya/shogun | examples/undocumented/python_modular/graphical/util.py | 26 | 2670 | """ Utilities for matplotlib examples """
import pylab
from numpy import ones, array, double, meshgrid, reshape, linspace, \
concatenate, ravel, pi, sinc
from numpy.random import randn, rand
from modshogun import BinaryLabels, RegressionLabels, RealFeatures, SparseRealFeatures
QUITKEY='q'
NUM_EXAMPLES=100
DISTANCE=2
def quit (event):
if event.key==QUITKEY or event.key==QUITKEY.upper():
pylab.close()
def set_title (title):
quitmsg=" (press '"+QUITKEY+"' to quit)"
complete=title+quitmsg
manager=pylab.get_current_fig_manager()
# now we have to wrap the toolkit
if hasattr(manager, 'window'):
if hasattr(manager.window, 'setCaption'): # QT
manager.window.setCaption(complete)
if hasattr(manager.window, 'set_title'): # GTK
manager.window.set_title(complete)
elif hasattr(manager.window, 'title'): # TK
manager.window.title(complete)
def get_realdata(positive=True):
if positive:
return randn(2, NUM_EXAMPLES)+DISTANCE
else:
return randn(2, NUM_EXAMPLES)-DISTANCE
def get_realfeatures(pos, neg):
arr=array((pos, neg))
features = concatenate(arr, axis=1)
return RealFeatures(features)
def get_labels(raw=False, type='binary'):
data = concatenate(array(
(-ones(NUM_EXAMPLES, dtype=double), ones(NUM_EXAMPLES, dtype=double))
))
if raw:
return data
else:
if type == 'binary':
return BinaryLabels(data)
if type == 'regression':
return RegressionLabels(data)
return None
def compute_output_plot_isolines(classifier, kernel=None, train=None, sparse=False, pos=None, neg=None, regression=False):
size=100
if pos is not None and neg is not None:
x1_max=max(1.2*pos[0,:])
x1_min=min(1.2*neg[0,:])
x2_min=min(1.2*neg[1,:])
x2_max=max(1.2*pos[1,:])
x1=linspace(x1_min, x1_max, size)
x2=linspace(x2_min, x2_max, size)
else:
x1=linspace(-5, 5, size)
x2=linspace(-5, 5, size)
x, y=meshgrid(x1, x2)
dense=RealFeatures(array((ravel(x), ravel(y))))
if sparse:
test=SparseRealFeatures()
test.obtain_from_simple(dense)
else:
test=dense
if kernel and train:
kernel.init(train, test)
else:
classifier.set_features(test)
labels = None
if regression:
labels=classifier.apply().get_labels()
else:
labels=classifier.apply().get_values()
z=labels.reshape((size, size))
return x, y, z
def get_sinedata():
x=4*rand(1, NUM_EXAMPLES)-DISTANCE
x.sort()
y=sinc(pi*x)+0.1*randn(1, NUM_EXAMPLES)
return x, y
def compute_output_plot_isolines_sine(classifier, kernel, train, regression=False):
x=4*rand(1, 500)-2
x.sort()
test=RealFeatures(x)
kernel.init(train, test)
if regression:
y=classifier.apply().get_labels()
else:
y=classifier.apply().get_values()
return x, y
| gpl-3.0 |
waterponey/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 110 | 3768 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
HeraclesHX/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
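# One possible completion of the TASKs above, kept as comments so the file remains
# a skeleton (the parameter choices are illustrative, not the only valid ones):
#
#   vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
#                                use_idf=False)
#   clf = Pipeline([
#       ('vec', vectorizer),
#       ('clf', Perceptron()),
#   ])
#   clf.fit(docs_train, y_train)
#   y_predicted = clf.predict(docs_test)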
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
schets/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
DmitryYurov/BornAgain | Examples/python/fitting/ex03_ExtendedExamples/specular/RealLifeReflectometryFitting.py | 1 | 9258 | """
Real life example:
Fitting data from an X-ray reflectometer
The sample is composed of a thin
silver nano-particle layer on a silicon
substrate. The substrate is covered with
SiO2 layer. The nano-particle layer has negligible density
and does not considerably affect
the observed reflectometry picture.
The following parameters of the experiment
are fitted:
1. beam intensity
2. footprint correction factor
3. beam angular divergence
4. Material concentration in the SiO2 layer
5. Thickness of SiO2 layer
6. Sample roughness
Fitting is done in two steps:
First the whole range of experimental data is fitted,
then the instrument-related parameters are fixed and
on the second step only the right-hand part of
experimental data (i.e. the part of the reflectometry curve
associated with bigger incident angles)
is used for fitting. At the second
stage only the sample parameters are fitted,
since only they affect the shape of the reflectometry
curve at bigger incident angles.
"""
from matplotlib import pyplot as plt
import numpy as np
from os import path
import bornagain as ba
from scipy.optimize import differential_evolution
def get_real_data(filename="mg6a_Merged.txt.gz"):
"""
Loads real data files and merges them once.
Returns a Nx3 array (N - the number of experimental data entries)
with first column being coordinates,
second one being values,
and the third one being weights to restore intensity values from experiment
"""
if not hasattr(get_real_data, "data"):
filepath = path.join(path.dirname(path.realpath(__file__)), filename)
real_data = np.loadtxt(filepath, usecols=(0, 1, 3), skiprows=1)
# translating axis values from double incident angle (degrees)
# to incident angle (radians)
real_data[:, 0] *= np.pi / 360
get_real_data.data = real_data
return get_real_data.data.copy()
def get_real_data_axis(start, end):
"""
Get axis coordinates of the experimental data
:param start: first bin to extract
:param end: last bin to extract
:return: 1D array with axis coordinates
"""
return get_real_data()[start:end, 0]
def get_real_data_values(start, end):
"""
Get experimental data values as a 1D array
:param start: first bin to extract
:param end: last bin to extract
:return: 1D array with experimental data values
"""
return get_real_data()[start:end, 1]
def get_weights(start, end):
"""
Get weights to restore genuine intensity of experimental instrument
:param start: first bin to extract
:param end: last bin to extract
:return: 1D array with weights to restore beam intensity
"""
return get_real_data()[start:end, 2]
def create_simulation(arg_dict, bin_start, bin_end):
"""
Creates and returns specular simulation
"""
wavelength = 1.54 * ba.angstrom
alpha_distr = ba.RangedDistributionGaussian(30, 3)
footprint = ba.FootprintFactorGaussian(arg_dict["footprint_factor"])
scan = ba.AngularSpecScan(wavelength,
get_real_data_axis(bin_start, bin_end))
scan.setAbsoluteAngularResolution(alpha_distr, arg_dict["divergence"])
scan.setFootprintFactor(footprint)
simulation = ba.SpecularSimulation()
simulation.setScan(scan)
simulation.setBeamIntensity(arg_dict["intensity"])
return simulation
def buildSample(arg_dict):
"""
Creates sample and returns it
"""
# defining materials
m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0)
m_si_o2 = ba.HomogeneousMaterial("SiO2",
8.57040868e-06 * arg_dict["concentration"],
1.11016654e-07 * arg_dict["concentration"])
m_si = ba.HomogeneousMaterial("Si", 7.57211137e-06, 1.72728178e-07)
# roughness
r_si = ba.LayerRoughness(arg_dict["roughness"], 0, 0)
# layers
air_layer = ba.Layer(m_air)
oxide_layer = ba.Layer(m_si_o2, arg_dict["thickness"])
substrate_layer = ba.Layer(m_si)
# assembling multilayer
multi_layer = ba.MultiLayer()
multi_layer.addLayer(air_layer)
multi_layer.addLayerWithTopRoughness(oxide_layer, r_si)
multi_layer.addLayerWithTopRoughness(substrate_layer, r_si)
return multi_layer
def run_simulation(arg_dict, bin_start=0, bin_end=-1):
"""
Runs simulation and returns its result
"""
simulation = create_simulation(arg_dict, bin_start, bin_end)
simulation.setSample(buildSample(arg_dict))
simulation.runSimulation()
return simulation.result()
def chi_2(real_data, sim_data, weights):
"""
Computes chi_2 metrics and returns its value
"""
sim_data_upsc = np.multiply(weights, sim_data)
    sim_data_upsc[sim_data_upsc == 0] = 1e-30  # elementwise comparison; guard against division by zero
real_data_upsc = np.multiply(weights, real_data)
diff = real_data_upsc - sim_data_upsc
return np.sum(np.divide(np.multiply(diff,diff), sim_data_upsc))
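# Written out as a formula (comment only): with weights w restoring the measured
# intensity, experimental values d and simulated values s, the function above returns
#
#   chi^2 = sum_i (w_i * d_i - w_i * s_i)^2 / (w_i * s_i)
#
# where simulated values of exactly zero are replaced by 1e-30 to avoid division by zero.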
def create_par_dict(*arg):
"""
Creates a dictionary with parameter names and values
and returns it
"""
return {'intensity': arg[0],
'footprint_factor': arg[1],
'divergence':arg[2],
'concentration': arg[3],
'thickness': arg[4],
'roughness': arg[5]
}
def objective_primary(args):
"""
Objective function for preliminary stage of optimization
"""
bin_start = 15 # first bin in the experimental data to calculate
bin_end = -1 # last bin in the experimental data to calculate
arg_dict = create_par_dict(*args)
sim_result = run_simulation(arg_dict, bin_start, bin_end)
sim_data = sim_result.array()
return chi_2(get_real_data_values(bin_start, bin_end),
sim_data, get_weights(bin_start, bin_end))
def objective_fine(args, intensity, footprint_factor, divergence):
"""
Objective function for tuning the right-hand side of experimental data
"""
bin_start = 404 # first bin in the experimental data to calculate
bin_end = -1 # last bin in the experimental data to calculate
arg_dict = create_par_dict(intensity, footprint_factor, divergence, *args)
sim_result = run_simulation(arg_dict, bin_start, bin_end)
sim_data = sim_result.array()
return chi_2(get_real_data_values(bin_start, bin_end),
sim_data, get_weights(bin_start, bin_end))
def run_fitting():
"""
Runs fitting and returns its result
"""
# running preliminary optimization on the total range of experimental data.
bounds = [(1e6, 1e8), # beam intensity
(0.0, 0.1), # beam-to-sample width ratio
(0.0, 0.08 * ba.deg), # beam_divergence
(0.0, 1.0), # oxide_concentration
(0.0, 2.0 * ba.nm), # oxide_thickness
(0.0, 2.0 * ba.nm)] # roughness
print("Start preliminary fitting of experimental data:\n")
preliminary_result = differential_evolution(objective_primary, bounds,
maxiter=20, popsize=60,
mutation=(0.5, 1.5),
disp=True, tol=1e-5)
bounds = [(0.0, 1.0), # oxide_concentration
(0.0, 2.0 * ba.nm), # oxide_thickness
(0.0, 2.0 * ba.nm)] # roughness
fixed_args = (preliminary_result.x[0], # beam intensity
preliminary_result.x[1], # beam-to-sample width ratio
preliminary_result.x[2] # beam divergence
)
print("\nStart fitting big incident angle part of experimental data:\n")
fine_tuning_result = differential_evolution(objective_fine, bounds,
fixed_args, maxiter=20,
popsize=40, mutation=(0.5, 1.5),
disp=True, tol=1e-5)
result = create_par_dict(*fixed_args, *fine_tuning_result.x)
print("\nFitting result:")
print(result,"\n")
return result
def plot_result(sim_result, ref_result, bin_start=0, bin_end=-1):
"""
Plots the graphs of obtained simulation data
"""
sim_data = sim_result.array()
ref_data = ref_result.array()
plt.semilogy(get_real_data_axis(bin_start, bin_end) * 180 / np.pi,
get_real_data_values(bin_start, bin_end),
sim_result.axis(), sim_data,
ref_result.axis(), ref_data)
xlabel = ba.get_axes_labels(sim_result, ba.AxesUnits.DEFAULT)[0]
ylabel = "Intensity"
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16)
plt.legend(['Experimental data', 'Simulation', 'Reference'],
loc='upper right', fontsize=16)
plt.show()
if __name__ == '__main__':
fit_data = run_fitting()
ref_data = create_par_dict(3.78271438e+06, # beam intensity
9.58009763e-04, # beam-to-sample width ratio
2.30471294e-04, # beam angular divergence
0.58721753, # oxide concentration
1.25559347, # oxide thickness
0.19281863) # roughness
plot_result(run_simulation(fit_data), run_simulation(ref_data))
| gpl-3.0 |
cactusbin/nyt | matplotlib/examples/pylab_examples/clippedline.py | 12 | 1422 | """
Clip a line according to the current xlimits, and change the marker
style when zoomed in.
It is not clear this example is still needed or valid; clipping
is now automatic for Line2D objects when x is sorted in
ascending order.
"""
from matplotlib.lines import Line2D
import numpy as np
from pylab import figure, show
class ClippedLine(Line2D):
"""
Clip the xlimits to the axes view limits -- this example assumes x is sorted
"""
def __init__(self, ax, *args, **kwargs):
Line2D.__init__(self, *args, **kwargs)
self.ax = ax
def set_data(self, *args, **kwargs):
Line2D.set_data(self, *args, **kwargs)
self.recache()
self.xorig = np.array(self._x)
self.yorig = np.array(self._y)
def draw(self, renderer):
xlim = self.ax.get_xlim()
ind0, ind1 = np.searchsorted(self.xorig, xlim)
self._x = self.xorig[ind0:ind1]
self._y = self.yorig[ind0:ind1]
N = len(self._x)
if N<1000:
self._marker = 's'
self._linestyle = '-'
else:
self._marker = None
self._linestyle = '-'
Line2D.draw(self, renderer)
fig = figure()
ax = fig.add_subplot(111, autoscale_on=False)
t = np.arange(0.0, 100.0, 0.01)
s = np.sin(2*np.pi*t)
line = ClippedLine(ax, t, s, color='g', ls='-', lw=2)
ax.add_line(line)
ax.set_xlim(10,30)
ax.set_ylim(-1.1,1.1)
show()
| unlicense |
igobrilhante/random-trajectory-generator | plots/plot_degree_networks.py | 1 | 1669 | from utils import db_utils
__author__ = 'igobrilhante'
import matplotlib.pyplot as plt
import numpy as np
from mpltools import style
import brewer2mpl
import utils
from mpltools import layout
style.use('ggplot')
figsize = layout.figaspect(scale=0.8)
fig, axes = plt.subplots(figsize=figsize)
axes.xaxis.label.set_color('#000000')
axes.yaxis.label.set_size(18)
axes.xaxis.label.set_size(18)
axes.yaxis.label.set_color('#000000')
axes.xaxis.major.formatter._useMathText = True
axes.yaxis.major.formatter._useMathText = True
axes.set_yscale('log')
axes.set_xscale('log')
for spine in axes.spines:
axes.spines[spine].set_color('#aeaeae')
axes.set_axis_bgcolor('white')
markers = ['o', '^', 's', 'D', 'o', "p", '8', '*']
marker_size = 10
line_width = 0.0
alpha = 0.8
hours = ["5", "6", "6"]
dow = "wd"
colors = brewer2mpl.get_map('YlGnBu', 'sequential', 9).mpl_colors
colors = [colors[3], colors[5], colors[8]]
i = 0
cities = ["Pisa", "Firenze", "Milan"]
labels = ["Pisa", "Florence", "Milan"]
for city in cities:
table = "network."+city+"_fs_poiclusterf_traj_" + dow + "_" + hours[i] + "h_trajs_node$"
agg = utils.agg_degree(db_utils.load_degree(table))
plt.plot(agg.degree, agg.total/float(np.sum(agg.total)), color=colors[i], label=labels[i], marker=markers[i], markersize=marker_size, linewidth=line_width, markeredgewidth=0.0, alpha=alpha)
i += 1
plt.tick_params(axis='both', which='major', labelsize=16, colors="#000000")
# plt.xlim([0, 1000])
# plt.ylim([-100, 100])
plt.xlabel('Degree')
plt.ylabel('Probability')
fig.tight_layout()
#
leg = plt.legend(loc=1, prop={'size': 16})
leg.get_frame().set_facecolor("white")
plt.show() | mit |
zhreshold/mxnet | python/mxnet/numpy/multiarray.py | 2 | 394970 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines, unused-argument
"""numpy ndarray and util functions."""
try:
from __builtin__ import all as py_all
from __builtin__ import slice as py_slice
except ImportError:
from builtins import all as py_all
from builtins import slice as py_slice
from array import array as native_array
import functools
import ctypes
import warnings
import numpy as _np
from .. import _deferred_compute as dc
from ..autograd import is_recording
from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _GRAD_REQ_MAP
from ..ndarray import indexing_key_expand_implicit_axes, get_indexing_dispatch_code,\
get_oshape_of_gather_nd_op
from ..ndarray._internal import _set_np_ndarray_class
from . import _op as _mx_np_op
from ..base import check_call, _LIB, NDArrayHandle, c_array
from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types
from ..context import Context
from ..util import set_module, wrap_np_unary_func, wrap_np_binary_func,\
is_np_default_dtype
from ..context import current_context
from ..ndarray import numpy as _mx_nd_np
from ..ndarray.numpy import _internal as _npi
from ..ndarray.ndarray import _storage_type, from_numpy
from .utils import _get_np_op
from .fallback import * # pylint: disable=wildcard-import,unused-wildcard-import
from . import fallback
__all__ = ['ndarray', 'empty', 'empty_like', 'array', 'shape', 'median',
'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'all', 'any', 'broadcast_to',
'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod', 'power', 'bitwise_not',
'delete', 'trace', 'transpose',
'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'invert',
'sqrt', 'cbrt', 'abs', 'absolute', 'fabs', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',
'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'histogram',
'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'append', 'argsort',
'sort', 'tensordot', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange',
'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'flatnonzero', 'tril_indices',
'concatenate', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin', 'amax', 'amin', 'max', 'min',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'insert',
'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman',
'logical_and', 'logical_or', 'logical_xor',
'flip', 'flipud', 'fliplr', 'around', 'round', 'round_', 'arctan2', 'hypot',
'triu_indices_from', 'triu_indices', 'tri',
'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad',
'unique', 'lcm', 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
'cross', 'kron', 'equal', 'not_equal', 'interp',
'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero',
'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'matmul',
'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'polyval', 'where', 'bincount',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',
'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'rollaxis', 'diag', 'diagonal']
__all__ += fallback.__all__
# Return code for dispatching indexing function call
_NDARRAY_UNSUPPORTED_INDEXING = -1
_NDARRAY_BASIC_INDEXING = 0
_NDARRAY_ADVANCED_INDEXING = 1
_NDARRAY_EMPTY_TUPLE_INDEXING = 2
# Return code for 0-d boolean array handler
_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
# This function is copied from ndarray.py since pylint
# keeps giving false alarm error of undefined-all-variable
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t): # pylint: disable=redefined-outer-name
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `ndarray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[_np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl
def _reshape_view(a, *shape): # pylint: disable=redefined-outer-name
"""Returns a **view** of this array with a new shape without altering any data.
Parameters
----------
shape : tuple of int, or n ints
The new shape should not change the array size, namely
``np.prod(new_shape)`` should be equal to ``np.prod(a.shape)``.
Some dimensions of the shape can take special value -1, which
infers the dimension of the output shape by using the remainder of the
input dimensions keeping the size of the new array same as that of the input array.
At most one dimension of shape can be -1.
Returns
-------
ndarray
An array with desired shape that shares data with this array.
"""
if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
shape = shape[0]
handle = NDArrayHandle()
check_call(_LIB.MXNDArrayReshape64(a.handle,
len(shape),
c_array(ctypes.c_int64, shape),
False,
ctypes.byref(handle)))
return ndarray(handle=handle, writable=a.writable)
def _as_mx_np_array(object, ctx=None):
"""Convert object to mxnet.numpy.ndarray."""
if isinstance(object, _np.ndarray):
if not object.flags['C_CONTIGUOUS']:
object = _np.ascontiguousarray(object, dtype=object.dtype)
ret = from_numpy(object, array_cls=ndarray)
return ret if ctx is None else ret.as_in_ctx(ctx=ctx)
elif isinstance(object, (integer_types, numeric_types)):
return object
elif isinstance(object, (list, tuple)):
tmp = [_as_mx_np_array(arr) for arr in object]
return object.__class__(tmp)
elif isinstance(object, (_np.bool_, _np.bool)):
return array(object, dtype=_np.bool_, ctx=ctx)
else:
raise TypeError('Does not support converting {} to mx.np.ndarray.'.format(str(type(object))))
def _as_onp_array(object):
"""Convert object to mxnet.numpy.ndarray."""
cur_ctx = None
if isinstance(object, ndarray):
return object.asnumpy(), object.ctx
elif isinstance(object, (list, tuple)):
tmp = []
for arr in object:
arr, tmp_ctx = _as_onp_array(arr)
# if isinstance(arr, (list, tuple)):
# raise TypeError('type {} not supported'.format(str(type(arr))))
tmp.append(arr)
if cur_ctx is None:
cur_ctx = tmp_ctx
elif tmp_ctx is not None and cur_ctx != tmp_ctx:
raise ValueError('Ambiguous to set the context for the output ndarray since' # pylint: disable=too-few-format-args
' input ndarrays are allocated on different devices: {} and {}'
.format(str(cur_ctx, tmp_ctx)))
return object.__class__(tmp), cur_ctx
else:
return object, cur_ctx
# Have to use 0 as default value for stype since pylint does not allow
# importing _STORAGE_TYPE_DEFAULT from ndarray.py.
def _np_ndarray_cls(handle, writable=True, stype=0):
if stype == -1:
stype = _storage_type(handle)
if stype != 0:
raise ValueError('_np_ndarray_cls currently only supports default storage '
'type, while received stype = {}'.format(stype))
return ndarray(handle, writable=writable)
_set_np_ndarray_class(_np_ndarray_cls)
_NUMPY_ARRAY_FUNCTION_DICT = {}
_NUMPY_ARRAY_UFUNC_DICT = {}
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD = {}
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD = {}
def wrap_mxnp_np_ufunc(func):
"""
A convenience decorator for wrapping Python overload-able binary ops to provide type
casting for mixed use of mx_np and onp inputs.
Parameters
----------
func : a python overload-able binary function to be wrapped for type casting.
Returns
-------
Function
A function wrapped with type casting for its inputs.
"""
@functools.wraps(func)
def _wrap_mxnp_np_ufunc(x1, x2):
if isinstance(x2, _np.ndarray):
x2 = _as_mx_np_array(x2, ctx=x1.ctx)
return func(x1, x2)
return _wrap_mxnp_np_ufunc
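# Illustrative application (a sketch; mirrors the dunder methods of `ndarray` below):
#
#   @wrap_mxnp_np_ufunc
#   def __add__(self, other):
#       return add(self, other)
#
# With the wrapper in place, an expression like `mx_arr + onp_arr` first converts
# the official NumPy operand to an mxnet.numpy.ndarray on `mx_arr`'s context.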
@set_module('mxnet.numpy') # pylint: disable=invalid-name
class ndarray(NDArray):
"""
ndarray(handle, writable=True):
An array object represents a multidimensional, homogeneous array of fixed-size items.
An associated data-type object describes the format of each element in the array
(its byte-order, how many bytes it occupies in memory, whether it is an integer, a
floating point number, or something else, etc.). Arrays should be constructed using
`array`, `zeros` or `empty`. Currently, only c-contiguous arrays are supported.
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `mxnet.numpy` module and examine the
methods and attributes of an array.
Parameters
----------
handle: int
The ndarray handle in backend (C++).
writable: bool
Indicates whether inplace-assignment is allowed for the array.
Attributes
----------
T : ndarray
Transpose of the array.
dtype : dtype object
Describes the format of the elements in the array.
size : int
Number of elements in the array.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
"""
@staticmethod
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy unary/binary operator calls on mxnet.numpy.ndarray
to this function. The operators must comply with the ufunc definition in NumPy.
The following code is adapted from CuPy.
Casting rules for operator with mx_np and onp (inplace op will keep its type)
| Expression | a type | b type | out type|
| --- | --- | --- | --- |
| `a += b` | onp | mx_np | onp |
| `a += b` | mx_np | onp | mx_np |
| `c = a + b` | onp | mx_np | mx_np |
| `c = a + b` | mx_np | onp | mx_np |
"""
ufunc_list = ["add", "subtract", "multiply", "divide", "true_divide", "floor_divide", "power",
"remainder", "bitwise_and", "bitwise_or", "bitwise_xor", "left_shift", "right_shift",
"greater", "greater_equal", "less", "less_equal", "not_equal", "equal", "matmul"]
if 'out' in kwargs:
# need to unfold tuple argument in kwargs
out = kwargs['out']
if len(out) != 1:
raise ValueError('The `out` parameter must have exactly one ndarray')
kwargs['out'] = out[0]
if method == '__call__':
name = ufunc.__name__
mx_ufunc = _NUMPY_ARRAY_UFUNC_DICT.get(name, None)
onp_op = _get_np_op(name)
if mx_ufunc is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(name)
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
if onp_op not in _FALLBACK_ARRAY_UFUNC_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation", name)
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD[onp_op] = True
out = onp_op(*new_inputs, **kwargs)
return _as_mx_np_array(out, ctx=inputs[0].ctx)
# binary ops mixing an official NumPy ndarray (first input) with mx_np
elif name in ufunc_list and isinstance(inputs[0], _np.ndarray):
# inplace
if 'out' in kwargs:
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
return onp_op(*new_inputs, **kwargs)
else:
new_inputs = [_as_mx_np_array(arg, ctx=inputs[1].ctx)
if isinstance(arg, _np.ndarray) else arg for arg in inputs]
return mx_ufunc(*new_inputs, **kwargs)
else:
return mx_ufunc(*inputs, **kwargs)
else:
return NotImplemented
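# Sketch of the casting rules documented above (illustrative; assumes
# `import numpy as onp`, `a` is an onp array and `b` is an mx_np array):
#   c = a + b   # -> mxnet.numpy.ndarray (mixed expression yields mx_np)
#   a += b      # -> `a` stays an official onp array (inplace keeps its type)
#   b += a      # -> `b` stays an mx_np array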
@staticmethod
def __array_function__(self, func, types, args, kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy operators that comply with the array function protocol to
this function.
"""
mx_np_func = _NUMPY_ARRAY_FUNCTION_DICT.get(func, None)
func_name = func.__name__
if mx_np_func is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(func)
new_args, cur_ctx = _as_onp_array(args)
if cur_ctx is None:
raise ValueError('Unknown context for the input ndarrays. It is probably a bug. Please'
' create an issue on GitHub.')
new_kwargs = {}
for k, v in kwargs.items():
new_kwargs[k] = v.asnumpy() if isinstance(v, ndarray) else v
if func not in _FALLBACK_ARRAY_FUNCTION_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation.", func_name)
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD[func] = True
out = func(*new_args, **new_kwargs)
return _as_mx_np_array(out, ctx=cur_ctx)
else:
# Note: this allows subclasses that don't override
# __array_function__ to handle mxnet.numpy.ndarray objects
if not py_all(issubclass(t, ndarray) for t in types):
return NotImplemented
return mx_np_func(*args, **kwargs)
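# Sketch of the dispatch above (illustrative; assumes the operator has been
# registered in _NUMPY_ARRAY_FUNCTION_DICT elsewhere in the package):
#   import numpy as onp
#   a = np.ones((2, 3))
#   onp.sum(a)   # routed through __array_function__ to the mx implementation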
def _get_np_basic_indexing(self, key):
"""
This function indexes ``self`` with a tuple of `slice` objects only.
"""
key_nd = tuple(idx for idx in key if idx is not None)
if len(key_nd) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(key_nd))
)
if len(key_nd) > self.ndim:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(key_nd), self.ndim)
)
none_axes = [ax for ax in range(len(key)) if key[ax] is None] # pylint: disable=invalid-name
slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)
new_axes = self._new_axes_after_basic_indexing(none_axes, key)
# Check bounds for integer axes
for ax in int_axes: # pylint: disable=invalid-name
if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:
raise IndexError(
'index {} is out of bounds for axis {} with size {}'
''.format(key_nd[ax], ax, self.shape[ax]))
if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):
# Create a shared-memory view by using low-level flat slicing
flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(
slc_key, self.shape
)
handle = NDArrayHandle()
flat_self = self.reshape_view(-1)
check_call(
_LIB.MXNDArraySlice(
flat_self.handle,
mx_uint(flat_begin),
mx_uint(flat_end),
ctypes.byref(handle),
)
)
sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)
sliced = self.__class__(handle=handle, writable=self.writable)
if 0 in sliced_shape:
sliced = sliced.reshape(sliced_shape)
else:
sliced = sliced.reshape_view(sliced_shape)
else:
begin, end, step = self._basic_indexing_key_to_begin_end_step(
slc_key, self.shape, keep_none=True
)
sliced = _npi.slice(self, begin, end, step)
# Reshape to final shape due to integer and `None` entries in `key`.
final_shape = [sliced.shape[i] for i in range(sliced.ndim) if i not in int_axes]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
if sliced.size == 0:
return sliced.reshape(tuple(final_shape))
else:
return sliced.reshape_view(tuple(final_shape))
def _get_np_empty_tuple_indexing(self, key):
new_shape = []
num_none = 0
for i, idx in enumerate(key):
if idx is None:
new_shape.append(1) # expand dimension
num_none += 1
elif idx == ():
new_shape.append(0) # 0 shape
elif idx == slice(None, None, None):
new_shape.append(self.shape[i - num_none])
return empty(new_shape, dtype=self.dtype)
def _get_np_advanced_indexing(self, key):
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
sliced = _npi.gather_nd(self, idcs)
# Reshape due to `None` entries in `key`.
if new_axes:
final_shape = [sliced.shape[i] for i in range(sliced.ndim)]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
return sliced.reshape(tuple(final_shape))
else:
return sliced
def _set_np_advanced_indexing(self, key, value):
"""This function is called by __setitem__ when key is an advanced index."""
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
vshape = get_oshape_of_gather_nd_op(self.shape, idcs.shape)
value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes)
self._scatter_set_nd(value_nd, idcs)
# pylint: disable=redefined-outer-name
def _get_np_boolean_indexing(self, key, ndim, shape):
"""
There are two types of boolean indices (which are equivalent,
for the most part though). This function will handle single
boolean indexing for higher speed.
If this is not the case, it is instead expanded into (multiple)
integer array indices and will be handled by advanced indexing.
"""
key_shape = key.shape
key_ndim = len(key_shape)
if ndim < key_ndim:
raise IndexError('too many indices, whose ndim = {}, for array with ndim = {}'
.format(key_ndim, ndim))
for i in range(key_ndim):
if key_shape[i] != shape[i]:
raise IndexError('boolean index did not match indexed array along dimension {};'
' dimension is {} but corresponding boolean dimension is {}'
.format(i, shape[i], key_shape[i]))
remaining_dims = shape[key_ndim:]
data = _reshape_view(self, -1, *remaining_dims)
key = _reshape_view(key, -1)
return _reshape_view(_npi.boolean_mask(data, key), -1, *remaining_dims)
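# Illustrative behavior (a sketch; mirrors the boolean-indexing example in the
# `__getitem__` docstring below):
#   x = np.array([1., -1., -2., 3.])
#   x[x < 0]   # -> array([-1., -2.])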
def _set_np_boolean_indexing(self, key, value):
"""
There are two types of boolean indices (which are equivalent,
for the most part though). This function will handle single boolean assign for higher speed.
If this is not the case, it is instead expanded into (multiple)
integer array indices and will be handled by advanced assign.
"""
if isinstance(value, numeric_types):
_npi.boolean_mask_assign_scalar(data=self, mask=key,
value=int(value) if isinstance(value, bool) else value,
start_axis=0, out=self)
elif isinstance(value, ndarray):
_npi.boolean_mask_assign_tensor(data=self, mask=key, value=value, start_axis=0, out=self)
else:
raise NotImplementedError('type %s is not supported.'%(type(value)))
# pylint: disable=too-many-return-statements
def __getitem__(self, key):
"""Return self[key].
Returns a sliced view of this array if the elements fetched are contiguous in memory;
otherwise, returns a newly created NDArray.
This function supports advanced indexing defined in the following reference with
some restrictions. Boolean indexing is supported only for a single boolean ndarray
as a key. Mixing boolean ndarray with other index types is not supported in ``advanced``
indexing.
For basic indexing, i.e., if ``key`` consists only of integers,
``slice``, ``Ellipsis`` (``...``) and ``None``, a mutable view is
returned that shares memory with this array if the accessed portion is
contiguous in memory.
Otherwise, a newly created ``ndarray`` is returned.
This function supports advanced indexing as defined in `the NumPy
advanced indexing documentation
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
Indexing key.
Examples
--------
The default is to give explicit indices for all axes:
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> x[0, :2]
array([0., 1.])
>>> x[:, :-1]
array([[0., 1.],
[3., 4.]])
If fewer indices are given, they are automatically supplemented by an
appropriate number of ``slice(None)`` ("``:``") to the right. For
instance, a single integer indexes along the first axis:
>>> x[0]
array([0., 1., 2.])
>>> x[1:]
array([[3., 4., 5.]])
To omit a range of axes that should be kept as-is, an `Ellipsis`
("``...``") can be used:
>>> x = np.arange(16).reshape(2, 2, 2, 2)
>>> x[0, ..., 1]
array([[1., 3.],
[5., 7.]])
>>> x[0, :, :, 1] # equivalent
array([[1., 3.],
[5., 7.]])
New axes of length 1 can be created by inserting ``None``
(`numpy.newaxis`) in the index:
>>> x = np.arange(6).reshape(2, 3)
>>> x[None, :, :]
array([[[0., 1., 2.],
[3., 4., 5.]]])
>>> x[None, :, :].shape
(1, 2, 3)
If the indexed portion of the array is contiguous in memory, no data
is copied. Instead, a shared-memory view of the original array is
returned, and changes to that view affect the original array:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[0] # contiguous
>>> y
array([[0., 1.],
[2., 3.]])
>>> y[:] = -1
>>> x
array([[[-1., -1.],
[-1., -1.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[1, :1, :] # contiguous
>>> y
array([[4., 5.]])
>>> y[:] = -1
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[-1., -1.],
[ 6., 7.]]])
>>> x = np.arange(0, 8).reshape(2, 2, 2)
>>> y = x[:, :, 1] # not contiguous
>>> y
array([[1., 3.],
[5., 7.]])
>>> y[:] = -1
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
If the indexing key contains `list`, `numpy.ndarray` or `NDArray`
objects, advanced indexing is triggered, which always returns a
copy:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> x[[0, 1]]
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> x[[0, 1], :] # equivalent
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> y = np.array([0, 1], dtype='int32')
>>> x[1:, y]
array([[[4., 5.],
[6., 7.]]])
Get negative elements in an ndarray through boolean array indexing
>>> x = np.array([1., -1., -2., 3])
>>> x[x < 0]
array([-1., -2.])
For more information related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
ndim = self.ndim # pylint: disable=redefined-outer-name
shape = self.shape # pylint: disable=redefined-outer-name
if isinstance(key, bool): # otherwise will be treated as 0 and 1
key = array(key, dtype=_np.bool, ctx=self.ctx)
if isinstance(key, list):
try:
new_key = _np.array(key)
if new_key.dtype == _np.bool_:
key = new_key
except Exception as err:
raise TypeError('{}'.format(str(err)))
if isinstance(key, _np.ndarray):
if dc.is_deferred_compute():
raise TypeError('Indexing with a numpy array is not supported in HybridBlock.')
if key.dtype == _np.bool_:
key = array(key, dtype='bool', ctx=self.ctx)
# Handle single boolean index of matching dimensionality and size first for higher speed
# If the boolean array is mixed with other indices, it is instead expanded into (multiple)
# integer array indices and will be handled by advanced indexing.
# This comes before the `ndim == 0` check below since it also handles the 0-dim case.
if isinstance(key, ndarray) and key.dtype == _np.bool_:
return self._get_np_boolean_indexing(key, ndim, shape)
if ndim == 0 and key != ():
raise IndexError('scalar tensor can only accept `()` as index')
# Handle simple cases for higher speed
if isinstance(key, tuple) and len(key) == 0:
return self
if isinstance(key, tuple) and len(key) == ndim\
and py_all(isinstance(idx, integer_types) for idx in key):
out = self
for idx in key:
out = out[idx]
return out
if isinstance(key, integer_types):
if key > shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
if key.step is None or key.step == 1:
if key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
elif key.step == 0:
raise ValueError("slice step cannot be zero")
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend = _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
# prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means an axis of size 0 must be prepended
# prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis (size 1) must be prepended
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
indexing_dispatch_code = get_indexing_dispatch_code(key)
if indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
# won't be affected by zero-dim boolean indices
return self._get_np_empty_tuple_indexing(key)
elif indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return empty((0,) + self._get_np_basic_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_basic_indexing(key)
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
if dc.is_deferred_compute():
raise TypeError('Advanced indexing is not supported in HybridBlock.')
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return empty((0,) + self._get_np_advanced_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_advanced_indexing(key)
else:
raise RuntimeError
# pylint: disable=inconsistent-return-statements
def __setitem__(self, key, value):
"""Sets ``self[key]`` to ``value``.
This function supports advanced indexing as defined in `the NumPy
advanced indexing documentation
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_,
with the restriction that boolean array indexing is not supported.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
The indexing key.
value : scalar or array-like object that can be broadcast to the shape of self[key]
The value to set.
Examples
--------
>>> x = np.zeros((2, 3))
>>> x[:] = 1
>>> x
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> x[:, 1:2] = 2
>>> x
array([[ 1., 2., 1.],
[ 1., 2., 1.]])
>>> x[1:2, 1:] = 3
>>> x
array([[ 1., 2., 1.],
[ 1., 3., 3.]])
>>> x[1:, 0:2] = np.zeros((1, 2))
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 3.]])
>>> x[1, 2] = 4
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 4.]])
>>> x[[0], [1, 2]] = 5
>>> x
array([[ 1., 5., 5.],
[ 0., 0., 4.]])
>>> x[::-1, 0:2:2] = [6]
>>> x
array([[ 6., 5., 5.],
[ 6., 0., 4.]])
For information related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
if isinstance(value, NDArray) and not isinstance(value, ndarray):
raise TypeError('Cannot assign mx.nd.NDArray to mxnet.numpy.ndarray')
if isinstance(key, bool): # otherwise will be treated as 0 and 1
key = array(key, dtype=_np.bool)
# Handle single boolean assign of matching dimensionality and size first for higher speed
# If the boolean array is mixed with other indices, it is instead expanded into (multiple)
# integer array indices and will be handled by advanced assign.
# This comes before the `ndim == 0` check below since it also handles the 0-dim case.
if isinstance(key, ndarray) and key.dtype == _np.bool:
return self._set_np_boolean_indexing(key, value)
# handle basic and advanced indexing
if self.ndim == 0:
if not isinstance(key, tuple) or len(key) != 0:
raise IndexError('scalar tensor can only accept `()` as index')
if isinstance(value, numeric_types):
self._full(value)
elif isinstance(value, ndarray) and value.size == 1:
if value.shape != self.shape:
value = value.reshape(self.shape)
value.copyto(self)
elif isinstance(value, (_np.ndarray, _np.generic)) and value.size == 1:
if isinstance(value, _np.generic) or value.shape != self.shape:
value = value.reshape(self.shape)
self._sync_copyfrom(value)
else:
raise ValueError('setting an array element with a sequence.')
else:
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend == _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
# prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means an axis of size 0 must be prepended
# prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis (size 1) must be prepended
# prepend actually has no influence on __setitem__
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return # no action is needed
slc_key = tuple(idx for idx in key if idx is not None)
if len(slc_key) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(slc_key))
)
if len(slc_key) > self.ndim and self.ndim != 0:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(slc_key), self.ndim)
)
indexing_dispatch_code = get_indexing_dispatch_code(slc_key)
if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
self._set_nd_basic_indexing(key, value)  # method inherited from the NDArray class
elif indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
pass # no action needed
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
self._set_np_advanced_indexing(key, value)
else:
raise ValueError(
'Indexing NDArray with index {} of type {} is not supported'
''.format(key, type(key))
)
def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None):
"""Return a broadcast `ndarray` with same context and dtype as ``self``.
For setting an item, the returned `ndarray` is squeezed according to ``squeeze_axes``, since
value_nd is assigned to the not-yet-expanded space in the original array.
`value`: numeric types or array like.
`bcast_shape`: a shape tuple.
`squeeze_axes`: a sequence of axes to squeeze in the value array.
Note: mxnet.numpy.ndarray does not support NDArray as an assigned value.
"""
if isinstance(value, numeric_types):
value_nd = full(bcast_shape, value, ctx=self.ctx, dtype=self.dtype)
elif isinstance(value, self.__class__):
value_nd = value.as_in_ctx(self.ctx)
if value_nd.dtype != self.dtype:
value_nd = value_nd.astype(self.dtype)
else:
try:
value_nd = array(value, ctx=self.ctx, dtype=self.dtype)
except:
raise TypeError('mxnet.np.ndarray does not support assignment with non-array-like '
'object {} of type {}'.format(value, type(value)))
# For advanced indexing setitem, if there is None in indices, we need to squeeze the
# assigned value_nd since None is also ignored in slicing the original array.
if squeeze_axes and value_nd.ndim > len(bcast_shape):
squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)])
value_nd = value_nd.squeeze(axis=tuple(squeeze_axes))
# handle the cases like the following
# a = np.zeros((3, 3)), b = np.ones((1, 1, 1, 1, 3)), a[0] = b
# b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed
if value_nd.ndim > len(bcast_shape):
squeeze_axes = []
for i in range(value_nd.ndim - len(bcast_shape)):
if value_nd.shape[i] == 1:
squeeze_axes.append(i)
else:
break
if squeeze_axes:
value_nd = value_nd.squeeze(squeeze_axes)
if value_nd.shape != bcast_shape:
if value_nd.size == 0:
value_nd = value_nd.reshape(bcast_shape)
else:
value_nd = value_nd.broadcast_to(bcast_shape)
return value_nd
@wrap_mxnp_np_ufunc
def __add__(self, other):
"""x.__add__(y) <=> x + y"""
return add(self, other)
@wrap_mxnp_np_ufunc
def __iadd__(self, other):
"""x.__iadd__(y) <=> x += y"""
if not self.writable:
raise ValueError('trying to add to a readonly ndarray')
return add(self, other, out=self)
def __invert__(self):
"""x.__invert__() <=> ~x"""
return invert(self)
@wrap_mxnp_np_ufunc
def __and__(self, other):
"""x.__and__(y) <=> x & y"""
return bitwise_and(self, other)
@wrap_mxnp_np_ufunc
def __or__(self, other):
"""x.__or__(y) <=> x | y"""
return bitwise_or(self, other)
@wrap_mxnp_np_ufunc
def __xor__(self, other):
"""x.__xor__(y) <=> x ^ y"""
return bitwise_xor(self, other)
@wrap_mxnp_np_ufunc
def __iand__(self, other):
"""x.__iand__(y) <=> x &= y"""
return bitwise_and(self, other, out=self)
@wrap_mxnp_np_ufunc
def __ior__(self, other):
"""x.__ior__(y) <=> x |= y"""
return bitwise_or(self, other, out=self)
@wrap_mxnp_np_ufunc
def __ixor__(self, other):
"""x.__ixor__(y) <=> x ^= y"""
return bitwise_xor(self, other, out=self)
def __round__(self, n=0):
"""x.__round__(n)"""
return round(self, decimals=n)
def __abs__(self):
"""x.__abs__()"""
return absolute(self)
def __ceil__(self):
"""x.__ceil__()"""
return ceil(self)
def __floor__(self):
"""x.__floor__()"""
return floor(self)
def __trunc__(self):
"""x.__trunc__()"""
return trunc(self)
@wrap_mxnp_np_ufunc
def __sub__(self, other):
"""x.__sub__(y) <=> x - y"""
return subtract(self, other)
@wrap_mxnp_np_ufunc
def __isub__(self, other):
"""x.__isub__(y) <=> x -= y"""
if not self.writable:
raise ValueError('trying to subtract from a readonly ndarray')
return subtract(self, other, out=self)
@wrap_mxnp_np_ufunc
def __rsub__(self, other):
"""x.__rsub__(y) <=> y - x"""
return subtract(other, self)
@wrap_mxnp_np_ufunc
def __mul__(self, other):
"""x.__mul__(y) <=> x * y"""
return multiply(self, other)
def __neg__(self):
return negative(self)
@wrap_mxnp_np_ufunc
def __imul__(self, other):
"""x.__imul__(y) <=> x *= y"""
if not self.writable:
raise ValueError('trying to multiply a readonly ndarray')
return multiply(self, other, out=self)
@wrap_mxnp_np_ufunc
def __rmul__(self, other):
"""x.__rmul__(y) <=> y * x"""
return self.__mul__(other)
@wrap_mxnp_np_ufunc
def __div__(self, other):
"""x.__div__(y) <=> x / y"""
return divide(self, other)
@wrap_mxnp_np_ufunc
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y / x"""
return divide(other, self)
@wrap_mxnp_np_ufunc
def __idiv__(self, other):
"""x.__idiv__(y) <=> x /= y"""
return divide(self, other, out=self)
@wrap_mxnp_np_ufunc
def __truediv__(self, other):
"""x.__truediv__(y) <=> x / y"""
return divide(self, other)
@wrap_mxnp_np_ufunc
def __rtruediv__(self, other):
"""x.__rtruediv__(y) <=> y / x"""
return divide(other, self)
@wrap_mxnp_np_ufunc
def __itruediv__(self, other):
"""x.__itruediv__(y) <=> x /= y"""
return divide(self, other, out=self)
@wrap_mxnp_np_ufunc
def __mod__(self, other):
"""x.__mod__(y) <=> x % y"""
return mod(self, other)
@wrap_mxnp_np_ufunc
def __rmod__(self, other):
"""x.__rmod__(y) <=> y % x"""
return mod(other, self)
@wrap_mxnp_np_ufunc
def __imod__(self, other):
"""x.__imod__(y) <=> x %= y"""
return mod(self, other, out=self)
@wrap_mxnp_np_ufunc
def __pow__(self, other):
"""x.__pow__(y) <=> x ** y"""
return power(self, other)
@wrap_mxnp_np_ufunc
def __rpow__(self, other):
"""x.__rpow__(y) <=> y ** x"""
return power(other, self)
@wrap_mxnp_np_ufunc
def __eq__(self, other):
"""x.__eq__(y) <=> x == y"""
return equal(self, other)
def __hash__(self):
raise NotImplementedError
@wrap_mxnp_np_ufunc
def __ne__(self, other):
"""x.__ne__(y) <=> x != y"""
return not_equal(self, other)
@wrap_mxnp_np_ufunc
def __gt__(self, other):
"""x.__gt__(y) <=> x > y"""
return greater(self, other)
@wrap_mxnp_np_ufunc
def __ge__(self, other):
"""x.__ge__(y) <=> x >= y"""
return greater_equal(self, other)
@wrap_mxnp_np_ufunc
def __lt__(self, other):
"""x.__lt__(y) <=> x < y"""
return less(self, other)
@wrap_mxnp_np_ufunc
def __le__(self, other):
"""x.__le__(y) <=> x <= y"""
return less_equal(self, other)
@wrap_mxnp_np_ufunc
def __matmul__(self, other):
"""x.__matmul__(y) <=> x @ y"""
return matmul(self, other)
@wrap_mxnp_np_ufunc
def __rmatmul__(self, other):
"""x.__rmatmul__(y) <=> y @ x"""
return matmul(other, self)
@wrap_mxnp_np_ufunc
def __imatmul__(self, other):
"""x.__imatmul__(y) <=> x @= y"""
return matmul(self, other, out=self)
def __bool__(self):
num_elements = self.size
if num_elements == 0:
warnings.simplefilter('default')
warnings.warn('The truth value of an empty array is ambiguous. Returning False, but in'
' future this will result in an error.', DeprecationWarning)
return False
elif num_elements == 1:
return bool(self.item())
else:
raise ValueError("The truth value of an ndarray with multiple elements is ambiguous.")
__nonzero__ = __bool__
def __float__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return float(self.item())
def __int__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return int(self.item())
def __len__(self):
"""Number of elements along the first axis."""
shape = self.shape # pylint: disable=redefined-outer-name
if len(shape) == 0:
raise TypeError('len() of unsized object')
return self.shape[0]
def __reduce__(self):
return ndarray, (None,), self.__getstate__()
def item(self, *args):
"""Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
*args : Arguments (variable number and type)
none: in this case, the method only works for arrays with one element (a.size == 1),
which element is copied into a standard Python scalar object and returned.
int_type: this argument is interpreted as a flat index into the array, specifying which
element to copy and return.
tuple of int_types: functions as does a single int_type argument, except that the
argument is interpreted as an nd-index into the array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable Python scalar.
"""
# TODO(junwu): no need to call asnumpy() on the whole array.
return self.asnumpy().item(*args)
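# Illustrative usage (a sketch):
#   a = np.array([[1., 2.], [3., 4.]])
#   a.item()        # raises ValueError unless a.size == 1
#   a.item(3)       # -> 4.0 (flat index)
#   a.item((1, 0))  # -> 3.0 (nd-index)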
def nonzero(self):
"""Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""
return nonzero(self)
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Same as self.transpose(). This always returns a copy of self."""
return self.transpose()
# pylint: enable= invalid-name, undefined-variable
def all(self, axis=None, out=None, keepdims=False):
return _mx_nd_np.all(self, axis=axis, out=out, keepdims=keepdims)
def any(self, axis=None, out=None, keepdims=False):
return _mx_nd_np.any(self, axis=axis, out=out, keepdims=keepdims)
def as_nd_ndarray(self):
"""Convert mxnet.numpy.ndarray to mxnet.ndarray.NDArray to use its fluent methods."""
hdl = NDArrayHandle()
check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(handle=hdl, writable=self.writable)
def as_np_ndarray(self):
"""A convenience function for creating a numpy ndarray from the current ndarray
with zero copy. For this class, it just returns itself since it's already a
numpy ndarray."""
return self
def __repr__(self):
"""
Returns a string representation of the array.
The dtype of the ndarray will be appended if it's inconsistent with the current default dtype.
The context of the ndarray will be appended for devices other than CPU.
Examples
--------
>>> from mxnet import np, npx
>>> a = np.random.uniform(size=(2, 3))
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]])
>>> print(a)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]]
>>> a.dtype
dtype('float32')
>>> npx.set_np_float64()
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]], dtype=float32)
>>> npx.set_np_float64(default_float64=False)
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]])
>>> b = a.astype(np.float64)
>>> b
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64)
>>> print(b)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]]
>>> b.dtype
dtype('float64')
>>> c = a.copyto(npx.gpu(0))
>>> c
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]], ctx=gpu(0))
>>> print(c)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]] @gpu(0)
>>> d = b.copyto(npx.gpu(0))
>>> d
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64, ctx=gpu(0))
>>> print(d)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]] @gpu(0)
"""
array_str = self.asnumpy().__repr__()
dtype = self.dtype
default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
if 'dtype=' in array_str:
if dtype == default_dtype:
array_str = array_str[:array_str.rindex(',')] + ')'
elif dtype not in (default_dtype, _np.bool_):
array_str = array_str[:-1] + ', dtype={})'.format(dtype)
context = self.ctx
if context.device_type == 'cpu':
return array_str
return array_str[:-1] + ', ctx={})'.format(str(context))
def __str__(self):
"""Returns a string representation of the array."""
array_str = self.asnumpy().__str__()
context = self.ctx
if context.device_type == 'cpu' or self.ndim == 0:
return array_str
return '{array} @{ctx}'.format(array=array_str, ctx=context)
def __format__(self, fmt):
"""Return value.__format__(format_spec). Overwrite to include 0-d array"""
if self.ndim == 0:
return self.item().__format__(fmt)
elif len(fmt) == 0:
return self.__str__().__format__(fmt)
else:
raise TypeError("Cannot format mxnet.numpy.ndarray with format_spec")
def attach_grad(self, grad_req='write'): # pylint: disable=arguments-differ
"""Attach a gradient buffer to this ndarray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
- 'write': gradient will be overwritten on every backward.
- 'add': gradient will be added to existing value on every backward.
- 'null': do not compute gradient for this NDArray.
"""
grad = _mx_nd_np.zeros_like(self) # pylint: disable=undefined-variable
grad_req = _GRAD_REQ_MAP[grad_req]
check_call(_LIB.MXAutogradMarkVariables(
1, ctypes.pointer(self.handle),
ctypes.pointer(mx_uint(grad_req)),
ctypes.pointer(grad.handle)))
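# Typical usage (a sketch, assuming `from mxnet import np, autograd`):
#   x = np.array([1., 2., 3.])
#   x.attach_grad()
#   with autograd.record():
#       y = (x * x).sum()
#   y.backward()
#   x.grad   # -> array([2., 4., 6.])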
@property
def grad(self):
"""Returns gradient buffer attached to this ndarray."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
if hdl.value is None:
return None
return _np_ndarray_cls(hdl)
def detach(self):
"""Returns a new ndarray, detached from the current graph."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return _np_ndarray_cls(hdl)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): # pylint: disable=arguments-differ,unused-argument, too-many-arguments
"""
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array with `dtype`.
Notes
-----
This function differs from the official `ndarray`'s ``astype`` function in the following
aspects:
- `order` only supports 'C' and 'K'.
- `casting` only supports 'unsafe'.
- `subok` only supports ``True``.
"""
if order is not None and order != 'K' and order != 'C':
raise ValueError('order must be either \'K\' or \'C\'')
if casting != 'unsafe':
raise ValueError('casting must be equal to \'unsafe\'')
if not subok:
raise ValueError('subok must be equal to True')
if dtype is None:
dtype = _np.float32
if not copy and _np.dtype(dtype) == self.dtype:
return self
return _npi.cast(self, dtype=dtype)
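# Illustrative usage (a sketch):
#   a = np.ones((2,), dtype='float32')
#   a.astype('float64').dtype              # -> dtype('float64')
#   a.astype('float32', copy=False) is a   # -> True: no copy when the dtype matches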
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``ndarray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``np.ndarray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : ndarray or Context
The destination array or context.
Returns
-------
out: ndarray
The copied array. If ``other`` is an ``ndarray``, then the return value
and ``other`` will point to the same ``ndarray``.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = np.zeros((2, 3), ctx=npx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if isinstance(other, ndarray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _npi.copyto(self, out=other)
elif isinstance(other, Context):
hret = ndarray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _npi.copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def asscalar(self):
raise AttributeError('mxnet.numpy.ndarray object has no attribute asscalar')
def argmax(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the maximum values along the given axis.
Refer to `mxnet.numpy.argmax` for full documentation."""
return argmax(self, axis, out)
def as_in_context(self, context):
"""This function has been deprecated. Please refer to ``ndarray.as_in_ctx``."""
warnings.warn('ndarray.as_in_context has been renamed to'
' ndarray.as_in_ctx', DeprecationWarning)
return self.as_nd_ndarray().as_in_context(context).as_np_ndarray()
def as_in_ctx(self, ctx):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
ctx : Context
The target context.
Returns
-------
ndarray
The target array.
"""
if self.ctx == ctx:
return self
return self.copyto(ctx)
@property
def ctx(self):
"""Device context of the array.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.ctx
cpu(0)
>>> type(x.ctx)
<class 'mxnet.context.Context'>
>>> y = np.zeros((2, 3), npx.gpu(0))
>>> y.ctx
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
@property
def context(self):
"""This function has been deprecated. Please refer to ``ndarray.ctx``."""
warnings.warn('ndarray.context has been renamed to ndarray.ctx', DeprecationWarning)
return self.as_nd_ndarray().context
def copy(self, order='C'): # pylint: disable=arguments-differ
"""Return a coyp of the array, keeping the same context.
Parameters
----------
order : str
The memory layout of the copy. Currently, only c-contiguous memory
layout is supported.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = x.copy()
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if order != 'C':
raise NotImplementedError('ndarray.copy only supports order=\'C\', while '
'received {}'.format(str(order)))
return self.copyto(self.ctx)
def dot(self, b, out=None):
"""Dot product of two arrays.
Refer to ``numpy.dot`` for full documentation."""
return _mx_np_op.dot(self, b, out=out)
def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Returns a copy of the array with a new shape.
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""
order = 'C'
if len(kwargs) > 1:
raise TypeError('function takes at most 1 keyword argument')
if len(kwargs) == 1:
if 'order' not in kwargs:
raise TypeError("'{}' is an invalid keyword argument for this function"
.format(list(kwargs.keys())[0]))
order = kwargs.pop('order', 'C')
if order != 'C':
raise NotImplementedError('only supports C-order,'
' while received {}'.format(order))
if len(args) == 0:
raise TypeError('reshape() takes exactly 1 argument (0 given)')
if len(args) == 1 and isinstance(args[0], tuple):
return _mx_np_op.reshape(self, newshape=args[0], order=order)
else:
return _mx_np_op.reshape(self, newshape=args, order=order)
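# Both call forms described in the Notes above are accepted, e.g.:
#   a = np.arange(6)
#   a.reshape(2, 3).shape    # -> (2, 3)
#   a.reshape((2, 3)).shape  # -> (2, 3)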
def reshape_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape_like`.
The arguments are the same as for :py:func:`reshape_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reshape_like')
def reshape_view(self, *shape, **kwargs): # pylint: disable=redefined-outer-name
"""Returns a **view** of this array with a new shape without altering any data.
Inherited from NDArray.reshape.
"""
return super(ndarray, self).reshape(*shape, **kwargs)
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute zeros_like')
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ones_like')
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_axes')
def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ
"""Repeat elements of an array."""
return repeat(self, repeats=repeats, axis=axis)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pad')
def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ
"""Return a copy of the array with axis1 and axis2 interchanged.
Refer to `mxnet.numpy.swapaxes` for full documentation.
"""
return swapaxes(self, axis1, axis2)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split')
def split_v2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split_v2`.
The arguments are the same as for :py:func:`split_v2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split_v2')
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice')
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_axis')
def slice_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_like`.
The arguments are the same as for :py:func:`slice_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_like')
def slice_assign_scalar(self, value, begin, end, step):
"""
Assign the scalar to a cropped subset of this ndarray. The value is broadcast to the cropped
shape and cast to the same dtype as this ndarray.
Parameters
----------
value: numeric value
Value and this ndarray should be of the same data type.
The scalar is broadcast to the cropped shape of this ndarray.
begin: tuple of begin indices
end: tuple of end indices
step: tuple of step lengths
Returns
-------
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self)
def slice_assign(self, rhs, begin, end, step):
"""
Assign the rhs to a cropped subset of this ndarray in place.
Returns the view of this ndarray.
Parameters
----------
rhs: ndarray.
rhs and this NDArray should be of the same data type, and on the same device.
The shape of rhs should be the same as the cropped shape of this ndarray.
begin: tuple of begin indices
end: tuple of end indices
step: tuple of step lengths
Returns
-------
out : ndarray
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> assigned = np.zeros((1, 1, 2))
>>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign(self, rhs, begin=begin, end=end, step=step, out=self)
def take(self, indices, axis=None, mode='raise'): # pylint: disable=arguments-differ, redefined-outer-name
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
return take(self, indices, axis, mode=mode)
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute one_hot')
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pick')
def sort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
return sort(self, axis=axis, kind=kind, order=order)
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute topk')
def argsort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
return argsort(self, axis=axis, kind=kind, order=order)
def argmax_channel(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax_channel`.
The arguments are the same as for :py:func:`argmax_channel`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute argmax_channel')
def argmin(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the minium values along the given axis.
Refer to `mxnet.numpy.argmin` for full documentation."""
return argmin(self, axis, out)
def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ
"""Return an array whose values are limited to [min, max].
One of max or min must be given.
"""
return clip(self, min, max, out=out)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute abs')
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sign')
def flatten(self, order='C'): # pylint: disable=arguments-differ
"""Return a copy of the array collapsed into one dimension."""
return self.reshape(-1, order=order)
def shape_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`shape_array`.
The arguments are the same as for :py:func:`shape_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute shape_array')
def size_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`size_array`.
The arguments are the same as for :py:func:`size_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute size_array')
def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expand_dims')
def tile(self, reps): # pylint: disable=arguments-differ
"""Construct an array by repeating A the number of times given by reps.
Refer to `mxnet.numpy.tile` for full documentation."""
return tile(self, reps=reps)
def transpose(self, *axes): # pylint: disable=arguments-differ
"""Permute the dimensions of an array."""
if len(axes) == 0:
axes = None
elif len(axes) == 1:
if isinstance(axes[0], (tuple, list)):
axes = axes[0]
elif axes[0] is None:
axes = None
return transpose(self, axes=axes)
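# Illustrative call forms (a sketch):
#   a = np.ones((2, 3))
#   a.transpose().shape        # -> (3, 2)
#   a.transpose(1, 0).shape    # -> (3, 2)
#   a.transpose((1, 0)).shape  # -> (3, 2)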
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute flip')
def depth_to_space(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`depth_to_space`.
The arguments are the same as for :py:func:`depth_to_space`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute depth_to_space')
def space_to_depth(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`space_to_depth`.
The arguments are the same as for :py:func:`space_to_depth`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute space_to_depth')
def diag(self, k=0, **kwargs):
"""Convenience fluent method for :py:func:`diag`.
The arguments are the same as for :py:func:`diag`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute diag')
def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the sum of the array elements over the given axis."""
return sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nansum')
def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the product of the array elements over the given axis."""
return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nanprod')
def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Returns the average of the array elements along given axis."""
return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# pylint: disable=too-many-arguments, arguments-differ
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the standard deviation of the array elements along given axis."""
return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the variance of the array elements, along given axis."""
return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
# pylint: enable=too-many-arguments, arguments-differ
def cumsum(self, axis=None, dtype=None, out=None):
"""Return the cumulative sum of the elements along the given axis."""
return _mx_nd_np.cumsum(self, axis=axis, dtype=dtype, out=out)
def tolist(self):
return self.asnumpy().tolist()
def max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the maximum along a given axis."""
return _mx_nd_np.max(self, axis=axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`min`.
The arguments are the same as for :py:func:`min`, with
this array as data.
"""
return _mx_nd_np.min(self, axis=axis, out=out, keepdims=keepdims)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute norm')
def round(self, decimals=0, out=None, **kwargs): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
return round(self, decimals=decimals, out=out, **kwargs)
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rint')
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute fix')
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute floor')
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ceil')
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute trunc')
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sin')
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cos')
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tan')
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsin')
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccos')
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctan')
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute degrees')
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute radians')
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sinh')
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cosh')
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tanh')
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsinh')
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccosh')
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctanh')
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute exp')
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expm1')
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log')
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log10')
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log2')
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sqrt')
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rsqrt')
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cbrt')
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rcbrt')
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute square')
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reciprocal')
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute relu')
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sigmoid')
def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
The arguments are the same as for :py:func:`softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmax')
def log_softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log_softmax`.
The arguments are the same as for :py:func:`log_softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log_softmax')
def softmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmin`.
The arguments are the same as for :py:func:`softmin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmin')
def squeeze(self, axis=None): # pylint: disable=arguments-differ
"""Remove single-dimensional entries from the shape of a."""
return squeeze(self, axis=axis)
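# A minimal sketch of fluent ``squeeze`` usage, assuming ``np`` is ``mxnet.numpy``;
# only axes of length one can be removed:
# >>> x = np.ones((1, 3, 1))
# >>> x.squeeze().shape
# (3,)
# >>> x.squeeze(axis=0).shape
# (3, 1)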
def broadcast_to(self, shape):  # pylint: disable=redefined-outer-name
"""Broadcast this array to a new shape. See :py:func:`broadcast_to` for details."""
return _mx_nd_np.broadcast_to(self, shape)
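# A minimal sketch of the fluent ``broadcast_to`` method, assuming ``np`` is
# ``mxnet.numpy``; the target shape must be broadcast-compatible with the array:
# >>> np.array([1., 2., 3.]).broadcast_to((2, 3))
# array([[1., 2., 3.],
#        [1., 2., 3.]])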
def broadcast_like(self, other):
raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')
def _full(self, value):
"""
Currently for internal use only. Implemented for __setitem__.
Assign to self an array of self's same shape and type, filled with value.
"""
return _mx_nd_np.full(self.shape, value, ctx=self.ctx, dtype=self.dtype, out=self)
# pylint: disable=redefined-outer-name
def _scatter_set_nd(self, value_nd, indices):
"""
This is added as an ndarray class method in order to support polymorphism in NDArray and numpy.ndarray indexing
"""
return _npi.scatter_set_nd(
lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
)
# pylint: enable=redefined-outer-name
@property
def shape(self):
"""Tuple of array dimensions."""
return super(ndarray, self).shape
@property
def ndim(self):
"""Number of array dimensions."""
return len(self.shape)
@property
def size(self):
"""Number of elements in the array."""
return super(ndarray, self).size
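# A minimal sketch relating the ``shape``, ``ndim`` and ``size`` properties,
# assuming ``np`` is ``mxnet.numpy`` (``size`` is the product of the ``shape`` entries):
# >>> x = np.zeros((2, 3, 4))
# >>> x.shape, x.ndim, x.size
# ((2, 3, 4), 3, 24)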
@property
def dtype(self):
"""Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = np.zeros((2,3))
>>> x.dtype
dtype('float32')
>>> y = np.zeros((2,3), dtype='int32')
>>> y.dtype
dtype('int32')
"""
return _np.dtype(super(ndarray, self).dtype)
def tostype(self, stype):
raise AttributeError('mxnet.numpy.ndarray object has no attribute tostype')
@set_module('mxnet.numpy')
def empty(shape, dtype=float, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`.
Note that this behavior differs from NumPy's `empty` function, where `float64`
is the default: here the default dtype can be 'float32' or 'float64', because
`float32` is the conventional data type in deep learning.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and order.
Examples
--------
>>> np.empty([2, 2])
array([[ 0.000000e+00, -2.524355e-29],
[ nan, -8.592023e+09]]) # uninitialized
>>> np.empty([2, 2], dtype=int)
array([[8751743591039004782, 3196766424264760104],
[7583328881310196768, 562950123910254]], dtype=int64) # uninitialized
"""
if order != 'C':
raise NotImplementedError('`empty` only supports order equal to `C`, while received {}'
.format(str(order)))
if ctx is None:
ctx = current_context()
if dtype is None or dtype is float:
dtype = _np.float64 if is_np_default_dtype() else _np.float32
if isinstance(shape, int):
shape = (shape,)
return ndarray(handle=_new_alloc_handle(shape, ctx, False, dtype))
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def array(object, dtype=None, ctx=None):
"""
Create an array.
Parameters
----------
object : array_like or `numpy.ndarray` or `mxnet.numpy.ndarray`
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array.
The default dtype is ``object.dtype`` if `object` is an `ndarray`, `float32` otherwise.
The default dtype can be set to be consistent with official NumPy by `npx.set_np(dtype=True)`.
- When npx.is_np_default_dtype() returns False, default dtype is float32;
- When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
Examples
--------
>>> np.array([1, 2, 3])
array([1., 2., 3.])
>>> np.array([[1, 2], [3, 4]])
array([[1., 2.],
[3., 4.]])
>>> np.array([[1, 0], [0, 1]], dtype=bool)
array([[ True, False],
[False, True]])
>>> np.array([1, 2, 3]).dtype
dtype('float32')
>>> npx.set_np(dtype=True)
>>> np.array([1, 2, 3]).dtype
dtype('float64')
"""
if ctx is None:
ctx = current_context()
if isinstance(object, _np.ndarray):
if is_np_default_dtype():
dtype = object.dtype if dtype is None else dtype
else:
dtype = _np.float32 if dtype is None or object.dtype is _np.float64 else dtype
if isinstance(object, ndarray):
dtype = object.dtype if dtype is None else dtype
elif isinstance(object, NDArray):
raise ValueError("If you're trying to create a mxnet.numpy.ndarray "
"from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.")
else:
if dtype is None:
default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
dtype = object.dtype if hasattr(object, "dtype") else default_dtype
try:
object = _np.array(object, dtype=dtype)
except Exception as e:
# printing out the error raised by official NumPy's array function
# for transparency on users' side
raise TypeError('{}'.format(str(e)))
ret = empty(object.shape, dtype=dtype, ctx=ctx)
if len(object.shape) == 0:
ret[()] = object
else:
ret[:] = object
return ret
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
"""
return _mx_nd_np.shape(a)
@set_module('mxnet.numpy')
def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type. The default depends on your current default dtype:
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that this behavior differs from NumPy's `zeros` function, where `float64`
is the default: here the default dtype can be 'float32' or 'float64', because
`float32` is the conventional data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and ctx.
Examples
--------
>>> np.zeros(5)
array([0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0], dtype=int64)
>>> np.zeros((2, 1))
array([[0.],
[0.]])
"""
return _mx_nd_np.zeros(shape, dtype, order, ctx)
@set_module('mxnet.numpy')
def ones(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with ones.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type. The default depends on your current default dtype.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that this behavior is different from NumPy's `ones` function where
`float64` is the default value.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and ctx.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1], dtype=int64)
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
return _mx_nd_np.ones(shape, dtype, order, ctx)
@set_module('mxnet.numpy')
def broadcast_to(array, shape): # pylint: disable=redefined-outer-name
"""
Broadcast an array to a new shape.
Parameters
----------
array : ndarray or scalar
The array to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
MXNetError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
"""
return _mx_nd_np.broadcast_to(array, shape)
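# A minimal sketch of ``broadcast_to``, assuming ``np`` is ``mxnet.numpy``;
# a (3, 1) column is repeated along the trailing axis to fill a (3, 4) shape:
# >>> np.broadcast_to(np.arange(3.).reshape(3, 1), (3, 4))
# array([[0., 0., 0., 0.],
#        [1., 1., 1., 1.],
#        [2., 2., 2., 2.]])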
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or ndarray
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which the memory is allocated, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
If `fill_value` is an ndarray, out will have the same context as `fill_value`
regardless of the provided `ctx`.
Notes
-----
This function differs from the original `numpy.full
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html>`_ in
the following way(s):
- Has an additional `ctx` argument to specify the device
- Has an additional `out` argument
- Currently does not support `order` selection
See Also
--------
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), 10)
array([[10., 10.],
[10., 10.]])
>>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
array([[2, 2],
[2, 2]], dtype=int32)
"""
return _mx_nd_np.full(shape, fill_value, order=order, ctx=ctx, dtype=dtype, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : ndarray
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
subok : {False}, optional
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to False.
(Only False is supported at this moment.)
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
(Not supported at this moment)
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.empty_like(a)
array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized
[ 4567052944, -5764607523034234880, 844424930131968]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
[2.0e-323, 2.5e-323, 3.0e-323]])
"""
return _mx_nd_np.empty_like(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def all(a, axis=None, out=None, keepdims=False):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (axis = None) is to perform a logical AND over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
Returns
--------
all : ndarray, bool
A new boolean or array is returned unless out is specified,
in which case a reference to out is returned.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _mx_nd_np.all(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def any(a, axis=None, out=None, keepdims=False):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean unless `axis` is not None.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (axis = None) is to perform a logical OR over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
Returns
--------
any : bool or ndarray
A new boolean or ndarray is returned unless out is specified,
in which case a reference to out is returned.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _mx_nd_np.any(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def identity(n, dtype=None, ctx=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
return _mx_nd_np.identity(n, dtype, ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
r"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : ndarray
The source array.
indices : ndarray
The indices of the values to extract. Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'clip', 'wrap'}, optional
Specifies how out-of-bounds indices will behave.
* 'clip' -- clip to the range (default)
* 'wrap' -- wrap around
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray
The returned array has the same type as `a`.
Notes
-----
This function differs from the original `numpy.take
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in
the following way(s):
- Only ndarray or scalar ndarray is accepted as valid input.
Examples
--------
>>> a = np.array([4, 3, 5, 7, 6, 8])
>>> indices = np.array([0, 1, 4])
>>> np.take(a, indices)
array([4., 3., 6.])
In this example, `a` is an ndarray, so "fancy" indexing can be used.
>>> a[indices]
array([4., 3., 6.])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, np.array([[0, 1], [2, 3]]))
array([[4., 3.],
[5., 7.]])
"""
return _mx_nd_np.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : ndarray
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. The default is None.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
This function differs from the original `numpy.unique
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in
the following aspects:
- Only support ndarray as input.
- Object arrays or structured arrays are not supported.
Examples
--------
>>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
array([1., 2., 3.])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1., 2., 3.])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1., 0., 0.],
[2., 3., 4.]])
Return the indices of the original array that give the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 5, 3, 2], dtype=int64)
>>> a[indices]
array([1., 2., 3., 4., 6.])
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 4, 3, 1, 2, 1], dtype=int64)
>>> u[indices]
array([1., 2., 6., 4., 2., 3., 2.])
"""
return _mx_nd_np.unique(ar, return_index, return_inverse, return_counts, axis)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
add : ndarray or scalar
The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>>
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
"""
return _mx_nd_np.add(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
"""
Subtract arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be subtracted from each other. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape
of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
subtract : ndarray or scalar
The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[0., 0., 0.],
[3., 3., 3.],
[6., 6., 6.]])
"""
return _mx_nd_np.subtract(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The product of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
"""
return _mx_nd_np.multiply(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
"""
Returns a true division of the inputs, element-wise.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 or
float64 type, which depends on your current default dtype.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Examples
--------
>>> x = np.arange(5)
>>> np.divide(x, 4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
"""
return _mx_nd_np.divide(x1, x2, out=out)
@set_module('mxnet.numpy')
def true_divide(x1, x2, out=None):
"""Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 or
float64 type, which depends on your current default dtype.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
"""
return _mx_nd_np.true_divide(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.mod(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.mod(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmod(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.fmod(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None, **kwargs):
"""
Matrix product of two arrays.
Parameters
----------
a, b : ndarray
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored.
If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
MXNetError
If the last dimension of a is not the same size as the second-to-last dimension of b.
If a scalar value is passed in.
See Also
--------
tensordot :
Sum products over arbitrary axes.
dot :
alternative matrix product with different broadcasting rules.
einsum :
Einstein summation convention.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional matrices.
- If either argument is N-D, N > 2, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
matmul differs from dot in two important ways:
- Multiplication by scalars is not allowed, use multiply instead.
- Stacks of matrices are broadcast together as if the matrices were elements,
respecting the signature (n,k),(k,m)->(n,m):
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4., 1.],
[2., 2.]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1., 2.])
>>> np.matmul(b, a)
array([1., 2.])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a, b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
array(98.)
>>> sum(a[0, 1, :] * b[0, :, 1])
array(98.)
Scalar multiplication raises an error.
>>> np.matmul([1, 2], 3)
Traceback (most recent call last):
...
mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
"""
return _mx_nd_np.matmul(a, b, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.remainder(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.remainder(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
"""
First array elements raised to powers from second array, element-wise.
Parameters
----------
x1 : ndarray or scalar
The bases.
x2 : ndarray or scalar
The exponent.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The bases in x1 raised to the exponents in x2.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = np.arange(6)
>>> np.power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = np.array([1.0, 2.0, 3.0, 3.0, 2.0, 1.0])
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1., 2., 3., 3., 2., 1.],
[1., 2., 3., 3., 2., 1.]])
>>> np.power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
"""
return _mx_nd_np.power(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays for computing lowest common multiple. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape of
one or the other).
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm(np.arange(6, dtype=int), 20)
array([ 0, 20, 20, 60, 20, 20], dtype=int64)
"""
return _mx_nd_np.lcm(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
r"""
Trigonometric sine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The sine of each element of x. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sin(np.pi/2.)
1.0
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])
"""
return _mx_nd_np.sin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
r"""
Cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding cosine values. This is a scalar if x is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.cos(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.cos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sinh(0)
0.0
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.sinh(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.sinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cosh(0)
1.0
"""
return _mx_nd_np.cosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)``.
Parameters
----------
x : ndarray or scalar.
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
----------
y : ndarray or scalar
The corresponding hyperbolic tangent values.
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
- Input `x` does not support complex computation (e.g. imaginary numbers):
>>> np.tanh(np.pi*1j)
TypeError: type <type 'complex'> not supported
Examples
--------
>>> np.tanh(np.array([0, np.pi]))
array([0. , 0.9962721])
>>> np.tanh(np.pi)
0.99627207622075
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array(1)
>>> out2 = np.tanh(np.array(0.1), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.tanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.log10(np.array([1e-15, -3.]))
array([-15., nan])
"""
return _mx_nd_np.log10(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : ndarray or scalar
The values whose square-roots are required.
out : ndarray, or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sqrt(np.array([1,4,9]))
array([1., 2., 3.])
>>> np.sqrt(np.array([4, -1, _np.inf]))
array([ 2., nan, inf])
"""
return _mx_nd_np.sqrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
"""
Return the cube-root of an array, element-wise.
Parameters
----------
x : ndarray
The values whose cube-roots are required.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
y : ndarray
An array of the same shape as x, containing the cube-root of each element in x.
If out was provided, y is a reference to it. This is a scalar if x is a scalar.
Examples
----------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
"""
return _mx_nd_np.cbrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.abs(x)
array([1.2, 1.2])
"""
return _mx_nd_np.abs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fabs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs(np.array([-1.2, 1.2]))
array([ 1.2, 1.2])
"""
return _mx_nd_np.fabs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
"""
Calculate the absolute value element-wise.
np.abs is a shorthand for this function.
Parameters
----------
x : ndarray
Input array.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
absolute : ndarray
An ndarray containing the absolute value of each element in x.
Examples
----------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
"""
return _mx_nd_np.absolute(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
r"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.exp(1)
2.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.exp(x)
array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])
"""
return _mx_nd_np.exp(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
r"""
Calculate `exp(x) - 1` for all elements in the array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential minus one: `out = exp(x) - 1`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.expm1(1)
1.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.expm1(x)
array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])
"""
return _mx_nd_np.expm1(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
r"""
Inverse sine, element-wise.
Parameters
----------
x : ndarray or scalar
`y`-coordinate on the unit circle.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
angle : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
The inverse sine is also known as `asin` or sin^{-1}.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.arcsin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
the following aspects:
- Only support ndarray or scalar now.
- `where` argument is not supported.
- Complex input is not supported.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
"""
return _mx_nd_np.arcsin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
"""
Trigonometric inverse cosine, element-wise.
The inverse of cos so that, if y = cos(x), then x = arccos(y).
Parameters
----------
x : ndarray
x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that
the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
angle : ndarray
The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].
This is a scalar if x is a scalar.
Notes
----------
arccos is a multivalued function: for each x there are infinitely many numbers z such that
cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].
For real-valued input data types, arccos always returns real output.
For each value that cannot be expressed as a real number or infinity, it yields nan and sets
the invalid floating point error flag.
The inverse cos is also known as acos or cos^-1.
Examples
----------
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
"""
return _mx_nd_np.arccos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
r"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its values lie in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
This is a scalar if `x` is a scalar.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
Complex-valued input is not supported yet.
The inverse tangent is also known as `atan` or tan^{-1}.
Examples
--------
>>> x = np.array([0, 1])
>>> np.arctan(x)
array([0. , 0.7853982])
>>> np.pi/4
0.7853981633974483
"""
return _mx_nd_np.arctan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only real numbers are supported.
Parameters
----------
x : ndarray or a scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The sign of `x`.
This is a scalar if `x` is a scalar.
Note
-------
- Only real numbers are supported as input elements.
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([-5., 4.5])
>>> np.sign(a)
array([-1., 1.])
Scalars as input:
>>> np.sign(4.0)
1.0
>>> np.sign(0)
0
Use ``out`` parameter:
>>> b = np.zeros((2, ))
>>> np.sign(a, out=b)
array([-1., 1.])
>>> b
array([-1., 1.])
"""
return _mx_nd_np.sign(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and
``nan`` according to the input.
This function differs from the original `numpy.log
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
>>> np.log(a)
array([ 0., 1., 2., -inf], dtype=float64)
>>> # Using the default float32 dtype leads to slightly different behavior
>>> a = np.array([1, np.exp(1), np.exp(2), 0])
>>> np.log(a)
array([ 0., 0.99999994, 2., -inf])
>>> np.log(1)
0.0
"""
return _mx_nd_np.log(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.rint
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 1., 2., 2.])
"""
return _mx_nd_np.rint(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
"""
Base-2 logarithm of x.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The logarithm base two of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original `numpy.log2
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-inf, 0., 1., 4.])
"""
return _mx_nd_np.log2(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
Natural logarithm of 1 + x, element-wise. This is a scalar
if x is a scalar.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
Complex-valued input is not supported.
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> a = np.array([3, 4, 5])
>>> np.log1p(a)
array([1.3862944, 1.609438 , 1.7917595])
"""
return _mx_nd_np.log1p(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding degree values; if `out` was supplied this is a
reference to it.
This is a scalar if `x` is a scalar.
Notes
-------
This function differs from the original `numpy.degrees
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> rad = np.arange(12.) * np.pi / 6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> # Use specified ``out`` ndarray:
>>> out = np.zeros((rad.shape))
>>> np.degrees(rad, out)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> out
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
"""
return _mx_nd_np.degrees(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
r"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray or scalar
Angles in radians.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in degrees.
This is a scalar if `x` is a scalar.
Notes
-----
"rad2deg(x)" is "x * 180 / pi".
This function differs from the original numpy.rad2deg in the following aspects:
- Only float32 and float64 are supported.
- `out` must be the same size as the input.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
"""
return _mx_nd_np.rad2deg(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Input array in degrees.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding radian values. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.radians
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
dtype=float32)
"""
return _mx_nd_np.radians(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
r"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Angles in degrees.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in radians.
This is a scalar if `x` is a scalar.
Notes
-----
"deg2rad(x)" is "x * pi / 180".
This function differs from the original numpy.deg2rad in the following aspects:
- Only float32 and float64 are supported.
- `out` must be the same size as the input.
Examples
--------
>>> np.deg2rad(180)
3.1415927
"""
return _mx_nd_np.deg2rad(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
r"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : ndarray or scalar
The values whose reciprocals are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> x = np.array([1, 2., 3.33])
>>> np.reciprocal(x)
array([1. , 0.5 , 0.3003003])
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.reciprocal
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
"""
return _mx_nd_np.reciprocal(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
r"""
Return the element-wise square of the input.
Parameters
----------
x : ndarray or scalar
The values whose squares are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.square(2.)
4.0
>>> x = np.array([1, 2., -1])
>>> np.square(x)
array([1., 4., 1.])
Notes
-----
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.square
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
- Complex input is not supported.
"""
return _mx_nd_np.square(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
r"""
Numerical negative, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored.
If provided, it must have a shape that the inputs broadcast to.
If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length
equal to the number of outputs.
Returns
-------
y : ndarray or scalar
Returned array or scalar: y = -x. This is a scalar if x is a scalar.
Examples
--------
>>> np.negative(1)
-1
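A small further sketch with an array input (display formatting follows this module's other examples):
>>> x = np.array([1., -1.])
>>> np.negative(x)
array([-1., 1.])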
"""
return _mx_nd_np.negative(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
"""
Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
Parameters
----------
x : ndarray
An array of floats to be rounded
out : ndarray, optional
Output array
Returns
-------
y : ndarray or scalar
The rounded values as an ndarray of floats. This is a scalar if `x` is a scalar.
Examples
---------
>>> np.fix(3.14)
3
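A further sketch with an array input, showing rounding toward zero for both signs (formatting follows this module's other examples):
>>> np.fix(np.array([-2.1, 2.9]))
array([-2., 2.])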
"""
return _mx_nd_np.fix(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
r"""
Compute tangent element-wise.
Equivalent to np.sin(x)/np.cos(x) element-wise.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided,
it must have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a keyword argument)
must have length equal to the number of outputs.
Returns
-------
y : ndarray
The corresponding tangent values. This is a scalar if x is a scalar.
Examples
---------
>>> np.tan(np.array([-np.pi, np.pi/2, np.pi]))
array([-8.7422777e-08, -2.2877332e+07, 8.7422777e-08])
"""
return _mx_nd_np.tan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
r"""
Return the ceiling of the input, element-wise.
The ceil of the ndarray `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
>>> # if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.ceil(np.array(3.5), a)
array(4.)
>>> a
array(4.)
"""
return _mx_nd_np.ceil(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
r"""
Return the floor of the input, element-wise.
The floor of the ndarray `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
>>> # if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.floor(np.array(3.5), a)
array(3.)
>>> a
array(3.)
"""
return _mx_nd_np.floor(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _mx_nd_np.bitwise_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _mx_nd_np.bitwise_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.trunc in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
"""
return _mx_nd_np.trunc(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
r"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : ndarray or scalar
Logical NOT is applied to the elements of `x`.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.logical_not in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> x= np.array([True, False, 0, 1])
>>> np.logical_not(x)
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
"""
return _mx_nd_np.logical_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
r"""
Inverse hyperbolic sine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arcsinh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arcsinh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. DType of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arcsinh(a)
array([1.8798636, 2.3124383])
>>> np.arcsinh(0)
0.0
"""
return _mx_nd_np.arcsinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
r"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arccosh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arccosh(a)
array([1.8309381, 2.2924316])
>>> np.arccosh(1)
0.0
"""
return _mx_nd_np.arccosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
r"""
Inverse hyperbolic tangent, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arctanh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arctanh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([0.0, -0.5])
>>> np.arctanh(a)
array([0., -0.54930615])
>>> np.arctanh(0)
0.0
"""
return _mx_nd_np.arctanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : ndarray
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
"""
return _mx_nd_np.argsort(a, axis=axis, kind=kind, order=order)
@set_module('mxnet.numpy')
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : ndarray
Array to be sorted.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
"""
return _mx_nd_np.sort(a, axis=axis, kind=kind, order=order)
@set_module('mxnet.numpy')
def tensordot(a, b, axes=2):
r"""
tensordot(a, b, axes=2)
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an ndarray object containing two ndarray
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : ndarray, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) ndarray
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) ndarray
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements must be of the same length.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
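A shape-only sketch of the integer ``axes`` form: ``axes=1`` contracts the last axis of `a` with the first axis of `b`, like a matrix product.
>>> a = np.arange(6.).reshape(2, 3)
>>> b = np.arange(12.).reshape(3, 4)
>>> np.tensordot(a, b, axes=1).shape
(2, 4)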
"""
return _mx_nd_np.tensordot(a, b, axes)
@set_module('mxnet.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments
"""
Compute the histogram of a set of data.
Parameters
----------
a : ndarray
Input data. The histogram is computed over the flattened array.
bins : int or ndarray
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float)
The lower and upper range of the bins. Required when `bins` is an integer.
Values outside the range are ignored. The first element of the range must
be less than or equal to the second.
normed : bool, optional
Not supported yet, coming soon.
weights : array_like, optional
Not supported yet, coming soon.
density : bool, optional
Not supported yet, coming soon.
Examples
--------
>>> np.histogram(np.arange(4), bins=np.arange(5))
[array([1, 1, 1, 1], dtype=int64), array([0., 1., 2., 3., 4.])]
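A sketch of the integer ``bins`` form; as noted above, ``range`` is required in that case, and the output layout follows the example above:
>>> np.histogram(np.arange(4), bins=2, range=(0., 4.))
[array([2, 2], dtype=int64), array([0., 2., 4.])]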
"""
return _mx_nd_np.histogram(a, bins=bins, range=range, normed=normed, weights=weights, density=density)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def eye(N, M=None, k=0, dtype=float, **kwargs):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to N.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero,
except for the k-th diagonal, whose values are equal to one.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]], dtype=int64)
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
return _mx_nd_np.eye(N, M, k, dtype, **kwargs)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""
Return evenly spaced numbers over a specified interval.
Returns num evenly spaced samples, calculated over the interval [start, stop].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : real number
The starting value of the sequence.
stop : real number
The end value of the sequence, unless endpoint is set to False. In
that case, the sequence consists of all but the last of num + 1
evenly spaced samples, so that stop is excluded. Note that the step
size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, stop is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype : dtype, optional
The type of the output array. If dtype is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start or
stop are array-like. By default (0), the samples will be along a new
axis inserted at the beginning. Use -1 to get an axis at the end.
Returns
-------
samples : ndarray
There are num equally spaced samples in the closed interval
`[start, stop]` or the half-open interval `[start, stop)`
(depending on whether endpoint is True or False).
step : float, optional
Only returned if retstep is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
Notes
-----
This function differs from the original `numpy.linspace
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
the following aspects:
- `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
- axis could only be 0
- There could be an additional `ctx` argument to specify the device, e.g. the i-th
GPU.
"""
return _mx_nd_np.linspace(start, stop, num, endpoint, retstep, dtype, axis, ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):
r"""Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : int or float
``base ** start`` is the starting value of the sequence.
stop : int or float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Currently, only ``axis=0`` is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
...
>>> power(base, y).astype(dtype)
...
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.44347, 464.15887, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.82794, 316.22775, 562.3413 ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396843, 6.349604 , 8. ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
array([4, 5, 6, 8], dtype=int32)
>>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))
array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))
"""
return _mx_nd_np.logspace(start, stop, num, endpoint, base, dtype, axis, ctx=ctx)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def expand_dims(a, axis):
"""Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
a : ndarray
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
squeeze : The inverse operation, removing singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1., 2.]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
>>> y
array([[1.],
[2.]])
>>> y.shape
(2, 1)
Note that some examples may use None instead of np.newaxis. These are the same objects:
>>> np.newaxis is None
True
"""
return _npi.expand_dims(a, axis)
@set_module('mxnet.numpy')
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : ndarray or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0., 1., 2., 0., 1., 2.])
>>> np.tile(a, (2, 2))
array([[0., 1., 2., 0., 1., 2.],
[0., 1., 2., 0., 1., 2.]])
>>> np.tile(a, (2, 1, 2))
array([[[0., 1., 2., 0., 1., 2.]],
[[0., 1., 2., 0., 1., 2.]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1., 2., 1., 2.],
[3., 4., 3., 4.]])
>>> np.tile(b, (2, 1))
array([[1., 2.],
[3., 4.],
[1., 2.],
[3., 4.]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
Scalar as input:
>>> np.tile(2, 3)
array([2, 2, 2]) # repeating integer `2`
"""
return _mx_nd_np.tile(A, reps)
@set_module('mxnet.numpy')
def trace(a, offset=0, axis1=0, axis2=1, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : ndarray
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
out : ndarray, optional
Array into which the output is placed. It must be of the right shape
and right type to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
Examples
--------
>>> a = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> np.trace(a)
array(3.)
>>> a = np.arange(8).reshape((2, 2, 2))
>>> np.trace(a)
array([6., 8.])
>>> a = np.arange(24).reshape((2, 2, 2, 3))
>>> np.trace(a).shape
(2, 3)
"""
return _mx_nd_np.trace(a, offset, axis1, axis2, out)
@set_module('mxnet.numpy')
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : ndarray
Input array.
axes : list of ints, optional
By default, reverse the dimensions,
otherwise permute the axes according to the values given.
Returns
-------
p : ndarray
a with its axes permuted.
Notes
-----
This function differs from the original `numpy.transpose
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html>`_ in
the following way(s):
- only ndarray is accepted as valid input, python iterables are not supported
- the operator always returns an `ndarray` that does not share the memory with the input
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0., 1.],
[2., 3.]])
>>> np.transpose(x)
array([[0., 2.],
[1., 3.]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return _mx_nd_np.transpose(a, axes)
@set_module('mxnet.numpy')
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int
The number of repetitions for each element.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _mx_nd_np.repeat(a, repeats, axis)
@set_module('mxnet.numpy')
def tril(m, k=0):
r"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : ndarray, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
>>> np.tril(a, -1)
array([[ 0., 0., 0.],
[ 4., 0., 0.],
[ 7., 8., 0.],
[10., 11., 12.]])
"""
return _mx_nd_np.tril(m, k)
@set_module('mxnet.numpy')
def tri(N, M=None, k=0, dtype=None, ctx=None): # pylint: disable=redefined-outer-name
r"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.]])
"""
return _mx_nd_np.tri(N, M, k, dtype, ctx)
@set_module('mxnet.numpy')
def triu_indices(n, k=0, m=None, ctx=None): # pylint: disable=redefined-outer-name
r"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, ..., 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return _mx_nd_np.triu_indices(n, k, m, ctx)
@set_module('mxnet.numpy')
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
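Examples
--------
A small shape-only sketch, mirroring the `triu_indices` examples:
>>> a = np.arange(16).reshape(4, 4)
>>> iu = np.triu_indices_from(a)
>>> a[iu].shape
(10,)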
"""
return _mx_nd_np.triu_indices_from(arr, k)
@set_module('mxnet.numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
if m is None:
m = n
return tuple(_mx_nd_np.tril_indices(n, k, m))
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def triu(m, k=0):
r"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
return _mx_nd_np.triu(m, k)
@set_module('mxnet.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
"""Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array.
Default dtype can be set to be consistent with official numpy by `npx.set_np(dtype=True)`.
- When npx.is_np_default_dtype() returns False, default dtype is float32;
- When npx.is_np_default_dtype() returns True, default dtype is int64.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
Examples
--------
>>> np.arange(3)
array([0., 1., 2.])
>>> np.arange(3.0)
array([0., 1., 2.])
>>> np.arange(3,7)
array([3., 4., 5., 6.])
>>> np.arange(3,7,2)
array([3., 5.])
>>> np.arange(3).dtype
dtype('float32')
>>> npx.set_np(dtype=True)
>>> np.arange(3).dtype
dtype('int64')
"""
return _mx_nd_np.arange(start, stop, step, dtype, ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.split(x, [3, 5, 6, 8])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
"""
return _mx_nd_np.split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def array_split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible, then for
an array of length l that should be split into n sections, it returns
l % n sub-arrays of size l//n + 1 and the rest of size l//n.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
Param used to determine the number and size of the subarray.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Examples
--------
>>> x = np.arange(9.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.array_split(x, [3, 5, 6, 8])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
"""
return _mx_nd_np.array_split(ary, indices_or_sections, axis=axis)
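# A minimal comparison sketch (not part of the library), assuming `from mxnet import np`:
# unlike `split`, `array_split` accepts a section count that does not evenly
# divide the axis, as described in the docstring above.
#
#     x = np.arange(7.0)
#     np.array_split(x, 3)   # -> sub-arrays of sizes 3, 2, 2
#     # np.split(x, 3)       # would raise an error, since 7 is not divisible by 3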
@set_module('mxnet.numpy')
def vsplit(ary, indices_or_sections):
r"""
vsplit(ary, indices_or_sections)
Split an array into multiple sub-arrays vertically (row-wise).
``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split
along the first axis regardless of the array dimension.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 0. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 0 the array is split. For example, ``[2, 3]`` would result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along axis 0, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Notes
-------
This function differs from the original `numpy.vsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in
the following aspects:
- Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
- In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[0., 1., 2., 3.],
[4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])]
>>> # With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[0., 1.],
[2., 3.]]]), array([[[4., 5.],
[6., 7.]]])]
"""
return _mx_nd_np.vsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def dsplit(ary, indices_or_sections):
r"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 2. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 2 the array is split. For example, ``[2, 3]`` would result in
- ary[:, :, :2]
- ary[:, :, 2:3]
- ary[:, :, 3:]
If an index exceeds the dimension of the array along axis 2, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Notes
-------
This function differs from the original `numpy.dsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.dsplit.html>`_ in
the following aspects:
- Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
- In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 2,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]), array([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
array([], shape=(2, 2, 0), dtype=float64)]
"""
return _mx_nd_np.dsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def concatenate(seq, axis=0, out=None):
"""Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1., 2.],
[3., 4.],
[5., 6.]])
>>> np.concatenate((a, b.T), axis=1)
array([[1., 2., 5.],
[3., 4., 6.]])
>>> np.concatenate((a, b), axis=None)
array([1., 2., 3., 4., 5., 6.])
"""
return _mx_nd_np.concatenate(seq, axis=axis, out=out)
@set_module('mxnet.numpy')
def append(arr, values, axis=None): # pylint: disable=redefined-outer-name
"""
Append values to the end of an array.
Parameters
----------
arr : ndarray
Values are appended to a copy of this array.
values : ndarray
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
Examples
--------
>>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
When `axis` is specified, `values` must have the correct shape.
>>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
"""
return _mx_nd_np.append(arr, values, axis=axis)
@set_module('mxnet.numpy')
def stack(arrays, axis=0, out=None):
"""Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be correct,
matching that of what stack would have returned if no out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.rand(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.stack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> np.stack((a, b), axis=-1)
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.stack(arrays, axis=axis, out=out)
@set_module('mxnet.numpy')
def vstack(arrays, out=None):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
return _mx_nd_np.vstack(arrays)
@set_module('mxnet.numpy')
def row_stack(arrays):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
return _mx_nd_np.row_stack(arrays)
@set_module('mxnet.numpy')
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
--------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.column_stack(tup)
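# A minimal equivalence sketch (not part of the library), assuming `from mxnet import np`:
# for 1-D inputs, `column_stack` matches stacking along a new last axis.
#
#     a = np.array([1, 2, 3])
#     b = np.array([2, 3, 4])
#     np.column_stack((a, b))     # shape (3, 2)
#     np.stack((a, b), axis=-1)   # same values and shape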
@set_module('mxnet.numpy')
def hstack(arrays):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis,
except for 1-D arrays where it concatenates along the first axis.
Rebuilds arrays divided by hsplit.
This function makes most sense for arrays with up to 3 dimensions.
For instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions concatenate,
stack and block provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
Examples
--------
>>> from mxnet import np,npx
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1., 2., 3., 2., 3., 4.])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.hstack(arrays)
@set_module('mxnet.numpy')
def dstack(arrays):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of arrays
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _npi.dstack(*arrays)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([2., 5., 4.])
>>> np.maximum(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[1. , 2. ],
[0.5, 2. ]])
"""
return _mx_nd_np.maximum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting. (Ignores NaNs)
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmax(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([2., 5., 4.])
>>> np.fmax(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[1. , 2. ],
[0.5, 2. ]])
"""
return _mx_nd_np.fmax(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.minimum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([1., 3., 2.])
>>> np.minimum(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[0.5, 0. ],
[0. , 1. ]])
"""
return _mx_nd_np.minimum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting. (Ignores NaNs)
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmin(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([1., 3., 2.])
>>> np.fmin(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[0.5, 0. ],
[0. , 1. ]])
"""
return _mx_nd_np.fmin(x1, x2, out=out)
@set_module('mxnet.numpy')
def max(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.max(a) # Maximum of the flattened array
array(3.)
>>> np.max(a, axis=0) # Maxima along the first axis
array([2., 3.])
>>> np.max(a, axis=1) # Maxima along the second axis
array([1., 3.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.max(b)
array(4.)
"""
return _mx_nd_np.max(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def min(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.min(a) # Minimum of the flattened array
array(0.)
>>> np.min(a, axis=0) # Minima along the first axis
array([0., 1.])
>>> np.min(a, axis=1) # Minima along the second axis
array([0., 2.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.min(b)
array(0.) # nan will be ignored
"""
return _mx_nd_np.min(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Parameters
----------
a : ndarray
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
Swapped array. This is always a copy of the input array.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1.],
[2.],
[3.]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> np.swapaxes(x,0,2)
array([[[0., 4.],
[2., 6.]],
[[1., 5.],
[3., 7.]]])
"""
return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : ndarray
Array containing elements to clip.
a_min : scalar or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Notes
-----
array_like `a_min` and `a_max` are not supported.
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.])
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.clip(a, 3, 6, out=a)
array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
"""
from numbers import Number
if isinstance(a, Number):
# In case input is a scalar, the computation would fall back to native numpy.
# The value returned would be a python scalar.
return _np.clip(a, a_min, a_max, out=None)
return _mx_nd_np.clip(a, a_min, a_max, out=out)
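# A minimal workaround sketch (not part of the library), assuming `from mxnet import np`:
# since array-valued `a_min`/`a_max` are not supported here, element-wise bounds
# can be emulated with the broadcasting `maximum`/`minimum` operators.
#
#     a = np.arange(5.0)
#     lower = np.array([1.0, 1.0, 1.0, 0.0, 0.0])
#     upper = np.array([3.0, 3.0, 3.0, 2.0, 2.0])
#     clipped = np.minimum(np.maximum(a, lower), upper)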
@set_module('mxnet.numpy')
def argmax(a, axis=None, out=None):
r"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of indices whose dtype is the same as that of the input ndarray.
Array of indices into the array. It has the same shape as `a`,
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmax
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmax(a)
array(5.)
>>> np.argmax(a, axis=0)
array([1., 1., 1.])
>>> np.argmax(a, axis=1)
array([2., 2.])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0., 5., 2., 3., 4., 5.])
>>> np.argmax(b) # Only the first occurrence is returned.
array(1.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmax(a, axis=1, out=b)
array([2., 2.])
>>> b
array([2., 2.])
"""
return _mx_nd_np.argmax(a, axis, out)
@set_module('mxnet.numpy')
def argmin(a, axis=None, out=None):
r"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of indices whose dtype is the same as that of the input ndarray.
Array of indices into the array. It has the same shape as `a`,
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmin(a)
array(0.)
>>> np.argmin(a, axis=0)
array([0., 0., 0.])
>>> np.argmin(a, axis=1)
array([0., 0.])
>>> b = np.arange(6)
>>> b[2] = 0
>>> b
array([0., 1., 0., 3., 4., 5.])
>>> np.argmin(b) # Only the first occurrence is returned.
array(0.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmin(a, axis=1, out=b)
array([0., 0.])
>>> b
array([0., 0.])
"""
return _mx_nd_np.argmin(a, axis, out)
@set_module('mxnet.numpy')
def amax(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.max(a) # Maximum of the flattened array
array(3.)
>>> np.max(a, axis=0) # Maxima along the first axis
array([2., 3.])
>>> np.max(a, axis=1) # Maxima along the second axis
array([1., 3.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.max(b)
array(4.)
"""
return _mx_nd_np.amax(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def amin(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.min(a) # Minimum of the flattened array
array(0.)
>>> np.min(a, axis=0) # Minima along the first axis
array([0., 1.])
>>> np.min(a, axis=1) # Minima along the second axis
array([0., 2.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.min(b)
array(0.) # nan will be ignored
"""
return _mx_nd_np.amin(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
"""
Compute the weighted average along the specified axis.
Parameters
--------
a : ndarray
Array containing data to be averaged.
axis : None or int or tuple of ints, optional
Axis or axes along which to average a.
The default, axis=None, will average over
all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
New in version 1.7.0.
If axis is a tuple of ints, averaging is
performed on all of the axes specified in the tuple
instead of a single axis or all the axes as before.
weights : ndarray, optional
An array of weights associated with the values in a; it must have the same dtype as a.
Each value in a contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of a along the given axis) or of the same shape as a.
If weights=None, then all data in a are assumed to have a weight equal to one.
The 1-D calculation is: avg = sum(a * weights) / sum(weights)
The only constraint on weights is that sum(weights) must not be 0.
returned : bool, optional
Default is False.
If True, the tuple (average, sum_of_weights) is returned,
otherwise only the average is returned.
If weights=None, sum_of_weights is equivalent to
the number of elements over which the average is taken.
out : ndarray, optional
If provided, the calculation is done into this array.
Returns
--------
retval, [sum_of_weights] : ndarray
Return the average along the specified axis.
When returned is True, return a tuple with the average as the first element
and the sum of the weights as the second element. sum_of_weights is of the same type as retval.
If a is integral, the result dtype will be current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
otherwise it will be the same as dtype of a.
Raises
--------
MXNetError
- When all weights along axis sum to zero.
- When the length of 1D weights is not the same as the shape of a along axis.
- When 1D weights are given but the axis is not specified or is not an int.
- When the shape of weights and a differ, but weights are not 1D.
See also
--------
mean
Notes
--------
This function differs from the original `numpy.average
<https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
the following way(s):
- Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
- Does not support complex dtype
- The dtypes of a and weights must be the same
- Integral a results in float32 or float64 returned dtype:
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
Examples
--------
>>> data = np.arange(1, 5)
>>> data
array([1., 2., 3., 4.])
>>> np.average(data)
array(2.5)
>>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
array(4.)
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0., 1.],
[2., 3.],
[4., 5.]])
>>> weights = np.array([0.25, 0.75])
>>> weights
array([0.25, 0.75])
>>> np.average(data, axis=1, weights=weights)
array([0.75, 2.75, 4.75])
"""
return _mx_nd_np.average(a, axis=axis, weights=weights, returned=returned, out=out)
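# A minimal cross-check sketch (not part of the library), assuming `from mxnet import np`:
# the 1-D weighted rule from the docstring, avg = sum(a * weights) / sum(weights).
#
#     a = np.arange(1, 11)
#     w = np.arange(10, 0, -1)
#     manual = (a * w).sum() / w.sum()   # -> 4.0, same as np.average(a, weights=w)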
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements.
The average is taken over the flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
ndarray containing numbers whose mean is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean.
For integer inputs, the default is of your current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
For floating point inputs, it is the same as the input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default is None; if provided,
it must have the same shape and type as the expected output.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of ndarray, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If out=None, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
Notes
-----
This function differs from the original `numpy.mean
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
- only ndarray is accepted as valid input, python iterables or scalar is not supported
- default data type for integer input is float32 or float64, which depends on your current default dtype
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
array(2.5)
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.mean(a)
array(0.55)
>>> np.mean(a, dtype=np.float64)
array(0.55, dtype=float64)
"""
return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
array(0.45)
>>> np.std(a, dtype=np.float64)
array(0.45, dtype=float64)
"""
return _mx_nd_np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: enable=redefined-outer-name
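# A minimal relationship sketch (not part of the library), assuming `from mxnet import np`:
# the divisor is N - ddof, and the standard deviation is the square root of the
# corresponding variance.
#
#     a = np.array([[1, 2], [3, 4]])
#     biased = np.sqrt(np.var(a, ddof=0))   # same value as np.std(a)
#     sample = np.sqrt(np.var(a, ddof=1))   # sample standard deviation (divisor N - 1)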
@set_module('mxnet.numpy')
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : ndarray
Input array.
obj : slice, int or ndarray of ints
Indicate indices of sub-arrays to remove along the specified axis.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, 1, 0)
array([[ 1., 2., 3., 4.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, slice(None, None, 2), 1)
array([[ 2., 4.],
[ 6., 8.],
[10., 12.]])
>>> np.delete(arr, np.array([1,3,5]), None)
array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])
>>> np.delete(arr, np.array([1,1,5]), None)
array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])
"""
return _mx_nd_np.delete(arr, obj, axis=axis)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance.
For arrays of integer type, the default is of your current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64.
For arrays of float types it is the same as the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
array(1.25)
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
array(0.2025)
>>> np.var(a, dtype=np.float64)
array(0.2025, dtype=float64)
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
return _mx_nd_np.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def indices(dimensions, dtype=None, ctx=None):
"""Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : data-type, optional
The desired data-type for the array. Default is `int64`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]], dtype=int64)
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]], dtype=int64)
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0., 1., 2.],
[4., 5., 6.]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
return _mx_nd_np.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
r"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : ndarray or scalar
Values to change the sign of.
x2 : ndarray or scalar
The sign of `x2` is copied to `x1`.
out : ndarray or None, optional
A location into which the result is stored. It must be of the
right shape and right type to hold the output. If not provided
or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-------
This function differs from the original `numpy.copysign
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
the following aspects:
- ``where`` param is not supported.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> a = np.array([-1, 0, 1])
>>> np.copysign(a, -1.1)
array([-1., -0., -1.])
>>> np.copysign(a, np.arange(3)-1)
array([-1., 0., 1.])
"""
return _mx_nd_np.copysign(x1, x2, out=out)
@set_module('mxnet.numpy')
def ravel(x, order='C'):
r"""
ravel(x)
Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
x : ndarray
Input array. The elements in `x` are read in row-major, C-style order and
packed as a 1-D array.
order : `C`, optional
Only row-major, C-style order is supported.
Returns
-------
y : ndarray
y is an array of the same subtype as `x`, with shape ``(x.size,)``.
Note that matrices are special cased for backward compatibility, if `x`
is a matrix, then y is a 1-D ndarray.
Notes
-----
This function differs from the original numpy.ravel in the following aspects:
- Only row-major, C-style order is supported.
Examples
--------
It is equivalent to ``reshape(x, -1)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1. 2. 3. 4. 5. 6.]
>>> print(x.reshape(-1))
[1. 2. 3. 4. 5. 6.]
>>> print(np.ravel(x.T))
[1. 4. 2. 5. 3. 6.]
"""
return _mx_nd_np.ravel(x, order)
@set_module('mxnet.numpy')
def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name
"""
Converts a flat index or array of flat indices into a tuple of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened version of an array of dimensions shape.
Before version 1.6.0, this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling indices.
order : Only row-major is supported currently.
Returns
-------
unraveled_coords : ndarray
Each row in the ndarray has the same shape as the indices array.
Each column in the ndarray represents the unravelled index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
[[3. 6. 6.]
[4. 5. 1.]]
>>> np.unravel_index(1621, (6,7,8,9))
[3, 1, 4, 1]
"""
return _mx_nd_np.unravel_index(indices, shape, order=order)
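# A minimal arithmetic sketch (not part of the library), using only plain Python:
# for a 2-D shape (rows, cols), unraveling a flat index is integer division and
# remainder, consistent with the docstring example 22 -> (3, 4) for shape (7, 6).
#
#     flat, cols = 22, 6
#     row, col = flat // cols, flat % cols   # -> (3, 4)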
@set_module('mxnet.numpy')
def flatnonzero(a):
r"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return _mx_nd_np.flatnonzero(a)
@set_module('mxnet.numpy')
def diag_indices_from(arr):
"""
This returns a tuple of indices that can be used to access the main diagonal of an array
a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is
the usual diagonal, for a.ndim > 2 this is the set of indices to access
a[i, i, ..., i] for i = [0..n-1].
Parameters
----------
arr : ndarray
Input array for accessing the main diagonal. All dimensions
should have equal length.
Returns
-------
diag: tuple of ndarray
indices of the main diagonal.
Examples
--------
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> idx = np.diag_indices_from(a)
>>> idx
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[idx] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
"""
return _mx_nd_np.diag_indices_from(arr)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hanning(M, dtype=None, ctx=None):
r"""Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hamming
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
0.07937312, 0. ])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
Text(0.5, 1.0, 'Hann window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.hanning(M, dtype=dtype, ctx=ctx)
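# A minimal formula cross-check sketch (not part of the library), using only the
# Python standard library: w(n) = 0.5 - 0.5*cos(2*pi*n/(M-1)), as in the Notes above.
#
#     import math
#     M = 5
#     w = [0.5 - 0.5 * math.cos(2 * math.pi * n / (M - 1)) for n in range(M)]
#     # -> [0.0, 0.5, 1.0, 0.5, 0.0] (up to floating point rounding)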
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hamming(M, dtype=None, ctx=None):
r"""Return the hamming window.
The hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hanning
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,
0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
0.15302327, 0.08000001])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("hamming window")
Text(0.5, 1.0, 'hamming window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.hamming(M, dtype=dtype, ctx=ctx)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def blackman(M, dtype=None, ctx=None):
r"""Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
hamming, hanning
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,
7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,
4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.blackman(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("blackman window")
Text(0.5, 1.0, 'blackman window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.numpy')
def flip(m, axis=None, out=None):
r"""
flip(m, axis=None, out=None)
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : ndarray or scalar
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
out : ndarray or scalar, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
out : ndarray or scalar
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
"""
return _mx_nd_np.flip(m, axis, out=out)
@set_module('mxnet.numpy')
def flipud(m):
r"""
flipud(*args, **kwargs)
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag(np.array([1.0, 2, 3]))
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.flipud(A)
array([[0., 0., 3.],
[0., 2., 0.],
[1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
array(True)
>>> np.flipud(np.array([1,2]))
array([2., 1.])
"""
return flip(m, 0)
@set_module('mxnet.numpy')
def fliplr(m):
r"""
fliplr(*args, **kwargs)
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.fliplr(A)
array([[0., 0., 1.],
[0., 2., 0.],
[3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
array(True)
"""
return flip(m, 1)
@set_module('mxnet.numpy')
def around(x, decimals=0, out=None, **kwargs):
r"""
around(x, decimals=0, out=None)
Evenly round to the given number of decimals.
Parameters
----------
x : ndarray or scalar
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
rounded_array : ndarray or scalar
An array of the same type as `x`, containing the rounded values.
A reference to the result is returned.
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
This function differs from the original numpy.around in the following aspects:
- Cannot cast types automatically; the dtype of `out` must match the expected output dtype.
- Does not support complex-valued inputs.
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1, 2, 3, 11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _mx_nd_np.around(x, decimals, out=out, **kwargs)
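# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API):
# Python's built-in round() also uses round-half-to-even, so it can be used to
# sanity-check the half-way cases described in the Notes above.
def _round_half_to_even_sketch(values, decimals=0):
    """Round each element of a plain Python list with banker's rounding."""
    return [round(v, decimals) for v in values]
# Example (plain Python, no ndarray involved):
#   _round_half_to_even_sketch([0.5, 1.5, 2.5, 3.5, 4.5]) -> [0.0, 2.0, 2.0, 4.0, 4.0]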
@set_module('mxnet.numpy')
def round(x, decimals=0, out=None, **kwargs):
r"""
round(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return _mx_nd_np.round(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
def round_(x, decimals=0, out=None, **kwargs):
r"""
round_(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return _mx_nd_np.round_(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
r"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : ndarray or scalar
`y`-coordinates.
x2 : ndarray or scalar
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
`x1` and `x2` are scalars.
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
This function differs from the original numpy.arctan2 in the following aspects:
- Only supports float16, float32 and float64.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> x = np.array([1, -1])
>>> y = np.array([0, 0])
>>> np.arctan2(x, y)
array([ 1.5707964, -1.5707964])
"""
return _mx_nd_np.arctan2(x1, x2, out=out)
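# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API):
# math.atan2 follows the same quadrant convention as arctan2 (y first, x second),
# which makes it handy for spot-checking single values against the table above.
def _arctan2_scalar_sketch(y, x):
    """Signed angle in radians of the point (x, y), in the range [-pi, pi]."""
    import math
    return math.atan2(y, x)
# For instance, _arctan2_scalar_sketch(-1.0, -1.0) is -3*pi/4 (third quadrant),
# matching the -135 degree entry of the first example above.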
@set_module('mxnet.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
r"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
This function differs from the original numpy.hypot in the following aspects:
- Only supports float16, float32 and float64.
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
"""
return _mx_nd_np.hypot(x1, x2, out=out)
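# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API):
# element-wise hypot is just sqrt(x1**2 + x2**2); math.hypot gives the same scalar
# result with better protection against intermediate overflow.
def _hypot_scalar_sketch(a, b):
    """Hypotenuse of a right triangle with legs a and b (plain Python floats)."""
    import math
    return math.hypot(a, b)  # equals math.sqrt(a * a + b * b) up to rounding
# _hypot_scalar_sketch(3.0, 4.0) -> 5.0, matching the broadcast examples above.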
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise AND of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
array([12,  1], dtype=int32)
>>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
array([0, 1], dtype=int32)
>>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
array([ 2, 4, 16], dtype=int32)
>>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _mx_nd_np.bitwise_and(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise XOR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_xor(13, 17)
28
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor(np.array([31,3], dtype=np.int32), 5)
array([26, 6], dtype=int32)
>>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([26, 5], dtype=int32)
>>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _mx_nd_np.bitwise_xor(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise OR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_or(13, 17)
29
>>> np.bitwise_or(31, 5)
31
>>> np.bitwise_or(np.array([31,3], dtype=np.int32), 5)
array([31, 7])
>>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([31, 7])
>>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, True])
"""
return _mx_nd_np.bitwise_or(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : ndarray or scalar
Array of multipliers.
x2 : ndarray or scalar, int
Array of twos exponents.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Unlike numpy, `x2` may be a float as well as an int.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.])
"""
return _mx_nd_np.ldexp(x1, x2, out)
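# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): for
# integer exponents the operation is literally x1 * 2**x2, and math.ldexp performs
# the same computation for scalars.
def _ldexp_scalar_sketch(mantissa, exponent):
    """Return mantissa * 2**exponent for a plain Python float and int."""
    import math
    return math.ldexp(mantissa, exponent)
# _ldexp_scalar_sketch(5.0, 3) -> 40.0, the last entry of np.ldexp(5, np.arange(4)) above.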
@set_module('mxnet.numpy')
def vdot(a, b):
r"""
Return the dot product of two vectors.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : ndarray
First argument to the dot product.
b : ndarray
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
array(30.)
>>> np.vdot(b, a)
array(30.)
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.numpy')
def inner(a, b):
r"""Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : ndarray
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
array(2.)
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14., 38., 62.],
[ 86., 110., 134.]])
"""
return tensordot(a, b, [-1, -1])
@set_module('mxnet.numpy')
def outer(a, b):
r"""Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) ndarray
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) ndarray
Second input vector. Input is flattened if
not already 1-dimensional.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to N dimensions and other operations.
``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
"""
return tensordot(a.flatten(), b.flatten(), 0)
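# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): the
# block written out in the docstring above is just a nested loop over the two
# flattened vectors; this plain-Python version makes that structure explicit.
def _outer_sketch(a, b):
    """Outer product of two plain Python sequences as a list of lists."""
    return [[ai * bj for bj in b] for ai in a]
# _outer_sketch([1, 2], [0, 1, 2, 3, 4]) -> [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]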
@set_module('mxnet.numpy')
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=too-many-arguments
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : ndarray
Components of the first vector(s).
b : ndarray
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
Notes
-----
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = np.array([1., 2., 3.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([-3., 6., -3.])
One vector with dimension 2.
>>> x = np.array([1., 2.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([12., -6., -3.])
Equivalently:
>>> x = np.array([1., 2., 0.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([12., -6., -3.])
Both vectors with dimension 2.
>>> x = np.array([1., 2.])
>>> y = np.array([4., 5.])
>>> np.cross(x, y)
array(-3.)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1., 2., 3.], [4., 5., 6.]])
>>> y = np.array([[4., 5., 6.], [1., 2., 3.]])
>>> np.cross(x, y)
array([[-3., 6., -3.],
[ 3., -6., 3.]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3., 3.],
[ 6., -6.],
[-3., 3.]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> y = np.array([[7., 8., 9.], [4., 5., 6.], [1., 2., 3.]])
>>> np.cross(x, y)
array([[ -6., 12., -6.],
[ 0., 0., 0.],
[ 6., -12., 6.]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24., 48., -24.],
[-30., 60., -30.],
[-36., 72., -36.]])
"""
return _mx_nd_np.cross(a, b, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
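# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): for a
# single pair of vectors the cross product reduces to the familiar determinant
# formula, with 2-vectors padded by a zero z-component as described above. Unlike
# the library's scalar special case, this sketch always returns the full 3-vector.
def _cross3_sketch(a, b):
    """Cross product of two length-2 or length-3 plain Python sequences."""
    ax, ay, az = (list(a) + [0.0])[:3]
    bx, by, bz = (list(b) + [0.0])[:3]
    return [ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx]
# _cross3_sketch([1., 2., 3.], [4., 5., 6.]) -> [-3.0, 6.0, -3.0], matching the
# first example above.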
@set_module('mxnet.numpy')
def kron(a, b):
r"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : ndarray
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
"""
return _mx_nd_np.kron(a, b)
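# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): the
# 2-D block structure spelled out in the Notes above, written as explicit loops
# over plain Python lists of lists.
def _kron2d_sketch(a, b):
    """Kronecker product of two 2-D plain Python matrices (lists of lists)."""
    rows_a, cols_a = len(a), len(a[0])
    rows_b, cols_b = len(b), len(b[0])
    out = [[0] * (cols_a * cols_b) for _ in range(rows_a * rows_b)]
    for i in range(rows_a):
        for j in range(cols_a):
            for p in range(rows_b):
                for q in range(cols_b):
                    # k_t = i_t * s_t + j_t on each axis, as in the Notes above
                    out[i * rows_b + p][j * cols_b + q] = a[i][j] * b[p][q]
    return out
# _kron2d_sketch([[1, 10]], [[5, 6, 7]]) -> [[5, 6, 7, 50, 60, 70]]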
@set_module('mxnet.numpy')
def equal(x1, x2, out=None):
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.equal(1, np.ones(1))
array([ True])
"""
return _mx_nd_np.equal(x1, x2, out)
@set_module('mxnet.numpy')
def not_equal(x1, x2, out=None):
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False])
"""
return _mx_nd_np.not_equal(x1, x2, out)
@set_module('mxnet.numpy')
def greater(x1, x2, out=None):
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater(1, np.ones(1))
array([False])
"""
return _mx_nd_np.greater(x1, x2, out)
@set_module('mxnet.numpy')
def less(x1, x2, out=None):
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
       [False, False, False]])
>>> np.less(1, np.ones(1))
array([False])
"""
return _mx_nd_np.less(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_and(x1, x2, out=None):
r"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical AND is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_or, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _mx_nd_np.logical_and(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_or(x1, x2, out=None):
r"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([True, True])
"""
return _mx_nd_np.logical_or(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_xor(x1, x2, out=None):
r"""
Compute the truth value of x1 XOR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_or, bitwise_or
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _mx_nd_np.logical_xor(x1, x2, out)
@set_module('mxnet.numpy')
def greater_equal(x1, x2, out=None):
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater_equal(1, np.ones(1))
array([True])
"""
return _mx_nd_np.greater_equal(x1, x2, out)
@set_module('mxnet.numpy')
def less_equal(x1, x2, out=None):
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less_equal(1, np.ones(1))
array([True])
"""
return _mx_nd_np.less_equal(x1, x2, out)
@set_module('mxnet.numpy')
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : ndarray
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
Notes
-----
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8., 9., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> np.roll(x, -2)
array([2., 3., 4., 5., 6., 7., 8., 9., 0., 1.])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]])
>>> np.roll(x2, 1)
array([[9., 0., 1., 2., 3.],
[4., 5., 6., 7., 8.]])
>>> np.roll(x2, -1)
array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 0.]])
>>> np.roll(x2, 1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, -1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, 1, axis=1)
array([[4., 0., 1., 2., 3.],
[9., 5., 6., 7., 8.]])
>>> np.roll(x2, -1, axis=1)
array([[1., 2., 3., 4., 0.],
[6., 7., 8., 9., 5.]])
"""
return _mx_nd_np.roll(a, shift, axis=axis)
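# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): for a
# 1-D sequence, rolling is a slice-and-concatenate; the last `shift` elements move
# to the front (or to the back for a negative shift).
def _roll1d_sketch(seq, shift):
    """Roll a plain Python list by `shift` positions, wrapping around."""
    n = len(seq)
    if n == 0:
        return list(seq)
    shift %= n  # wrapping makes shifts larger than the length equivalent
    if shift == 0:
        return list(seq)
    return list(seq[-shift:]) + list(seq[:-shift])
# _roll1d_sketch(list(range(10)), 2)  -> [8, 9, 0, 1, 2, 3, 4, 5, 6, 7]
# _roll1d_sketch(list(range(10)), -2) -> [2, 3, 4, 5, 6, 7, 8, 9, 0, 1]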
@set_module('mxnet.numpy')
def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
Parameters
----------
m : ndarray
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
Notes
-----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], 'int')
>>> m
array([[1, 2],
[3, 4]], dtype=int64)
>>> np.rot90(m)
array([[2, 4],
[1, 3]], dtype=int64)
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]], dtype=int64)
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1., 3.],
[0., 2.]],
[[5., 7.],
[4., 6.]]])
"""
return _mx_nd_np.rot90(m, k=k, axes=axes)
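# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): for a
# 2-D matrix with k=1 in the (0, 1) plane, a 90-degree rotation is "reverse each
# row, then transpose", which reproduces the first example in the docstring above.
def _rot90_2d_sketch(m):
    """Rotate a 2-D plain Python matrix (list of lists) by 90 degrees once."""
    reversed_rows = [list(reversed(row)) for row in m]
    return [list(col) for col in zip(*reversed_rows)]
# _rot90_2d_sketch([[1, 2], [3, 4]]) -> [[2, 4], [1, 3]]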
@set_module('mxnet.numpy')
def hsplit(ary, indices_or_sections):
"""Split an array into multiple sub-arrays horizontally (column-wise).
This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
dimension, and to ``split`` with ``axis=1`` otherwise.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int, list of ints or tuple of ints.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a list of sorted integers, the entries
indicate where along `axis` the array is split.
If an index exceeds the dimension of the array along `axis`,
an error is raised, so each index must be less than or
equal to the dimension of the array along `axis`.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Notes
------
- If `indices_or_sections` is given as an integer but a split does not
  result in equal division, a ValueError is raised.
- If `indices_or_sections` is the integer 1, an error is raised,
  because a single output from split is not supported yet.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
>>> np.hsplit(x, [3, 6])
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
array([[ 3.],
[ 7.],
[11.],
[15.]]),
array([], shape=(4, 0), dtype=float32)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
If ``ary`` has one dimension, the split is along ``axis=0``.
>>> x = np.arange(4)
>>> x
array([0., 1., 2., 3.])
>>> np.hsplit(x, 2)
[array([0., 1.]), array([2., 3.])]
To produce an empty sub-array, repeat an index in `indices_or_sections`:
>>> np.hsplit(x, [2, 2])
[array([0., 1.]), array([], dtype=float32), array([2., 3.])]
"""
return _mx_nd_np.hsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def einsum(*operands, **kwargs):
r"""
einsum(subscripts, *operands, out=None, optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of ndarray
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
optimize : {False, True}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
Notes
-----
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`np.trace`.
* Return a diagonal, :py:func:`np.diag`.
* Array axis summations, :py:func:`np.sum`.
* Transpositions and permutations, :py:func:`np.transpose`.
* Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.
* Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.
* Tensor contractions, :py:func:`np.tensordot`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <np.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
The ``optimize`` argument controls whether the contraction order of an
einsum expression is optimized. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. 'optimal' is not supported
for now.
This function differs from the original `numpy.einsum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
the following way(s):
- Does not support 'optimal' strategy
- Does not support the alternative subscript like
`einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
- Does not produce view in any cases
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
array(60.)
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0., 6., 12., 18., 24.])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10., 35., 60., 85., 110.])
>>> np.sum(a, axis=1)
array([ 10., 35., 60., 85., 110.])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10., 35., 60., 85., 110.])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.einsum('ij->ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.transpose(c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
Vector inner products:
>>> np.einsum('i,i', b, b)
array(30.)
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.dot(a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.einsum('...j,j', a, b)
array([ 30., 80., 130., 180., 230.])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.einsum(',ij', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.multiply(3, c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0., 1., 2., 3., 4.],
[0., 2., 4., 6., 8.]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('k...,jk', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path. Performance
improvements can be particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
# Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
# Greedy `einsum` (faster optimal path approximation): ~0.117ms
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)
"""
return _mx_nd_np.einsum(*operands, **kwargs)
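# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): the
# subscript string 'ij,jk->ik' from the Notes above is ordinary matrix
# multiplication; writing it as explicit loops shows how the repeated label 'j' is
# summed away while the remaining labels index the output.
def _einsum_ij_jk_ik_sketch(a, b):
    """Matrix product of two 2-D plain Python matrices via explicit index loops."""
    rows, inner_dim, cols = len(a), len(b), len(b[0])
    out = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        for k in range(cols):
            for j in range(inner_dim):  # repeated label 'j' is summed
                out[i][k] += a[i][j] * b[j][k]
    return out
# _einsum_ij_jk_ik_sketch([[1, 2], [3, 4]], [[5, 6], [7, 8]]) -> [[19, 22], [43, 50]]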
@set_module('mxnet.numpy')
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : ndarray
Input array.
obj : int, slice or ndarray of int64
Object that defines the index or indices before which `values` is
inserted.
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (only int32 and int64 elements are supported).
values : ndarray
Values to insert into `arr`.
If the type of values is different from that of arr, values is converted
to the type of arr.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
Notes
-----
- Note that for higher dimensional inserts `obj=0` behaves very differently
  from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
- If `obj` is an ndarray, its dtype must be int64
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1., 1.],
[2., 2.],
[3., 3.]])
>>> np.insert(a, 1, np.array(5))
array([1., 5., 1., 2., 2., 3., 3.])
>>> np.insert(a, 1, np.array(5), axis=1)
array([[1., 5., 1.],
[2., 5., 2.],
[3., 5., 3.]])
Difference between sequence and scalars:
>>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> b = a.flatten()
>>> b
array([1., 1., 2., 2., 3., 3.])
>>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
array([1., 1., 5., 6., 2., 2., 3., 3.])
>>> np.insert(b, slice(2, 4), np.array([5, 6]))
array([1., 1., 5., 2., 6., 2., 3., 3.])
# type casting
>>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))
array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)
>>> x = np.arange(8).reshape(2, 4)
>>> idx = np.array([1, 3], dtype=np.int64)
>>> np.insert(x, idx, np.array([999]), axis=1)
array([[ 0., 999., 1., 2., 999., 3.],
[ 4., 999., 5., 6., 999., 7.]])
"""
return _mx_nd_np.insert(arr, obj, values, axis=axis)
@set_module('mxnet.numpy')
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
Parameters
----------
a : ndarray
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
ndarray.nonzero :
Equivalent ndarray method.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]], dtype=int32)
>>> np.nonzero(x)
(array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.stack(np.nonzero(x)))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]], dtype=int64)
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9], dtype=int32)
>>> a[a > 3]
array([4, 5, 6, 7, 8, 9], dtype=int32)
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
"""
return _mx_nd_np.nonzero(a)
@set_module('mxnet.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th percentile of the data along the specified axis.
Returns the q-th percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array
q : array_like
Percentile or sequence of percentiles to compute.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The default is to
compute the percentile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must have the same
shape and buffer length as the expected output, but the type (of the output)
will be cast if necessary.
overwrite_input : bool, optional (Not supported yet)
If True, then allow the input array a to be modified by intermediate calculations,
to save memory. In this case, the contents of the input a after this function
completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use when the
desired percentile lies between two data points i < j:
'linear': i + (j - i) * fraction, where fraction is the fractional part of the
index surrounded by i and j.
'lower': i.
'higher': j.
'nearest': i or j, whichever is nearest.
'midpoint': (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
Returns
-------
percentile : scalar or ndarray
Output array.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, np.array(50))
array(3.5)
>>> np.percentile(a, np.array(50), axis=0)
array([6.5, 4.5, 2.5])
>>> np.percentile(a, np.array(50), axis=1)
array([7., 2.])
>>> np.percentile(a, np.array(50), axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.percentile(a, np.array(50), axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, np.array(50), axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> m
array([6.5, 4.5, 2.5])
"""
return _mx_nd_np.percentile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
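# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): the
# 'linear' interpolation rule from the docstring above, for a flat 1-D sample. The
# q-th percentile sits at virtual index q/100 * (n - 1), and the two neighbouring
# order statistics are blended by the fractional part of that index.
def _percentile_linear_sketch(values, q):
    """q-th percentile (0 <= q <= 100) of a plain Python list, 'linear' rule."""
    s = sorted(values)
    pos = q / 100.0 * (len(s) - 1)
    i = int(pos)       # lower neighbour
    frac = pos - i     # fractional part of the virtual index
    if i + 1 >= len(s):
        return float(s[-1])
    return s[i] + (s[i + 1] - s[i]) * frac
# _percentile_linear_sketch([10, 7, 4, 3, 2, 1], 50) -> 3.5, matching
# np.percentile(a, np.array(50)) in the example above.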
@set_module('mxnet.numpy')
def median(a, axis=None, out=None, overwrite_input=None, keepdims=False):
r"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float32``, then the output data-type is
``np.float32``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([7., 2.])
"""
return _mx_nd_np.median(a, axis=axis, overwrite_input=overwrite_input,
keepdims=keepdims, out=out)
@set_module('mxnet.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th quantile of the data along the specified axis.
New in version 1.15.0.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
q : ndarray
Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed.
The default is to compute the quantile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j:
linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j, whichever is nearest.
midpoint: (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original array a.
Returns
-------
quantile : ndarray
If q is a single quantile and axis=None, then the result is a scalar.
If multiple quantiles are given, first axis of the result corresponds to the quantiles.
The other axes are the axes that remain after the reduction of a.
If out is specified, that array is returned instead.
See also
--------
mean
Notes
-----
Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum
to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors
as well as the interpolation parameter will determine the quantile if the normalized ranking
does not match the location of q exactly. This function is the same as the median if q=0.5,
the same as the minimum if q=0.0 and the same as the maximum if q=1.0.
This function differs from the original `numpy.quantile
<https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
the following aspects:
- `q` must be an ndarray, even if it is a scalar
- Does not support overwrite_input
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10., 7., 4.],
[3., 2., 1.]])
>>> q = np.array(0.5)
>>> q
array(0.5)
>>> np.quantile(a, q)
array(3.5)
>>> np.quantile(a, q, axis=0)
array([6.5, 4.5, 2.5])
>>> np.quantile(a, q, axis=1)
array([7., 2.])
>>> np.quantile(a, q, axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.quantile(a, q, axis=0)
>>> out = np.zeros_like(m)
>>> np.quantile(a, q, axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> out
array([6.5, 4.5, 2.5])
"""
return _mx_nd_np.quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@set_module('mxnet.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
This function differs from the original `numpy.shares_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- It is actually the same as `may_share_memory` in MXNet DeepNumPy
"""
return _mx_nd_np.shares_memory(a, b, max_work)
@set_module('mxnet.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
This function differs from the original `numpy.may_share_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- It is actually the same as `shares_memory` in MXNet DeepNumPy
"""
return _mx_nd_np.may_share_memory(a, b, max_work)
@set_module('mxnet.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name
r"""
Calculate the n-th discrete difference along the given axis.
Parameters
----------
a : ndarray
Input array
n : int, optional
The number of times values are differenced. If zero, the input is returned as-is.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
prepend, append : ndarray, optional
Not supported yet
Returns
-------
diff : ndarray
The n-th differences.
The shape of the output is the same as a except along axis where the dimension is smaller by n.
The type of the output is the same as the type of the difference between any two elements of a.
This is the same as the type of a in most cases.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
Notes
-----
Optional inputs `prepend` and `append` are not supported yet
"""
    if prepend is not None or append is not None:
raise NotImplementedError('prepend and append options are not supported yet')
return _mx_nd_np.diff(a, n=n, axis=axis)
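# Illustrative sketch (hypothetical helper, not part of the mxnet.numpy API): the
# n-th discrete difference is the first difference applied n times, which is why
# the output loses one element along the axis per application.
def _diff1d_sketch(seq, n=1):
    """n-th discrete difference of a plain Python list."""
    out = list(seq)
    for _ in range(n):
        out = [out[i + 1] - out[i] for i in range(len(out) - 1)]
    return out
# _diff1d_sketch([1, 2, 4, 7, 0])      -> [1, 2, 3, -7]
# _diff1d_sketch([1, 2, 4, 7, 0], n=2) -> [1, 1, -10]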
@set_module('mxnet.numpy')
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : ndarray
If necessary, will be flattened before the differences are taken.
to_end : ndarray or scalar, optional
Number(s) to append at the end of the returned differences.
to_begin : ndarray or scalar, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1., 2., 3., -7.])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
array([-99., 1., 2., 3., -7., 88., 99.])
The returned array is always 1D.
>>> y = np.array([[1, 2, 4], [1, 6, 24]])
>>> np.ediff1d(y)
array([ 1., 2., -3., 5., 18.])
>>> np.ediff1d(x, to_begin=y)
array([ 1., 2., 4., 1., 6., 24., 1., 2., 3., -7.])
"""
return _mx_nd_np.ediff1d(ary, to_end=to_end, to_begin=to_begin)
@set_module('mxnet.numpy')
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : ndarray
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Notes
-----
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, taken
from `a` as they are laid out in memory, disregarding strides and axes.
(This is in case the new shape is smaller. For larger, see above.)
This functionality is therefore not suitable to resize images,
or data where each axis represents a separate and distinct entity.
Examples
--------
>>> a = np.array([[0, 1], [2, 3]])
>>> np.resize(a, (2, 3))
array([[0., 1., 2.],
[3., 0., 1.]])
>>> np.resize(a, (1, 4))
array([[0., 1., 2., 3.]])
>>> np.resize(a,(2, 4))
array([[0., 1., 2., 3.],
[0., 1., 2., 3.]])
"""
return _mx_nd_np.resize(a, new_shape)
@set_module('mxnet.numpy')
def interp(x, xp, fp, left=None, right=None, period=None): # pylint: disable=too-many-arguments
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : ndarray
The x-coordinates of the interpolated values.
xp : 1-D array of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D array of floats
The y-coordinates of the data points, same length as `xp`.
left : optional float corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
right : optional float corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float (corresponding to fp) or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
return _mx_nd_np.interp(x, xp, fp, left=left, right=right, period=period)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which to create the array, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1], dtype=int64)
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0], dtype=int64)
>>> np.full_like(x, 0.1, dtype=np.float64)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
>>> np.full_like(x, np.nan, dtype=np.float64)
array([nan, nan, nan, nan, nan, nan], dtype=float64)
>>> y = np.arange(6, dtype=np.float32)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
return _mx_nd_np.full_like(a, fill_value=fill_value, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which to create the array, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.zeros_like(x)
array([[0., 0., 0.],
[0., 0., 0.]])
>>> np.zeros_like(x, int)
array([[0, 0, 0],
[0, 0, 0]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.zeros_like(y)
array([0., 0., 0.], dtype=float64)
"""
return _mx_nd_np.full_like(a, fill_value=0, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which to create the array, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of ones with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.ones_like(x)
array([[1., 1., 1.],
[1., 1., 1.]])
>>> np.ones_like(x, int)
array([[1, 1, 1],
[1, 1, 1]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.ones_like(y)
array([1., 1., 1.], dtype=float64)
"""
return _mx_nd_np.full_like(a, fill_value=1, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def fill_diagonal(a, val, wrap=False):
"""
Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
locations with indices ``a[i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affects only tall matrices.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[0, 0, 0]])
>>> # tall matrices wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[4, 0, 0]])
>>> # wide matrices
>>> a = np.zeros((3, 5), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
The anti-diagonal can be filled by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.zeros((3, 3), int);
>>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
>>> a
array([[0, 0, 1],
[0, 2, 0],
[3, 0, 0]])
>>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
>>> a
array([[0, 0, 3],
[0, 2, 0],
[1, 0, 0]])
Note that the order in which the diagonal is filled varies depending
on the flip function.
"""
_mx_nd_np.fill_diagonal(a, val=val, wrap=wrap)
@set_module('mxnet.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
"""
Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
`nan` keyword, infinity is replaced by the largest finite floating point
values representable by ``x.dtype`` or by the user defined value in
`posinf` keyword and -infinity is replaced by the most negative finite
floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
x : scalar or ndarray
Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
Gluon does not support copy = False.
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
.. versionadded:: 1.13
Returns
-------
out : ndarray
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.nan_to_num(np.inf)
1.7976931348623157e+308
>>> np.nan_to_num(-np.inf)
-1.7976931348623157e+308
>>> np.nan_to_num(np.nan)
0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,
1.2800000e+02])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
1.2800000e+02])
>>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype="float64")/0
>>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y)
array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],
[ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)
>>> np.nan_to_num(y, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
"""
return _mx_nd_np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
@set_module('mxnet.numpy')
def squeeze(x, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Raises
------
ValueError
If `axis` is not `None`, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
"""
return _mx_nd_np.squeeze(x, axis=axis)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is NaN, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This function differs from the original `numpy.isnan
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
array([ True, False, False])
"""
return _mx_nd_np.isnan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
"""
Test element-wise for positive or negative infinity.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive or negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
This function differs from the original `numpy.isinf
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isinf(x, y)
array([ True, False, True])
>>> y
array([ True, False, True])
"""
return _mx_nd_np.isinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
"""
Test element-wise for positive infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isposinf(np.inf)
True
>>> np.isposinf(-np.inf)
False
>>> np.isposinf(np.nan)
False
>>> np.isposinf(np.array([-np.inf, 0., np.inf]))
array([False, False, True])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isposinf(x, y)
array([False, False, True])
>>> y
array([False, False, True])
"""
return _mx_nd_np.isposinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
"""
Test element-wise for negative infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isneginf(-np.inf)
True
>>> np.isneginf(np.inf)
False
>>> np.isneginf(float('-inf'))
True
>>> np.isneginf(np.array([-np.inf, 0., np.inf]))
array([ True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isneginf(x, y)
array([ True, False, False])
>>> y
array([ True, False, False])
"""
return _mx_nd_np.isneginf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
"""
Test element-wise for finiteness (not infinity or not Not a Number).
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is finite (not NaN and not infinity), false otherwise.
This is a scalar if x is a scalar.
Notes
-----
Not a Number, positive infinity and negative infinity are considered to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity.
But infinity is equivalent to positive infinity. Errors result if the second argument
is also supplied when x is a scalar input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(-np.inf)
False
>>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isfinite(x, y)
array([False, True, False])
>>> y
array([False, True, False])
"""
return _mx_nd_np.isfinite(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def where(condition, x=None, y=None):
"""where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. The rest of this documentation
covers only the case where all three arguments are provided.
Parameters
----------
condition : ndarray
Where True, yield `x`, otherwise yield `y`.
x, y : ndarray
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape. `x` and `y` must have the same dtype.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.where(a < 5, a, 10*a)
array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])
This can be used on multidimensional arrays too:
>>> cond = np.array([[True, False], [True, True]])
>>> x = np.array([[1, 2], [3, 4]])
>>> y = np.array([[9, 8], [7, 6]])
>>> np.where(cond, x, y)
array([[1., 8.],
[3., 4.]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = onp.ogrid[:3, :4]
>>> x = np.array(x)
>>> y = np.array(y)
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]], dtype=int64)
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0., 1., 2.],
[ 0., 2., -1.],
[ 0., 3., -1.]])
"""
return _mx_nd_np.where(condition, x, y)
@set_module('mxnet.numpy')
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If p is of length N, this function returns the value:
p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
If x is a sequence, then p(x) is returned for each element of x.
If x is another polynomial then the composite polynomial p(x(t)) is returned.
Parameters
----------
p : ndarray
1D array of polynomial coefficients (including coefficients equal to zero)
from highest degree to the constant term.
x : ndarray
An array of numbers, at which to evaluate p.
Returns
-------
values : ndarray
Result array of polynomials
Notes
-----
This function differs from the original `numpy.polyval
<https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
the following way(s):
- Does not support poly1d.
- `x` should be an ndarray even if it contains only one element.
Examples
--------
>>> p = np.array([3, 0, 1])
>>> p
array([3., 0., 1.])
>>> x = np.array([5])
>>> x
array([5.])
>>> np.polyval(p, x)  # 3 * 5**2 + 0 * 5**1 + 1
array([76.])
>>> x = np.array([5, 4])
>>> x
array([5., 4.])
>>> np.polyval(p, x)
array([76., 49.])
"""
return _mx_nd_np.polyval(p, x)
@set_module('mxnet.numpy')
def bincount(x, weights=None, minlength=0):
"""
Count number of occurrences of each value in array of non-negative ints.
Parameters
----------
x : ndarray
Input array, 1 dimension, nonnegative ints.
weights : ndarray, optional
Weights, an array of the same shape as x.
minlength : int, optional
A minimum number of bins for the output array.
Returns
--------
out : ndarray
the result of binning the input array. The length of out is equal to amax(x)+1.
Raises
--------
ValueError
If the input is not 1-dimensional, or contains elements with negative values,
or if minlength is negative.
TypeError
If the type of the input is float or complex.
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return _mx_nd_np.bincount(x, weights=weights, minlength=minlength)
@set_module('mxnet.numpy')
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary.
See also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(np.array(1), np.array([3, 4]))
[array([1.]), array([3., 4.])]
"""
return _mx_nd_np.atleast_1d(*arys)
@set_module('mxnet.numpy')
def atleast_2d(*arys):
"""
Convert inputs to arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 2. Copies are made only if necessary.
See also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]]))
[array([[1.]]), array([[1., 2.]]), array([[1., 2.]])]
"""
return _mx_nd_np.atleast_2d(*arys)
@set_module('mxnet.numpy')
def atleast_3d(*arys):
"""
Convert inputs to arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 3.
For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1),
and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1).
See also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> for arr in np.atleast_3d(np.array([1, 2]), np.array([[1, 2]]), np.array([[[1, 2]]])):
... print(arr, arr.shape)
...
[[[1.]
[2.]]] (1, 2, 1)
[[[1.]
[2.]]] (1, 2, 1)
[[[1. 2.]]] (1, 1, 2)
"""
return _mx_nd_np.atleast_3d(*arys)
@set_module('mxnet.numpy')
def pad(x, pad_width=None, mode="constant", **kwargs): # pylint: disable=too-many-arguments
# pylint: disable=too-many-return-statements
"""
Pad an array.
Parameters
----------
array : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
Examples
--------
>>> a = [1, 2, 3, 4, 5]
>>> np.pad(a, (2, 3), 'edge')
array([1, 1, 1, ..., 5, 5, 5])
>>> np.pad(a, (2, 2), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
>>> np.pad(a, (2, 2), 'constant', constant_values=0)
array([0, 0, 1, 2, 3, 4, 5, 0, 0])
>>> a = [[1, 2], [3, 4]]
>>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = [1, 2, 3, 4, 5]
>>> np.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> np.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
>>> np.pad(a, ((2, 2), (2, 2)), 'constant', constant_values=10)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
"""
return _mx_nd_np.pad(x, pad_width=pad_width, mode=mode, **kwargs)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def prod(a, axis=None, dtype=None, out=None, keepdims=False, initial=None): # pylint: disable=too-many-arguments
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
where : not supported
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or multiply along the first axis instead (the ``where`` argument is not supported):
>>> np.prod([[1.,2.],[3.,4.]], axis=0)
array([ 3., 8.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _mx_nd_np.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _mx_nd_np.cumsum(a, axis=axis, dtype=dtype, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : integer
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start: int, optional
The axis is rolled until it lies before this position.
The default, 0, results in a "complete" roll.
Returns
-------
res : ndarray
A view after applying rollaxis to `a` is returned.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
return _mx_nd_np.rollaxis(a, axis, start)
@set_module('mxnet.numpy')
def diag(v, k=0):
"""
Extracts a diagonal or constructs a diagonal array.
- 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero.
- 2-D arrays: extracts the k-th Diagonal
Parameters
----------
v : ndarray
The input array from which to extract, or with which to construct, a diagonal.
k : int, optional
Offset of the extracted or constructed diagonal from the main diagonal (default 0).
Returns
----------
out : ndarray
The extracted diagonal or constructed diagonal array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
return _mx_nd_np.diag(v, k=k)
@set_module('mxnet.numpy')
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return _mx_nd_np.diagflat(v, k=k)
@set_module('mxnet.numpy')
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of
the form a[i, i+offset]. If a has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-array whose diagonal is returned. The shape of the
resulting array can be determined by removing axis1 and axis2 and appending an index to the
right equal to the size of the resulting diagonals.
Parameters
----------
a : ndarray
Input data from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal.
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays.
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays.
Returns
-------
out : ndarray
Output result
Raises
-------
ValueError: If the dimension of a is less than 2.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> np.diagonal(a)
array([0, 3])
>>> np.diagonal(a, 1)
array([1])
>>> a = np.arange(8).reshape(2,2,2)
>>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.diagonal(a, 0, 0, 1)
array([[0, 6],
[1, 7]])
"""
return _mx_nd_np.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
# pylint: disable=redefined-outer-name, too-many-arguments
@set_module('mxnet.numpy')
def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
r"""
Sum of array elements over a given axis.
Parameters
----------
a : ndarray
Input data.
axis : None or int, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The default type is float32.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`; however, any non-default value will be. If the
sub-class's `sum` method does not implement `keepdims`, any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. Currently not implemented; please use ``None``
as input or skip this argument.
out : ndarray or None, optional
Alternative output array in which to place the result. It must have
the same shape and dtype as the expected output.
Returns
-------
sum_along_axis : ndarray
An ndarray with the same shape as `a`, with the specified
axis removed. If an output array is specified, a reference to
`out` is returned.
Notes
-----
- Input type does not support Python native iterables.
- "out" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output.
- "initial" param is not supported yet. Please use None as input.
- Arithmetic is modular when using integer types, and no error is raised on overflow.
- The sum of an empty array is the neutral element 0:
>>> a = np.array([])
>>> np.sum(a)
array(0.)
This function differs from the original `numpy.sum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- "out" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output.
- "initial" param is not supported yet. Please use ``None`` as input or skip it.
- The default type is float32.
Examples
--------
>>> a = np.array([0.5, 1.5])
>>> np.sum(a)
array(2.)
>>> a = np.array([0.5, 0.7, 0.2, 1.5])
>>> np.sum(a, dtype=np.int32)
array(2, dtype=int32)
>>> a = np.array([[0, 1], [0, 5]])
>>> np.sum(a)
array(6.)
>>> np.sum(a, axis=0)
array([0., 6.])
>>> np.sum(a, axis=1)
array([1., 5.])
With output ndarray:
>>> a = np.array([[0, 1], [0, 5]])
>>> b = np.ones((2,), dtype=np.float32)
>>> np.sum(a, axis = 0, out=b)
array([0., 6.])
>>> b
array([0., 6.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
array(-128, dtype=int8)
"""
return _mx_nd_np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where)
# pylint: enable=redefined-outer-name, too-many-arguments
| apache-2.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/io/parsers.py | 3 | 119039 | """
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import print_function
from collections import defaultdict
import re
import csv
import sys
import warnings
import datetime
from textwrap import fill
import numpy as np
from pandas import compat
from pandas.compat import (range, lrange, PY3, StringIO, lzip,
zip, string_types, map, u)
from pandas.core.dtypes.common import (
is_integer, _ensure_object,
is_list_like, is_integer_dtype,
is_float, is_dtype_equal,
is_object_dtype, is_string_dtype,
is_scalar, is_categorical_dtype)
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.index import Index, MultiIndex, RangeIndex
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.categorical import Categorical
from pandas.core import algorithms
from pandas.core.common import AbstractMethodError
from pandas.io.date_converters import generic_parser
from pandas.errors import ParserWarning, ParserError, EmptyDataError
from pandas.io.common import (get_filepath_or_buffer, is_file_like,
_validate_header_arg, _get_handle,
UnicodeReader, UTF8Recoder, _NA_VALUES,
BaseIterator, _infer_compression)
from pandas.core.tools import datetimes as tools
from pandas.util._decorators import Appender
import pandas._libs.lib as lib
import pandas._libs.parsers as parsers
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = u('\ufeff')
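# For illustration (hypothetical field value): a first header field that arrives
# as u'\ufeffname' must be treated as 'name', e.g.
#
#     u'\ufeffname,value'.lstrip(_BOM)   # -> u'name,value'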
_parser_params = """Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, pathlib.Path, py._path.local.LocalPath or any \
object with a read() method (such as a file handle or StringIO)
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file could
be file://localhost/path/to/table.csv
%s
delimiter : str, default ``None``
Alternative argument name for sep.
delim_whitespace : boolean, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
.. versionadded:: 0.18.1 support for the Python parser.
header : int or list of ints, default 'infer'
Row number(s) to use as the column names, and the start of the data.
Default behavior is as if set to 0 if no ``names`` passed, otherwise
``None``. Explicitly pass ``header=0`` to be able to replace existing
names. The header can be a list of integers that specify row locations for
a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not
specified will be skipped (e.g. 2 in this example is skipped). Note that
this parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so header=0 denotes the first line of data
rather than the first line of the file.
names : array-like, default None
List of column names to use. If file contains no header row, then you
should explicitly pass header=None. Duplicates in this list are not
allowed unless mangle_dupe_cols=True, which is the default.
index_col : int or sequence or False, default None
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider index_col=False to force pandas to _not_
use the first column as the index (row names)
usecols : array-like or callable, default None
Return a subset of the columns. If array-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid array-like
`usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz'].
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
as_recarray : boolean, default False
DEPRECATED: this argument will be removed in a future version. Please call
`pd.read_csv(...).to_records()` instead.
Return a NumPy recarray instead of a DataFrame after parsing the data.
If set to True, this option takes precedence over the `squeeze` parameter.
In addition, as row indices are not available in such a format, the
`index_col` parameter will be ignored.
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
prefix : str, default None
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : boolean, default True
Duplicate columns will be specified as 'X.0'...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `str` or `object` to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
%s
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels
true_values : list, default None
Values to consider as True
false_values : list, default None
Values to consider as False
skipinitialspace : boolean, default False
Skip spaces after delimiter.
skiprows : list-like or integer or callable, default None
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c')
skip_footer : int, default 0
DEPRECATED: use the `skipfooter` parameter instead, as they are identical
nrows : int, default None
Number of rows of file to read. Useful for reading pieces of large files
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '""" + fill("', '".join(sorted(_NA_VALUES)),
70, subsequent_indent=" ") + """'`.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
na_filter : boolean, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
skip_blank_lines : boolean, default True
If True, skip over blank lines rather than interpreting as NaN values
parse_dates : boolean or list of ints or names or list of lists or dict, \
default False
* boolean. If True -> try parsing the index.
* list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result
'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : boolean, default False
If True and parse_dates is enabled, pandas will attempt to infer the format
of the datetime strings in the columns, and if it can be inferred, switch
to a faster method of parsing them. In some cases this can increase the
parsing speed by 5-10x.
keep_date_col : boolean, default False
If True and parse_dates specifies combining multiple columns then
keep the original columns.
date_parser : function, default None
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call date_parser in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by parse_dates) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by parse_dates into a single array
and pass that; and 3) call date_parser once for each row using one or more
strings (corresponding to the columns defined by parse_dates) as arguments.
dayfirst : boolean, default False
DD/MM format dates, international and European format
iterator : boolean, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, default None
Return TextFileReader object for iteration.
See the `IO Tools docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use gzip,
bz2, zip or xz if filepath_or_buffer is a string ending in '.gz', '.bz2',
'.zip', or 'xz', respectively, and no decompression otherwise. If using
'zip', the ZIP file must contain only one data file to be read in.
Set to None for no decompression.
.. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.
thousands : str, default None
Thousands separator
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
float_precision : string, default None
Specifies which converter the C engine should use for floating-point
values. The options are `None` for the ordinary converter,
`high` for the high-precision converter, and `round_trip` for the
round-trip converter.
lineterminator : str (length 1), default None
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : boolean, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), default None
One-character string used to escape delimiter when quoting is QUOTE_NONE.
comment : str, default None
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if comment='#', parsing '#empty\\na,b,c\\n1,2,3'
with `header=0` will result in 'a,b,c' being
treated as the header.
encoding : str, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_
dialect : str or csv.Dialect instance, default None
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
tupleize_cols : boolean, default False
Leave a list of tuples on columns as is (default is to convert to
a Multi Index on the columns)
error_bad_lines : boolean, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will dropped from the DataFrame that is
returned.
warn_bad_lines : boolean, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
low_memory : boolean, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser)
buffer_lines : int, default None
DEPRECATED: this argument will be removed in a future version because its
value is not respected by the parser
compact_ints : boolean, default False
DEPRECATED: this argument will be removed in a future version
If compact_ints is True, then for any column that is of integer dtype,
the parser will attempt to cast it as the smallest integer dtype possible,
either signed or unsigned depending on the specification from the
`use_unsigned` parameter.
use_unsigned : boolean, default False
DEPRECATED: this argument will be removed in a future version
If integer columns are being compacted (i.e. `compact_ints=True`), specify
whether the column should be compacted to the smallest signed or unsigned
integer dtype.
memory_map : boolean, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
Returns
-------
result : DataFrame or TextParser
"""
# engine is not used in read_fwf() so is factored out of the shared docstring
_engine_doc = """engine : {'c', 'python'}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete."""
_sep_doc = r"""sep : str, default {default}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used automatically. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``"""
_read_csv_doc = """
Read CSV (comma-separated) file into DataFrame
%s
""" % (_parser_params % (_sep_doc.format(default="','"), _engine_doc))
_read_table_doc = """
Read general delimited file into DataFrame
%s
""" % (_parser_params % (_sep_doc.format(default="\\t (tab-stop)"),
_engine_doc))
_fwf_widths = """\
colspecs : list of pairs (int, int) or 'infer', optional
A list of pairs (tuples) giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of ints, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
"""
_read_fwf_doc = """
Read a table of fixed-width formatted lines into DataFrame
%s
Also, 'delimiter' is used to specify the filler character of the
fields if it is not spaces (e.g., '~').
""" % (_parser_params % (_fwf_widths, ''))
def _validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
min_val=min_val)
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
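# A minimal usage sketch of _validate_integer (illustrative only; the error
# text comes from the msg template above):
#
#     >>> _validate_integer('nrows', 3.0)      # float that casts losslessly
#     3
#     >>> _validate_integer('chunksize', 0, min_val=1)
#     Traceback (most recent call last):
#         ...
#     ValueError: 'chunksize' must be an integer >=1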
def _read(filepath_or_buffer, kwds):
"""Generic reader of line files."""
encoding = kwds.get('encoding', None)
if encoding is not None:
encoding = re.sub('_', '-', encoding).lower()
kwds['encoding'] = encoding
compression = kwds.get('compression')
compression = _infer_compression(filepath_or_buffer, compression)
filepath_or_buffer, _, compression = get_filepath_or_buffer(
filepath_or_buffer, encoding, compression)
kwds['compression'] = compression
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
kwds['parse_dates'] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
nrows = _validate_integer('nrows', kwds.get('nrows', None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
try:
data = parser.read(nrows)
finally:
parser.close()
return data
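# Rough sketch of how _read behaves for the public readers (assuming pandas is
# imported as pd; file names are hypothetical). A chunksize or iterator keyword
# short-circuits to returning the TextFileReader itself instead of a DataFrame:
#
#     pd.read_csv('data.csv.gz')                # compression inferred as gzip
#     pd.read_csv('data.csv', nrows=100)        # nrows validated, DataFrame
#     pd.read_csv('data.csv', chunksize=10000)  # returns a TextFileReader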
_parser_defaults = {
'delimiter': None,
'doublequote': True,
'escapechar': None,
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'skipinitialspace': False,
'lineterminator': None,
'header': 'infer',
'index_col': None,
'names': None,
'prefix': None,
'skiprows': None,
'na_values': None,
'true_values': None,
'false_values': None,
'converters': None,
'dtype': None,
'skipfooter': 0,
'keep_default_na': True,
'thousands': None,
'comment': None,
'decimal': b'.',
# 'engine': 'c',
'parse_dates': False,
'keep_date_col': False,
'dayfirst': False,
'date_parser': None,
'usecols': None,
'nrows': None,
# 'iterator': False,
'chunksize': None,
'verbose': False,
'encoding': None,
'squeeze': False,
'compression': None,
'mangle_dupe_cols': True,
'tupleize_cols': False,
'infer_datetime_format': False,
'skip_blank_lines': True
}
_c_parser_defaults = {
'delim_whitespace': False,
'as_recarray': False,
'na_filter': True,
'compact_ints': False,
'use_unsigned': False,
'low_memory': True,
'memory_map': False,
'buffer_lines': None,
'error_bad_lines': True,
'warn_bad_lines': True,
'float_precision': None
}
_fwf_defaults = {
'colspecs': 'infer',
'widths': None,
}
_c_unsupported = set(['skipfooter'])
_python_unsupported = set([
'low_memory',
'buffer_lines',
'float_precision',
])
_deprecated_args = set([
'as_recarray',
'buffer_lines',
'compact_ints',
'use_unsigned',
])
def _make_parser_function(name, sep=','):
default_sep = sep
def parser_f(filepath_or_buffer,
sep=sep,
delimiter=None,
# Column and Index Locations and Names
header='infer',
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression='infer',
thousands=None,
decimal=b'.',
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
tupleize_cols=False,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
skip_footer=0, # deprecated
# Internal
doublequote=True,
delim_whitespace=False,
as_recarray=False,
compact_ints=False,
use_unsigned=False,
low_memory=_c_parser_defaults['low_memory'],
buffer_lines=None,
memory_map=False,
float_precision=None):
# Alias sep -> delimiter.
if delimiter is None:
delimiter = sep
if delim_whitespace and delimiter is not default_sep:
raise ValueError("Specified a delimiter with both sep and"
" delim_whitespace=True; you can only"
" specify one.")
if engine is not None:
engine_specified = True
else:
engine = 'c'
engine_specified = False
if skip_footer != 0:
warnings.warn("The 'skip_footer' argument has "
"been deprecated and will be removed "
"in a future version. Please use the "
"'skipfooter' argument instead.",
FutureWarning, stacklevel=2)
kwds = dict(delimiter=delimiter,
engine=engine,
dialect=dialect,
compression=compression,
engine_specified=engine_specified,
doublequote=doublequote,
escapechar=escapechar,
quotechar=quotechar,
quoting=quoting,
skipinitialspace=skipinitialspace,
lineterminator=lineterminator,
header=header,
index_col=index_col,
names=names,
prefix=prefix,
skiprows=skiprows,
na_values=na_values,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
thousands=thousands,
comment=comment,
decimal=decimal,
parse_dates=parse_dates,
keep_date_col=keep_date_col,
dayfirst=dayfirst,
date_parser=date_parser,
nrows=nrows,
iterator=iterator,
chunksize=chunksize,
skipfooter=skipfooter or skip_footer,
converters=converters,
dtype=dtype,
usecols=usecols,
verbose=verbose,
encoding=encoding,
squeeze=squeeze,
memory_map=memory_map,
float_precision=float_precision,
na_filter=na_filter,
compact_ints=compact_ints,
use_unsigned=use_unsigned,
delim_whitespace=delim_whitespace,
as_recarray=as_recarray,
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
low_memory=low_memory,
buffer_lines=buffer_lines,
mangle_dupe_cols=mangle_dupe_cols,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
skip_blank_lines=skip_blank_lines)
return _read(filepath_or_buffer, kwds)
parser_f.__name__ = name
return parser_f
read_csv = _make_parser_function('read_csv', sep=',')
read_csv = Appender(_read_csv_doc)(read_csv)
read_table = _make_parser_function('read_table', sep='\t')
read_table = Appender(_read_table_doc)(read_table)
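# Illustrative calls (assuming pandas is imported as pd; file names are
# hypothetical): a single-character sep stays on the C engine, while a regex or
# multi-character sep falls back to the python engine, as described in _sep_doc:
#
#     pd.read_csv('data.csv', sep=';')           # C engine
#     pd.read_csv('data.txt', sep=r'\s*\|\s*')   # python engine (regex sep)
#     pd.read_table('data.tsv')                  # tab-delimited by default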
@Appender(_read_fwf_doc)
def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, 'infer') and widths is not None:
raise ValueError("You must specify only one of 'widths' and "
"'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds['colspecs'] = colspecs
kwds['engine'] = 'python-fwf'
return _read(filepath_or_buffer, kwds)
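# Sketch of the widths -> colspecs conversion above: consecutive field widths
# accumulate into half-open column intervals, so the two hypothetical calls
# below are equivalent (assuming pandas is imported as pd):
#
#     pd.read_fwf('fixed.txt', widths=[5, 3, 7])
#     pd.read_fwf('fixed.txt', colspecs=[(0, 5), (5, 8), (8, 15)])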
class TextFileReader(BaseIterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = 'python'
engine_specified = False
self._engine_specified = kwds.get('engine_specified', engine_specified)
if kwds.get('dialect') is not None:
dialect = kwds['dialect']
if dialect in csv.list_dialects():
dialect = csv.get_dialect(dialect)
# Any valid dialect should have these attributes.
# If any are missing, we will raise automatically.
for param in ('delimiter', 'doublequote', 'escapechar',
'skipinitialspace', 'quotechar', 'quoting'):
try:
dialect_val = getattr(dialect, param)
except AttributeError:
raise ValueError("Invalid dialect '{dialect}' provided"
.format(dialect=kwds['dialect']))
provided = kwds.get(param, _parser_defaults[param])
# Messages for conflicting values between the dialect instance
# and the actual parameters provided.
conflict_msgs = []
if dialect_val != provided:
conflict_msgs.append((
"Conflicting values for '{param}': '{val}' was "
"provided, but the dialect specifies '{diaval}'. "
"Using the dialect-specified value.".format(
param=param, val=provided, diaval=dialect_val)))
if conflict_msgs:
warnings.warn('\n\n'.join(conflict_msgs), ParserWarning,
stacklevel=2)
kwds[param] = dialect_val
if kwds.get('header', 'infer') == 'infer':
kwds['header'] = 0 if kwds.get('names') is None else None
self.orig_options = kwds
# miscellanea
self.engine = engine
self._engine = None
self._currow = 0
options = self._get_options_with_defaults(engine)
self.chunksize = options.pop('chunksize', None)
self.nrows = options.pop('nrows', None)
self.squeeze = options.pop('squeeze', False)
# might mutate self.engine
self.engine = self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if 'has_index_names' in kwds:
self.options['has_index_names'] = kwds['has_index_names']
self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in compat.iteritems(_parser_defaults):
value = kwds.get(argname, default)
# see gh-12935
if argname == 'mangle_dupe_cols' and not value:
raise ValueError('Setting mangle_dupe_cols=False is '
'not supported yet')
else:
options[argname] = value
for argname, default in compat.iteritems(_c_parser_defaults):
if argname in kwds:
value = kwds[argname]
if engine != 'c' and value != default:
if ('python' in engine and
argname not in _python_unsupported):
pass
else:
raise ValueError(
'The %r option is not supported with the'
' %r engine' % (argname, engine))
else:
value = default
options[argname] = value
if engine == 'python-fwf':
for argname, default in compat.iteritems(_fwf_defaults):
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f):
next_attr = "__next__" if PY3 else "next"
# The C engine doesn't need the file-like to have the "next" or
# "__next__" attribute. However, the Python engine explicitly calls
# "next(...)" when iterating through such an object, meaning it
# needs to have that attribute ("next" for Python 2.x, "__next__"
# for Python 3.x)
if engine != "c" and not hasattr(f, next_attr):
msg = ("The 'python' engine cannot iterate "
"through this file buffer.")
raise ValueError(msg)
return engine
def _clean_options(self, options, engine):
result = options.copy()
engine_specified = self._engine_specified
fallback_reason = None
sep = options['delimiter']
delim_whitespace = options['delim_whitespace']
# C engine not supported yet
if engine == 'c':
if options['skipfooter'] > 0:
fallback_reason = "the 'c' engine does not support"\
" skipfooter"
engine = 'python'
encoding = sys.getfilesystemencoding() or 'utf-8'
if sep is None and not delim_whitespace:
if engine == 'c':
fallback_reason = "the 'c' engine does not support"\
" sep=None with delim_whitespace=False"
engine = 'python'
elif sep is not None and len(sep) > 1:
            if engine == 'c' and sep == r'\s+':
result['delim_whitespace'] = True
del result['delimiter']
elif engine not in ('python', 'python-fwf'):
# wait until regex engine integrated
fallback_reason = "the 'c' engine does not support"\
" regex separators (separators > 1 char and"\
" different from '\s+' are"\
" interpreted as regex)"
engine = 'python'
elif delim_whitespace:
if 'python' in engine:
                result['delimiter'] = r'\s+'
elif sep is not None:
encodeable = True
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ('python', 'python-fwf'):
fallback_reason = "the separator encoded in {encoding}" \
" is > 1 char long, and the 'c' engine" \
" does not support such separators".format(
encoding=encoding)
engine = 'python'
quotechar = options['quotechar']
if (quotechar is not None and
isinstance(quotechar, (str, compat.text_type, bytes))):
if (len(quotechar) == 1 and ord(quotechar) > 127 and
engine not in ('python', 'python-fwf')):
fallback_reason = ("ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support "
"such quotechars")
engine = 'python'
if fallback_reason and engine_specified:
raise ValueError(fallback_reason)
if engine == 'c':
for arg in _c_unsupported:
del result[arg]
if 'python' in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
msg = ("Falling back to the 'python' engine because"
" {reason}, but this causes {option!r} to be"
" ignored as it is not supported by the 'python'"
" engine.").format(reason=fallback_reason,
option=arg)
raise ValueError(msg)
del result[arg]
if fallback_reason:
warnings.warn(("Falling back to the 'python' engine because"
" {0}; you can avoid this warning by specifying"
" engine='python'.").format(fallback_reason),
ParserWarning, stacklevel=5)
index_col = options['index_col']
names = options['names']
converters = options['converters']
na_values = options['na_values']
skiprows = options['skiprows']
# really delete this one
keep_default_na = result.pop('keep_default_na')
_validate_header_arg(options['header'])
depr_warning = ''
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
msg = ("The '{arg}' argument has been deprecated "
"and will be removed in a future version."
.format(arg=arg))
if arg == 'as_recarray':
                msg += ' Please call pd.read_csv(...).to_records() instead.'
if result.get(arg, parser_default) != parser_default:
depr_warning += msg + '\n\n'
if depr_warning != '':
warnings.warn(depr_warning, FutureWarning, stacklevel=2)
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result['index_col'] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError('Type converters must be a dict or'
' subclass, input was '
'a {0!r}'.format(type(converters).__name__))
else:
converters = {}
# Converting values to NA
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
        # handle skiprows; this is internally handled by the
        # c-engine, so it is only needed for the python parsers
if engine != 'c':
if is_integer(skiprows):
skiprows = lrange(skiprows)
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result['names'] = names
result['converters'] = converters
result['na_values'] = na_values
result['na_fvalues'] = na_fvalues
result['skiprows'] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine='c'):
if engine == 'c':
self._engine = CParserWrapper(self.f, **self.options)
else:
if engine == 'python':
klass = PythonParser
elif engine == 'python-fwf':
klass = FixedWidthFieldParser
else:
raise ValueError('Unknown engine: {engine} (valid options are'
' "c", "python", or' ' "python-fwf")'.format(
engine=engine))
self._engine = klass(self.f, **self.options)
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
if nrows is not None:
if self.options.get('skipfooter'):
raise ValueError('skipfooter not supported for iteration')
ret = self._engine.read(nrows)
if self.options.get('as_recarray'):
return ret
# May alter columns / col_dict
index, columns, col_dict = self._create_index(ret)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(compat.next(compat.itervalues(col_dict)))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def _create_index(self, ret):
index, columns, col_dict = ret
return index, columns, col_dict
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
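# Chunked-reading sketch (hypothetical file, pandas imported as pd): a reader
# created with chunksize supports both explicit get_chunk() calls and the
# iterator protocol implemented by __next__ above; nrows, when set, caps the
# total number of rows handed out:
#
#     reader = pd.read_csv('big.csv', chunksize=50000)
#     first = reader.get_chunk()   # DataFrame with up to 50000 rows
#     for chunk in reader:         # remaining chunks, then StopIteration
#         pass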
def _is_index_col(col):
return col is not None and col is not False
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return set([i for i, name in enumerate(names)
if usecols(name)])
return usecols
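# Example of _evaluate_usecols: a callable is mapped to the set of positional
# indices it accepts, while anything else passes through unchanged, e.g.
#
#     _evaluate_usecols(lambda name: name.startswith('a'), ['ab', 'b', 'ac'])
#     # -> {0, 2}
#     _evaluate_usecols([0, 2], ['ab', 'b', 'ac'])
#     # -> [0, 2] (returned as-is)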
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : array-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = ("'usecols' must either be all strings, all unicode, "
"all integers or a callable")
if usecols is not None:
if callable(usecols):
return usecols, None
usecols_dtype = lib.infer_dtype(usecols)
if usecols_dtype not in ('empty', 'integer',
'string', 'unicode'):
raise ValueError(msg)
return set(usecols), usecols_dtype
return usecols, None
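# Examples of the (usecols, dtype) pairs produced by _validate_usecols_arg:
#
#     _validate_usecols_arg(['b', 'a'])          # -> ({'a', 'b'}, 'string')
#     _validate_usecols_arg([0, 2])              # -> ({0, 2}, 'integer')
#     _validate_usecols_arg(lambda c: c != 'x')  # -> (<that callable>, None)
#     _validate_usecols_arg(['a', 0])            # raises ValueError (mixed)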
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
class ParserBase(object):
def __init__(self, kwds):
self.names = kwds.get('names')
self.orig_names = None
self.prefix = kwds.pop('prefix', None)
self.index_col = kwds.get('index_col', None)
self.index_names = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(
kwds.pop('parse_dates', False))
self.date_parser = kwds.pop('date_parser', None)
self.dayfirst = kwds.pop('dayfirst', False)
self.keep_date_col = kwds.pop('keep_date_col', False)
self.na_values = kwds.get('na_values')
self.na_fvalues = kwds.get('na_fvalues')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
self.as_recarray = kwds.get('as_recarray', False)
self.tupleize_cols = kwds.get('tupleize_cols', False)
self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)
self.infer_datetime_format = kwds.pop('infer_datetime_format', False)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format
)
# validate header options for mi
self.header = kwds.get('header')
if isinstance(self.header, (list, tuple, np.ndarray)):
if kwds.get('as_recarray'):
raise ValueError("cannot specify as_recarray when "
"specifying a multi-index header")
if kwds.get('usecols'):
raise ValueError("cannot specify usecols when "
"specifying a multi-index header")
if kwds.get('names'):
raise ValueError("cannot specify names when "
"specifying a multi-index header")
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple,
np.ndarray))
if not (is_sequence and
all(map(is_integer, self.index_col)) or
is_integer(self.index_col)):
raise ValueError("index_col must only contain row numbers "
"when specifying a multi-index header")
self._name_processed = False
self._first_chunk = True
# GH 13932
# keep references to file handles opened by the parser itself
self.handles = []
def close(self):
for f in self.handles:
f.close()
@property
def _has_complex_date_col(self):
return (isinstance(self.parse_dates, dict) or
(isinstance(self.parse_dates, list) and
len(self.parse_dates) > 0 and
isinstance(self.parse_dates[0], list)))
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return ((j == self.parse_dates) or
(name is not None and name == self.parse_dates))
else:
return ((j in self.parse_dates) or
(name is not None and name in self.parse_dates))
def _extract_multi_indexer_columns(self, header, index_names, col_names,
passed_names=False):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(index_names,
self.index_col)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple([r[i] for i in range(field_count) if i not in sic])
columns = lzip(*[extract(r) for r in header])
names = ic + columns
def tostr(x):
return str(x) if not isinstance(x, compat.string_types) else x
        # if every column at a single level is 'Unnamed', then our header was
        # too long
for n in range(len(columns[0])):
if all(['Unnamed' in tostr(c[n]) for c in columns]):
raise ParserError(
"Passed header=[%s] are too many rows for this "
"multi_index of columns"
% ','.join([str(x) for x in self.header])
)
# clean the column names (if we have an index_col)
if len(ic):
col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None
for r in header]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
counts = {}
for i, col in enumerate(names):
cur_count = counts.get(col, 0)
if cur_count > 0:
names[i] = '%s.%d' % (col, cur_count)
counts[col] = cur_count + 1
return names
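    # Dedup sketch: with mangle_dupe_cols enabled (the default), repeated
    # column names get a running per-name suffix, so a header row of 'a,a,b,a'
    # comes back as ['a', 'a.1', 'b', 'a.2'], e.g. (pandas imported as pd,
    # StringIO as imported above):
    #
    #     pd.read_csv(StringIO('a,a,b,a\n1,2,3,4')).columns.tolist()
    #     # -> ['a', 'a.1', 'b', 'a.2']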
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if (not self.tupleize_cols and len(columns) and
not isinstance(columns, MultiIndex) and
all([isinstance(c, tuple) for c in columns])):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _,
self.index_col) = _clean_index_names(list(columns),
self.index_col)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
index = index.set_names(indexnamerow[:coffset])
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, compat.string_types):
return col
raise ValueError('Index %s invalid' % col)
index = None
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in reversed(sorted(to_remove)):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, compat.string_types):
return icol
if col_names is None:
raise ValueError(('Must supply column order to use %s as '
'index') % str(icol))
for i, c in enumerate(col_names):
if i == icol:
return c
index = None
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in reversed(sorted(to_remove)):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True):
arrays = []
for i, arr in enumerate(index):
if (try_parse_dates and self._should_parse_dates(i)):
arr = self._date_conv(arr)
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
index = MultiIndex.from_arrays(arrays, names=self.index_names)
return index
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
converters=None, dtypes=None):
result = {}
for c, values in compat.iteritems(dct):
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
c, na_values, na_fvalues)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(("Both a converter and dtype were specified "
"for column {0} - only the converter will "
"be used").format(c), ParserWarning,
stacklevel=7)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(
values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues,
try_num_bool=False)
else:
# skip inference if specified dtype is object
try_num_bool = not (cast_type and is_string_dtype(cast_type))
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues,
try_num_bool)
                # type specified in dtype param
if cast_type and not is_dtype_equal(cvals, cast_type):
cvals = self._cast_types(cvals, cast_type, c)
if issubclass(cvals.dtype.type, np.integer) and self.compact_ints:
cvals = lib.downcast_int64(
cvals, parsers.na_values,
self.use_unsigned)
result[c] = cvals
if verbose and na_count:
print('Filled %d NA values in column %s' % (na_count, str(c)))
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
        try_num_bool : bool, default True
try to cast values to numeric (first preference) or boolean
        Returns
        -------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool:
try:
result = lib.maybe_convert_numeric(values, na_values, False)
na_count = isnull(result).sum()
except Exception:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(result, na_values, False)
else:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = lib.maybe_convert_bool(values,
true_values=self.true_values,
false_values=self.false_values)
return result, na_count
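    # Rough illustration of _infer_types on object data (exact dtypes depend on
    # lib.maybe_convert_numeric): for values np.array(['1', '2', 'N/A'],
    # dtype=object) and na_values == {'N/A'}, the numeric path yields roughly
    # array([1., 2., nan]) together with na_count == 1.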
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
# XXX this is for consistency with
# c-parser which parses all categories
# as strings
if not is_object_dtype(values):
values = astype_nansafe(values, str)
values = Categorical(values)
else:
try:
values = astype_nansafe(values, cast_type, copy=True)
except ValueError:
raise ValueError("Unable to convert column %s to "
"type %s" % (column, cast_type))
return values
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
data, names = _process_date_conversion(
data, self._date_conv, self.parse_dates, self.index_col,
self.index_names, names, keep_date_col=self.keep_date_col)
return names, data
class CParserWrapper(ParserBase):
"""
"""
def __init__(self, src, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
if 'utf-16' in (kwds.get('encoding') or ''):
if isinstance(src, compat.string_types):
src = open(src, 'rb')
self.handles.append(src)
src = UTF8Recoder(src, kwds['encoding'])
kwds['encoding'] = 'utf-8'
# #2442
kwds['allow_leading_cols'] = self.index_col is not False
self._reader = parsers.TextReader(src, **kwds)
# XXX
self.usecols, self.usecols_dtype = _validate_usecols_arg(
self._reader.usecols)
passed_names = self.names is None
if self._reader.header is None:
self.names = None
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
self.names, self.index_names, self.col_names, passed_names = (
self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names,
passed_names
)
)
else:
self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
self.names = ['%s%d' % (self.prefix, i)
for i in range(self._reader.table_width)]
else:
self.names = lrange(self._reader.table_width)
# gh-9755
#
# need to set orig_names here first
# so that proper indexing can be done
# with _set_noconvert_columns
#
# once names has been filtered, we will
# then set orig_names again to names
self.orig_names = self.names[:]
if self.usecols:
usecols = _evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
if (self.usecols_dtype == 'string' and
not set(usecols).issubset(self.orig_names)):
raise ValueError("Usecols do not match names.")
if len(self.names) > len(usecols):
self.names = [n for i, n in enumerate(self.names)
if (i in usecols or n in usecols)]
if len(self.names) < len(usecols):
raise ValueError("Usecols do not match names.")
self._set_noconvert_columns()
self.orig_names = self.names
if not self._has_complex_date_col:
if (self._reader.leading_cols == 0 and
_is_index_col(self.index_col)):
self._name_processed = True
(index_names, self.names,
self.index_col) = _clean_index_names(self.names,
self.index_col)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
def close(self):
for f in self.handles:
f.close()
# close additional handles opened by C parser (for compression)
try:
self._reader.close()
except:
pass
def _set_noconvert_columns(self):
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
"""
names = self.orig_names
if self.usecols_dtype == 'integer':
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
elif (callable(self.usecols) or
self.usecols_dtype not in ('empty', None)):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = self.names[:]
else:
# Usecols is empty.
usecols = None
def _set(x):
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = names.index(x)
self._reader.set_noconvert(x)
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
def set_error_bad_lines(self, status):
self._reader.set_error_bad_lines(int(status))
def read(self, nrows=None):
try:
data = self._reader.read(nrows)
except StopIteration:
if self._first_chunk:
self._first_chunk = False
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names,
dtype=self.kwds.get('dtype'))
columns = self._maybe_make_multi_index_columns(
columns, self.col_names)
if self.usecols is not None:
columns = self._filter_usecols(columns)
col_dict = dict(filter(lambda item: item[0] in columns,
col_dict.items()))
return index, columns, col_dict
else:
raise
# Done with first read, next time raise StopIteration
self._first_chunk = False
if self.as_recarray:
# what to do if there are leading columns?
return data
names = self.names
if self._reader.leading_cols:
if self._has_complex_date_col:
raise NotImplementedError('file structure not yet supported')
# implicit index, no index names
arrays = []
for i in range(self._reader.leading_cols):
if self.index_col is None:
values = data.pop(i)
else:
values = data.pop(self.index_col[i])
values = self._maybe_parse_dates(values, i,
try_parse_dates=True)
arrays.append(values)
index = MultiIndex.from_arrays(arrays)
if self.usecols is not None:
names = self._filter_usecols(names)
names = self._maybe_dedup_names(names)
# rename dict keys
data = sorted(data.items())
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
else:
# rename dict keys
data = sorted(data.items())
# ugh, mutation
names = list(self.orig_names)
names = self._maybe_dedup_names(names)
if self.usecols is not None:
names = self._filter_usecols(names)
# columns as list
alldata = [x[1] for x in data]
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
index, names = self._make_index(data, alldata, names)
# maybe create a mi on the columns
names = self._maybe_make_multi_index_columns(names, self.col_names)
return index, names, data
def _filter_usecols(self, names):
# hackish
usecols = _evaluate_usecols(self.usecols, names)
if usecols is not None and len(names) != len(usecols):
names = [name for i, name in enumerate(names)
if i in usecols or name in usecols]
return names
def _get_index_names(self):
names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
(idx_names, names,
self.index_col) = _clean_index_names(names, self.index_col)
return names, idx_names
def _maybe_parse_dates(self, values, index, try_parse_dates=True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
def TextParser(*args, **kwds):
"""
Converts lists of lists/tuples into DataFrames with proper type inference
and optional (e.g. string to datetime) conversion. Also enables iterating
lazily over chunks of large files
Parameters
----------
data : file-like object or list
delimiter : separator character to use
dialect : str or csv.Dialect instance, default None
Ignored if delimiter is longer than 1 character
    names : sequence, default None
header : int, default 0
Row to use to parse column labels. Defaults to the first row. Prior
rows will be discarded
index_col : int or list, default None
Column or columns to use as the (possibly hierarchical) index
has_index_names: boolean, default False
True if the cols defined in index_col have an index name and are
not in the header
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN.
keep_default_na : bool, default True
thousands : str, default None
Thousands separator
comment : str, default None
Comment out remainder of line
parse_dates : boolean, default False
keep_date_col : boolean, default False
date_parser : function, default None
skiprows : list of integers
Row numbers to skip
skipfooter : int
        Number of lines at bottom of file to skip
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
encoding : string, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8')
squeeze : boolean, default False
returns Series if only one column
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
float_precision : string, default None
Specifies which converter the C engine should use for floating-point
values. The options are None for the ordinary converter,
'high' for the high-precision converter, and 'round_trip' for the
round-trip converter.
"""
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)
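# TextParser usage sketch on already-split rows: the first row is taken as the
# header by default and type inference still applies, so the numeric strings
# below come back as numeric columns:
#
#     rows = [['a', 'b'], ['1', '2'], ['3', '4']]
#     df = TextParser(rows).read()   # DataFrame with columns 'a' and 'b'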
def count_empty_vals(vals):
return sum([1 for v in vals if v == '' or v is None])
class PythonParser(ParserBase):
def __init__(self, f, **kwds):
"""
Workhorse function for processing nested list into DataFrame
Should be replaced by np.genfromtxt eventually?
"""
ParserBase.__init__(self, kwds)
self.data = None
self.buf = []
self.pos = 0
self.line_pos = 0
self.encoding = kwds['encoding']
self.compression = kwds['compression']
self.memory_map = kwds['memory_map']
self.skiprows = kwds['skiprows']
if callable(self.skiprows):
self.skipfunc = self.skiprows
else:
self.skipfunc = lambda x: x in self.skiprows
self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter'])
self.delimiter = kwds['delimiter']
self.quotechar = kwds['quotechar']
if isinstance(self.quotechar, compat.text_type):
self.quotechar = str(self.quotechar)
self.escapechar = kwds['escapechar']
self.doublequote = kwds['doublequote']
self.skipinitialspace = kwds['skipinitialspace']
self.lineterminator = kwds['lineterminator']
self.quoting = kwds['quoting']
self.usecols, _ = _validate_usecols_arg(kwds['usecols'])
self.skip_blank_lines = kwds['skip_blank_lines']
self.warn_bad_lines = kwds['warn_bad_lines']
self.error_bad_lines = kwds['error_bad_lines']
self.names_passed = kwds['names'] or None
self.na_filter = kwds['na_filter']
self.has_index_names = False
if 'has_index_names' in kwds:
self.has_index_names = kwds['has_index_names']
self.verbose = kwds['verbose']
self.converters = kwds['converters']
self.dtype = kwds['dtype']
self.compact_ints = kwds['compact_ints']
self.use_unsigned = kwds['use_unsigned']
self.thousands = kwds['thousands']
self.decimal = kwds['decimal']
self.comment = kwds['comment']
self._comment_lines = []
mode = 'r' if PY3 else 'rb'
f, handles = _get_handle(f, mode, encoding=self.encoding,
compression=self.compression,
memory_map=self.memory_map)
self.handles.extend(handles)
# Set self.data to something that can read lines.
if hasattr(f, 'readline'):
self._make_reader(f)
else:
self.data = f
# Get columns in two steps: infer from data, then
        # infer column indices from self.usecols if it is specified.
self._col_indices = None
self.columns, self.num_original_columns = self._infer_columns()
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
self.columns, self.index_names, self.col_names, _ = (
self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
)
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
self.columns = self.columns[0]
# get popped off for index
self.orig_names = list(self.columns)
# needs to be cleaned/refactored
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
(index_names, self.orig_names, self.columns) = (
self._get_index_name(self.columns))
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
self._no_thousands_columns = None
if len(self.decimal) != 1:
raise ValueError('Only length-1 decimal markers supported')
if self.thousands is None:
self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal)
else:
self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands,
self.decimal))
def _set_no_thousands_columns(self):
        # Create a set of column ids that are not to be stripped of thousands
        # separators.
noconvert_columns = set()
def _set(x):
if is_integer(x):
noconvert_columns.add(x)
else:
noconvert_columns.add(self.columns.index(x))
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
return noconvert_columns
def _make_reader(self, f):
sep = self.delimiter
if sep is None or len(sep) == 1:
if self.lineterminator:
raise ValueError('Custom line terminators not supported in '
'python parser (yet)')
class MyDialect(csv.Dialect):
delimiter = self.delimiter
quotechar = self.quotechar
escapechar = self.escapechar
doublequote = self.doublequote
skipinitialspace = self.skipinitialspace
quoting = self.quoting
lineterminator = '\n'
dia = MyDialect
sniff_sep = True
if sep is not None:
sniff_sep = False
dia.delimiter = sep
# attempt to sniff the delimiter
if sniff_sep:
line = f.readline()
while self.skipfunc(self.pos):
self.pos += 1
line = f.readline()
line = self._check_comments([line])[0]
self.pos += 1
self.line_pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
self.buf.extend(list(
UnicodeReader(StringIO(line),
dialect=dia,
encoding=self.encoding)))
else:
self.buf.extend(list(csv.reader(StringIO(line),
dialect=dia)))
if self.encoding is not None:
reader = UnicodeReader(f, dialect=dia,
encoding=self.encoding,
strict=True)
else:
reader = csv.reader(f, dialect=dia,
strict=True)
else:
def _read():
line = f.readline()
if compat.PY2 and self.encoding:
line = line.decode(self.encoding)
pat = re.compile(sep)
yield pat.split(line.strip())
for line in f:
yield pat.split(line.strip())
reader = _read()
self.data = reader
def read(self, rows=None):
try:
content = self._get_lines(rows)
except StopIteration:
if self._first_chunk:
content = []
else:
raise
# done with first read, next time raise StopIteration
self._first_chunk = False
columns = list(self.orig_names)
if not len(content): # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names, self.dtype)
columns = self._maybe_make_multi_index_columns(
columns, self.col_names)
return index, columns, col_dict
# handle new style for names in index
count_empty_content_vals = count_empty_vals(content[0])
indexnamerow = None
if self.has_index_names and count_empty_content_vals == len(columns):
indexnamerow = content[0]
content = content[1:]
alldata = self._rows_to_cols(content)
data = self._exclude_implicit_index(alldata)
columns = self._maybe_dedup_names(self.columns)
columns, data = self._do_date_conversions(columns, data)
data = self._convert_data(data)
if self.as_recarray:
return self._to_recarray(data, columns)
index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
def _exclude_implicit_index(self, alldata):
names = self._maybe_dedup_names(self.orig_names)
if self._implicit_index:
excl_indices = self.index_col
data = {}
offset = 0
for i, col in enumerate(names):
while i + offset in excl_indices:
offset += 1
data[col] = alldata[i + offset]
else:
data = dict((k, v) for k, v in zip(names, alldata))
return data
# legacy
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
return self.read(nrows=size)
def _convert_data(self, data):
# apply converters
def _clean_mapping(mapping):
"converts col numbers to names"
clean = {}
for col, v in compat.iteritems(mapping):
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean[col] = v
return clean
clean_conv = _clean_mapping(self.converters)
if not isinstance(self.dtype, dict):
# handles single dtype applied to all columns
clean_dtypes = self.dtype
else:
clean_dtypes = _clean_mapping(self.dtype)
# Apply NA values.
clean_na_values = {}
clean_na_fvalues = {}
if isinstance(self.na_values, dict):
for col in self.na_values:
na_value = self.na_values[col]
na_fvalue = self.na_fvalues[col]
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean_na_values[col] = na_value
clean_na_fvalues[col] = na_fvalue
else:
clean_na_values = self.na_values
clean_na_fvalues = self.na_fvalues
return self._convert_to_ndarrays(data, clean_na_values,
clean_na_fvalues, self.verbose,
clean_conv, clean_dtypes)
def _to_recarray(self, data, columns):
dtypes = []
o = compat.OrderedDict()
# use the columns to "order" the keys
# in the unordered 'data' dictionary
for col in columns:
dtypes.append((str(col), data[col].dtype))
o[col] = data[col]
tuples = lzip(*o.values())
return np.array(tuples, dtypes)
def _infer_columns(self):
names = self.names
num_original_columns = 0
clear_buffer = True
if self.header is not None:
header = self.header
            # we have MultiIndex columns, so read an extra line
if isinstance(header, (list, tuple, np.ndarray)):
have_mi_columns = True
header = list(header) + [header[-1] + 1]
else:
have_mi_columns = False
header = [header]
columns = []
for level, hr in enumerate(header):
try:
line = self._buffered_line()
while self.line_pos <= hr:
line = self._next_line()
except StopIteration:
if self.line_pos < hr:
raise ValueError(
'Passed header=%s but only %d lines in file'
% (hr, self.line_pos + 1))
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
if have_mi_columns and hr > 0:
if clear_buffer:
self._clear_buffer()
columns.append([None] * len(columns[-1]))
return columns, num_original_columns
if not self.names:
raise EmptyDataError(
"No columns to parse from file")
line = self.names[:]
unnamed_count = 0
this_columns = []
for i, c in enumerate(line):
if c == '':
if have_mi_columns:
this_columns.append('Unnamed: %d_level_%d'
% (i, level))
else:
this_columns.append('Unnamed: %d' % i)
unnamed_count += 1
else:
this_columns.append(c)
if not have_mi_columns and self.mangle_dupe_cols:
counts = {}
for i, col in enumerate(this_columns):
cur_count = counts.get(col, 0)
if cur_count > 0:
this_columns[i] = '%s.%d' % (col, cur_count)
counts[col] = cur_count + 1
elif have_mi_columns:
                    # if we have grabbed an extra line that is not in our
                    # format, save it in the buffer and create a blank extra
                    # line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
ic = (len(self.index_col)
if self.index_col is not None else 0)
if lc != unnamed_count and lc - ic > unnamed_count:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self._clear_buffer()
if names is not None:
if ((self.usecols is not None and
len(names) != len(self.usecols)) or
(self.usecols is None and
len(names) != len(columns[0]))):
raise ValueError('Number of passed names did not match '
'number of header fields in the file')
if len(columns) > 1:
raise TypeError('Cannot pass names with multi-index '
'columns')
if self.usecols is not None:
                    # Set _col_indices (via _handle_usecols). We don't store
                    # columns because they are overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
num_original_columns = len(names)
columns = [names]
else:
columns = self._handle_usecols(columns, columns[0])
else:
try:
line = self._buffered_line()
except StopIteration:
if not names:
raise EmptyDataError(
"No columns to parse from file")
line = names[:]
ncols = len(line)
num_original_columns = ncols
if not names:
if self.prefix:
columns = [['%s%d' % (self.prefix, i)
for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) >= num_original_columns:
columns = self._handle_usecols([names], names)
num_original_columns = len(names)
else:
if (not callable(self.usecols) and
len(names) != len(self.usecols)):
raise ValueError(
'Number of passed names did not match number of '
'header fields in the file'
)
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
num_original_columns = ncols
return columns, num_original_columns
def _handle_usecols(self, columns, usecols_key):
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
if callable(self.usecols):
col_indices = _evaluate_usecols(self.usecols, usecols_key)
elif any([isinstance(u, string_types) for u in self.usecols]):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
col_indices = []
for col in self.usecols:
if isinstance(col, string_types):
col_indices.append(usecols_key.index(col))
else:
col_indices.append(col)
else:
col_indices = self.usecols
columns = [[n for i, n in enumerate(column) if i in col_indices]
for column in columns]
self._col_indices = col_indices
return columns
def _buffered_line(self):
"""
Return a line from buffer, filling buffer if required.
"""
if len(self.buf) > 0:
return self.buf[0]
else:
return self._next_line()
def _check_for_bom(self, first_row):
"""
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
"""
# first_row will be a list, so we need to check
# that that list is not empty before proceeding.
if not first_row:
return first_row
# The first element of this row is the one that could have the
# BOM that we want to remove. Check that the first element is a
# string before proceeding.
if not isinstance(first_row[0], compat.string_types):
return first_row
# Check that the string is not empty, as that would
# obviously not have a BOM at the start of it.
if not first_row[0]:
return first_row
# Since the string is non-empty, check that it does
# in fact begin with a BOM.
first_elt = first_row[0][0]
# This is to avoid warnings we get in Python 2.x if
# we find ourselves comparing with non-Unicode
if compat.PY2 and not isinstance(first_elt, unicode): # noqa
try:
first_elt = u(first_elt)
except UnicodeDecodeError:
return first_row
if first_elt != _BOM:
return first_row
first_row = first_row[0]
if len(first_row) > 1 and first_row[1] == self.quotechar:
start = 2
quote = first_row[1]
end = first_row[2:].index(quote) + 2
# Extract the data between the quotation marks
new_row = first_row[start:end]
# Extract any remaining data after the second
# quotation mark.
if len(first_row) > end + 1:
new_row += first_row[end + 1:]
return [new_row]
elif len(first_row) > 1:
return [first_row[1:]]
else:
            # First row is just the BOM, so we
            # return a list with an empty string.
return [""]
def _is_line_empty(self, line):
"""
Check if a line is empty or not.
Parameters
----------
line : str, array-like
The line of data to check.
Returns
-------
boolean : Whether or not the line is empty.
"""
return not line or all(not x for x in line)
def _next_line(self):
if isinstance(self.data, list):
while self.skipfunc(self.pos):
self.pos += 1
while True:
try:
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
if (not self.skip_blank_lines and
(self._is_line_empty(
self.data[self.pos - 1]) or line)):
break
elif self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
except IndexError:
raise StopIteration
else:
while self.skipfunc(self.pos):
self.pos += 1
next(self.data)
while True:
orig_line = self._next_iter_line(row_num=self.pos + 1)
self.pos += 1
if orig_line is not None:
line = self._check_comments([orig_line])[0]
if self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
elif self._is_line_empty(orig_line) or line:
break
# This was the first line of the file,
# which could contain the BOM at the
# beginning of it.
if self.pos == 1:
line = self._check_for_bom(line)
self.line_pos += 1
self.buf.append(line)
return line
def _alert_malformed(self, msg, row_num):
"""
Alert a user about a malformed row.
        If `self.error_bad_lines` is True, a `ParserError` is raised.
        Otherwise, if `self.warn_bad_lines` is True, the message is
        written to stderr.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.error_bad_lines:
raise ParserError(msg)
elif self.warn_bad_lines:
base = 'Skipping line {row_num}: '.format(row_num=row_num)
sys.stderr.write(base + msg + '\n')
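    # --- Illustrative example; not part of pandas itself ---
    # This raise-or-warn switch is what the public ``error_bad_lines`` /
    # ``warn_bad_lines`` options of ``read_csv`` control. A hypothetical
    # session (python engine):
    #
    #   >>> import pandas as pd
    #   >>> from pandas import compat
    #   >>> bad = 'a,b\n1,2\n3,4,5\n6,7\n'   # line 3 has one field too many
    #   >>> pd.read_csv(compat.StringIO(bad), engine='python',
    #   ...             error_bad_lines=False, warn_bad_lines=True)
    #      a  b
    #   0  1  2
    #   1  6  7
    #
    # With the default ``error_bad_lines=True`` the same input raises a
    # ``ParserError`` carrying the "Expected 2 fields in line 3, saw 3"
    # message built by the caller of this method.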
def _next_iter_line(self, row_num):
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed.
"""
try:
return next(self.data)
except csv.Error as e:
if self.warn_bad_lines or self.error_bad_lines:
msg = str(e)
if 'NULL byte' in msg:
msg = ('NULL byte detected. This byte '
'cannot be processed in Python\'s '
'native csv library at the moment, '
'so please pass in engine=\'c\' instead')
elif 'newline inside string' in msg:
msg = ('EOF inside string starting with '
'line ' + str(row_num))
if self.skipfooter > 0:
reason = ('Error could possibly be due to '
'parsing errors in the skipped footer rows '
'(the skipfooter keyword is only applied '
'after Python\'s csv library has parsed '
'all rows).')
msg += '. ' + reason
self._alert_malformed(msg, row_num)
return None
def _check_comments(self, lines):
if self.comment is None:
return lines
ret = []
for l in lines:
rl = []
for x in l:
if (not isinstance(x, compat.string_types) or
self.comment not in x):
rl.append(x)
else:
x = x[:x.find(self.comment)]
if len(x) > 0:
rl.append(x)
break
ret.append(rl)
return ret
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], compat.string_types) or
l[0].strip())):
ret.append(l)
return ret
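    # --- Worked example; not part of pandas itself ---
    # Only rows that carry some real content survive the filter above:
    #
    #   kept   : ['a', 'b']   (more than one field)
    #            [0]          (single field, not a string)
    #            ['x']        (single non-blank string)
    #   dropped: []           (no fields)
    #            ['']         (single empty string)
    #            ['   ']      (single all-whitespace string)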
def _check_thousands(self, lines):
if self.thousands is None:
return lines
return self._search_replace_num_columns(lines=lines,
search=self.thousands,
replace='')
def _search_replace_num_columns(self, lines, search, replace):
ret = []
for l in lines:
rl = []
for i, x in enumerate(l):
if (not isinstance(x, compat.string_types) or
search not in x or
(self._no_thousands_columns and
i in self._no_thousands_columns) or
self.nonnum.search(x.strip())):
rl.append(x)
else:
rl.append(x.replace(search, replace))
ret.append(rl)
return ret
def _check_decimal(self, lines):
if self.decimal == _parser_defaults['decimal']:
return lines
return self._search_replace_num_columns(lines=lines,
search=self.decimal,
replace='.')
def _clear_buffer(self):
self.buf = []
_implicit_index = False
def _get_index_name(self, columns):
"""
        Try several cases to get the index name and columns:
        0) There are headers on row 0 and row 1, and their
        total summed lengths equal the length of the next line.
        Treat row 0 as columns and row 1 as indices.
        1) Look for an implicit index: there are more columns
        on row 1 than on row 0. If this is true, assume that row
        1 lists index columns and row 0 lists normal columns.
        2) Get the index from the columns if it was listed there.
"""
orig_names = list(columns)
columns = list(columns)
try:
line = self._next_line()
except StopIteration:
line = None
try:
next_line = self._next_line()
except StopIteration:
next_line = None
# implicitly index_col=0 b/c 1 fewer column names
implicit_first_cols = 0
if line is not None:
# leave it 0, #2442
# Case 1
if self.index_col is not False:
implicit_first_cols = len(line) - self.num_original_columns
# Case 0
if next_line is not None:
if len(next_line) == len(line) + self.num_original_columns:
# column and index names on diff rows
self.index_col = lrange(len(line))
self.buf = self.buf[1:]
for c in reversed(line):
columns.insert(0, c)
# Update list of original names to include all indices.
orig_names = list(columns)
self.num_original_columns = len(columns)
return line, orig_names, columns
if implicit_first_cols > 0:
# Case 1
self._implicit_index = True
if self.index_col is None:
self.index_col = lrange(implicit_first_cols)
index_name = None
else:
# Case 2
(index_name, columns_,
self.index_col) = _clean_index_names(columns, self.index_col)
return index_name, orig_names, columns
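    # --- Illustrative example; not part of pandas itself ---
    # Case 1 ("implicit index") is the familiar round-trip layout in which
    # every data row has one more field than the header row, so the extra
    # leading column is taken as the index. A hypothetical session:
    #
    #   >>> import pandas as pd
    #   >>> from pandas import compat
    #   >>> text = 'a,b\nx,1,2\ny,3,4\n'
    #   >>> df = pd.read_csv(compat.StringIO(text), engine='python')
    #   >>> list(df.index), list(df.columns)
    #   (['x', 'y'], ['a', 'b'])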
def _rows_to_cols(self, content):
col_len = self.num_original_columns
if self._implicit_index:
col_len += len(self.index_col)
max_len = max([len(row) for row in content])
        # Check that no row has too many fields
        # (rows with too few fields are padded with NaN).
if (max_len > col_len and
self.index_col is not False and
self.usecols is None):
footers = self.skipfooter if self.skipfooter else 0
bad_lines = []
iter_content = enumerate(content)
content_len = len(content)
content = []
for (i, l) in iter_content:
actual_len = len(l)
if actual_len > col_len:
if self.error_bad_lines or self.warn_bad_lines:
row_num = self.pos - (content_len - i + footers)
bad_lines.append((row_num, actual_len))
if self.error_bad_lines:
break
else:
content.append(l)
for row_num, actual_len in bad_lines:
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, actual_len))
if len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE:
# see gh-13374
reason = ('Error could possibly be due to quotes being '
'ignored when a multi-char delimiter is used.')
msg += '. ' + reason
self._alert_malformed(msg, row_num + 1)
# see gh-13320
zipped_content = list(lib.to_object_array(
content, min_width=col_len).T)
if self.usecols:
if self._implicit_index:
zipped_content = [
a for i, a in enumerate(zipped_content)
if (i < len(self.index_col) or
i - len(self.index_col) in self._col_indices)]
else:
zipped_content = [a for i, a in enumerate(zipped_content)
if i in self._col_indices]
return zipped_content
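    # --- Worked example; not part of pandas itself ---
    # Stripped of the bad-line bookkeeping, this step is a rows-to-columns
    # transpose in which short rows are padded up to the full column count
    # (the real padding value is NaN, supplied by lib.to_object_array):
    #
    #   rows [['1', '2', '3'],        columns [['1', '4'],
    #         ['4', '5']]        -->           ['2', '5'],
    #                                          ['3', nan]]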
def _get_lines(self, rows=None):
lines = self.buf
new_rows = None
# already fetched some number
if rows is not None:
# we already have the lines in the buffer
if len(self.buf) >= rows:
new_rows, self.buf = self.buf[:rows], self.buf[rows:]
# need some lines
else:
rows -= len(self.buf)
if new_rows is None:
if isinstance(self.data, list):
if self.pos > len(self.data):
raise StopIteration
if rows is None:
new_rows = self.data[self.pos:]
new_pos = len(self.data)
else:
new_rows = self.data[self.pos:self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)]
lines.extend(new_rows)
self.pos = new_pos
else:
new_rows = []
try:
if rows is not None:
for _ in range(rows):
new_rows.append(next(self.data))
lines.extend(new_rows)
else:
rows = 0
while True:
new_row = self._next_iter_line(
row_num=self.pos + rows + 1)
rows += 1
if new_row is not None:
new_rows.append(new_row)
except StopIteration:
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)]
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
else:
lines = new_rows
if self.skipfooter:
lines = lines[:-self.skipfooter]
lines = self._check_comments(lines)
if self.skip_blank_lines:
lines = self._remove_empty_lines(lines)
lines = self._check_thousands(lines)
return self._check_decimal(lines)
def _make_date_converter(date_parser=None, dayfirst=False,
infer_datetime_format=False):
def converter(*date_cols):
if date_parser is None:
strs = _concat_date_cols(date_cols)
try:
return tools.to_datetime(
_ensure_object(strs),
utc=None,
box=False,
dayfirst=dayfirst,
errors='ignore',
infer_datetime_format=infer_datetime_format
)
except:
return tools.to_datetime(
lib.try_parse_dates(strs, dayfirst=dayfirst))
else:
try:
result = tools.to_datetime(
date_parser(*date_cols), errors='ignore')
if isinstance(result, datetime.datetime):
raise Exception('scalar parser')
return result
except Exception:
try:
return tools.to_datetime(
lib.try_parse_dates(_concat_date_cols(date_cols),
parser=date_parser,
dayfirst=dayfirst),
errors='ignore')
except Exception:
return generic_parser(date_parser, *date_cols)
return converter
def _process_date_conversion(data_dict, converter, parse_spec,
index_col, index_names, columns,
keep_date_col=False):
def _isindex(colspec):
return ((isinstance(index_col, list) and
colspec in index_col) or
(isinstance(index_names, list) and
colspec in index_names))
new_cols = []
new_data = {}
orig_names = columns
columns = list(columns)
date_cols = set()
if parse_spec is None or isinstance(parse_spec, bool):
return data_dict, columns
if isinstance(parse_spec, list):
# list of column lists
for colspec in parse_spec:
if is_scalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
if _isindex(colspec):
continue
data_dict[colspec] = converter(data_dict[colspec])
else:
new_name, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names)
if new_name in data_dict:
raise ValueError('New date column already in dict %s' %
new_name)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
elif isinstance(parse_spec, dict):
# dict of new name to column list
for new_name, colspec in compat.iteritems(parse_spec):
if new_name in data_dict:
raise ValueError('Date column %s already in dict' %
new_name)
_, col, old_names = _try_convert_dates(converter, colspec,
data_dict, orig_names)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
data_dict.update(new_data)
new_cols.extend(columns)
if not keep_date_col:
for c in list(date_cols):
data_dict.pop(c)
new_cols.remove(c)
return data_dict, new_cols
def _try_convert_dates(parser, colspec, data_dict, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int) and c not in columns:
colnames.append(columns[c])
else:
colnames.append(c)
new_name = '_'.join([str(x) for x in colnames])
to_parse = [data_dict[c] for c in colnames if c in data_dict]
new_col = parser(*to_parse)
return new_name, new_col, colnames
def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
na_values = _NA_VALUES
else:
na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
na_values = na_values.copy() # Prevent aliasing.
if keep_default_na:
for k, v in compat.iteritems(na_values):
if not is_list_like(v):
v = [v]
v = set(v) | _NA_VALUES
na_values[k] = v
na_fvalues = dict([
(k, _floatify_na_values(v)) for k, v in na_values.items() # noqa
])
else:
if not is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | _NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
return None, columns, index_col
columns = list(columns)
cp_cols = list(columns)
index_names = []
# don't mutate
index_col = list(index_col)
for i, c in enumerate(index_col):
if isinstance(c, compat.string_types):
index_names.append(c)
for j, name in enumerate(cp_cols):
if name == c:
index_col[i] = j
columns.remove(name)
break
else:
name = cp_cols[c]
columns.remove(name)
index_names.append(name)
# hack
if isinstance(index_names[0], compat.string_types)\
and 'Unnamed' in index_names[0]:
index_names[0] = None
return index_names, columns, index_col
def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be np.object.
default_dtype = dtype or np.object
dtype = defaultdict(lambda: default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: np.object)
# Convert column indexes to column names.
for k, v in compat.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
if index_col is None or index_col is False:
index = Index([])
else:
index = [Series([], dtype=dtype[index_name])
for index_name in index_names]
index = MultiIndex.from_arrays(index, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
col_dict = dict((col_name,
Series([], dtype=dtype[col_name]))
for col_name in columns)
return index, columns, col_dict
def _floatify_na_values(na_values):
# create float versions of the na_values
result = set()
for v in na_values:
try:
v = float(v)
if not np.isnan(v):
result.add(v)
except:
pass
return result
def _stringify_na_values(na_values):
    """ return stringified and numeric versions of these values """
result = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
            # e.g. a sentinel like 999 should also match "999.0"
if v == int(v):
v = int(v)
result.append("%s.0" % v)
result.append(str(v))
result.append(v)
except:
pass
try:
result.append(int(x))
except:
pass
return set(result)
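# --- Illustrative sketch; not part of pandas itself (helper name is hypothetical) ---
# For a numeric sentinel such as 999, the helper above fans the value out
# into every spelling a CSV cell might use, which is why both "999" and
# "999.0" in a file end up treated as missing.
def _na_spellings_sketch():
    spellings = _stringify_na_values([999])
    # -> set(['999', '999.0', 999]); float cells still match because
    #    999 == 999.0, and _floatify_na_values supplies the float set.
    return spellings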
def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
return _NA_VALUES, set()
else:
return na_values, na_fvalues
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int):
colnames.append(columns[c])
return colnames
def _concat_date_cols(date_cols):
if len(date_cols) == 1:
if compat.PY3:
return np.array([compat.text_type(x) for x in date_cols[0]],
dtype=object)
else:
return np.array([
str(x) if not isinstance(x, compat.string_types) else x
for x in date_cols[0]
], dtype=object)
rs = np.array([' '.join([compat.text_type(y) for y in x])
for x in zip(*date_cols)], dtype=object)
return rs
class FixedWidthReader(BaseIterator):
"""
A reader of fixed-width lines.
"""
def __init__(self, f, colspecs, delimiter, comment, skiprows=None):
self.f = f
self.buffer = None
self.delimiter = '\r\n' + delimiter if delimiter else '\n\r\t '
self.comment = comment
if colspecs == 'infer':
self.colspecs = self.detect_colspecs(skiprows=skiprows)
else:
self.colspecs = colspecs
if not isinstance(self.colspecs, (tuple, list)):
raise TypeError("column specifications must be a list or tuple, "
"input was a %r" % type(colspecs).__name__)
for colspec in self.colspecs:
if not (isinstance(colspec, (tuple, list)) and
len(colspec) == 2 and
isinstance(colspec[0], (int, np.integer, type(None))) and
isinstance(colspec[1], (int, np.integer, type(None)))):
raise TypeError('Each column specification must be '
'2 element tuple or list of integers')
def get_rows(self, n, skiprows=None):
"""
Read rows from self.f, skipping as specified.
        We distinguish buffer_rows (the first <= n lines)
        from the rows returned to detect_colspecs because it is
        simpler to leave the skiprows logic in the other call
        sites untouched than to make them account for the rows
        already skipped here.
Parameters
----------
n : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= n:
break
self.buffer = iter(buffer_rows)
return detect_rows
def detect_colspecs(self, n=100, skiprows=None):
# Regex escape the delimiters
delimiters = ''.join([r'\%s' % x for x in self.delimiter])
pattern = re.compile('([^%s]+)' % delimiters)
rows = self.get_rows(n, skiprows)
if not rows:
raise EmptyDataError("No rows from which to infer column width")
max_len = max(map(len, rows))
mask = np.zeros(max_len + 1, dtype=int)
if self.comment is not None:
rows = [row.partition(self.comment)[0] for row in rows]
for row in rows:
for m in pattern.finditer(row):
mask[m.start():m.end()] = 1
shifted = np.roll(mask, 1)
shifted[0] = 0
edges = np.where((mask ^ shifted) == 1)[0]
edge_pairs = list(zip(edges[::2], edges[1::2]))
return edge_pairs
def __next__(self):
if self.buffer is not None:
try:
line = next(self.buffer)
except StopIteration:
self.buffer = None
line = next(self.f)
else:
line = next(self.f)
# Note: 'colspecs' is a sequence of half-open intervals.
return [line[fromm:to].strip(self.delimiter)
for (fromm, to) in self.colspecs]
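# --- Illustrative sketch; not part of pandas itself (helper name is hypothetical) ---
# The column-inference idea used by FixedWidthReader.detect_colspecs above,
# reduced to its core: mark every character position that is non-blank in
# any sample line, then read the runs of marked positions back off as
# (start, end) intervals.
def _infer_colspecs_sketch(sample_lines):
    import re as _re
    import numpy as _np
    width = max(len(line) for line in sample_lines)
    mask = _np.zeros(width + 1, dtype=int)
    for line in sample_lines:
        for m in _re.finditer(r'\S+', line):
            mask[m.start():m.end()] = 1
    shifted = _np.roll(mask, 1)
    shifted[0] = 0
    edges = _np.where((mask ^ shifted) == 1)[0]
    return list(zip(edges[::2], edges[1::2]))
# e.g. _infer_colspecs_sketch(['id  name ', '1   ann  ', '23  bob  '])
#      returns [(0, 2), (4, 8)]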
class FixedWidthFieldParser(PythonParser):
"""
    Specialization that converts fixed-width fields into DataFrames.
See PythonParser for details.
"""
def __init__(self, f, **kwds):
# Support iterators, convert to a list.
self.colspecs = kwds.pop('colspecs')
PythonParser.__init__(self, f, **kwds)
def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter,
self.comment, self.skiprows)
| mit |
crichardson17/starburst_atlas | SFH_comparison/SFH_plotter_UV.py | 1 | 11651 | ############################################################
########## Plotting File for SFH comparison Plots ##########
################## Data read from Cloudy ###################
################ Helen Meskhidze, Fall 2015 ################
#################### Elon University #######################
#------------------------------------------------------------------------------------------------------
'''
The inputs this code takes are the peaks files exported by my peaksreader.py
This code outputs UV SFH comparison plots, saved to the working directory
'''
#------------------------------------------------------------------------------------------------------
#Packages importing
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#red for Padova; black for Geneva
color1 = "#e50000" #red
color2 = "#000000" #black
plt.subplots_adjust(wspace=0, hspace=1) #no horizontal space between plots, extra vertical space between rows
#subplot routine
def add_sub_plot(sub_num, desiredline):
plt.subplot(4,4,sub_num)
plt.scatter(xvals, peakspadcont[desiredline], c =color1, s = 8)
plt.plot(xvals, peakspadcont[desiredline], c =color1, label="Padova Continuous")
plt.scatter(xvals, peakspadinst[desiredline], c =color1, s = 8)
plt.plot(xvals, peakspadinst[desiredline], c =color1, linestyle='dotted', label="Padova Instantaneous")
plt.scatter(xvals, peaksgencont[desiredline], c =color2, s = 8)
plt.plot(xvals, peaksgencont[desiredline], c =color2, label="Geneva Continuous")
plt.scatter(xvals, peaksgeninst[desiredline], c =color2, s = 8)
plt.plot(xvals, peaksgeninst[desiredline], c =color2, linestyle='dotted', label = "Geneva Instantaneous")
#to label plot
if sub_num == 2:
figtext(.5,.95,'UV Emission Lines', fontsize=8, ha='center')
#set axis limits
plt.xlim(min(xvals),max(xvals))
plt.ylim(0,4.5)
plt.xticks(arange(0,8,1),fontsize=6)
plt.yticks(arange(0,4.5,.5),fontsize=6)
#some labels
if sub_num in [1,2,3,4]:
plt.tick_params(labelleft = 'off')
plt.tick_params(labelbottom = 'on')
plt.xlabel('Age (Myr)', fontsize=6)
plt.annotate(headers[desiredline], xy=(0.1,0.05), xytext=(0.1,0.05), fontsize = 6)
if sub_num in [5,6,7,8]:
plt.tick_params(labelleft = 'off')
plt.xlabel('Age (Myr)', fontsize=6)
plt.annotate(headers[desiredline], xy=(0.1,0.05), xytext=(0.1,0.05), fontsize = 6)
if sub_num == 1:
plt.ylabel('log($W _{\lambda}$)', fontsize=6)
plt.tick_params(labelleft = 'on')
if sub_num == 5:
plt.xlabel('Age (Myr)', fontsize=6)
plt.ylabel('log($W _{\lambda}$)', fontsize=6)
plt.tick_params(labelleft = 'on')
if sub_num in [4,8]:
plt.xticks(arange(0,9,1),fontsize=6)
if sub_num == 1:
plt.legend(bbox_to_anchor=(0., 1.2, 4., 0), loc=1, ncol=4, mode="expand", prop={'size':6}, borderaxespad=0.)
if sub_num == 5:
plt.legend(bbox_to_anchor=(0., 1.2, 4., 0), loc=1, ncol=4, mode="expand", prop={'size':6}, borderaxespad=0.)
# ---------------------------------------------------
numFiles = 5
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
os.chdir("./data")
#input files
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_cont_0"):
inputfile0 = file
print file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_cont_2"):
inputfile1 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_cont_4"):
inputfile2 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_cont_5"):
inputfile3 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_cont_6"):
inputfile4 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_cont_8"):
inputfile20 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_inst_0"):
inputfile5 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_inst_2"):
inputfile6 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_inst_4"):
inputfile7 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_inst_5"):
inputfile8 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_inst_6"):
inputfile9 = file
for file in os.listdir('./'):
if file.endswith("peaks_Geneva_inst_8"):
inputfile21 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_inst_0"):
inputfile10 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_inst_2"):
inputfile11 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_inst_4"):
inputfile12 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_inst_5"):
inputfile13 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_inst_6"):
inputfile14 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_inst_8"):
inputfile22 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_cont_0"):
inputfile15 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_cont_2"):
inputfile16 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_cont_4"):
inputfile17 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_cont_5"):
inputfile18 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_cont_6"):
inputfile19 = file
for file in os.listdir('./'):
if file.endswith("peaks_Padova_cont_8"):
inputfile23 = file
# importing headers file
for file in os.listdir('../'):
if file.endswith(".txt"):
headers = file
# ---------------------------------------------------
lines0 = [];
with open(inputfile0, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines0.append(row);
lines0 = asarray(lines0)
lines1 = [];
with open(inputfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines1.append(row);
lines1 = asarray(lines1)
lines2 = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines2.append(row);
lines2 = asarray(lines2)
lines3 = [];
with open(inputfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines3.append(row);
lines3 = asarray(lines3)
lines4 = [];
with open(inputfile4, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines4.append(row);
lines4 = asarray(lines4)
lines5 = [];
with open(inputfile5, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines5.append(row);
lines5 = asarray(lines5)
lines6 = [];
with open(inputfile6, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines6.append(row);
lines6 = asarray(lines6)
lines7 = [];
with open(inputfile7, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines7.append(row);
lines7 = asarray(lines7)
lines8 = [];
with open(inputfile8, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines8.append(row);
lines8 = asarray(lines8)
lines9 = [];
with open(inputfile9, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines9.append(row);
lines9 = asarray(lines9)
lines10 = [];
with open(inputfile10, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines10.append(row);
lines10 = asarray(lines10)
lines11 = [];
with open(inputfile11, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines11.append(row);
lines11 = asarray(lines11)
lines12 = [];
with open(inputfile12, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines12.append(row);
lines12 = asarray(lines12)
lines13 = [];
with open(inputfile13, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines13.append(row);
lines13 = asarray(lines13)
lines14 = [];
with open(inputfile14, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines14.append(row);
lines14 = asarray(lines14)
lines15 = [];
with open(inputfile15, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines15.append(row);
lines15 = asarray(lines15)
lines16 = [];
with open(inputfile16, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines16.append(row);
lines16 = asarray(lines16)
lines17 = [];
with open(inputfile17, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines17.append(row);
lines17 = asarray(lines17)
lines18 = [];
with open(inputfile18, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines18.append(row);
lines18 = asarray(lines18)
lines19 = [];
with open(inputfile19, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines19.append(row);
lines19 = asarray(lines19)
lines20 = [];
with open(inputfile20, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines20.append(row);
lines20 = asarray(lines20)
lines21 = [];
with open(inputfile21, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines21.append(row);
lines21 = asarray(lines21)
lines22 = [];
with open(inputfile22, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines22.append(row);
lines22 = asarray(lines22)
lines23 = [];
with open(inputfile23, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
lines23.append(row);
lines23 = asarray(lines23)
dataEmissionlines = [];
os.chdir("../")
with open(headers, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#create arrays of the peak values; the columns correspond to the ages (0,2,4,5,6,8) Myr
peakspadcont = zeros((len(lines6),6))
peakspadinst = zeros((len(lines6),6))
peaksgencont = zeros((len(lines6),6))
peaksgeninst = zeros((len(lines6),6))
peaksgencont[:,0] = lines0[:,0]
peaksgencont[:,1] = lines1[:,0]
peaksgencont[:,2] = lines2[:,0]
peaksgencont[:,3] = lines3[:,0]
peaksgencont[:,4] = lines4[:,0]
peaksgencont[:,5] = lines20[:,0]
peaksgeninst[:,0] = lines5[:,0]
peaksgeninst[:,1] = lines6[:,0]
peaksgeninst[:,2] = lines7[:,0]
peaksgeninst[:,3] = lines8[:,0]
peaksgeninst[:,4] = lines9[:,0]
peaksgeninst[:,5] = lines21[:,0]
peakspadinst[:,0] = lines10[:,0]
peakspadinst[:,1] = lines11[:,0]
peakspadinst[:,2] = lines12[:,0]
peakspadinst[:,3] = lines13[:,0]
peakspadinst[:,4] = lines14[:,0]
peakspadinst[:,5] = lines22[:,0]
peakspadcont[:,0] = lines15[:,0]
peakspadcont[:,1] = lines16[:,0]
peakspadcont[:,2] = lines17[:,0]
peakspadcont[:,3] = lines18[:,0]
peakspadcont[:,4] = lines19[:,0]
peakspadcont[:,5] = lines23[:,0]
headers = headers[1:] #drop the leading '#linelist' entry so the headers align with the data columns
# ---------------------------------------------------
#xvals for age
xvals = [0,2,4,5,6,8]
print "data arranged"
#below is where you should specify which lines you'd like to plot
desired = [0,2,19,28,5,18,27,34]
# ---------------------------------------------------
plt.clf()
for i in range(8):
add_sub_plot(i+1,desired[i])
plt.savefig('SFH_Comp_UV.pdf')
print "plot saved and complete"
| gpl-2.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/test_lib.py | 1 | 14027 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas.compat import long, u, PY2
def _assert_same_values_and_dtype(res, exp):
tm.assert_equal(res.dtype, exp.dtype)
tm.assert_almost_equal(res, exp)
class TestMisc(tm.TestCase):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
        self.assertEqual(lib.max_len_string_array(arr), 3)
# unicode
arr = a.astype('U').astype(object)
        self.assertEqual(lib.max_len_string_array(arr), 3)
# bytes for python3
arr = a.astype('S').astype(object)
        self.assertEqual(lib.max_len_string_array(arr), 3)
# raises
tm.assertRaises(TypeError,
lambda: lib.max_len_string_array(arr.astype('U')))
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2],
[2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
with self.assertRaises(IndexError):
target[indices]
with self.assertRaises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
with self.assertRaises(IndexError):
target[indices]
with self.assertRaises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1, )))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
self.assertFalse(lib.isscalar(zerodim))
self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
self.assertFalse(lib.isscalar(np.array([])))
self.assertFalse(lib.isscalar(np.array([[]])))
self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
def test_lisscalar_pandas_containers(self):
self.assertFalse(lib.isscalar(pd.Series()))
self.assertFalse(lib.isscalar(pd.Series([1])))
self.assertFalse(lib.isscalar(pd.DataFrame()))
self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
self.assertFalse(lib.isscalar(pd.Panel()))
self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
self.assertFalse(lib.isscalar(pd.Index([])))
self.assertFalse(lib.isscalar(pd.Index([1])))
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
_assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
_assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
_assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
_assert_same_values_and_dtype(result, expected)
_assert_same_values_and_dtype(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
_assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
_assert_same_values_and_dtype(result, expected)
arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
_assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_bools(self):
arr = np.array([True, False, True, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, True, False], dtype=bool)
_assert_same_values_and_dtype(result, expected)
arr = np.array([True, False, None, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, np.nan, False], dtype=object)
_assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_decimals(self):
from decimal import Decimal
arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
_assert_same_values_and_dtype(result, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
njwilson23/scipy | scipy/stats/_stats_mstats_common.py | 1 | 7903 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a regression line
This computes a least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimate
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
    >>> # To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
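# --- Illustrative sketch; not part of scipy itself (helper name is hypothetical) ---
# A quick consistency check of the closed-form slope/intercept above against
# an independent least-squares fit (numpy.polyfit); the two agree to
# floating-point precision, and r**2 is the usual coefficient of
# determination.
def _linregress_consistency_sketch():
    rng = np.random.RandomState(0)
    x = np.linspace(0., 1., 50)
    y = 2.0 * x - 1.0 + 0.1 * rng.randn(50)
    slope, intercept, r, p, stderr = linregress(x, y)
    slope_ls, intercept_ls = np.polyfit(x, y, 1)
    assert abs(slope - slope_ls) < 1e-10
    assert abs(intercept - intercept_ls) < 1e-10
    return slope, intercept, r ** 2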
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
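# --- Illustrative sketch; not part of scipy itself (helper name is hypothetical) ---
# The estimator itself is just "the median of all pairwise slopes"; the
# function above adds the intercept and the Sen (1968) confidence band on
# top of that. The core idea in a few lines; note how a single wild point
# barely moves the answer:
def _pairwise_median_slope_sketch(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    slopes = [(y[j] - y[i]) / (x[j] - x[i])
              for i in range(len(x))
              for j in range(i + 1, len(x))
              if x[j] != x[i]]
    return np.median(slopes)
# _pairwise_median_slope_sketch([0, 1, 2, 3, 4], [1, 3, 5, 7, 100]) -> 2.0,
# which matches theilslopes([1, 3, 5, 7, 100], [0, 1, 2, 3, 4])[0].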
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
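# --- Illustrative sketch; not part of scipy itself (helper name is hypothetical) ---
# _find_repeats returns the values that occur more than once together with
# their multiplicities; those counts feed the tie-correction terms of the
# Sen variance used by theilslopes above.
def _find_repeats_sketch():
    values, counts = _find_repeats([1., 2., 2., 3., 3., 3.])
    # values -> array([ 2.,  3.]), counts -> array([2, 3])
    return values, counts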
| bsd-3-clause |
jseabold/scipy | scipy/interpolate/tests/test_rbf.py | 41 | 4367 | #!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/datasets/__init__.py | 74 | 3616 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
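# --- Illustrative usage sketch; not part of scikit-learn itself (helper name is hypothetical) ---
# The names exported above fall into two groups: loaders for bundled
# reference datasets and generators for synthetic data. Minimal usage of
# one of each:
def _usage_sketch():
    iris = load_iris()                       # bundled reference dataset
    X_real, y_real = iris.data, iris.target  # (150, 4) features, 3 classes
    X_syn, y_syn = make_blobs(n_samples=100, centers=3, random_state=0)
    return X_real.shape, y_real.shape, X_syn.shape, y_syn.shape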
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/linear_model/tests/test_ransac.py | 52 | 17482 | from scipy import sparse
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
    assert_raises_regexp(ValueError,
                         r"No inliers.*residual_threshold.*0\.0",
                         ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
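    # (Hedged reference sketch, not taken from the cited book: these values are
    # consistent with the standard RANSAC trial-count formula
    #     N = ceil(log(1 - p) / log(1 - w ** m)),
    # where w = n_inliers / n_samples, m = min_samples and p = stop probability.
    # For example w = 0.5, m = 2, p = 0.99 gives log(0.01) / log(0.75) ~= 16.01,
    # i.e. 17 trials, matching the e = 50%, min_samples = 2 case below.)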
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| mit |
newville/scikit-image | skimage/viewer/plugins/overlayplugin.py | 40 | 3615 | from warnings import warn
from ...util.dtype import dtype_range
from .base import Plugin
from ..utils import ClearColormap, update_axes_image
import six
from ..._shared.version_requirements import is_installed
__all__ = ['OverlayPlugin']
class OverlayPlugin(Plugin):
"""Plugin for ImageViewer that displays an overlay on top of main image.
The base Plugin class displays the filtered image directly on the viewer.
OverlayPlugin will instead overlay an image with a transparent colormap.
See base Plugin class for additional details.
Attributes
----------
overlay : array
Overlay displayed on top of image. This overlay defaults to a color map
with alpha values varying linearly from 0 to 1.
color : int
Color of overlay.
"""
colors = {'red': (1, 0, 0),
'yellow': (1, 1, 0),
'green': (0, 1, 0),
'cyan': (0, 1, 1)}
def __init__(self, **kwargs):
if not is_installed('matplotlib', '>=1.2'):
msg = "Matplotlib >= 1.2 required for OverlayPlugin."
warn(RuntimeWarning(msg))
super(OverlayPlugin, self).__init__(**kwargs)
self._overlay_plot = None
self._overlay = None
self.cmap = None
self.color_names = sorted(list(self.colors.keys()))
def attach(self, image_viewer):
super(OverlayPlugin, self).attach(image_viewer)
#TODO: `color` doesn't update GUI widget when set manually.
self.color = 0
@property
def overlay(self):
return self._overlay
@overlay.setter
def overlay(self, image):
self._overlay = image
ax = self.image_viewer.ax
if image is None:
ax.images.remove(self._overlay_plot)
self._overlay_plot = None
elif self._overlay_plot is None:
vmin, vmax = dtype_range[image.dtype.type]
self._overlay_plot = ax.imshow(image, cmap=self.cmap,
vmin=vmin, vmax=vmax)
else:
update_axes_image(self._overlay_plot, image)
if self.image_viewer.useblit:
self.image_viewer._blit_manager.background = None
self.image_viewer.redraw()
@property
def color(self):
return self._color
@color.setter
def color(self, index):
# Update colormap whenever color is changed.
if isinstance(index, six.string_types) and \
index not in self.color_names:
raise ValueError("%s not defined in OverlayPlugin.colors" % index)
else:
name = self.color_names[index]
self._color = name
rgb = self.colors[name]
self.cmap = ClearColormap(rgb)
if self._overlay_plot is not None:
self._overlay_plot.set_cmap(self.cmap)
self.image_viewer.redraw()
@property
def filtered_image(self):
"""Return filtered image.
This "filtered image" is used when saving from the plugin.
"""
return self.overlay
def display_filtered_image(self, image):
"""Display filtered image as an overlay on top of image in viewer."""
self.overlay = image
def closeEvent(self, event):
# clear overlay from ImageViewer on close
self.overlay = None
super(OverlayPlugin, self).closeEvent(event)
def output(self):
"""Return the overlaid image.
Returns
-------
overlay : array, same shape as image
The overlay currently displayed.
data : None
"""
return (self.overlay, None)
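# A rough usage sketch (illustrative assumptions: scikit-image's viewer stack is
# importable and ``some_mask_filter`` is a hypothetical callable returning an
# overlay image for the displayed frame):
#
#     from skimage import data
#     from skimage.viewer import ImageViewer
#     viewer = ImageViewer(data.coins())
#     viewer += OverlayPlugin(image_filter=some_mask_filter)
#     viewer.show()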
| bsd-3-clause |
amagoon/Neural-Network-Tools | Backpropagator.py | 1 | 15483 | # Copyright (c) 2015 Ephraim Rothschild
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = 'Ephraim Rothschild'
import Generic_OVA
import random
import numpy as np
from sklearn.svm import NuSVC
# Class used for storing the data to input for the neural network
class Input:
def __init__(self, x, y=None):
# x is the input vector to be sent to the Neural Network. y is the label for that input vector.
self.x = np.array(x)
self.y = y
# The class that uses backpropagation to predict a result based on an input vector
class BP:
def __init__(self, k, d, maxitter=10, bias=None, v=None, W=None):
# Parameters:
# k: the number of hidden units to use for the Neural Network
# d: The dimensionality of the input vectors to be used
# v: (Optional) The v vector weight. Only necessary if you are loading a saved set of weights.
# W: (Optional) The W weight matrix. Only necessary if you are loading a saved set of weights.
# maxitter: The number of times the train() method will loop when training the Neural Network.
# (100-1000 is recommended)
self.maxitter = maxitter
self.d = d
self.k = k
self.bias = bias
# Initializes weights with random values between -1 and 1
if W is None:
self.W = (np.random.rand(k, d)*2 - 1)/10
else:
self.W = W
if v is None:
self.v = (np.random.rand(k)*2 - 1)/10
else:
self.v = v
# Method for training the Neural Network
def train(self, inputs, nu=0.01):
# Parameters:
# inputs: An array of Input objects containing input vectors along with their corresponding labels.
# nu: The error rate. This will be multiplied by the gradient of the error function when subtracted from
# the weights. Value should be a very small number between 0 and 1 (ex: 0.01 or 0.001)
for _ in range(0, self.maxitter):
random.shuffle(inputs)
# Loops through each of the inputs
for input in inputs:
# Append Bias if one is specified
                if self.bias:
                    wb = np.append(input.x, self.bias)
                else:
                    wb = input.x
# Normalize input vectors for training
x = wb/np.linalg.norm(wb)
# a is equal to the weight matrix W multiplied by the input vector x
a = self.W.dot(x)
# h is equal to the vector containing hyperbolic tangent of each value in a
h = np.tanh(a)
# Find the error rate
y_hat = np.tanh(np.dot(self.v, np.array(h)))
error = input.y - y_hat
# Update v with the error*h proportional to nu
self.v = self.v+nu*error*np.array(h)
for i in range(0, self.k):
# Update the weight vectors by subtracting the gradient of the error function
self.W[i] = self.W[i] + nu*((error*self.v[i])*(1 - (np.tanh(a[i])**2)))*x
# Method for predicting a label given an Input containing an input vector
def predict(self, input):
# Parameters:
# input: An Input object containing an input vector to be used for predicting a label.
if isinstance(input, Input):
# Append a bias onto the input if one exists for the Neural Network
if self.bias:
wb = np.append(input.x, self.bias)
else:
wb = input.x
# Normalize the input vector
x = wb/np.linalg.norm(wb)
h = np.tanh(self.W.dot(x))
else:
# Append a bias onto the input if one exists for the Neural Network
if self.bias:
wb = np.append(input, self.bias)
else:
wb = input
# Normalize the input vector
x = wb/np.linalg.norm(wb)
h = np.tanh(self.W.dot(x))
return np.tanh(np.dot(np.array(h), self.v))
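# A minimal usage sketch for BP (illustrative only; the toy vectors, labels,
# hidden-unit count and iteration budget below are arbitrary assumptions):
#
#     training = [Input([1.0, 0.0], 1), Input([0.0, 1.0], -1)]
#     net = BP(k=4, d=2, maxitter=200)
#     net.train(training, nu=0.01)
#     net.predict(Input([0.9, 0.1]))   # should trend towards +1 after training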
# Class that uses Scikit-Learn's implementation of SVM to predict labels
class svm():
def __init__(self):
# self.clf = SVC(kernel='rbf')
self.clf = NuSVC()
def train(self, inputs):
# Parameters:
# inputs: An array of Input objects containing input vectors along with their corresponding labels.
# Creates lists to use for fitting model
X = []
Y = []
for data in inputs:
X.append((data.x/np.linalg.norm(data.x)))
Y.append(data.y)
# Fit model
self.clf.fit(X, Y)
def predict(self, input):
# Parameters:
# input: An Input object containing an input vector to be used for predicting a label.
        if isinstance(input, Input):
            x = input.x/np.linalg.norm(input.x)
        else:
            x = input/np.linalg.norm(input)
        return self.clf.predict(x)
# Classifier that uses a 'One vs All' classification method.
# Can store any kind of predictor as long as that predictor has a predict(x) method
class OVAClassifier:
def __init__(self, k, d, maxitter, nu=0.001):
# Parameters:
# k: the number of hidden units to use for the Neural Networks
# d: The dimensionality of the input vectors to be used
# maxitter: The number of times the train() method will loop when training the Neural Network.
# (100-1000 is recommended)
self.k = k
self.d = d
self.maxitter = maxitter
self.classes = Generic_OVA.OVA()
self.nu = nu
def add_class_from_inputs(self, inputs, label_to_classify):
# Parameters:
# inputs: an array of Input objects each containing an input vector and label
# label_to_classify: The label representing the "true" value for the given inputs. (ex: if you are
# trying to classify what a car looks like for the given input vectors, the value of this parameter
# should be something like "car".
backprop_class = BP(self.k, self.d, self.maxitter)
backprop_class.train(inputs, self.nu)
self.classes.add_predictor(backprop_class, label_to_classify)
def add_class_from_predictor(self, predictor, label_to_classify):
# Parameters:
# predictor: The BP object to be used for training and prediction
# label_to_classify: The label representing the "true" value for the given inputs. (ex: if you are
# trying to classify what a car looks like for the given input vectors, the value of this parameter
# should be something like "car".
self.classes.add_predictor(predictor, label_to_classify)
def add_svm_class_from_inputs(self, inputs, label_to_classify):
# Parameters:
# inputs: an array of Input objects each containing an input vector and label
# label_to_classify: The label representing the "true" value for the given inputs. (ex: if you are
# trying to classify what a car looks like for the given input vectors, the value of this parameter
# should be something like "car".
support_vector = svm()
support_vector.train(inputs)
        self.classes.add_predictor(support_vector, label_to_classify)
def predict(self, input):
# Parameters:
# input: This can either be an Input object containing an input vector of size k, or just a numpy array
# of size k to be used as the input vector.
#
# Returns:
# The predicted label given the input vector.
if isinstance(input, Input):
return self.classes.predict(input.x)
else:
return self.classes.predict(input)
def get_ova_result(self, x):
# Parameters:
# x: the input that you would like to predict on
#
# Returns:
# An array representing the probabilities of the input vector being labeled for each of the labels.
# This is sorted in using the default sorting method on the labels as keys
prob = self.classes.getProbabilities(x)
return [value for (key, value) in sorted(prob.items())]
class MultiLayerClassifier:
def __init__(self, k, d, maxitter, nu=0.01, layers=1):
print("Number of layers: ", layers)
# Parameters:
# k: the number of hidden units to use for the Neural Networks
# d: The dimensionality of the input vectors to be used
# maxitter: The number of times the train() method will loop when training the Neural Network.
# (100-1000 is recommended)
# nu: The error rate. This will be multiplied by the gradient of the error function when subtracted from
# the weights. Value should be a very small number between 0 and 1 (ex: 0.01 or 0.001)
        #   layers: The number of layers you want the neural network to use. The more layers, the more accurate the
# neural network will be, but the slower it will be to train. Since each layer is using a 2-layer
# backpropagation algorithm as its classifier, the number of true layers is actually the number of
        #       layers passed in for this parameter multiplied by 2. The default is 1, which is 2 true layers.
self.k = k
self.d = d
self.maxitter = maxitter
self.layers = layers
self.inputs = {}
self.nu = nu
self.classifier = OVAClassifier(k, d, maxitter)
def retrain(self):
print("Retraining layer", self.layers)
# Reset class's classifier
self.classifier = OVAClassifier(self.k, self.d, self.maxitter, self.nu)
# Create sub-layer's classifier using recursion
if self.layers > 0:
# Sets sub-layer to have the same k value, but d is equal to the number of classes, and the number of
# layers is one less then the current layer's value.
self.nextLayer = MultiLayerClassifier(self.k, len(self.inputs), self.maxitter, layers=self.layers-1)
else:
self.nextLayer = None
if self.nextLayer:
# Reset the inputs of the next layer
self.nextLayer.inputs = {}
# go through each of the inputs' keys
for key in self.inputs.keys():
# Create an array for positive inputs
positive_inputs_for_class = []
# Create an array for negative inputs
negative_inputs_for_class = []
# Create array that will hold both types of inputs
inputs_for_class = []
# Go through each of the various labels for our input vectors
for other_key in self.inputs.keys():
# Find out if this label is the same is the outer loop above
if other_key == key:
# If it is, go through each of the input vectors with that label, and
# append it to positive_inputs_for_class.
for inner_value in self.inputs[other_key]:
positive_inputs_for_class.append(Input(inner_value, 1))
else:
# If it isn't, go through each of the input vectors with that label, and
# append it to negative_inputs_for_class.
for inner_value in self.inputs[other_key]:
negative_inputs_for_class.append(Input(inner_value, -1))
# Shuffle our arrays containing the positive and negative input vectors
random.shuffle(positive_inputs_for_class)
random.shuffle(negative_inputs_for_class)
# From 0 to the length of the smallest array between positive_inputs_for_class,
# and negative_inputs_for_class
for i in range(0, min(len(positive_inputs_for_class), len(negative_inputs_for_class))):
# Append one positive and one negative input vector to inputs_for_class
inputs_for_class.append(positive_inputs_for_class[i])
inputs_for_class.append(negative_inputs_for_class[i])
# Train a neural net using the input vectors we just collected, to find the label from the outer loop above.
print("Adding class " + str(key) + " to layer", self.layers)
self.classifier.add_class_from_inputs(inputs_for_class, key)
# If there is a next layer:
if self.nextLayer:
# Go through of our input vectors
for key, value in self.inputs.items():
for inner_value in value:
# Add the ova-result from predicting the given input vector - as an input vector itself to the
# next layer, with the label given by the above input vector's correct label.
self.nextLayer.add_input_for_class(np.array(self.classifier.get_ova_result(inner_value)), key)
# Recurse
self.nextLayer.retrain()
def add_input_for_class(self, x, label):
# Parameters:
# x: The input vector to be added as training data
# label: The label that you want to give to your training data
if not (label in self.inputs):
self.inputs[label] = []
if isinstance(x, Input):
self.inputs[label].append(x.x)
else:
self.inputs[label].append(x)
def add_inputs_for_class(self, inputs, label):
for input in inputs:
self.add_input_for_class(input, label)
def train(self, inputs):
# Parameters:
# inputs: An array of Input objects containing input vectors along with their corresponding labels.
for input in inputs:
self.add_input_for_class(input.x, input.y)
self.retrain()
def predict(self, input):
# Parameters:
# x: This can either be an Input object containing an input vector of size k, or just a numpy array
# of size k to be used as the input vector.
if isinstance(input, Input):
x = input.x
else:
x = input
if self.nextLayer:
prop = self.classifier.get_ova_result(x)
return self.nextLayer.predict(np.array(prop))
else:
return self.classifier.predict(x)
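# A rough end-to-end sketch for MultiLayerClassifier (assumptions: tiny 2-D toy
# data, arbitrary hyperparameters, and behaviour that also depends on the
# external Generic_OVA module, which is not shown here):
#
#     mlc = MultiLayerClassifier(k=3, d=2, maxitter=100, layers=1)
#     mlc.add_inputs_for_class([np.array([1.0, 0.1]), np.array([0.9, 0.0])], 'A')
#     mlc.add_inputs_for_class([np.array([0.1, 1.0]), np.array([0.0, 0.9])], 'B')
#     mlc.retrain()
#     mlc.predict(np.array([0.95, 0.05]))   # expected to lean towards 'A'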
| mit |
pythonvietnam/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
crosenth/csvpandas | csvpandas/subcommands/grep.py | 1 | 2001 | # This file is part of csvpandas
#
# csvpandas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# csvpandas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with csvpandas. If not, see <http://www.gnu.org/licenses/>.
"""Search for regex patterns
"""
import logging
import re
log = logging.getLogger(__name__)
def build_parser(parser):
# required inputs
parser.add_argument(
'pattern',
help=('search for pattern in column(s)'))
parser.add_argument(
'--columns',
metavar='COLS',
help=('Comma delimited list of column '
'names or indices if --no-header'))
parser.add_argument(
'--all',
action='store_true',
        help='pattern must exist in all column(s) [default: any]')
parser.add_argument(
'-i',
'--ignore-case',
action='store_true',
help=('Ignore case distinctions in both '
'the PATTERN and the input files.'))
def action(args):
if args.columns:
columns = args.columns.split(',')
else:
columns = args.csv.columns.tolist()
if args.ignore_case:
pattern = re.compile(args.pattern, re.IGNORECASE)
else:
pattern = re.compile(args.pattern)
def search(string):
return bool(re.search(pattern, string))
df = args.csv
if args.all:
df = df[df[columns].apply(lambda x: x.map(search).all(), axis=1)]
else:
df = df[df[columns].apply(lambda x: x.map(search).any(), axis=1)]
df.to_csv(args.out, index=False)
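# Illustrative behaviour sketch (hypothetical frame, mirroring the filtering above):
#
#     import pandas as pd
#     df = pd.DataFrame({'name': ['Foo', 'bar'], 'id': ['1', '2']})
#     pattern = re.compile('foo', re.IGNORECASE)
#     keep = df[['name']].apply(
#         lambda x: x.map(lambda s: bool(re.search(pattern, s))).any(), axis=1)
#     df[keep]   # keeps only the 'Foo' row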
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/fontconfig_pattern.py | 8 | 6538 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import re, sys
from pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser(object):
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException as e:
raise ValueError(
"Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
self._parser.resetCache()
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
    Given an object that exposes ``get_family``, ``get_size``, etc. accessors
    (such as a :class:`font_manager.FontProperties` instance), generates a
    fontconfig pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
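# A hedged usage sketch (expected shape of the results; not taken from the
# matplotlib docs):
#
#     parse_fontconfig_pattern('serif-12:bold:italic')
#     # -> roughly {'family': ['serif'], 'size': ['12'],
#     #             'weight': ['bold'], 'slant': ['italic']}
#
#     # generate_fontconfig_pattern expects an object exposing get_family(),
#     # get_size(), ... (e.g. a FontProperties instance) and returns a string
#     # of ':key=value' segments built from those accessors.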
| mit |
alexsavio/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
q1ang/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
breedlun/clearplot | setup.py | 1 | 4942 | from setuptools import setup
#from pkg_resources import require, DistributionNotFound
from setuptools.command.install import install
import warnings
#Define the version of clearplot
cp_version = '1.2.1'
#Comment out this code in case we switch back to the Qt4Agg backend.
##For now we are using the Qt4Agg backend, which requires PyQt4 or PySide, but
##PyQt4 is prefered
#try:
# require('PyQt4')
# Qt_pkg = 'PyQt4'
#except DistributionNotFound:
# Qt_pkg = 'PySide'
#Set up the machinery to install custom fonts.
#Note: I originally tried to use the data_files keyword in distutils.setup() to
#install the font files, but this turned into a mess. Wheels do not support
#absolute file paths, and the pip project is basically forcing people to use
#wheels. Also, in order to find where matplotlib stores its true type fonts
#and fontList.cache, I had to import matplotlib before setup() had a chance to
#install matplotlib first. For more information see this stackoverflow post
#http://stackoverflow.com/questions/34193900/how-do-i-distribute-fonts-with-my-python-package/34204582
#and the contained links. Fortunately, we can subclass the setuptools install
#class in order to run custom commands during installation. See
#http://blog.niteoweb.com/setuptools-run-custom-code-in-setup-py/ for more
#information.
class move_ttf(install):
def run(self):
"""
Performs the usual install process and then copies the True Type fonts
that come with clearplot into matplotlib's True Type font directory,
and deletes the matplotlib fontList.cache
"""
#Perform the usual install process
install.run(self)
#Try to install custom fonts
try:
import os, shutil
import matplotlib as mpl
import clearplot as cp
#Find where matplotlib stores its True Type fonts
mpl_data_dir = os.path.dirname(mpl.matplotlib_fname())
mpl_ttf_dir = os.path.join(mpl_data_dir, 'fonts', 'ttf')
#Copy the font files to matplotlib's True Type font directory
#(I originally tried to move the font files instead of copy them,
#but it did not seem to work, so I gave up.)
cp_ttf_dir = os.path.join(os.path.dirname(cp.__file__), 'true_type_fonts')
for file_name in os.listdir(cp_ttf_dir):
if file_name[-4:] == '.ttf':
old_path = os.path.join(cp_ttf_dir, file_name)
new_path = os.path.join(mpl_ttf_dir, file_name)
shutil.copyfile(old_path, new_path)
print("Copying " + old_path + " -> " + new_path)
#Try to delete matplotlib's fontList cache
mpl_cache_dir = mpl.get_cachedir()
mpl_cache_dir_ls = os.listdir(mpl_cache_dir)
font_list_cache_names = ["fontList.cache", "fontList.py3k.cache"]
for font_list_cache_name in font_list_cache_names:
if font_list_cache_name in mpl_cache_dir_ls:
fontList_path = os.path.join(mpl_cache_dir, font_list_cache_name)
os.remove(fontList_path)
print("Deleted the matplotlib " + font_list_cache_name)
except:
            warnings.warn("WARNING: An issue occurred while installing the custom fonts for clearplot.")
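#Illustrative install sketch (assumption: working from a source checkout):
#
#    pip install .          # or: python setup.py install
#
#Either route is expected to trigger move_ttf.run() above, although wheel-based
#installs can bypass custom install commands, so the font copy is best verified
#after installation.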
setup(
name = 'clearplot',
packages = ['clearplot'], # this must be the same as the name above
version = cp_version,
description = 'Clearplot creates publication quality plots using matplotlib',
author = 'Benjamin Reedlunn',
author_email = '[email protected]',
license = 'MIT',
url = 'http://clearplot.readthedocs.org',
download_url = 'https://github.com/breedlun/clearplot/tarball/' + cp_version,
keywords = ['matplotlib', 'plotting'],
classifiers = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Visualization'],
#Specify the dependencies and versions
install_requires = ['matplotlib >= 3.3.0', 'numpy >= 1.6'],
#Specify any non-python files to be distributed with the package
package_data = {'' : ['color_maps/*.csv', 'true_type_fonts/*.ttf']},
#Specify the custom install class
cmdclass={'install' : move_ttf}
) | mit |
tomsilver/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
  Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
  Power spectral density using Welch's average periodogram
:func:`rk4`
  A 4th order Runge-Kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
  Make a 2D grid from two 1D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
  trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
          Must be even; a power of 2 is most efficient.  The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
          matplotlib it is a function.  The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
          by the sampling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
psd.__doc__ = psd.__doc__ % kwdocd
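# ---- illustrative usage sketch (not part of the original mlab API) ----
# A minimal example of calling psd() on a synthetic signal; the sampling
# frequency, tone frequency and NFFT below are arbitrary assumptions.
def _example_psd_usage():
    "Estimate the PSD of a noisy 10 Hz sine sampled at 100 Hz (sketch)."
    Fs = 100.0
    t = np.arange(0, 10, 1.0/Fs)
    x = np.sin(2*np.pi*10*t) + 0.1*np.random.randn(len(t))
    Pxx, freqs = psd(x, NFFT=256, Fs=Fs)
    # the dominant frequency should come out near 10 Hz
    return freqs[np.argmax(Pxx)]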
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
    length segments and the PSD of each section is computed.  The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned.  If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`:
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[NFFT/2:]-Fs,freqs[:NFFT/2]))
Pxx = np.concatenate((Pxx[NFFT/2:,:],Pxx[:NFFT/2,:]),0)
return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
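# ---- illustrative usage sketch (not part of the original mlab API) ----
# Shows the shapes returned by specgram(); the white-noise test signal and
# the parameter values are assumptions made for this example only.
def _example_specgram_usage():
    "Compute a spectrogram of white noise and report the output shapes."
    Fs = 2.0
    x = np.random.randn(2048)
    Pxx, freqs, t = specgram(x, NFFT=256, Fs=Fs, noverlap=128)
    # Pxx has one row per frequency bin and one column per segment
    return Pxx.shape, freqs.shape, t.shape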
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`:
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
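# ---- illustrative usage sketch (not part of the original mlab API) ----
# Demonstrates cohere() on two signals that share a common component; the
# signal construction below is an assumption chosen for illustration.
def _example_cohere_usage():
    "Coherence between a signal and a noisy copy of itself (sketch)."
    n = 4096
    s = np.random.randn(n)
    x = s + 0.1*np.random.randn(n)
    y = s + 0.1*np.random.randn(n)
    Cxy, f = cohere(x, y, NFFT=256, Fs=2)
    # coherence should be close to 1 at most frequencies
    return Cxy.mean()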
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
      p2*x0^2 + p1*x0 + p0 = y0
      p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
    Method: if *X* is the Vandermonde matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
      p = (X_t X)^{-1} X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
    but note that the *k*'s and *n*'s in the superscripts and
    subscripts on that page differ from the notation used here.  The
    linear algebra is correct, however.
.. seealso::
:func:`polyval`
"""
    warnings.warn("use numpy.polyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
    *p* is a vector of polynomial coefficients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the
    *i*-th power of *x*.  *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
        the coherence or phase vectors for any (*i*, *j*) key.  Eg,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is *True*.  In my tests
    with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7 GHz Athlon with 512 MB RAM compared with
    *preferSpeedOverMemory* = *False*.  But both solutions were more
    than 10x faster than naively crunching all possible pairs through
    cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree:
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segement
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
        windowVals = window(np.ones((NFFT,), X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
    normVal = _norm(windowVals)**2
    for iColNum, iCol in enumerate(allColumns):
        progressCallback(iColNum/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
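# ---- illustrative usage sketch (not part of the original mlab API) ----
# A small example of cohere_pairs() on random data; the array size and the
# pair list are arbitrary assumptions.
def _example_cohere_pairs_usage():
    "Coherence for every column pair of a (2048, 3) array (sketch)."
    X = np.random.randn(2048, 3)
    ij = [(i, j) for i in range(3) for j in range(i+1, 3)]
    Cxy, Phase, freqs = cohere_pairs(X, ij, NFFT=256, Fs=2)
    return Cxy[(0, 1)].shape, freqs.shape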
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
      -\sum p_i \log_2(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
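# ---- illustrative usage sketch (not part of the original mlab API) ----
# Compares entropy() on Gaussian samples with the analytic differential
# entropy mentioned in the docstring; mu, sigma, the bin count and the
# sample size are arbitrary assumptions.
def _example_entropy_usage():
    "Entropy of Gaussian samples vs. 0.5*(1 + log(2*pi*sigma**2)) (sketch)."
    mu, sigma = 0.0, 2.0
    y = mu + sigma*np.random.randn(200000)
    S = entropy(y, 1000)
    Sanalytic = 0.5*(1.0 + np.log(2*np.pi*sigma**2))
    return S, Sanalytic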
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y)\\cdot\\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
    "Return the levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
              'x = np.linspace(minx, maxx, N), where N is even'
dx = x[1]-x[0]
    f = 1/(N*dx)*np.arange(-N/2, N/2, dtype=np.float_)
    ind = np.concatenate([np.arange(N/2, N, dtype=int),
                          np.arange(0, N/2, dtype=int)])
df = f[1]-f[0]
    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
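# ---- illustrative usage sketch (not part of the original mlab API) ----
# Shows what longest_contiguous_ones() returns for a small 0/1 vector; the
# input below is an arbitrary assumption.
def _example_longest_ones_usage():
    "Indices of the longest run of ones in [1,0,1,1,1,0,1,1] (sketch)."
    x = np.array([1, 0, 1, 1, 1, 0, 1, 1])
    return longest_contiguous_ones(x)   # -> array([2, 3, 4])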
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
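# ---- illustrative usage sketch (not part of the original mlab API) ----
# Runs prepca() on two strongly correlated variables; the data construction
# is an arbitrary assumption for illustration.
def _example_prepca_usage():
    "Fraction of variance per principal component of correlated data (sketch)."
    z = np.random.randn(2, 1000)
    P = np.vstack([z[0], z[0] + 0.1*z[1]])   # (numVars, numObs)
    Pcomponents, Trans, fracVar = prepca(P)
    # nearly all of the variance should sit in the first component
    return fracVar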
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
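# ---- illustrative usage sketch (not part of the original mlab API) ----
# Shows prctile() and prctile_rank() on a simple ramp; the data and the
# percentile choices are arbitrary assumptions.
def _example_prctile_usage():
    "Quartiles of 0..99 and the quartile rank of each value (sketch)."
    x = np.arange(100.0)
    quartiles = prctile(x, (25, 50, 75))
    ranks = prctile_rank(x, (25, 50, 75))   # values in {0, 1, 2, 3}
    return quartiles, ranks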
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
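# ---- illustrative usage sketch (not part of the original mlab API) ----
# Integrates the scalar ODE dy/dt = -2*y with rk4() and compares the end
# point with the exact solution; step size and time span are assumptions.
def _example_rk4_usage():
    "rk4 on dy/dt = -2*y, y(0)=1, over t in [0, 2] (sketch)."
    def deriv(y, t):
        return -2.0*y
    t = np.arange(0.0, 2.0, 0.01)
    yout = rk4(deriv, 1.0, t)
    return yout[-1], np.exp(-2.0*t[-1])   # should agree closely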
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
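# ---- illustrative usage sketch (not part of the original mlab API) ----
# Evaluates bivariate_normal() on a small grid; the grid extent and the
# distribution parameters are arbitrary assumptions.
def _example_bivariate_normal_usage():
    "Evaluate a correlated 2D Gaussian pdf on a 50x50 grid (sketch)."
    x = np.linspace(-3.0, 3.0, 50)
    X, Y = np.meshgrid(x, x)
    Z = bivariate_normal(X, Y, sigmax=1.0, sigmay=1.5, sigmaxy=0.5)
    return Z.shape, Z.max()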
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns :
.. math::
      \lambda = \\frac{1}{n}\\sum \\ln|f^\\prime(x_i)|
.. seealso::
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
Update the *datalim* in the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
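# ---- illustrative usage sketch (not part of the original mlab API) ----
# Demonstrates the rotating behavior of FIFOBuffer; the buffer size and the
# points pushed in are arbitrary assumptions.
def _example_fifobuffer_usage():
    "Push 8 points through a 5-slot FIFOBuffer and read them back (sketch)."
    buf = FIFOBuffer(5)
    for i in range(8):
        buf.add(float(i), float(i)**2)
    # only the 5 most recent points survive, oldest first
    return buf.asarrays()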
def movavg(x,n):
"""
    Compute the length-*n* moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
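# ---- illustrative usage sketch (not part of the original mlab API) ----
# A quick look at movavg(); the window length and input are assumptions.
def _example_movavg_usage():
    "3-point moving average of 0..9 (sketch)."
    x = np.arange(10.0)
    return movavg(x, 3)   # -> array([1., 2., ..., 8.]), length len(x)-2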
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
    The data must be regular: the same number of values in every row.
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree:
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
    :func:`slopes` calculates the slope *y*'(*x*).  Given data vectors
    *x* and *y*, it returns the slope of the curve *y*(*x*).  The slope
    is estimated using the slope of a parabola through any three
    consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
    cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by a original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were "not an academic journal but once in a while something
    serious and original comes in", adding that this was "apparently a
    real solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
X is assumed to be sorted in increasing order
    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation.  The relevance of the data obtained from this is, of
    course, questionable...
original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
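# ---- illustrative usage sketch (not part of the original mlab API) ----
# Reproduces the coarse-sine example from the stineman_interp docstring in
# runnable form; the number of sample points is an arbitrary assumption.
def _example_stineman_interp_usage():
    "Interpolate a coarse sine curve onto a finer abscissa (sketch)."
    x = np.linspace(0, 2*np.pi, 20)
    y = np.sin(x)
    yp = np.cos(x)                      # known slopes; pass None to use slopes()
    xi = np.linspace(0, 2*np.pi, 40)
    yi = stineman_interp(xi, x, y, yp)
    return xi, yi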
def inside_poly(points, verts):
"""
points is a sequence of x,y points
    verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
    given arrays *xs* and *ys*, return the vertices of a polygon
that has a scalar lower bound *ymin* and an upper bound at the *ys*.
intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
iterable. If they are iterable, they must be equal in length to x
return value is x, y arrays for use with Axes.fill
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <[email protected]>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
        return np.exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
    array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin == None:
xfin = xini + 0.0
xini = 0.0
if delta == None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if number < base: \
return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
    if not digits.count (1): return '0'
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
            raise ValueError, "number of arrays does not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
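# ---- illustrative usage sketch (not part of the original mlab API) ----
# Builds a tiny record array and summarizes it with rec_groupby(); the field
# names and values are arbitrary assumptions.
def _example_rec_groupby_usage():
    "Group a toy sales table by product code (sketch)."
    r = np.rec.fromrecords([('a', 1.0), ('a', 3.0), ('b', 2.0)],
                           names='code,sales')
    stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
    return rec_groupby(r, ('code',), stats)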
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
        dt2 = r2.dtype[name]
        assert dt2.type == dt1.type
        if dt1.itemsize > dt2.itemsize:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
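# Illustrative usage sketch for rec_join (not part of the original module);
# the field names below are hypothetical.
#
#   r1 = np.rec.fromrecords([(1, 1.5), (2, 2.5)], names='id,price')
#   r2 = np.rec.fromrecords([(1, 0.1), (3, 0.3)], names='id,ret')
#   inner = rec_join('id', r1, r2)                  # only the row with id == 1
#   outer = rec_join('id', r1, r2, jointype='outer',
#                    defaults={'price': np.nan, 'ret': np.nan})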
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
    - *converterd*: if not *None*, is a dictionary mapping column number or
      munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
        funcmap = {mybool: myint, myint: myfloat, myfloat: mydate,
                   mydate: mydateparser, mydateparser: mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
# iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
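# Illustrative usage sketch for csv2rec (not part of the original module);
# 'data.csv' is a hypothetical file with a header row such as
# "date,price,volume":
#
#   r = csv2rec('data.csv')
#   print r.dtype.names        # lower-cased, munged header names
#   r = csv2rec('data.csv', skiprows=1, checkrows=10, missing='NA')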
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`:
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
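# Illustrative round-trip sketch for rec2csv/csv2rec (not part of the
# original module); 'out.csv' is a hypothetical path:
#
#   r = np.rec.fromrecords([(1, 2.5), (2, 3.5)], names='id,price')
#   rec2csv(r, 'out.csv')
#   r2 = csv2rec('out.csv')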
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside convex
hull defined by input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_toolkits.natgrid`) has been created that provides a more
    robust algorithm for triangulation and interpolation.  This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
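# Illustrative usage sketch for griddata (not part of the original module):
# interpolate scattered samples onto a regular grid; the variable names are
# hypothetical.
#
#   x = np.random.uniform(-2, 2, 200)
#   y = np.random.uniform(-2, 2, 200)
#   z = x * np.exp(-x**2 - y**2)
#   xi = np.linspace(-2, 2, 50)
#   yi = np.linspace(-2, 2, 50)
#   zi = griddata(x, y, z, xi, yi)   # may be masked outside the convex hull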
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
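# Illustrative usage sketch (not part of the original module):
#
#   x = np.array([0., 1., 2.])
#   y = np.array([0., 10., 20.])
#   less_simple_linear_interpolation(x, y, [0.5, 1.5])        # -> [5., 15.]
#   less_simple_linear_interpolation(x, y, -1., extrap=True)  # clamps to y[0]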
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
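# Illustrative check sketch for slopes (not part of the original module):
# for the parabola y = x**2 the three-point parabola estimate reproduces the
# analytic derivative 2*x on an evenly spaced grid.
#
#   x = np.linspace(0, 3, 7)
#   y = x**2
#   yp = slopes(x, y)          # matches 2*x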
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were "not an academic journal but once in a while something
    serious and original comes in", adding that this was "apparently a
    real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
    s = dy/dx  # note: len(s) is N-1, so its last valid index is N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
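# Illustrative usage sketch (not part of the original module):
#
#   mask = np.array([False, True, True, False, True])
#   contiguous_regions(mask)     # -> [(1, 3), (4, 5)]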
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the
    ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between a set of successive points in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. The distances between
successive rows is computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
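# Illustrative usage sketch (not part of the original module): cumulative
# arc length along the first two edges of the unit square.
#
#   X = np.array([[0., 0.], [1., 0.], [1., 1.]])
#   path_length(X)               # -> array([0., 1., 2.])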
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
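# Illustrative usage sketch (not part of the original module): a quadratic
# Bezier segment from (0, 0) through control point (1, 2) to (2, 0).
#
#   quad2cubic(0., 0., 1., 2., 2., 0.)
#   # -> approximately (0.0, 0.0, 0.667, 1.333, 1.333, 1.333, 2.0, 0.0)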
| gpl-3.0 |
rjpower/fastnet | fastnet/analysis.py | 1 | 6708 | #!/usr/bin/env
'''Functions for analyzing the output of fastnet checkpoint files.'''
from fastnet import util
from matplotlib.pyplot import gcf
from math import sqrt
import anydbm
import cPickle
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas
import pylab
import shelve
import sys
import zipfile
def find_latest(pattern):
import glob
files = glob.glob(pattern)
ftimes = sorted((os.stat(f).st_ctime, f) for f in files)
if ftimes:
return ftimes[-1][1]
return None
def select(df, **cond):
k, v = cond.items()[0]
idx = getattr(df, k) == v
for k, v in cond.items()[1:]:
idx2 = getattr(df, k) == v
idx = idx & idx2
return df[idx]
def _load_series(data, scale=1):
lp = [t['logprob'] for t,count,elapsed in data]
counts = np.array([count for t,count,elapsed in data]).cumsum()
examples = counts * scale
elapsed = np.array([elapsed for t,count,elapsed in data])
logprob = np.array([t[0] for t in lp])
prec = np.array([t[1] for t in lp])
return pandas.DataFrame({'lp' : logprob, 'pr' : prec, 'elapsed' : elapsed, 'examples' : examples})
def try_load_zip(state_f):
try:
zf = zipfile.ZipFile(state_f, 'r')
train_outputs = cPickle.loads(zf.read('train_outputs'))
test_outputs = cPickle.loads(zf.read('test_outputs'))
return train_outputs, test_outputs
except:
print sys.exc_info()
return None, None
def try_load_pickle(state_f):
try:
data = cPickle.loads(open(state_f).read())
train_outputs = data['train_outputs']
test_outputs = data['test_outputs']
return train_outputs, test_outputs
except:
print sys.exc_info()
return None, None
def try_load_shelf(state_f):
try:
data = shelve.open(state_f, flag='r')
train_outputs = data['train_outputs']
test_outputs = data['test_outputs']
return train_outputs, test_outputs
except:
print sys.exc_info()
return None, None
def load_checkpoint(pattern):
state_f = find_latest(pattern)
assert state_f is not None
train_outputs, test_outputs = try_load_zip(state_f)
if not train_outputs:
train_outputs, test_outputs = try_load_pickle(state_f)
if not train_outputs:
train_outputs, test_outputs = try_load_shelf(state_f)
assert train_outputs is not None
train_df = _load_series(train_outputs)
train_df['type'] = 'train'
test_df = _load_series(test_outputs)
test_df['type'] = 'test'
#return train_df, test_df
out = pandas.concat([train_df, test_df])
return out
def plot_df(df, x, y, save_to=None, title=None, merge=False,
x_label=None, y_label=None, legend=None,
transform_x=lambda k, x: x, transform_y=lambda k, y: y,
xlim=None, ylim=None):
from itertools import cycle
lines = cycle(["-","--","-.",":"])
colors = cycle('bgrcmyk')
if merge: f = gcf()
else: f = plt.figure()
if isinstance(df, dict):
for k in sorted(df.keys()):
v = df[k]
ax = f.add_subplot(111)
ax.plot(transform_x(k, v[x]), transform_y(k, v[y]),
linestyle=lines.next(), color=colors.next(), label='%s' % k)
else:
ax = f.add_subplot(111)
ax.plot(df[x], df[y], linestyle=lines.next(), color=colors.next())
ax.set_title(title)
if legend: ax.legend(title=legend)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
#ax.set_yscale('log')
f.set_figheight(8)
f.set_figwidth(12)
if save_to is not None:
pylab.savefig(save_to, bbox_inches=0)
def plot_series(frame, groupby, x, y, **kw):
g = frame.groupby(groupby)
df = dict([(k, g.get_group(k)) for k in g.groups.keys()])
kw['x_label'] = x
kw['y_label'] = y
plot_df(df, x, y, **kw)
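# Illustrative usage sketch (not part of the original module); the checkpoint
# glob pattern is hypothetical:
#
#   df = load_checkpoint('/tmp/checkpoints/imagenet*')
#   plot_series(df, groupby='type', x='examples', y='pr',
#               title='precision vs. examples seen')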
def build_image(array):
if len(array.shape) == 4:
filter_size = array.shape[1]
else:
filter_size = array.shape[0]
num_filters = array.shape[-1]
num_cols = util.divup(80, filter_size)
num_rows = util.divup(num_filters, num_cols)
if len(array.shape) == 4:
big_pic = np.zeros((3, (filter_size + 1) * num_rows, (filter_size + 1) * num_cols))
else:
big_pic = np.zeros((filter_size * num_rows, filter_size * num_cols))
for i in range(num_rows):
for j in range(num_cols):
idx = i * num_cols + j
if idx >= num_filters: break
x = i*(filter_size + 1)
y = j*(filter_size + 1)
if len(array.shape) == 4:
big_pic[:, x:x+filter_size, y:y+filter_size] = array[:, :, :, idx]
else:
big_pic[x:x+filter_size, y:y+filter_size] = array[:, :, idx]
if len(array.shape) == 4:
return big_pic.transpose(1, 2, 0)
return big_pic
def load_layer(f, layer_id=1):
cp = find_latest(f)
try:
sf = shelve.open(cp, flag='r')
layer = sf['layers'][layer_id]
except anydbm.error:
zf = zipfile.ZipFile(cp)
layer = cPickle.loads(zf.read('layers'))[layer_id]
imgs = layer['weight']
filters = layer['numFilter']
filter_size = layer['filterSize']
colors = layer['numColor']
imgs = imgs.reshape(colors, filter_size, filter_size, filters) + layer['bias'].reshape(1, 1, 1, filters)
return imgs
def load_layers(f):
cp = find_latest(f)
try:
sf = shelve.open(cp, flag='r')
layers = sf['layers']
except anydbm.error:
zf = zipfile.ZipFile(cp)
layers = cPickle.loads(zf.read('layers'))
weights = {}
for layer in layers:
if not 'weight' in layer: continue
if not 'numFilter' in layer: continue
imgs = layer['weight']
filters = layer['numFilter']
filter_size = layer['filterSize']
colors = layer['numColor']
imgs = imgs.reshape(colors, filter_size, filter_size, filters)
bias = layer['bias'].reshape(1, 1, 1, filters)
weights[layer['name']] = imgs + bias
return weights
def plot_filters(imgs, ax=None):
imgs = imgs - imgs.min()
imgs = imgs / imgs.max()
if ax is None:
fig = pylab.gcf()
fig.set_size_inches(12, 8)
ax = fig.add_subplot(111)
big_pic = build_image(imgs)
ax.imshow(big_pic, interpolation='nearest')
def plot_file(f, layer_id=1):
return plot_filters(load_layer(f, layer_id))
def diff_files(a, b):
f_a = load_layer(a)
f_b = load_layer(b)
fig = pylab.gcf()
fig.set_size_inches(12, 8)
ax = fig.add_subplot(111)
diff = np.abs(f_a - f_b)
print sqrt((diff ** 2).sum())
#diff = diff - diff.min()
#diff = diff / diff.max()
#big_pic = build_image(diff)
#print diff[0, :, :, 0]
#print f_a[0, :, :, 0]
#print f_b[0, :, :, 0]
#diff = diff / max(np.max(f_a), np.max(f_b))
ax.imshow(build_image(diff))
return diff
| gpl-3.0 |
cbenz/openfisca-hackallocs | calculate_ars.py | 1 | 3137 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import json
import os
import numpy as np
import pandas as pd
from openfisca_core import periods
import openfisca_france
script_dir = os.path.realpath(os.path.dirname(__file__))
data_dir = os.path.realpath(os.path.join(script_dir, 'data'))
def read_data(path):
# path_agen = os.path.join(path, 'agen_utile.csv')
# agen = pd.read_csv(path_agen, sep=';')
path_fil = os.path.join(path, 'fil_utile.csv')
fil = pd.read_csv(path_fil, sep=';')
###
    # family/household-level data
# fam = fil['ARSVERS']
# NUMCOMDO ?
# fam2 = agen[['ARSVERS', 'MTARSVER']]
    # individual-level data
    # we start from the per-household table
var_ages = ['DTNAIRES', 'DTNAICON']
var_age_enf = [u'ANNNEN1', u'ANNNEN2', u'ANNNEN3', u'ANNNEN4']
ind = fil[['PERSCOUV', 'SEXE'] + var_ages + var_age_enf]
nb_adulte = 1 + (ind.DTNAICON != 99)
nb_enf = (ind.PERSCOUV - nb_adulte)
nb_enf_autres = nb_enf - 4 * (ind.ANNNEN4.notnull())
nb_enf_autres[nb_enf_autres < 0] = 0
for var in var_age_enf:
ind[var].replace(0, np.nan, inplace=True)
ind[var] = 2013 - ind[var]
for var in var_ages:
age = ind[var]
age[age == 99] = np.nan
age = 5 * (age - 2) + 20 + 2
age[age == 17] = 10
ind[var] = age
ind['idfam'] = ind.index
# quifam = 0
quifam0 = ind[['idfam', 'DTNAIRES']]
quifam0.columns = ['idfam', 'age']
quifam0['quifam'] = 0
# quifam = 0
quifam1 = ind.loc[ind['DTNAICON'].notnull(),
['idfam', 'DTNAICON']]
quifam1.columns = ['idfam', 'age']
quifam1['quifam'] = 1
list_enf = []
for num_enf in range(4):
var_enf_name = 'ANNNEN' + str(num_enf + 1)
var_enf = ind[var_enf_name]
enf = ind.loc[var_enf.notnull(), ['idfam', var_enf_name]]
enf.columns = ['idfam', 'age']
enf['quifam'] = 2 + num_enf + 1
list_enf += [enf]
count = 7
while sum(nb_enf_autres > 0) > 0:
cond = nb_enf_autres > 0
enf_autre = ind.loc[cond, ['idfam', 'ANNNEN4']]
enf_autre.columns = ['idfam', 'age']
enf_autre['age'] += count - 6
enf_autre['quifam'] = count
nb_enf_autres -= 1
count += 1
list_enf += [enf_autre]
individu = quifam0.append([quifam1] + list_enf)
return individu
def main():
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
scenario = tax_benefit_system.new_scenario()
scenario.period = periods.period('2014')
individu = read_data(data_dir)
year_birth = (2014 - individu['age']).astype(int).astype(str)
individu['birth'] = year_birth + '-10-02'
individu['birth'] = pd.to_datetime(individu['birth'])
del individu['age']
scenario.input_variables = {
variable_name: {periods.period('2014'): serie.values}
for variable_name, serie in individu.iterkv()
}
simulation = scenario.new_simulation()
ars = simulation.calculate('ars')
print ars
print len(ars)
if __name__ == '__main__':
main()
| agpl-3.0 |
jeffmkw/DAT210x-Lab | Module5/assignment5.py | 1 | 4696 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from sklearn import preprocessing
from sklearn.decomposition import PCA
matplotlib.style.use('ggplot') # Look Pretty
def plotDecisionBoundary(model, X, y):
fig = plt.figure()
ax = fig.add_subplot(111)
padding = 0.6
resolution = 0.0025
colors = ['royalblue','forestgreen','ghostwhite']
# Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
# Create a 2D Grid Matrix. The values stored in the matrix are the predictions of the class at said location
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution), np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
plt.contourf(xx, yy, Z, cmap=plt.cm.terrain)
plt.axis('tight')
# Plot our original points as well...
for label in range(len(np.unique(y))):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], label=str(label), alpha=0.8)
p = model.get_params()
plt.title('K = ' + str(p['n_neighbors']))
#
# TODO: Load up the dataset into a variable called X. Check the .head and compare it to the file you loaded in a
# text editor. Make sure you're loading your data properly--don't fail on the 1st step!
#
# .. your code here ..
X = pd.read_csv('Datasets/wheat.data')
#print X.head()
#
# TODO: Copy the 'wheat_type' series slice out of X, and into a series called 'y'. Then drop the original 'wheat_type'
# column from the X
#
# .. your code here ..
y = X.wheat_type
# Also drop the 'id' column, since that is not a relevant feature
X.drop(labels = ['id', 'wheat_type'], axis = 1, inplace = True)
# TODO: Do a quick, "nominal" conversion of 'y' by encoding it to a SINGLE variable (e.g. 0, 1, 2). This is covered
# in the Feature Representation reading as "Method 1)". In actuality the classification isn't nominal, but this is
# the fastest way to encode your 3 possible wheat types into a label that you can plot distinctly. More notes about
# this on the bottom of the assignment.
#
# .. your code here ..
y = y.astype('category').cat.codes
#
# TODO: Basic nan munging. Fill each row's nans with the mean of the feature
#
# .. your code here ..
#print X.isnull().sum() # Has a few missing values
X.compactness.fillna(X.compactness.mean(), inplace = True)
X.width.fillna(X.width.mean(), inplace = True)
X.groove.fillna(X.groove.mean(), inplace = True)
print (X.isnull().sum()) # No more missing values!
print (y.isnull().sum()) # Has no missing values
#
# TODO: Use SKLearn's regular "normalize" preprocessor to normalize X's feature data
#
# .. your code here ..
T = preprocessing.normalize(X)
#
# TODO: Project both your X_train and X_test features into PCA space. This has to be done because the only way to visualize the
# decision boundary in 2D, would be if your KNN algo ran in 2D as well
#
# .. your code here ..
pca = PCA(n_components = 2)
pca_X = pca.fit_transform(T)
#
# TODO: Split out your training and testing data.
# INFO: Use 0.33 test size, and use random_state=1. This is important so that your answers are verifiable. In the real world,
# you wouldn't specify a random_state.
#
# .. your code here ..
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(pca_X, y, test_size = 0.33, random_state = 14)
#
# TODO: Run KNeighborsClassifier. Start out with K=7 neighbors. NOTE: Be sure train your classifier against the PCA transformed
# feature data above! You do not, however, need to transform your labels.
#
# .. your code here ..
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 9)
knn.fit(X_train, y_train)
# HINT: Ensure your KNeighbors classifier object from earlier is called 'knn'.
# This method plots your TEST points against the boundary learned from your training data:
#plotDecisionBoundary(knn, X_test, y_test)
#
# TODO: Display the accuracy score.
#
# NOTE: You don't have to run .predict before calling .score, since .score will take care of running your predictions for the
# params you provided.
#
# .. your code here ..
print (knn.score(X_test, y_test))
#
# BONUS: Instead of the ordinal conversion, try and get this assignment working with a proper Pandas get_dummies for feature encoding.
# HINT: You might have to update some of the plotDecisionBoundary code.
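# A minimal sketch of the bonus idea (illustrative only, not part of the graded
# solution): apply pd.get_dummies to the original 'wheat_type' series instead
# of the ordinal .cat.codes conversion used above. 'wheat_type_series' is a
# hypothetical name for that series, saved before it was converted.
#
#   dummies = pd.get_dummies(wheat_type_series)   # one 0/1 column per type
#   labels = dummies.values.argmax(axis=1)        # collapse back to one label
#   # plotDecisionBoundary would then need 'labels' (or equivalent) as y.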
plt.show()
| mit |
wateraccounting/wa | Collect/ETmonitor/DataAccess.py | 1 | 7715 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels and Gonzalo Espinoza
UNESCO-IHE 2016
Contact: [email protected]
[email protected]
Repository: https://github.com/wateraccounting/wa
Module: Collect/ETmonitor
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team due data restriction of the ETmonitor developers.
Description:
This script collects ETmonitor data from the UNESCO-IHE FTP server. The data has a
monthly temporal resolution and a spatial resolution of 0.01 degree. The
resulting tiff files are in the WGS84 projection.
The data is available between 2008-01-01 till 2012-12-31.
Example:
from wa.Collect import ETmonitor
ETmonitor.ET_monthly(Dir='C:/Temp/', Startdate='2003-02-24', Enddate='2003-03-09',
latlim=[50,54], lonlim=[3,7])
"""
# General modules
import numpy as np
import os
import pandas as pd
from ftplib import FTP
# Water Accounting Modules
import wa.WebAccounts as WebAccounts
import wa.General.raster_conversions as RC
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Type, Waitbar):
"""
This scripts downloads ETmonitor ET data from the UNESCO-IHE ftp server.
The output files display the total ET in mm for a period of one month.
The name of the file corresponds to the first day of the month.
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax] (values must be between -90 and 90)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    Type -- 'act', 'pot', 'ei', 'es', 'ew' or 'tr' (which ET component to download)
    Waitbar -- 1 to print a progress bar in the console, 0 to suppress it
"""
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -90 or latlim[1] > 90:
print 'Latitude above 90N or below 90S is not possible. Value set to maximum'
        latlim[0] = np.maximum(latlim[0], -90)
        latlim[1] = np.minimum(latlim[1], 90)
if lonlim[0] < -180 or lonlim[1] > 180:
print 'Longitude must be between 180E and 180W. Now value is set to maximum'
        lonlim[0] = np.maximum(lonlim[0], -180)
        lonlim[1] = np.minimum(lonlim[1], 180)
# Check Startdate and Enddate
if not Startdate:
Startdate = pd.Timestamp('2008-01-01')
if not Enddate:
Enddate = pd.Timestamp('2012-12-31')
# Creates dates library
Dates = pd.date_range(Startdate, Enddate, freq = "MS")
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# Define directory and create it if not exists
if Type == "act":
output_folder = os.path.join(Dir, 'Evaporation', 'ETmonitor', 'Monthly')
if Type == "pot":
output_folder = os.path.join(Dir, 'ETpot', 'ETmonitor', 'Monthly')
if Type == "ei":
output_folder = os.path.join(Dir, 'Ei', 'ETmonitor', 'Monthly')
if Type == "es":
output_folder = os.path.join(Dir, 'Es', 'ETmonitor', 'Monthly')
if Type == "ew":
output_folder = os.path.join(Dir, 'Ew', 'ETmonitor', 'Monthly')
if Type == "tr":
output_folder = os.path.join(Dir, 'Transpiration', 'ETmonitor', 'Monthly')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for Date in Dates:
# Define year and month
year = Date.year
month = Date.month
# Define end filename and Date as printed in filename
if Type == "act":
Filename_in = "ET_ETmonitor_mm-month_%d_%02d_01.tif" %(year, month)
Filename_out= os.path.join(output_folder,'ETa_ETmonitor_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
if Type == "pot":
Filename_in = "ETpot_ETmonitor_mm-month_%d_%02d_01.tif" %(year, month)
Filename_out= os.path.join(output_folder,'ETpot_ETmonitor_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
if Type == "ei":
Filename_in = "Ei_ETmonitor_mm-month_%d_%02d_01.tif" %(year, month)
Filename_out= os.path.join(output_folder,'Ei_ETmonitor_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
if Type == "es":
Filename_in = "Es_ETmonitor_mm-month_%d_%02d_01.tif" %(year, month)
Filename_out= os.path.join(output_folder,'Es_ETmonitor_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
if Type == "ew":
Filename_in = "Ew_ETmonitor_mm-month_%d_%02d_01.tif" %(year, month)
Filename_out= os.path.join(output_folder,'Ew_ETmonitor_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
if Type == "tr":
Filename_in = "Tr_ETmonitor_mm-month_%d_%02d_01.tif" %(year, month)
Filename_out= os.path.join(output_folder,'Tr_ETmonitor_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
# Temporary filename for the downloaded global file
local_filename = os.path.join(output_folder, Filename_in)
# Download the data from FTP server if the file not exists
if not os.path.exists(Filename_out):
try:
Download_ETmonitor_from_WA_FTP(local_filename, Filename_in, Type)
# Reproject dataset
epsg_to ='4326'
name_reprojected_ETmonitor = RC.reproject_MODIS(local_filename, epsg_to)
# Clip dataset
RC.Clip_Dataset_GDAL(name_reprojected_ETmonitor, Filename_out, latlim, lonlim)
os.remove(name_reprojected_ETmonitor)
os.remove(local_filename)
except:
print "Was not able to download file with date %s" %Date
# Adjust waitbar
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
return
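# Illustrative usage sketch (not part of the original module): calling
# DownloadData directly to fetch monthly actual ET for a small window. The
# output directory is hypothetical and a valid FTP account is required.
#
#   DownloadData(Dir='C:/Temp/', Startdate='2009-01-01', Enddate='2009-03-31',
#                latlim=[50, 54], lonlim=[3, 7], Type='act', Waitbar=1)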
def Download_ETmonitor_from_WA_FTP(local_filename, Filename_in, Type):
"""
This function retrieves ETmonitor data for a given date from the
ftp.wateraccounting.unesco-ihe.org server.
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team due data restriction of the ETmonitor developers.
Keyword arguments:
local_filename -- name of the temporary file which contains global ETmonitor data
Filename_in -- name of the end file with the weekly ETmonitor data
Type = Type of data ("act" or "pot")
"""
# Collect account and FTP information
username, password = WebAccounts.Accounts(Type = 'FTP_WA')
ftpserver = "ftp.wateraccounting.unesco-ihe.org"
# Download data from FTP
ftp=FTP(ftpserver)
ftp.login(username,password)
if Type == "pot":
directory="/WaterAccounting/Data_Satellite/Evaporation/ETmonitor/Potential_Evapotranspiration/"
else:
directory="/WaterAccounting/Data_Satellite/Evaporation/ETmonitor/Global/"
ftp.cwd(directory)
lf = open(local_filename, "wb")
ftp.retrbinary("RETR " + Filename_in, lf.write)
lf.close()
return
| apache-2.0 |
samzhang111/scikit-learn | sklearn/feature_selection/rfe.py | 3 | 15630 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 right informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the 5 informative features
(not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
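# Added illustration (not part of the original module): a worked check of the note in
# the RFECV docstring that ``grid_scores_`` holds ceil((n_features - 1) / step) + 1
# scores, one per feature-subset size visited during elimination.
def _example_grid_scores_length(n_features=10, step=1):
    """Expected length of ``grid_scores_`` for the given problem size (illustrative)."""
    return int(np.ceil((n_features - 1) / float(step))) + 1
# e.g. _example_grid_scores_length(10, 1) == 10 and _example_grid_scores_length(10, 3) == 4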
| bsd-3-clause |
stylianos-kampakis/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling integers without replacement.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
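# Added usage sketch (illustrative, not part of the original benchmark): any callable
# with the signature ``sampling(n_population, n_samples)`` can be timed with
# bench_sample, e.g. a permutation-based sampler on a small population.
def _example_bench_run():
    """Time one hypothetical sampler and return the elapsed seconds (illustrative)."""
    sampler = lambda n_population, n_samples: \
        np.random.permutation(n_population)[:n_samples]
    return bench_sample(sampler, n_population=1000, n_samples=100)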
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that each sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
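# Added usage note (illustrative): the benchmark is normally launched from the command
# line with the optparse flags defined above, for example
#   python bench_sample_without_replacement.py --n-population 100000 --n-times 5 \
#       --algorithm custom-auto,numpy-permutation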
| bsd-3-clause |
alekz112/statsmodels | statsmodels/datasets/committee/data.py | 25 | 2583 | """First 100 days of the US House of Representatives 1995"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of bill assignments in the 104th House in 1995"""
DESCRLONG = """The example in Gill, seeks to explain the number of bill
assignments in the first 100 days of the US' 104th House of Representatives.
The response variable is the number of bill assignments in the first 100 days
over 20 Committees. The explanatory variables in the example are the number of
assignments in the first 100 days of the 103rd House, the number of members on
the committee, the number of subcommittees, the log of the number of staff
assigned to the committee, a dummy variable indicating whether
the committee is a high prestige committee, and an interaction term between
the number of subcommittees and the log of the staff size.
The data returned by load are not cleaned to represent the above example.
"""
NOTE = """::
Number of Observations - 20
Number of Variables - 6
Variable name definitions::
BILLS104 - Number of bill assignments in the first 100 days of the
104th House of Representatives.
SIZE - Number of members on the committee.
SUBS - Number of subcommittees.
STAFF - Number of staff members assigned to the committee.
PRESTIGE - PRESTIGE == 1 is a high prestige committee.
BILLS103 - Number of bill assignments in the first 100 days of the
103rd House of Representatives.
Committee names are included as a variable in the data file though not
returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""Load the committee data and returns a data class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/committee.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
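# Added usage sketch (illustrative, not part of the original module). It assumes the
# statsmodels GLM API is available and mirrors the Poisson-regression setting that
# DESCRLONG describes, with BILLS104 as the response variable.
def _example_poisson_fit():
    """Fit an illustrative Poisson GLM of BILLS104 on the committee covariates."""
    import statsmodels.api as sm
    dataset = load()
    exog = sm.add_constant(dataset.exog, prepend=True)
    return sm.GLM(dataset.endog, exog, family=sm.families.Poisson()).fit()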
| bsd-3-clause |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch10/neighbors.py | 21 | 1787 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
import numpy as np
import mahotas as mh
from glob import glob
from features import texture, color_histogram
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from scipy.spatial import distance
basedir = '../SimpleImageDataset/'
haralicks = []
chists = []
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))
# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the random
# ordering that the filesystem uses
images.sort()
for fname in images:
imc = mh.imread(fname)
imc = imc[200:-200,200:-200]
haralicks.append(texture(mh.colors.rgb2grey(imc)))
chists.append(color_histogram(imc))
haralicks = np.array(haralicks)
chists = np.array(chists)
features = np.hstack([chists, haralicks])
print('Computing neighbors...')
sc = StandardScaler()
features = sc.fit_transform(features)
dists = distance.squareform(distance.pdist(features))
print('Plotting...')
fig, axes = plt.subplots(2, 9, figsize=(16,8))
# Remove ticks from all subplots
for ax in axes.flat:
ax.set_xticks([])
ax.set_yticks([])
for ci,i in enumerate(range(0,90,10)):
left = images[i]
dists_left = dists[i]
right = dists_left.argsort()
# right[0] is the same as left[i], so pick the next closest element
right = right[1]
right = images[right]
left = mh.imread(left)
right = mh.imread(right)
axes[0, ci].imshow(left)
axes[1, ci].imshow(right)
fig.tight_layout()
fig.savefig('figure_neighbors.png', dpi=300)
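# Added sketch (illustrative, not part of the original script) of the distance-matrix
# lookup used above: squareform(pdist(F)) gives an (n, n) matrix D whose row i holds
# the distances from sample i to every sample, so D[i].argsort()[1] is the nearest
# *other* sample (position 0 of the argsort is i itself, at distance zero).
_F = np.array([[0.0, 0.0], [1.0, 0.0], [5.0, 0.0]])
_D = distance.squareform(distance.pdist(_F))
assert _D[0].argsort()[1] == 1  # sample 1 is closer to sample 0 than sample 2 is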
| mit |
KellyBlack/Precalculus | exponentials/img/avgRateChange.py | 1 | 1434 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
from matplotlib.patches import FancyArrowPatch
from matplotlib.patches import Ellipse
import math
import sys
from BasicPlot import BasicPlot
plotter = BasicPlot()
plt.figure(num=1,frameon=False)
###############################
plotter.clearPlot()
#plt.xkcd(scale=.6) #randomness=1,length=1,scale=0)
#plotter.subplot(1,2,1)
plotter.setupGrid(0.3,'--',
-4.0,1.0,4.1,
-4.0,1.0,4.1)
plotter.setAxesBounds(-4.1,4.1,-4.1,4.1)
plotter.axesDecorations('Graph of f','x','f')
#plotter.addInterpolant([[-4,-2],[-3,-1],[-2,0],[0,1],[2,2],[4,4]],
# np.arange(-4,4.01,0.1),'k-',2.5)
x = np.arange(-4.0,4.1,0.1)
y = np.exp(x/3)
plotter.addFunction(x,y,'k')
slope = (np.exp(1.0)-1.0)/3.0
plotter.filledCircle(0.0,1.0,0.1)
plotter.filledCircle(3.0,np.exp(1.0),0.1)
plotter.addFunction([-4.0,4.0],slope*np.arange(-4.0,4.1,8.0)+1.0,'k--')
axes = plotter.getAxes()
axes.spines['right'].set_color('none')
axes.spines['top'].set_color('none')
axes.xaxis.set_ticks_position('bottom')
axes.spines['bottom'].set_position(('data',0))
axes.yaxis.set_ticks_position('left')
axes.spines['left'].set_position(('data',0))
axes.xaxis.set_label_coords(0.95, 0.45)
axes.yaxis.set_label_coords(0.45, 0.95)
#plt.show()
plt.savefig('avgRateChange.pgf',format='pgf')
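# Added worked check (illustrative, not part of the original script): the dashed line
# drawn above is the secant through (0, f(0)) and (3, f(3)) for f(x) = exp(x/3), so its
# slope is the average rate of change (f(3) - f(0)) / (3 - 0) = (e - 1) / 3 ~ 0.573,
# which is exactly the `slope` variable computed earlier.
assert abs(slope - (math.e - 1.0) / 3.0) < 1e-12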
| gpl-3.0 |
shaileshr/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
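# Added worked example (illustrative): for the (140, 110) coin data used in main()
# below, the observed test statistic is abs(140 - 110) = 30, and PValue() estimates how
# often 250 tosses of a fair coin produce a head/tail difference at least that large.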
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
values = [1,2,3,4,5,6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
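# Added usage sketch (illustrative, not part of the original module): the permutation
# test compares the observed difference in means to differences obtained by repeatedly
# shuffling the pooled data; the p-value is the fraction of shuffles whose statistic
# matches or exceeds the observed one.
def _example_diff_means_permute():
    """Run DiffMeansPermute on two tiny samples and return the estimated p-value."""
    group1 = np.array([36.0, 38.0, 39.0, 40.0])
    group2 = np.array([38.0, 39.0, 40.0, 41.0])
    ht = DiffMeansPermute((group1, group2))
    return ht.PValue(iters=100)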
| gpl-3.0 |
pprett/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 15 | 14956 | """Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf), ('lr', clf)],
weights=[1, 2])
msg = "Names provided are not unique: ['lr', 'lr']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr__', clf)])
msg = "Estimator names must not contain __: got ['lr__']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('estimators', clf)])
msg = "Estimator names conflict with constructor arguments: ['estimators']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
def test_parallel_predict():
"""Check parallel backend of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=1).fit(X, y)
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=2).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_sample_weight():
"""Tests sample_weight parameter of VotingClassifier"""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = SVC(probability=True, random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y, sample_weight=np.ones((len(y),)))
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
eclf3.fit(X, y, sample_weight)
clf1.fit(X, y, sample_weight)
assert_array_equal(eclf3.predict(X), clf1.predict(X))
assert_array_equal(eclf3.predict_proba(X), clf1.predict_proba(X))
clf4 = KNeighborsClassifier()
eclf3 = VotingClassifier(estimators=[
('lr', clf1), ('svc', clf3), ('knn', clf4)],
voting='soft')
msg = ('Underlying estimator \'knn\' does not support sample weights.')
assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight)
def test_set_params():
"""set_params should be able to set estimators"""
clf1 = LogisticRegression(random_state=123, C=1.0)
clf2 = RandomForestClassifier(random_state=123, max_depth=None)
clf3 = GaussianNB()
eclf1 = VotingClassifier([('lr', clf1), ('rf', clf2)], voting='soft',
weights=[1, 2])
eclf1.fit(X, y)
eclf2 = VotingClassifier([('lr', clf1), ('nb', clf3)], voting='soft',
weights=[1, 2])
eclf2.set_params(nb=clf2).fit(X, y)
assert_false(hasattr(eclf2, 'nb'))
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
assert_equal(eclf2.estimators[0][1].get_params(), clf1.get_params())
assert_equal(eclf2.estimators[1][1].get_params(), clf2.get_params())
eclf1.set_params(lr__C=10.0)
eclf2.set_params(nb__max_depth=5)
assert_true(eclf1.estimators[0][1].get_params()['C'] == 10.0)
assert_true(eclf2.estimators[1][1].get_params()['max_depth'] == 5)
assert_equal(eclf1.get_params()["lr__C"],
eclf1.get_params()["lr"].get_params()['C'])
def test_set_estimator_none():
"""VotingClassifier set_params should be able to set estimators as None"""
# Test predict
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf1 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
('nb', clf3)],
voting='hard', weights=[1, 0, 0.5]).fit(X, y)
eclf2 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
('nb', clf3)],
voting='hard', weights=[1, 1, 0.5])
eclf2.set_params(rf=None).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_true(dict(eclf2.estimators)["rf"] is None)
assert_true(len(eclf2.estimators_) == 2)
assert_true(all([not isinstance(est, RandomForestClassifier) for est in
eclf2.estimators_]))
assert_true(eclf2.get_params()["rf"] is None)
eclf1.set_params(voting='soft').fit(X, y)
eclf2.set_params(voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
msg = ('All estimators are None. At least one is required'
' to be a classifier!')
assert_raise_message(
ValueError, msg, eclf2.set_params(lr=None, rf=None, nb=None).fit, X, y)
# Test soft voting transform
X1 = np.array([[1], [2]])
y1 = np.array([1, 2])
eclf1 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
voting='soft', weights=[0, 0.5]).fit(X1, y1)
eclf2 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
voting='soft', weights=[1, 0.5])
eclf2.set_params(rf=None).fit(X1, y1)
assert_array_equal(eclf1.transform(X1), np.array([[[0.7, 0.3], [0.3, 0.7]],
[[1., 0.], [0., 1.]]]))
assert_array_equal(eclf2.transform(X1), np.array([[[1., 0.], [0., 1.]]]))
eclf1.set_params(voting='hard')
eclf2.set_params(voting='hard')
assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]]))
assert_array_equal(eclf2.transform(X1), np.array([[0], [1]]))
def test_estimator_weights_format():
# Test estimator weights inputs as list and array
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2)],
weights=[1, 2],
voting='soft')
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2)],
weights=np.array((1, 2)),
voting='soft')
eclf1.fit(X, y)
eclf2.fit(X, y)
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
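# Added illustration (not part of the original test suite): the soft-voting arithmetic
# exercised in test_predict_proba_on_toy_problem is a weighted average of the
# per-classifier probability arrays; with weights [2, 1, 1] the ensemble probability is
# (2*p_lr + 1*p_rf + 1*p_gnb) / 4, which is what the t00..t31 terms spell out.
def _soft_vote(probas, weights):
    """Weighted average of stacked per-classifier probabilities (illustrative helper)."""
    return np.average(np.asarray(probas, dtype=float), axis=0, weights=weights)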
| bsd-3-clause |