"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes constant values that are used
throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
'mw days/therm', 'mw days (th', 'ermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized. "Solar" and
# "Solar Project" were not classified because they do not indicate whether
# they are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72"
# and "steam and gas") were classified based on additional research of the
# plants on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_kind strings to create
# a dictionary of plant kind lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_fuel
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Institute with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Making a dictionary of lists from the lists of construction_type strings to
# create a dictionary of construction type lists.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types are left as 'XXX', which should cause an obvious error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
"""tuple: A tuple containing the FERC Form 714 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
"""tuple: A tuple containing the EIA 861 tables that can be successfully
integrated into PUDL.
"""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA Form 923 used within the init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region in which a US state is located
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used to indicate where a plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilites for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes to for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems",)
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables and associated
information for reading those tables into PUDL (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary mapping aggregated region names (keys) to lists of the
EPA IPM regions they comprise (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""
tuple: A tuple of the names of the glue tables that link FERC Form 1 and EIA
plants and utilities to each other within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years of
data from each source that can currently be ingested into PUDL (values).
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing table names (keys) and tuples of integer-type
column names (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys)
and their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'balancing_authority_code': pd.StringDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'contact_firstname': pd.StringDtype(),
'contact_firstname2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'current_planned_operating_date': 'datetime64[ns]',
'deliver_power_transgrid': pd.BooleanDtype(),
'duct_burners': pd.BooleanDtype(),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.StringDtype(),
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'longitude': float,
'mercury_content_ppm': float,
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
        'natural_gas_storage': pd.BooleanDtype(),
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from copy import deepcopy
import re
import sys
sys.path.insert(0, './code')
import dataloader # noqa: E402
class AmtTrainHandler():
def __init__(self):
self.ylabels = [
"2", "6", "10", '12', "13", "15", "18", "19", "21", "22", "25",
"26", "36", "37", "39", "48"
]
self.ylabel_cols_24 = [
"shop_{}_amt_24".format(ylabel) for ylabel in self.ylabels
]
self.ylabel_cols_23 = [
"shop_{}_amt_23".format(ylabel) for ylabel in self.ylabels
]
self.shop_cate = [str(i + 1) for i in range(48)] + ['other']
self.cols_24 = [
"shop_{}_amt_24".format(a_cate) for a_cate in self.shop_cate
]
self.cols_23 = [
"shop_{}_amt_23".format(a_cate) for a_cate in self.shop_cate
]
self.cols_1 = [
"shop_{}_amt_1".format(a_cate) for a_cate in self.shop_cate
]
self.cols_2 = [
"shop_{}_amt_2".format(a_cate) for a_cate in self.shop_cate
]
def update_data(self, data):
print("Start Update Data")
self.data = data.copy().reset_index(drop=True)
self.get_train_test()
print("Finished updating data")
del self.data
def get_new_cols(self, df, dt):
reg = r"(.+amt_)\d+"
n_cols = ['chid']
for col in df.drop('chid', axis=1).columns:
n_idx = int(col.split('_')[-1]) - dt
n_col = re.findall(reg, col)[0]
n_col = n_col + str(n_idx)
n_cols.append(n_col)
return n_cols
def get_train_test(self):
# train set 1~23 pred 24
# test set 2~24
# label for train set
self.y_24 = self.data[self.ylabel_cols_24].copy()
self.y_23 = self.data[self.ylabel_cols_23].copy()
self.y_24.columns = self.ylabels
self.y_23.columns = self.ylabels
self.y = pd.concat([self.y_23, self.y_24])
self.y_total_24 = self.data[self.cols_24].copy()
self.y_total_23 = self.data[self.cols_23].copy()
# X
self.X_23 = self.data.drop(self.cols_23 + self.cols_24, axis=1)
self.X_24 = self.data.drop(self.cols_1 + self.cols_24, axis=1).copy()
n_cols_23 = self.get_new_cols(self.X_23, 23)
n_cols_24 = self.get_new_cols(self.X_24, 24)
self.X_23.columns = n_cols_23
self.X_24.columns = n_cols_24
self.X = pd.concat([self.X_23, self.X_24])
# test set
self.test = self.data.drop(self.cols_1 + self.cols_2, axis=1).copy()
n_cols_25 = self.get_new_cols(self.test, 25)
self.test.columns = n_cols_25
# self.test = self.test.drop('chid', axis=1)
def fit(self):
X = self.X.drop('chid', axis=1)
y = self.y
kf = KFold(n_splits=3, shuffle=True, random_state=16)
kf.get_n_splits(X)
return X, y, kf
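# Illustrative usage sketch (not part of the original pipeline): how
# AmtTrainHandler might be driven end to end. ``wide_df`` is assumed to be a wide
# per-customer table with a chid column and shop_*_amt_1 ... shop_*_amt_24
# columns, which is what update_data() expects; the function name is hypothetical.
def _example_amt_cv_split(wide_df):
    """Build the train matrices and iterate over the 3-fold CV split."""
    handler = AmtTrainHandler()
    handler.update_data(wide_df)
    X, y, kf = handler.fit()
    fold_shapes = []
    for train_idx, valid_idx in kf.split(X):
        X_tr, X_va = X.iloc[train_idx], X.iloc[valid_idx]
        y_tr, y_va = y.iloc[train_idx], y.iloc[valid_idx]
        # a multi-output regressor would be trained on (X_tr, y_tr) here
        fold_shapes.append((X_tr.shape, X_va.shape, y_tr.shape, y_va.shape))
    return fold_shapes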
class AmtProfileHandler():
def __init__(self):
self.cate_feats = [
'masts',
'educd',
'trdtp',
'naty',
'poscd',
'cuorg',
'primary_card',
'age',
'gender_code',
'card_1',
'card_1_12',
'card_1_16',
'card_2',
'card_2_12',
'card_2_16',
]
self.str_cate = [
'card_1', 'card_2', 'card_1_12', 'card_1_16', 'card_2_12',
'card_2_16'
]
self.label_encoder = {}
def label_encoding(self):
for cate_feat in self.cate_feats:
le = LabelEncoder()
if cate_feat not in self.str_cate:
self.data[cate_feat] = self.data[cate_feat].apply(
lambda x: int(x))
else:
self.data[cate_feat] = self.data[cate_feat].apply(
lambda x: str(x))
le.fit(self.data[cate_feat])
self.label_encoder.update({cate_feat: deepcopy(le)})
self.data[cate_feat] = le.transform(self.data[cate_feat])
def update_data(self, data):
print("Start Update Data")
self.data = data.copy()
self.data = self.data.fillna(-1)
print("Finished updating data")
print("start label encoding")
self.label_encoding()
        print('Finished label encoding')
del self.data
def transform(self, df):
df = df.fillna(-1)
for cate_feat in self.cate_feats:
if cate_feat not in self.str_cate:
df[cate_feat] = df[cate_feat].apply(lambda x: int(x))
else:
df[cate_feat] = df[cate_feat].apply(lambda x: str(x))
df[cate_feat] = self.label_encoder[cate_feat].transform(
df[cate_feat])
for feat in self.cate_feats:
df[feat] = df[feat].fillna(-1)
return df
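# Illustrative usage sketch (not part of the original pipeline): the profile
# handler fits its label encoders on one frame and can then transform another
# frame with the same categorical columns. Variable names are hypothetical.
def _example_profile_encoding(profile_df, other_df):
    """Fit label encoders on profile_df and apply them to other_df."""
    handler = AmtProfileHandler()
    handler.update_data(profile_df)   # fits one LabelEncoder per categorical column
    return handler.transform(other_df)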
class CntTrainHandler():
def __init__(self):
self.ylabels = [
"2", "6", "10", '12', "13", "15", "18", "19", "21", "22", "25",
"26", "36", "37", "39", "48"
]
self.ylabel_cols_24 = [
"shop_{}_cnt_24".format(ylabel) for ylabel in self.ylabels
]
self.ylabel_cols_23 = [
"shop_{}_cnt_23".format(ylabel) for ylabel in self.ylabels
]
self.shop_cate = [str(i + 1) for i in range(48)] + ['other']
self.cols_24 = [
"shop_{}_cnt_24".format(a_cate) for a_cate in self.shop_cate
]
self.cols_23 = [
"shop_{}_cnt_23".format(a_cate) for a_cate in self.shop_cate
]
self.cols_1 = [
"shop_{}_cnt_1".format(a_cate) for a_cate in self.shop_cate
]
self.cols_2 = [
"shop_{}_cnt_2".format(a_cate) for a_cate in self.shop_cate
]
def update_data(self, data):
print("Start Update Data")
self.data = data.copy()
self.get_train_test()
print("Finished updating data")
del self.data
def get_new_cols(self, df, dt):
n_cols = ['chid']
reg = r"(.+cnt_)\d+"
for col in df.drop('chid', axis=1).columns:
n_idx = int(col.split('_')[-1]) - dt
n_col = re.findall(reg, col)[0]
n_col = n_col + str(n_idx)
n_cols.append(n_col)
return n_cols
def get_label_value(self):
self.y_24 = self.data[self.ylabel_cols_24].copy()
self.y_23 = self.data[self.ylabel_cols_23].copy()
self.y_24.columns = self.ylabels
self.y_23.columns = self.ylabels
self.y = pd.concat([self.y_23, self.y_24])
# for col in self.y.columns:
# self.y[col] = self.y[col].apply(lambda x: 1 if x > 0 else 0)
def get_train_test(self):
# ylabel
print("Start Processing y label")
self.get_label_value()
# train set
print("Start Processing train set")
self.train_23 = self.data.drop(self.cols_23 + self.cols_24,
axis=1).copy()
self.train_24 = self.data.drop(self.cols_1 + self.cols_24,
axis=1).copy()
n_cols_23 = self.get_new_cols(self.train_23, 23)
n_cols_24 = self.get_new_cols(self.train_24, 24)
self.train_23.columns = n_cols_23
self.train_24.columns = n_cols_24
self.train = pd.concat([self.train_23, self.train_24])
# test set
print("Start Processing test set")
self.test = self.data.drop(self.cols_1 + self.cols_2, axis=1).copy()
n_cols_25 = self.get_new_cols(self.test, 25)
self.test.columns = n_cols_25
class RankTopHandler():
def __init__(self):
self.ylabels = [
"2", "6", "10", '12', "13", "15", "18", "19", "21", "22", "25",
"26", "36", "37", "39", "48"
]
self.cols_24 = ["top{}_24".format(rank) for rank in range(1, 4)] + [
"imp_top{}_24".format(rank) for rank in range(1, 4)
] + ["how_many_cate_24", "how_many_cate_imp_24"]
self.cols_23 = ["top{}_23".format(rank) for rank in range(1, 4)] + [
"imp_top{}_23".format(rank) for rank in range(1, 4)
] + ["how_many_cate_23", "how_many_cate_imp_23"]
self.cols_1 = ["top{}_1".format(rank) for rank in range(1, 4)] + [
"imp_top{}_1".format(rank) for rank in range(1, 4)
] + ["how_many_cate_1", "how_many_cate_imp_1"]
self.cols_2 = ["top{}_2".format(rank) for rank in range(1, 4)] + [
"imp_top{}_2".format(rank) for rank in range(1, 4)
] + ["how_many_cate_2", "how_many_cate_imp_2"]
def update_data(self, data):
print("Start Update Data")
self.data = data.copy()
self.get_train_test()
print("Finished updating data")
del self.data
def get_new_cols(self, df, dt):
n_cols = ['chid']
reg = r"(.+_)\d+"
for col in df.drop('chid', axis=1).columns:
n_idx = int(col.split('_')[-1]) - dt
n_col = re.findall(reg, col)[0]
n_col = n_col + str(n_idx)
n_cols.append(n_col)
return n_cols
def get_train_test(self):
# train set
print("Start Processing train set")
self.train_23 = self.data.drop(self.cols_23 + self.cols_24,
axis=1).copy()
self.train_24 = self.data.drop(self.cols_1 + self.cols_24,
axis=1).copy()
n_cols_23 = self.get_new_cols(self.train_23, 23)
n_cols_24 = self.get_new_cols(self.train_24, 24)
self.train_23.columns = n_cols_23
self.train_24.columns = n_cols_24
self.train = pd.concat([self.train_23, self.train_24])
# test set
print("Start Processing test set")
self.test = self.data.drop(self.cols_1 + self.cols_2, axis=1).copy()
n_cols_25 = self.get_new_cols(self.test, 25)
self.test.columns = n_cols_25
class RegionTrainHandler():
def __init__(self):
self.cols = [
"domestic_offline_cnt",
"domestic_online_cnt",
"overseas_offline_cnt",
"overseas_online_cnt",
"domestic_offline_amt",
"domestic_online_amt",
"overseas_offline_amt",
"overseas_online_amt",
]
self.cols_24 = [col + "_{}".format(24) for col in self.cols]
self.cols_23 = [col + "_{}".format(23) for col in self.cols]
self.cols_1 = [col + "_{}".format(1) for col in self.cols]
self.cols_2 = [col + "_{}".format(2) for col in self.cols]
def update_data(self, data):
print("Start Update Data")
self.data = data.copy()
self.get_train_test()
print("Finished Update Data")
del self.data
def get_new_cols(self, df, dt):
n_cols = ['chid']
reg = r"(.+_)\d+"
for col in df.drop('chid', axis=1).columns:
n_idx = int(col.split('_')[-1]) - dt
n_col = re.findall(reg, col)[0]
n_col = n_col + str(n_idx)
n_cols.append(n_col)
return n_cols
def get_train_test(self):
# train set
print("Start Processing train set")
self.train_23 = self.data.drop(self.cols_23 + self.cols_24,
axis=1).copy()
self.train_24 = self.data.drop(self.cols_1 + self.cols_24,
axis=1).copy()
n_cols_23 = self.get_new_cols(self.train_23, 23)
n_cols_24 = self.get_new_cols(self.train_24, 24)
self.train_23.columns = n_cols_23
self.train_24.columns = n_cols_24
self.train = pd.concat([self.train_23, self.train_24])
# test set
print("Start Processing test set")
self.test = self.data.drop(self.cols_1 + self.cols_2, axis=1).copy()
n_cols_25 = self.get_new_cols(self.test, 25)
self.test.columns = n_cols_25
class StackTrainHandler():
def __init__(self):
self.required_cate = [
"2", "6", "10", '12', "13", "15", "18", "19", "21", "22", "25",
"26", "36", "37", "39", "48"
]
self.loader = dataloader.DataLoader()
self.get_stack_config()
print("Start Loading idx info")
self.get_idx_results()
print("Start Loading ylabels info")
self.get_ylabels()
def get_feats(self, results):
r_list = [{}, {}, {}]
for ylabel in self.required_cate:
y_result = results[ylabel]
for i, df in enumerate(y_result):
r_list[i].update({"{}".format(ylabel): df[0].to_list()})
r_list = [pd.DataFrame(r_dict) for r_dict in r_list]
return r_list
def get_idx_results(self):
print("Fetch idx_result from {}".format(self.config['idx_results']))
self.idx_results = self.loader.load_result(self.config['idx_results'])
def get_ylabels(self):
ylabel_path = self.config['ylabels']
print("Fetch ylabel infos from {}".format(ylabel_path))
self.ylabels, self.test_labels, self.train_labels = self.loader.load_result( # noqa: E501
ylabel_path)
tmp_test = pd.DataFrame({
'chid': list(self.test_labels['chid'].unique()),
'dt': [25] * 500000
})
tmp_test['query_id'] = tmp_test['chid'].apply(
lambda x: str(x)) + tmp_test['dt'].apply(lambda x: str(x))
self.tmp_test = tmp_test
def get_base_model_feats(self, model_path, model_name):
test_results, train_results, _ = self.loader.load_result(model_path)
test_raw_feats = self.get_feats(test_results)
train_raw_feats = self.get_feats(train_results)
test_feats = []
train_feats = []
for test_feat in test_raw_feats:
feat = pd.concat([self.tmp_test, test_feat], axis=1)
feat = feat.melt(id_vars=["chid", 'dt', 'query_id'],
var_name="shop_tag",
value_name="pred_{}".format(model_name))
test_feats.append(feat)
self.config['test_model_feats'].update({model_name: test_feats})
for i, train_feat in enumerate(train_raw_feats):
tmp_train = self.train_labels[i].reset_index(drop=True)
tmp_train['query_id'] = tmp_train['chid'].apply(
lambda x: str(x)) + tmp_train['dt'].apply(lambda x: str(x))
            feat = pd.concat([tmp_train, train_feat], axis=1)
#!/usr/bin/env python
'''
Parses input/output formats,
manages transformations
'''
import csv
import re
import sys
from numpy import array
import numpy as np
import pandas as pd
from pandas import *
from . import config
from . import distance
from . import store
from . import stats
from . import HSIC
def wrap_features(txt, width=40):
'''helper function to wrap text for long labels'''
import textwrap
txt = txt.replace('s__','').replace('g__','').replace('f__','').replace('o__','').replace('c__','').replace('p__','').replace('k__','')
txt = str(txt).split("|")
txt = [val for val in txt if len(val)>0 ]
if len(txt)>1:
txt = txt[len(txt)-2]+" "+txt[len(txt)-1]
else:
txt = txt[0]
return txt #'\n'.join(textwrap.wrap(txt, width))
def substitute_special_characters(txt):
txt = re.sub('[\n\;]', '_', txt).replace('__','_').replace('__','_').replace('_',' ') # replace('.','_')
return txt
def load(file):
# Read in the file
if isinstance(file, pd.DataFrame):
return file.values
try:
import io
file_handle=io.open(file, encoding='utf-8')
except EnvironmentError:
sys.exit("Error: Unable to read file: " + file)
csvr = csv.reader(file_handle, dialect="excel-tab") #csv.excel_tab,
# Ignore comment lines in input file
data=[]
comments=[]
for line in csvr:
# Add comment to list
if re.match("#",line[0]):
comments.append(line)
else:
# First data line found
data=[line]
break
# Check if last comment is header
if comments:
header=comments[-1]
# if the same number of columns then last comment is header
if len(header) == len(data[0]):
data=[header,data[0]]
# finish processing csv
for line in csvr:
data.append(line)
# close csv file
file_handle.close()
return np.array(data)
class Input:
"""
Parser class for input
Handles missing values, data type transformations
* `CON` <- continous
* `CAT` <- categorical
* `BIN` <- binary
* `LEX` <- lexical
"""
def __init__(self, strFileName1, strFileName2=None, var_names=True, headers=False):
# Data types
self.continuous = "CON"
self.categorical = "CAT"
self.binary = "BIN"
self.lexical = "LEX"
# Boolean indicators
self.varNames = var_names
self.headers = headers
# Initialize data structures
self.strFileName1 = strFileName1
self.strFileName2 = strFileName1 if strFileName2 is None else strFileName2
self.discretized_dataset1 = None
self.discretized_dataset2 = None
self.orginal_dataset1 = None
self.orginal_dataset2 = None
self.outName1 = None
self.outName2 = None
self.outType1 = None
self.outType2 = None
self.outHead1 = None
self.outHead2 = None
self._load()
self._parse()
self._filter_to_common_columns()
print ("Discretizing is started using: %s style for filtering features with low entropy!" % config.strDiscretizing)
self._discretize()
self._remove_low_entropy_features()
if len(self.outName1) < 2 or len(self.outName2) < 2:
sys.exit("--- HAllA needs at least two features in each dataset to continue!!!\n--- Please repeat the single feature or provide the -a AllA option on the command line to do a pairwise all-against-all test!!")
store.smart_decisoin()
if store.bypass_discretizing():
try:
self.orginal_dataset1= np.asarray(self.orginal_dataset1, dtype = float)
self.orginal_dataset2= np.asarray(self.orginal_dataset2, dtype = float)
self._transform_data()
#self.discretized_dataset1 = self.orginal_dataset1
#self.discretized_dataset2 = self.orginal_dataset2
except:
sys.exit("--- Please check your data types and your similarity metric!")
self._check_for_semi_colon()
def get(self):
return [(self.discretized_dataset1, self.orginal_dataset1, self.outName1, self.outType1, self.outHead1),
(self.discretized_dataset2, self.orginal_dataset2, self.outName2, self.outType2, self.outHead2)]
def _load(self):
self.orginal_dataset1 = load(self.strFileName1)
self.orginal_dataset2 = load(self.strFileName2)
def _check_for_semi_colon(self):
# check the names of features that HAllA uses to make sure they don't contain ; which
# is a special character used to separate features in output files
for i in range(len(self.outName1)):
if ";" in self.outName1[i]:
print ("Feature names warning!")
print (self.outName1[i])
sys.exit("In the first dataset, your feature (row) names contains ; which is the special character HAllA uses for separating features,\n \
Please replace it with another character such as _")
for i in range(len(self.outName2)):
if ";" in self.outName2[i]:
print ("Feature names warning!")
print (self.outName2[i])
sys.exit("In the second dataset, your feature (row) names contains ; which is the special character HAllA uses for separating features,\n \
Please replace it with another character such as _")
def _discretize(self):
self.discretized_dataset1 = stats.discretize(self.orginal_dataset1, style = config.strDiscretizing, data_type = config.data_type[0])
self.discretized_dataset2 = stats.discretize(self.orginal_dataset2, style = config.strDiscretizing, data_type = config.data_type[1])
def _parse(self):
def __parse(pArray, bVar, bHeaders):
aOut = []
aNames = []
used_names = []
aTypes = []
aHeaders = None
# Parse header if indicated by user or "#"
if bHeaders or re.match("#",str(pArray[0,0])):
aHeaders = list(pArray[0,1:])
pArray = pArray[1:]
# Parse variable names
if bVar:
aNames = list(pArray[:, 0])
aNames = list(map(str, aNames))
if config.format_feature_names:
aNames = list(map(wrap_features, aNames))
aNames = list(map(substitute_special_characters, aNames))
pArray = pArray[:, 1:]
#replace missing characters with nan
#pArray[pArray == config.missing_char] = 'NaN'
#print pArray
# Parse data types, missing values, and whitespace
if config.missing_method:
from sklearn.preprocessing import Imputer
imp = Imputer(missing_values=config.missing_char, strategy=config.missing_method, axis=1)
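# Note: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22. On newer
# versions, a rough equivalent (an assumption, not part of the original code) is:
#   from sklearn.impute import SimpleImputer
#   imp = SimpleImputer(missing_values=np.nan, strategy=config.missing_method)
# keeping in mind that SimpleImputer has no axis argument and imputes column-wise,
# so each row would need to be reshaped accordingly before fit_transform.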
#imp.fit(pArray)
for i, line in enumerate(pArray):
# * If the line is not full, replace the Nones with nans *
#*****************************************************************************************************
#line = list(map(lambda x: 'NaN' if x == config.missing_char else x, line)) ###### np.nan Convert missings to nans
if all([val == config.missing_char for val in line]):
# if all values in a feature are missing values then skip the feature
print ('All values are missing in', aNames[i])
continue
if not aNames:
aNames.append(i)
#aOut.append(line)
try:
if config.missing_method:
line = array(imp.fit_transform(line.reshape(1,-1)))[0]
aTypes.append("CON")
except ValueError:
line = line # we are forced to conclude that it is implicitly categorical, with some lexical ordering
aTypes.append("LEX")
used_names.append(aNames[i])
aOut.append(line)
# if there is categorical data then do HAllA with AllA style of
# finding the BH threshold using all p-values
if "LEX" in aTypes:
config.do_alla_halla = True
return aOut, used_names, aTypes, aHeaders
self.orginal_dataset1, self.outName1, self.outType1, self.outHead1 = __parse(self.orginal_dataset1, self.varNames, self.headers)
self.orginal_dataset2, self.outName2, self.outType2, self.outHead2 = __parse(self.orginal_dataset2, self.varNames, self.headers)
config.data_type[0] = self.outType1
config.data_type[1] = self.outType2
def _filter_to_common_columns(self):
"""
Make sure that the data are well-formed
"""
assert(len(self.orginal_dataset1) == len(self.outType1))
assert(len(self.orginal_dataset2) == len(self.outType2))
if self.outName1:
assert(len(self.orginal_dataset1) == len(self.outName1))
if self.outName2:
assert(len(self.orginal_dataset2) == len(self.outName2))
if self.outHead1:
assert(len(self.orginal_dataset1[0]) == len(self.outHead1))
if self.outHead2:
assert(len(self.orginal_dataset2[0]) == len(self.outHead2))
# If sample names are included in headers in both files,
# check that the samples are in the same order
if self.outHead1 and self.outHead2:
header1="\t".join(self.outHead1)
header2="\t".join(self.outHead2)
#print header1, header2
#if not (header1.lower() == header2.lower()):
#+
#"." + " \n File1 header: " + header1 + "\n" +
#" File2 header: " + header2)
try:
df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1)
except:
df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1)
try:
df2 = pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2)
except:
df2 = pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2)
#print df1.columns.isin(df2.columns)
#print df2.columns.isin(df1.columns)
l1_before = len(df1.columns)
l2_before = len(df2.columns)
# remove samples/columns with all NaN/missing values
# First change missing values to np.NaN for pandas
df1[df1==config.missing_char] =np.NAN
df2[df2==config.missing_char] =np.NAN
df1 = df1.dropna( axis=1, how='all')
df2 = df2.dropna( axis=1, how='all')
l1_after = len(df1.columns)
l2_after = len(df2.columns)
# replace np.NaN's with 'NaN'
df1[df1.isnull()] = 'NaN'
df2[df2.isnull()] = 'NaN'
if l1_before > l1_after:
print ("--- %d samples/columns with all missing values have been removed from the first dataset " % (l1_before- l1_after))
if l2_before > l2_after:
print ("--- %d samples/columns with all missing values have been removed from the second dataset " % (l2_before- l2_after))
# Keep common samples/columns between the two data frames
df1 = df1.loc[: , df1.columns.isin(df2.columns)]
df2 = df2.loc[: , df2.columns.isin(df1.columns)]
# reorder df1 columns to match the column order of df2
df1 = df1.loc[:, df2.columns]
self.orginal_dataset1 = df1.values
self.orginal_dataset2 = df2.values
#print self.orginal_dataset1
#print HSIC.HSIC_pval(df1.values,df2.values, p_method ='gamma', N_samp =1000)
self.outName1 = list(df1.index)
self.outName2 = list(df2.index)
#print self.outName1
#print self.outName2
#self.outType1 = int
#self.outType2 = int
#self.outHead1 = df1.columns
#self.outHead2 = df2.columns
self.outHead1 = df1.columns
self.outHead2 = df2.columns
print(("The program uses %s common samples between the two data sets based on headers")%(str(df1.shape[1])))
if len(self.orginal_dataset1[0]) != len(self.orginal_dataset2[0]):
sys.exit("Have you provided --header option to use sample/column names for shared sample/columns.")
def _remove_low_variant_features(self):
try:
df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1, dtype=float)
except:
df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1, dtype=float)
try:
df2 = | pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2, dtype=float) | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Prints a merged summary table from a variety of XIDs."""
from absl import app
from absl import flags
import pandas as pd
import tabulate
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.inference import util as inference_util
from ldif.util import file_util
from ldif.util.file_util import log
# pylint: enable=g-bad-import-order
FLAGS = flags.FLAGS
flags.DEFINE_string('input_dir', None, 'The input directory with results.')
flags.mark_flag_as_required('input_dir')
flags.DEFINE_string('xids', None, 'The XIDs to evaluate on.')
flags.mark_flag_as_required('xids')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
xids = inference_util.parse_xid_str(FLAGS.xids)
log.info(f'XIDS: {xids}')
names = []
ious = []
fscores = []
chamfers = []
xid_to_name = {
1: 'CRS SIF',
2: 'CRS ldif',
3: 'CR SIF',
4: 'CR ldif',
5: 'CRS PT SIF',
6: 'CRS PT ldif',
7: 'CR PT SIF',
8: 'CR PT ldif'
}
for xid in xids:
path = f'{FLAGS.input_dir}/extracted/XID{xid}_metric_summary-v2.csv'
df = file_util.read_csv(path)
log.info(f'XID {xid}:')
log.info(df)
mean = df[df['class'].str.contains('mean')]
names.append(xid_to_name[xid])
ious.append(float(mean['IoU']))
fscores.append(float(mean['F-Score (tau)']))
chamfers.append(float(mean['Chamfer']))
l = list(zip(names, ious, fscores, chamfers))
log.info('Start')
log.info(names)
log.info(ious)
log.info(fscores)
log.info(chamfers)
log.info('End')
df = | pd.DataFrame(l, columns=['Name', 'IoU', 'F-Score (tau)', 'Chamfer']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 10:59:05 2021
@author: franc
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import json
from collections import Counter, OrderedDict
import math
import torchtext
from torchtext.data import get_tokenizer
from googletrans import Translator
# from deep_translator import GoogleTranslator
# pip install googletrans==4.0.0rc1
import pickle
# pip install pickle-mixin
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
# python -m spacy download es_core_news_sm
import spacy
import fasttext.util
import contractions
import re # regular expressions library
import string # string and character utilities library
import itertools
import sys
sys.path.append("/tmp/TEST")
from treetagger import TreeTagger
import pathlib
from scipy.spatial import distance
from scipy.stats import kurtosis
from scipy.stats import skew
class NLPClass:
def __init__(self):
self.numero = 1
nltk.download('wordnet')
def translations_dictionary(self, df_translate=None, path=""):
'''
It appends different animal names, in Spanish and English, to a
dictionary. The English names are chosen so that they appear in a
WordNet synset.
Parameters
----------
df_translate : pandas.DataFrame, optional.
If it's not None, the rows are appended. Otherwise it's
initialized and then the rows are appended.
The default is None.
path : string, optional
The path where the pickle file with the dictionary is saved,
unless the path is empty.
The default is "".
Returns
-------
df_translate : pandas.DataFrame.
pandas.DataFrame with the new rows appended.
'''
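# Note: pandas.DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
# On newer pandas, the repeated appends below could be replaced by collecting the
# pairs in a list and concatenating once; a sketch (not the original code):
#   rows = [{'spanish': 'yaguareté', 'english': 'jaguar'},
#           {'spanish': 'llama', 'english': 'llama'}]
#   df_auxiliar = pd.concat([df_auxiliar, pd.DataFrame(rows)], ignore_index=True)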
df_auxiliar = pd.DataFrame(columns=['spanish','english'])
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["llama"], 'english': ["llama"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["picaflor"], 'english': ["hummingbird"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chita"], 'english': ["cheetah"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["torcaza"], 'english': ["dove"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yacaré"], 'english': ["alligator"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["corvina"], 'english': ["croaker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vizcacha"], 'english': ["viscacha"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["orca"], 'english': ["killer_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["barata"], 'english': ["german_cockroach"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["coipo"], 'english': ["coypu"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cuncuna"], 'english': ["caterpillar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carpincho"], 'english': ["capybara"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["jote"], 'english': ["buzzard"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["zorzal"], 'english': ["fieldfare"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guanaco"], 'english': ["guanaco"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["pejerrey"], 'english': ["silverside"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["mandril"], 'english': ["mandrill"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["peludo"], 'english': ["armadillo"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append( | pd.DataFrame({'spanish': ["chingue"], 'english': ["skunk"]}) | pandas.DataFrame |
"""
Extract basic statistical features from data
"""
import pandas as pd
from utils.timer import Timer
# -------------------------------------------------------------------------------------------------
TRAIN_FILE = 'data/train_preliminary/train.pkl'
TEST_FILE = 'data/test/test.pkl'
TRAIN_STAT_FEAT = 'data/train_feat/train_basic_stat_feat.pkl'
TEST_STAT_FEAT = 'data/test_feat/test_basic_stat_feat.pkl'
na_cols = [
'product_id_count', 'product_id_nunique', 'industry_count', 'industry_nunique', 'duration_std'
]
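# na_cols lists the aggregates that can legitimately come back as NaN: the
# product_id/industry stats are computed on frames filtered to rows != '\\N'
# (some users then have no rows left), and duration_std is undefined for users
# with a single duration value. These columns are filled with zeros further below.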
dtype = {
'creative_id_count': 'uint32',
'creative_id_nunique': 'uint32',
'ad_id_nunique': 'uint32',
'advertiser_id_nunique': 'uint32',
'product_category_nunique': 'uint32',
'click_times_nunique': 'uint32',
'click_times_max': 'uint8',
'click_times_sum': 'uint32',
'click_times_mean': 'float64',
'click_times_std': 'float64',
'time_nunique': 'uint32',
'time_min': 'uint8',
'time_max': 'uint8',
'product_id_count': 'uint32',
'product_id_nunique': 'uint32',
'industry_count': 'uint32',
'industry_nunique': 'uint32',
'duration_nunique': 'uint32',
'duration_min': 'uint8',
'duration_max': 'uint8',
'duration_mean': 'float64',
'duration_median': 'float64',
'duration_std': 'float64',
'creative_id_count_bin_10': 'uint8',
'creative_id_nunique_bin_10': 'uint8',
'ad_id_nunique_bin_10': 'uint8',
'advertiser_id_nunique_bin_10': 'uint8',
'product_category_nunique_bin_10': 'uint8',
'product_id_count_bin_10': 'uint8',
'product_id_nunique_bin_10': 'uint8',
'industry_count_bin_10': 'uint8',
'industry_nunique_bin_10': 'uint8',
'click_times_max_lt_1': 'uint8',
'click_times_sum_bin_10': 'uint8',
'click_times_mean_bin_2': 'uint8',
'click_times_std_bin_2': 'uint8',
'time_nunique_bin_10': 'uint8',
'time_min_bin_4': 'uint8',
'time_max_bin_2': 'uint8',
'duration_nunique_bin_4': 'uint8',
'duration_min_lt_1': 'uint8',
'duration_max_bin_10': 'uint8',
'duration_mean_bin_10': 'uint8',
'duration_median_bin_4': 'uint8',
'duration_std_bin_10': 'uint8'
}
timer = Timer()
# -------------------------------------------------------------------------------------------------
print('Loading train and test data...')
timer.start()
train = pd.read_pickle(TRAIN_FILE)
test = pd.read_pickle(TEST_FILE)
timer.stop()
# -------------------------------------------------------------------------------------------------
print('Generate basic statistical features')
timer.start()
train_stat_basic = pd.DataFrame()
test_stat_basic = pd.DataFrame()
# general
temp = train.groupby('user_id').agg({
'creative_id': ['count', 'nunique'],
'ad_id': ['nunique'],
'advertiser_id': ['nunique'],
'product_category': ['nunique'],
'click_times': ['nunique', 'max', 'sum', 'mean', 'std'],
'time': ['nunique', 'min', 'max']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
temp = test.groupby('user_id').agg({
'creative_id': ['count', 'nunique'],
'ad_id': ['nunique'],
'advertiser_id': ['nunique'],
'product_category': ['nunique'],
'click_times': ['nunique', 'max', 'sum', 'mean', 'std'],
'time': ['nunique', 'min', 'max']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
# product_id
temp = train.loc[train['product_id'] != '\\N'].groupby('user_id').agg({
'product_id': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
temp = test.loc[test['product_id'] != '\\N'].groupby('user_id').agg({
'product_id': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
# industry
temp = train.loc[train['industry'] != '\\N'].groupby('user_id').agg({
'industry': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
temp = test.loc[test['industry'] != '\\N'].groupby('user_id').agg({
'industry': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
timer.stop()
# -------------------------------------------------------------------------------------------------
print('Generate statistical features based on click date duration...')
timer.start()
# drop all columns except user_id and time
# since only time duration will be taken into consideration
# keep one click log record per user per day
train = train.loc[:, ['user_id', 'time']].drop_duplicates().sort_values(['user_id', 'time'])
test = test.loc[:, ['user_id', 'time']].drop_duplicates().sort_values(['user_id', 'time'])
# create time duration statistical features
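# Worked example of the duration computation below: for a user who clicked on days
# [1, 3, 7], shift(-1) gives next_time [3, 7, NaN]; the NaN row is dropped and
# duration = next_time - time yields [2, 4], which is then aggregated per user.
# Users with a single click day keep one row with next_time forced to 0, so their
# duration is 0 - time.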
train['next_time'] = train.groupby('user_id')['time'].shift(-1)
temp = train.groupby('user_id').size()
train.loc[train['user_id'].isin(temp[temp == 1].index), 'next_time'] = 0
train = train.loc[train['next_time'].notna()]
train = train.astype({'next_time': 'uint8'})
train['duration'] = train['next_time'] - train['time']
temp = train.groupby('user_id').agg({'duration': ['nunique', 'min', 'max', 'mean', 'median', 'std']})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
test['next_time'] = test.groupby('user_id')['time'].shift(-1)
temp = test.groupby('user_id').size()
test.loc[test['user_id'].isin(temp[temp == 1].index), 'next_time'] = 0
test = test.loc[test['next_time'].notna()]
test = test.astype({'next_time': 'uint8'})
test['duration'] = test['next_time'] - test['time']
temp = test.groupby('user_id').agg({'duration': ['nunique', 'min', 'max', 'mean', 'median', 'std']})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
# fill nan values with zeros
train_stat_basic.loc[:, na_cols] = train_stat_basic.loc[:, na_cols].fillna(0)
test_stat_basic.loc[:, na_cols] = test_stat_basic.loc[:, na_cols].fillna(0)
timer.stop()
# -------------------------------------------------------------------------------------------------
print('Bucketing continuous features...')
timer.start()
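# pd.qcut buckets each statistic into q equal-frequency quantile bins; .cat.codes
# turns the resulting Categorical into integer bin labels 0..q-1 (-1 for missing),
# which is what gets stored as the *_bin_* features below.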
train_stat_basic['creative_id_count_bin_10'] = pd.qcut(train_stat_basic['creative_id_count'], q=10).cat.codes
train_stat_basic['creative_id_nunique_bin_10'] = pd.qcut(train_stat_basic['creative_id_nunique'], q=10).cat.codes
train_stat_basic['ad_id_nunique_bin_10'] = pd.qcut(train_stat_basic['ad_id_nunique'], q=10).cat.codes
train_stat_basic['advertiser_id_nunique_bin_10'] = pd.qcut(train_stat_basic['advertiser_id_nunique'], q=10).cat.codes
train_stat_basic['product_category_nunique_bin_10'] = pd.qcut(train_stat_basic['product_category_nunique'], q=4).cat.codes
train_stat_basic['product_id_count_bin_10'] = pd.qcut(train_stat_basic['product_id_count'], q=10).cat.codes
train_stat_basic['product_id_nunique_bin_10'] = pd.qcut(train_stat_basic['product_id_nunique'], q=10).cat.codes
train_stat_basic['industry_count_bin_10'] = pd.qcut(train_stat_basic['industry_count'], q=10).cat.codes
train_stat_basic['industry_nunique_bin_10'] = pd.qcut(train_stat_basic['industry_nunique'], q=10).cat.codes
train_stat_basic['click_times_max_lt_1'] = train_stat_basic['click_times_max'].map(lambda s: 0 if s <= 1 else 1)
train_stat_basic['click_times_sum_bin_10'] = pd.qcut(train_stat_basic['click_times_sum'], q=10).cat.codes
train_stat_basic['click_times_mean_bin_2'] = pd.qcut(train_stat_basic['click_times_mean'], q=2).cat.codes
train_stat_basic['click_times_std_bin_2'] = pd.qcut(train_stat_basic['click_times_std'], q=2).cat.codes
train_stat_basic['time_nunique_bin_10'] = pd.qcut(train_stat_basic['time_nunique'], q=10).cat.codes
train_stat_basic['time_min_bin_4'] = pd.qcut(train_stat_basic['time_min'], q=4).cat.codes
train_stat_basic['time_max_bin_2'] = pd.qcut(train_stat_basic['time_max'], q=2).cat.codes
train_stat_basic['duration_nunique_bin_4'] = pd.qcut(train_stat_basic['duration_nunique'], q=4).cat.codes
train_stat_basic['duration_min_lt_1'] = train_stat_basic['duration_min'].map(lambda s: 0 if s <= 1 else 1)
train_stat_basic['duration_max_bin_10'] = pd.qcut(train_stat_basic['duration_max'], q=10).cat.codes
train_stat_basic['duration_mean_bin_10'] = pd.qcut(train_stat_basic['duration_mean'], q=10).cat.codes
train_stat_basic['duration_median_bin_4'] = pd.qcut(train_stat_basic['duration_median'], q=4).cat.codes
train_stat_basic['duration_std_bin_10'] = pd.qcut(train_stat_basic['duration_std'], q=10).cat.codes
test_stat_basic['creative_id_count_bin_10'] = pd.qcut(test_stat_basic['creative_id_count'], q=10).cat.codes
test_stat_basic['creative_id_nunique_bin_10'] = pd.qcut(test_stat_basic['creative_id_nunique'], q=10).cat.codes
test_stat_basic['ad_id_nunique_bin_10'] = pd.qcut(test_stat_basic['ad_id_nunique'], q=10).cat.codes
test_stat_basic['advertiser_id_nunique_bin_10'] = pd.qcut(test_stat_basic['advertiser_id_nunique'], q=10).cat.codes
test_stat_basic['product_category_nunique_bin_10'] = pd.qcut(test_stat_basic['product_category_nunique'], q=4).cat.codes
test_stat_basic['product_id_count_bin_10'] = pd.qcut(test_stat_basic['product_id_count'], q=10).cat.codes
test_stat_basic['product_id_nunique_bin_10'] = pd.qcut(test_stat_basic['product_id_nunique'], q=10).cat.codes
test_stat_basic['industry_count_bin_10'] = pd.qcut(test_stat_basic['industry_count'], q=10).cat.codes
test_stat_basic['industry_nunique_bin_10'] = pd.qcut(test_stat_basic['industry_nunique'], q=10).cat.codes
test_stat_basic['click_times_max_lt_1'] = test_stat_basic['click_times_max'].map(lambda s: 0 if s <= 1 else 1)
test_stat_basic['click_times_sum_bin_10'] = pd.qcut(test_stat_basic['click_times_sum'], q=10).cat.codes
test_stat_basic['click_times_mean_bin_2'] = pd.qcut(test_stat_basic['click_times_mean'], q=2).cat.codes
test_stat_basic['click_times_std_bin_2'] = pd.qcut(test_stat_basic['click_times_std'], q=2).cat.codes
test_stat_basic['time_nunique_bin_10'] = | pd.qcut(test_stat_basic['time_nunique'], q=10) | pandas.qcut |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tuple-valued names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert that at least one of 'labels' or 'columns' must be specified
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
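# NOTE: 'compute.isin_limit' appears to cap how many labels are passed to Column.isin;
# above the limit a join-based filter is used instead, so repeating the drop with a
# limit of 2 exercises that alternative code path.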
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check MultiIndex
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
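# NOTE: fillna value mappings appear to require plain Python scalars; numpy scalar
# types such as np.int64 are rejected with a TypeError.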
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check MultiIndex
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
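# NOTE: approx=True appears to rely on Spark's approx_count_distinct (HyperLogLog++),
# so the result can deviate from the exact count (103 vs. 100 above); a tighter
# relative standard deviation (rsd=0.01) recovers the exact value for this data.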
# Assert axis=1 is not yet supported
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
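# NOTE: negative levels count from the last index level, so level=-3 addresses the
# first of the three levels ("class") here.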
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# pandas seems to have a bug when passing `np.array` as the parameter, so compare against the list form
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
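# NOTE: pandas older than 1.2 handled None/NaN membership in isin differently, which
# is why the expected frames are hard-coded for that branch instead of comparing
# against pdf.isin directly.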
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]])
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
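# NOTE: clip appears to leave non-numeric columns untouched, so clipping a
# string-only frame compares equal to the original.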
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
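# NOTE: combining objects that come from different DataFrames raises by default;
# enabling 'compute.ops_on_diff_frames' appears to allow it, e.g. (hedged sketch):
# with ps.option_context("compute.ops_on_diff_frames", True):
#     ps.range(10).add(ps.range(10))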
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the result because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
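# NOTE: only fraction-based sampling is implemented here; sample(n=...) raises
# NotImplementedError, so an approximate workaround is something like (hedged sketch):
# psdf.sample(frac=min(1.0, 1 / len(psdf)))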
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic behavior
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with a Series whose name duplicates an existing column
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with a DataFrame whose columns overlap the caller's
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Skip columns comparison by reset_index
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
# Results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
# TODO: what if with random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
"""Unit tests for orbitpy.coveragecalculator.gridcoverage class.
``TestGridCoverage`` class:
* ``test_execute_0``: Test format of output access files.
* ``test_execute_1``: Roll Circular sensor tests
* ``test_execute_2``: Yaw Circular sensor tests
* ``test_execute_3``: Pitch Circular sensor tests
* ``test_execute_4``: Roll Rectangular sensor tests
* ``test_execute_5``: Pitch Rectangular sensor tests
* ``test_execute_6``: Satellite-bus orientation vs sensor orientation tests
* ``test_execute_7``: Test spacecraft with multiple sensors.
* ``test_execute_8``: Test FOV vs FOR coverage. Coverage of FOR >= Coverage of FOV.
* ``test_execute_9``: Test coverage with DOUBLE_ROLL_ONLY maneuver, which will result in 2 ``ViewGeometry`` objects for the field-of-regard.
"""
import json
import os, shutil
import sys
import unittest
import pandas as pd
import random
import warnings
from orbitpy.coveragecalculator import CoverageOutputInfo, GridCoverage
from orbitpy.grid import Grid
from orbitpy.util import Spacecraft
from orbitpy.propagator import PropagatorFactory
sys.path.append('../')
from util.spacecrafts import spc1_json, spc4_json, spc5_json
RE = 6378.137 # radius of Earth in kilometers
class TestGridCoverage(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create new working directory to store output of all the class functions.
cls.dir_path = os.path.dirname(os.path.realpath(__file__))
cls.out_dir = os.path.join(cls.dir_path, 'temp')
if os.path.exists(cls.out_dir):
shutil.rmtree(cls.out_dir)
os.makedirs(cls.out_dir)
# make propagator
factory = PropagatorFactory()
cls.step_size = 1
cls.j2_prop = factory.get_propagator({"@type": 'J2 ANALYTICAL PROPAGATOR', "stepSize": cls.step_size})
def test_from_dict(self):
o = GridCoverage.from_dict({ "grid":{"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2},
"spacecraft": json.loads(spc1_json),
"cartesianStateFilePath":"../../state.csv",
"@id": 12})
self.assertEqual(o._id, 12)
self.assertEqual(o._type, 'GRID COVERAGE')
self.assertEqual(o.grid, Grid.from_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2}))
self.assertEqual(o.spacecraft, Spacecraft.from_json(spc1_json))
self.assertEqual(o.state_cart_file, "../../state.csv")
def test_to_dict(self): #TODO
pass
def test_execute_0(self):
""" Check the produced access file format.
"""
# setup spacecraft with some parameters setup randomly
duration=0.05
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+random.uniform(350,850),
"ecc": 0, "inc": random.uniform(0,180), "raan": random.uniform(0,360),
"aop": random.uniform(0,360), "ta": random.uniform(0,360)}
}
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": random.uniform(5,35) },
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "@id":"bs1", "@type":"Basic Sensor"}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 1})
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access) # the first instrument, mode available in the spacecraft is considered for the coverage calculation.
# check the outputs
cov_calc_type = pd.read_csv(out_file_access, nrows=1, header=None).astype(str) # 1st row contains the coverage calculation type
cov_calc_type = str(cov_calc_type[0][0])
self.assertEqual(cov_calc_type, 'GRID COVERAGE')
epoch_JDUT1 = pd.read_csv(out_file_access, skiprows = [0], nrows=1, header=None).astype(str) # 2nd row contains the epoch
epoch_JDUT1 = float(epoch_JDUT1[0][0].split()[3])
self.assertEqual(epoch_JDUT1, 2458265.0)
_step_size = pd.read_csv(out_file_access, skiprows = [0,1], nrows=1, header=None).astype(str) # 3rd row contains the stepsize
_step_size = float(_step_size[0][0].split()[4])
self.assertAlmostEqual(_step_size, self.step_size)
_duration = pd.read_csv(out_file_access, skiprows = [0,1,2], nrows=1, header=None).astype(str) # 4th row contains the mission duration
_duration = float(_duration[0][0].split()[4])
self.assertAlmostEqual(_duration, duration)
column_headers = pd.read_csv(out_file_access, skiprows = [0,1,2,3], nrows=1, header=None).astype(str) # 5th row contains the columns headers
self.assertEqual(column_headers.iloc[0][0],"time index")
self.assertEqual(column_headers.iloc[0][1],"GP index")
self.assertEqual(column_headers.iloc[0][2],"lat [deg]")
self.assertEqual(column_headers.iloc[0][3],"lon [deg]")
# check that the grid indices are interpreted correctly
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
access_data = access_data.round(3)
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(lat==access_data['lat [deg]'].tolist())
self.assertTrue(lon==access_data['lon [deg]'].tolist())
else:
warnings.warn('No data was generated in test_execute_0(.). Run the test again.')
def test_execute_1(self):
""" Orient the sensor with roll, and an equatorial orbit and check that the ground-points captured are on either
side of hemisphere only. (Conical Sensor)
"""
############ Common attributes for both positive and negative roll tests ############
duration = 0.1
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2})
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 20,
"aop": 0, "ta": 120}
}
############ positive roll ############
# setup spacecraft with some parameters setup randomly
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_accessX.csv'
# run the coverage calculator
cov = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file)
out_info = cov.execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": sat.get_instrument(None)._id,
"modeId": sat.get_instrument(None).get_mode_id()[0],
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x > 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_1(.) positive roll test. Run the test again.')
############ negative roll ############
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_accessY.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x < 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_1(.) negative roll test. Run the test again.')
def test_execute_2(self):
""" Orient the sensor with varying yaw but same pitch and roll, and test that the captured ground-points remain the same
(Conical Sensor).
"""
####### Common attributes for both simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 0,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
pitch = 15
roll = 10.5
######## Simulation 1 #######
yaw = random.uniform(0,360)
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": roll, "zRotation": yaw},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
out_info = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": sat.get_instrument(None)._id,
"modeId": sat.get_instrument(None).get_mode_id()[0],
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 ########
yaw = random.uniform(0,360)
instrument_dict = {"mode":[{"@id":"m1", "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": roll, "zRotation": yaw}}],
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"sen1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
out_info = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": "sen1",
"modeId": "m1",
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## compare the results of both the simulations ########
if not access_data1.empty:
(lat1, lon1) = grid.get_lat_lon_from_index(access_data1['GP index'].tolist())
(lat2, lon2) = grid.get_lat_lon_from_index(access_data2['GP index'].tolist())
self.assertTrue(lat1==lat2)
else:
warnings.warn('No data was generated in test_execute_2(.). Run the test again.')
def test_execute_3(self):
""" Orient the sensor with pitch and test that the times the ground-points are captured lag or lead (depending on direction of pitch)
as compared to the coverage from a zero pitch sensor. (Conical Sensor)
Fixed inputs used.
"""
####### Common attributes for all the simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 45, "raan": 245,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
grid.write_to_file(self.out_dir+'/grid.csv')
######## Simulation 1 #######
pitch = 0
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access1.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 #######
pitch = 25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access2.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 3 #######
pitch = -25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access3.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data3 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## compare the results of both the simulations ########
# the first GP index in the forward-pitch case is detected earlier than in the zero-pitch case, and both are detected earlier than in the backward-pitch case
self.assertEqual(access_data3["GP index"][0], 1436)
self.assertEqual(access_data3["time index"][0], 51)
self.assertEqual(access_data1["GP index"][0], 1436)
self.assertEqual(access_data1["time index"][0], 91)
self.assertEqual(access_data2["GP index"][34], 1436)
self.assertEqual(access_data2["time index"][34], 123)
def test_execute_4(self):
""" Orient the sensor with roll, and an equatorial orbit and check that the ground-points captured are on either
side of hemisphere only. (Rectangular Sensor)
"""
############ Common attributes for both positive and negative roll tests ############
duration = 0.1
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2})
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 20,
"aop": 0, "ta": 120}
}
############ positive roll ############
# setup spacecraft with some parameters setup randomly
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":12.5},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_accessX.csv'
# run the coverage calculator
cov = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file)
cov.execute(out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x > 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_4(.) positive roll test. Run the test again.')
############ negative roll ############
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_accessY.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3])
# import the required libraries:
# import os, sys
from keras.models import Model
from keras.layers import Input, LSTM, GRU, Dense, Embedding
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# from keras.utils import to_categorical
import numpy as np
# import pickle
import matplotlib.pyplot as plt
from flask import Flask
app = Flask(__name__)
@app.route('/')
def home():
# return 'Hello from lamine API!'
# -*- coding: utf-8 -*-
#Execute this script to set values for different parameters:
BATCH_SIZE = 64
# EPOCHS = 20
LSTM_NODES =256
NUM_SENTENCES = 20000
# MAX_SENTENCE_LENGTH = 50
MAX_NUM_WORDS = 20000
EMBEDDING_SIZE = 200
"""The language translation model that we are going to develop will translate English sentences into their French language counterparts. To develop such a model, we need a dataset that contains English sentences and their French translations.
# Data Preprocessing
We need to generate two copies of the translated sentence: one with the start-of-sentence token and the other with the end-of-sentence token.
"""
input_sentences = []
output_sentences = []
output_sentences_inputs = []
count = 0
for line in open('./fra.txt', encoding="utf-8"):
count += 1
if count > NUM_SENTENCES:
break
if '\t' not in line:
continue
input_sentence = line.rstrip().split('\t')[0]
output = line.rstrip().split('\t')[1]
output_sentence = output + ' <eos>'
output_sentence_input = '<sos> ' + output
input_sentences.append(input_sentence)
output_sentences.append(output_sentence)
output_sentences_inputs.append(output_sentence_input)
print("Number of sample input:", len(input_sentences))
print("Number of sample output:", len(output_sentences))
print("Number of sample output input:", len(output_sentences_inputs))
"""Now randomly print a sentence to analyse your dataset."""
print("English sentence: ",input_sentences[180])
print("French translation: ",output_sentences[180])
"""You can see the original sentence, i.e. **Join us**; its corresponding translation in the output, i.e **Joignez-vous à nous.** <eos>. Notice, here we have <eos> token at the end of the sentence. Similarly, for the input to the decoder, we have <sos> **Joignez-vous à nous.**
# Tokenization and Padding
The next step is tokenizing the original and translated sentences and applying padding to the sentences that are longer or shorter than a certain length, which in case of inputs will be the length of the longest input sentence. And for the output this will be the length of the longest sentence in the output.
"""
# let’s visualise the length of the sentences.
import pandas as pd
eng_len = []
fren_len = []
# populate the lists with sentence lengths
for i in input_sentences:
eng_len.append(len(i.split()))
for i in output_sentences:
fren_len.append(len(i.split()))
length_df = pd.DataFrame({'english': eng_len, 'french': fren_len})
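# The tokenization and padding step announced above is not shown in this excerpt. The lines below are a
# minimal, hypothetical sketch of how it could look using the Tokenizer and pad_sequences already imported
# at the top of this script; variable names such as input_integer_seq are illustrative, not from the original.
input_tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
input_tokenizer.fit_on_texts(input_sentences)
input_integer_seq = input_tokenizer.texts_to_sequences(input_sentences)
word2idx_inputs = input_tokenizer.word_index
max_input_len = max(len(sen) for sen in input_integer_seq)
# empty filters so the '<sos>' and '<eos>' markers survive tokenization of the French sentences
output_tokenizer = Tokenizer(num_words=MAX_NUM_WORDS, filters='')
output_tokenizer.fit_on_texts(output_sentences + output_sentences_inputs)
output_integer_seq = output_tokenizer.texts_to_sequences(output_sentences)
output_input_integer_seq = output_tokenizer.texts_to_sequences(output_sentences_inputs)
max_out_len = max(len(sen) for sen in output_integer_seq)
# pad encoder inputs at the front (default) and decoder sequences at the back
encoder_input_sequences = pad_sequences(input_integer_seq, maxlen=max_input_len)
decoder_input_sequences = pad_sequences(output_input_integer_seq, maxlen=max_out_len, padding='post')
decoder_output_sequences = pad_sequences(output_integer_seq, maxlen=max_out_len, padding='post')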
"""
File name: models.py
Author: <NAME>
Date created: 21.05.2018
This file contains the abstract Model base class that is used for implementing
the given models. It contains a class object for each individual model type.
"""
import os
import pickle
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Union
import catboost as cat
import keras
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Dropout
from keras.models import Sequential, load_model
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.model_selection import GridSearchCV, ParameterGrid
from utils.helper_functions import calc_perf_score
os.environ["KERAS_BACKEND"] = "tensorflow"
class Model(metaclass=ABCMeta):
"""
An abstract base class used to represent any model.
:param name: Name of the model
:param dataset: The dataset for the model to train on
:param fixed_params: Hyperparameters that won't be used in model tuning
:param tuning_params: Hyperparameters that can be used in model tuning
.. py:meth:: Model.load_model(path)
:param path: Path to model file.
.. py:meth:: Model.run_gridsearch()
.. py:meth:: Model.train()
.. py:meth:: Model.save_model(path)
:param path: Path to model file.
.. py:meth:: Model.evaluate_performance(score_name)
:param score_name: Name of the performance measure.
:return: Training and test performance scores
"""
def __init__(
self,
name: str,
dataset: Dict,
fixed_params: Dict[str, Union[str, float]],
tuning_params: Dict[str, Union[str, float]] = None,
):
self.name = name
self.X_tr = dataset["train_data"]
self.y_tr = dataset["train_labels"]
self.X_te = dataset["test_data"]
self.y_te = dataset["test_labels"]
self.fixed_params = fixed_params
self.tuning_params = tuning_params
if self.fixed_params.get("out_activation") is "softmax":
self.y_tr = pd.get_dummies(self.y_tr)
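# Illustrative (hypothetical) usage of a concrete subclass of Model. The subclass name
# `LogisticRegressionModel` and the parameter values are assumptions for illustration only;
# the concrete model classes and the remaining methods of this file are not shown in this excerpt.
#
#   dataset = {
#       "train_data": X_train, "train_labels": y_train,
#       "test_data": X_test, "test_labels": y_test,
#   }
#   model = LogisticRegressionModel(
#       name="logreg",
#       dataset=dataset,
#       fixed_params={"out_activation": "sigmoid"},
#       tuning_params={"C": [0.1, 1.0, 10.0]},
#   )
#   model.run_gridsearch()
#   model.train()
#   train_score, test_score = model.evaluate_performance("roc_auc")
#   model.save_model("models/logreg.pkl")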
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 15:45:51 2019
@author: lucia
"""
import pandas as pd
inputCSV = pd.read_csv("./IdiomasCSV.csv")
# feature generation & selection
# sample
# full
# kaggle 0.14481
# minimize score
import os
import json
import sys # pylint: disable=unused-import
from time import time
import csv
from pprint import pprint # pylint: disable=unused-import
from timeit import default_timer as timer
import lightgbm as lgb
import numpy as np
from hyperopt import STATUS_OK, fmin, hp, tpe, Trials
import pandas as pd
from pandas.io.json import json_normalize
from sklearn.feature_selection import VarianceThreshold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import PolynomialFeatures
pd.options.display.float_format = '{:.4f}'.format
pd.set_option('display.max_columns', None)
#!/usr/bin/env python
"""
@author: cdeline
bifacial_radiance.py - module to develop radiance bifacial scenes, including gendaylit and gencumulativesky
7/5/2016 - test script based on G173_journal_height
5/1/2017 - standalone module
Pre-requisites:
This software is written for Python >3.6 leveraging many Anaconda tools (e.g. pandas, numpy, etc)
*RADIANCE software should be installed from https://github.com/NREL/Radiance/releases
*If you want to use gencumulativesky, move 'gencumulativesky.exe' from
'bifacial_radiance\data' into your RADIANCE source directory.
*If using a Windows machine you should download the Jaloxa executables at
http://www.jaloxa.eu/resources/radiance/radwinexe.shtml#Download
* Installation of bifacial_radiance from the repo:
1. Clone the repo
2. Navigate to the directory using the command prompt
3. run `pip install -e . `
Overview:
Bifacial_radiance includes several helper functions to make it easier to evaluate
different PV system orientations for rear bifacial irradiance.
Note that this is simply an optical model - identifying available rear irradiance under different conditions.
For a detailed demonstration example, look at the .ipynb notebook in \docs\
There are two solar resource modes in bifacial_radiance: `gendaylit` uses hour-by-hour solar
resource descriptions using the Perez diffuse tilted plane model.
`gencumulativesky` is an annual average solar resource that combines hourly
Perez skies into one single solar source, and computes an annual average.
bifacial_radiance includes five object-oriented classes:
RadianceObj: top level class to work on radiance objects, keep track of filenames,
sky values, PV module type etc.
GroundObj: details for the ground surface and reflectance
SceneObj: scene information including array configuration (row spacing, clearance or hub height)
MetObj: meteorological data from EPW (energyplus) file.
Future work: include other file support including TMY files
AnalysisObj: Analysis class for plotting and reporting
"""
import logging
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
import os, datetime
from subprocess import Popen, PIPE # replacement for os.system()
import pandas as pd
import numpy as np
import warnings
#from input import *
# Mutual parameters across all processes
#daydate=sys.argv[1]
global DATA_PATH # path to data files including module.json. Global context
#DATA_PATH = os.path.abspath(pkg_resources.resource_filename('bifacial_radiance', 'data/') )
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def _findme(lst, a): #find string match in a list. script from stackexchange
return [i for i, x in enumerate(lst) if x == a]
def _missingKeyWarning(dictype, missingkey, newvalue): # prints warnings
if type(newvalue) is bool:
valueunit = ''
else:
valueunit = 'm'
print("Warning: {} Dictionary Parameters passed, but {} is missing. ".format(dictype, missingkey))
print("Setting it to default value of {} {} to continue\n".format(newvalue, valueunit))
def _normRGB(r, g, b): #normalize by each color for human vision sensitivity
return r*0.216+g*0.7152+b*0.0722
def _popen(cmd, data_in, data_out=PIPE):
"""
Helper function subprocess.popen replaces os.system
- gives better input/output process control
usage: pass <data_in> to process <cmd> and return results
based on rgbeimage.py (<NAME> 2010)
"""
if type(cmd) == str:
cmd = str(cmd) # gets rid of unicode oddities
shell=True
else:
shell=False
p = Popen(cmd, bufsize=-1, stdin=PIPE, stdout=data_out, stderr=PIPE, shell=shell) #shell=True required for Linux? quick fix, but may be security concern
data, err = p.communicate(data_in)
#if err:
# return 'message: '+err.strip()
#if data:
# return data. in Python3 this is returned as `bytes` and needs to be decoded
if err:
if data:
returntuple = (data.decode('latin1'), 'message: '+err.decode('latin1').strip())
else:
returntuple = (None, 'message: '+err.decode('latin1').strip())
else:
if data:
returntuple = (data.decode('latin1'), None) #Py3 requires decoding
else:
returntuple = (None, None)
return returntuple
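# Illustrative sketch (not part of the original module): _popen returns a
# (stdout, error_message) tuple. The Radiance `gendaylit` binary used below is
# assumed to be installed and on the PATH.
def _example_popen_usage():
    skystring, err = _popen('gendaylit -ang 65 180 -W 800 120', None)
    if err is not None:
        raise RuntimeError(err)
    return skystring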
def _interactive_load(title=None):
# Tkinter file picker
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring window into foreground
return filedialog.askopenfilename(parent=root, title=title) #initialdir = data_dir
def _interactive_directory(title=None):
# Tkinter directory picker. Now Py3.6 compliant!
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring to front
return filedialog.askdirectory(parent=root, title=title)
def _modDict(originaldict, moddict, relative=False):
'''
Compares keys in originaldict with moddict and updates values of
originaldict to moddict if existing.
Parameters
----------
originaldict : dictionary
Original dictionary calculated, for example frontscan or backscan dictionaries.
moddict : dictionary
Modified dictionary, for example modscan['xstart'] = 0 to change position of x.
relative : Bool
if passing modscanfront and modscanback to modify dictionaries of positions,
this sets whether the values passed are relative or absolute.
Default is absolute value (relative=False)
Returns
-------
originaldict : dictionary
Updated original dictionary with values from moddict.
'''
newdict = originaldict.copy()
for key in moddict:
try:
if relative:
newdict[key] = moddict[key] + newdict[key]
else:
newdict[key] = moddict[key]
except:
print("Wrong key in modified dictionary")
return newdict
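# Illustrative sketch (not part of the original module): _modDict overrides
# matching keys either absolutely or relative to the original values.
def _example_modDict_usage():
    frontscan = {'xstart': 0.5, 'ystart': 0.0, 'zstart': 1.0}
    shifted = _modDict(frontscan, {'xstart': 0.1}, relative=True)    # xstart -> 0.6
    replaced = _modDict(frontscan, {'xstart': 0.1}, relative=False)  # xstart -> 0.1
    return shifted, replaced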
def _heightCasesSwitcher(sceneDict, preferred='hub_height', nonpreferred='clearance_height'):
"""
Parameters
----------
sceneDict : dictionary
Dictionary that might contain more than one way of defining height for
the array: `clearance_height`, `hub_height`, `height`*
* `height` is deprecated from sceneDict. This function helps choose
which definition to use.
preferred : str, optional
When sceneDict has hub_height and clearance_height, or it only has height,
it will leave only the preferred option. The default is 'hub_height'.
nonpreferred : str, optional
When sceneDict has hub_height and clearance_height,
it will delete this nonpreferred option. The default is 'clearance_height'.
Returns
-------
sceneDict : dictionary
Dictionary now containing the appropriate definition for system height.
use_clearanceheight : Bool
Helper variable to specify if the dictionary has only clearance_height for
use inside `makeScene1axis`. Will get deprecated once that internal
function is streamlined.
"""
# TODO: When we update to python 3.9.0, this could be a Switch Cases (Structural Pattern Matching):
heightCases = '_'
if 'height' in sceneDict:
heightCases = heightCases+'height__'
if 'clearance_height' in sceneDict:
heightCases = heightCases+'clearance_height__'
if 'hub_height' in sceneDict:
heightCases = heightCases+'hub_height__'
use_clearanceheight = False
# CASES:
if heightCases == '_height__':
print("sceneDict Warning: 'height' is being deprecated. "+
"Renaming as "+preferred)
sceneDict[preferred]=sceneDict['height']
del sceneDict['height']
elif heightCases == '_clearance_height__':
#print("Using clearance_height.")
use_clearanceheight = True
elif heightCases == '_hub_height__':
#print("Using hub_height.'")
pass
elif heightCases == '_height__clearance_height__':
print("sceneDict Warning: 'clearance_height and 'height' "+
"(deprecated) are being passed. removing 'height' "+
"from sceneDict for this tracking routine")
del sceneDict['height']
use_clearanceheight = True
elif heightCases == '_height__hub_height__':
print("sceneDict Warning: 'height' is being deprecated. Using 'hub_height'")
del sceneDict['height']
elif heightCases == '_height__clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height', 'clearance_height'"+
", and 'height' are being passed. Removing 'height'"+
" (deprecated) and "+ nonpreferred+ ", using "+preferred)
del sceneDict[nonpreferred]
elif heightCases == '_clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height' and 'clearance_height'"+
" are being passed. Using "+preferred+
" and removing "+ nonpreferred)
del sceneDict[nonpreferred]
else:
print ("sceneDict Error! no argument in sceneDict found "+
"for 'hub_height', 'height' nor 'clearance_height'. "+
"Exiting routine.")
return sceneDict, use_clearanceheight
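# Illustrative sketch (not part of the original module): when both height
# definitions are present, only the preferred key survives.
def _example_heightCasesSwitcher_usage():
    sceneDict = {'tilt': 10, 'clearance_height': 0.5, 'hub_height': 1.5}
    sceneDict, use_clearanceheight = _heightCasesSwitcher(
        sceneDict, preferred='hub_height', nonpreferred='clearance_height')
    # sceneDict now holds 'hub_height' only and use_clearanceheight is False
    return sceneDict, use_clearanceheight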
def _is_leap_and_29Feb(s): # Boolean mask selecting Feb. 29 of leap years (rows to remove).
return (s.index.year % 4 == 0) & \
((s.index.year % 100 != 0) | (s.index.year % 400 == 0)) & \
(s.index.month == 2) & (s.index.day == 29)
def _subhourlydatatoGencumskyformat(gencumskydata, label='right'):
# Subroutine to resample, pad, remove leap year and get data in the
# 8760 hourly format
# for saving the temporary files for gencumsky in _saveTempTMY and
# _makeTrackerCSV
#Resample to hourly. Gencumsky wants right-labeled data.
gencumskydata = gencumskydata.resample('60T', closed='right', label='right').mean()
if label == 'left': #switch from left to right labeled by adding an hour
gencumskydata.index = gencumskydata.index + pd.to_timedelta('1H')
# Padding
tzinfo = gencumskydata.index.tzinfo
padstart = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0],1,1,1,0 ) ).tz_localize(tzinfo)
padend = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0]+1,1,1,0,0) ).tz_localize(tzinfo)
gencumskydata.iloc[0] = 0 # set first datapt to zero to forward fill w zeros
gencumskydata.iloc[-1] = 0 # set last datapt to zero to forward fill w zeros
# check if index exists. I'm sure there is a way to do this backwards.
if any(gencumskydata.index.isin([padstart])):
print("Data starts on Jan. 01")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padstart]))
gencumskydata=pd.concat([gencumskydata,pd.DataFrame(index=[padstart])])
if any(gencumskydata.index.isin([padend])):
print("Data ends on Dec. 31st")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padend]))
gencumskydata=pd.concat([gencumskydata, pd.DataFrame(index=[padend])])
gencumskydata.loc[padstart]=0
gencumskydata.loc[padend]=0
gencumskydata=gencumskydata.sort_index()
# Fill empty timestamps with zeros
gencumskydata = gencumskydata.resample('60T').asfreq().fillna(0)
# Mask leap year
leapmask = ~(_is_leap_and_29Feb(gencumskydata))
gencumskydata = gencumskydata[leapmask]
if (gencumskydata.index.year[-1] == gencumskydata.index.year[-2]+1) and len(gencumskydata)>8760:
gencumskydata = gencumskydata[:-1]
return gencumskydata
# end _subhourlydatatoGencumskyformat
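# Illustrative sketch (not part of the original module): one day of 15-minute
# irradiance is resampled to hourly, padded out to Jan 1 - Dec 31 and stripped
# of Feb. 29, so the result always has the 8760 rows gencumsky expects.
def _example_subhourly_to_gencumsky():
    idx = pd.date_range('2021-06-01 05:00', periods=96, freq='15T',
                        tz='Etc/GMT+7')
    data = pd.DataFrame({'GHI': 400.0, 'DHI': 100.0}, index=idx)
    return _subhourlydatatoGencumskyformat(data, label='right')  # 8760 rows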
class RadianceObj:
"""
The RadianceObj top level class is used to work on radiance objects,
keep track of filenames, sky values, PV module configuration, etc.
Parameters
----------
name : text to append to output files
filelist : list of Radiance files to create oconv
nowstr : current date/time string
path : working directory with Radiance materials and objects
Methods
-------
__init__ : initialize the object
_setPath : change the working directory
"""
def __repr__(self):
return str(self.__dict__)
def __init__(self, name=None, path=None, hpc=False):
'''
initialize RadianceObj with path of Radiance materials and objects,
as well as a basename to append to
Parameters
----------
name: string, append temporary and output files with this value
path: location of Radiance materials and objects
hpc: Keeps track of whether the user is running the simulation on HPC,
so some file-reading routines retry for longer and some writing
routines (makeModule) that would overwrite existing files are deactivated.
Returns
-------
none
'''
self.metdata = {} # data from epw met file
self.data = {} # data stored at each timestep
self.path = "" # path of working directory
self.name = "" # basename to append
#self.filelist = [] # list of files to include in the oconv
self.materialfiles = [] # material files for oconv
self.skyfiles = [] # skyfiles for oconv
self.radfiles = [] # scene rad files for oconv
self.octfile = [] #octfile name for analysis
self.Wm2Front = 0 # cumulative tabulation of front W/m2
self.Wm2Back = 0 # cumulative tabulation of rear W/m2
self.backRatio = 0 # ratio of rear / front Wm2
self.nMods = None # number of modules per row
self.nRows = None # number of rows per scene
self.hpc = hpc # HPC simulation is being run. Some read/write functions are modified
now = datetime.datetime.now()
self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)
# DEFAULTS
if name is None:
self.name = self.nowstr # set default filename for output files
else:
self.name = name
self.basename = name # add backwards compatibility for prior versions
#self.__name__ = self.name #optional info
#self.__str__ = self.__name__ #optional info
if path is None:
self._setPath(os.getcwd())
else:
self._setPath(path)
# load files in the /materials/ directory
self.materialfiles = self.returnMaterialFiles('materials')
def _setPath(self, path):
"""
setPath - move path and working directory
"""
self.path = os.path.abspath(path)
print('path = '+ path)
try:
os.chdir(self.path)
except OSError as exc:
LOGGER.error("Path doesn't exist: %s" % (path))
LOGGER.exception(exc)
raise(exc)
# check for path in the new Radiance directory:
def _checkPath(path): # create the file structure if it doesn't exist
if not os.path.exists(path):
os.makedirs(path)
print('Making path: '+path)
_checkPath('images'); _checkPath('objects')
_checkPath('results'); _checkPath('skies'); _checkPath('EPWs')
# if materials directory doesn't exist, populate it with ground.rad
# figure out where pip installed support files.
from shutil import copy2
if not os.path.exists('materials'): #copy ground.rad to /materials
os.makedirs('materials')
print('Making path: materials')
copy2(os.path.join(DATA_PATH, 'ground.rad'), 'materials')
# if views directory doesn't exist, create it with two default views - side.vp and front.vp
if not os.path.exists('views'):
os.makedirs('views')
with open(os.path.join('views', 'side.vp'), 'w') as f:
f.write('rvu -vtv -vp -10 1.5 3 -vd 1.581 0 -0.519234 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
with open(os.path.join('views', 'front.vp'), 'w') as f:
f.write('rvu -vtv -vp 0 -3 5 -vd 0 0.894427 -0.894427 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
def getfilelist(self):
"""
Return concat of matfiles, radfiles and skyfiles
"""
return self.materialfiles + self.skyfiles + self.radfiles
def save(self, savefile=None):
"""
Pickle the radiance object for further use.
Very basic operation - not much use right now.
Parameters
----------
savefile : str
Optional savefile name, with .pickle extension.
Otherwise default to save.pickle
"""
import pickle
if savefile is None:
savefile = 'save.pickle'
with open(savefile, 'wb') as f:
pickle.dump(self, f)
print('Saved to file {}'.format(savefile))
#def setHPC(self, hpc=True):
# self.hpc = hpc
def addMaterial(self, material, Rrefl, Grefl, Brefl, materialtype='plastic',
specularity=0, roughness=0, material_file=None, comment=None, rewrite=True):
"""
Function to add a material in Radiance format.
Parameters
----------
material : str
Name of the material to add or overwrite.
Rrefl : str
Reflectivity for first wavelength, or 'R' bin.
Grefl : str
Reflectivity for second wavelength, or 'G' bin.
Brefl : str
Reflectivity for third wavelength, or 'B' bin.
materialtype : str, optional
Type of material. The default is 'plastic'. Others can be mirror,
trans, etc. See RADIANCE documentation.
specularity : str, optional
Ratio of reflection that is specular and not diffuse. The default is 0.
roughness : str, optional
This is the microscopic surface roughness: the more jagged the
facets are, the rougher the surface and the blurrier its reflections appear.
material_file : str, optional
Material file to modify. The default is None, which uses 'ground.rad'.
comment : str, optional
Comment line written above the material definition. The default is None.
rewrite : bool, optional
Overwrite the material if it already exists. The default is True.
Returns
-------
None. Just adds the material to the material_file specified or the
default in ``materials\ground.rad``.
References:
See examples of documentation for more materialtype details.
http://www.jaloxa.eu/resources/radiance/documentation/docs/radiance_tutorial.pdf page 10
Also, you can use https://www.jaloxa.eu/resources/radiance/colour_picker.shtml
to have a sense of how the material would look with the RGB values as
well as specularity and roughness.
To understand more on reflectivity, specularity and roughness values
https://thinkmoult.com/radiance-specularity-and-roughness-value-examples.html
"""
if material_file is None:
material_file = 'ground.rad'
matfile = os.path.join('materials', material_file)
with open(matfile, 'r') as fp:
buffer = fp.readlines()
# search buffer for material matching requested addition
found = False
for i in buffer:
if materialtype and material in i:
loc = buffer.index(i)
found = True
break
if found:
if rewrite:
print('Material exists, overwriting...\n')
if comment is None:
pre = loc - 1
else:
pre = loc - 2
# commit buffer without material match
with open(matfile, 'w') as fp:
for i in buffer[0:pre]:
fp.write(i)
for i in buffer[loc+4:]:
fp.write(i)
if (found and rewrite) or (not found):
# append -- This will create the file if it doesn't exist
file_object = open(matfile, 'a')
file_object.write("\n\n")
if comment is not None:
file_object.write("#{}".format(comment))
file_object.write("\nvoid {} {}".format(materialtype, material))
if materialtype == 'glass':
file_object.write("\n0\n0\n3 {} {} {}".format(Rrefl, Grefl, Brefl))
else:
file_object.write("\n0\n0\n5 {} {} {} {} {}".format(Rrefl, Grefl, Brefl, specularity, roughness))
file_object.close()
print('Added material {} to file {}'.format(material, material_file))
if (found and not rewrite):
print('Material already exists\n')
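    # Illustrative usage sketch (not part of the original class), assuming a
    # RadianceObj instance named `demo`; material names are hypothetical.
    # A 'plastic' material takes R, G, B, specularity and roughness, while a
    # 'glass' material takes only the three transmissivities:
    #     demo.addMaterial(material='grey_concrete', Rrefl=0.3, Grefl=0.3,
    #                      Brefl=0.3, specularity=0.05, roughness=0.2)
    #     demo.addMaterial(material='module_glass', Rrefl=0.96, Grefl=0.96,
    #                      Brefl=0.96, materialtype='glass')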
def exportTrackerDict(self, trackerdict=None,
savefile=None, reindex=None):
"""
Use :py:func:`~bifacial_radiance.load._exportTrackerDict` to save a
TrackerDict output as a csv file.
Parameters
----------
trackerdict
The tracker dictionary to save
savefile : str
path to .csv save file location
reindex : bool
True saves the trackerdict in TMY format, including rows for hours
where there is no sun/irradiance results (empty)
"""
import bifacial_radiance.load
if trackerdict is None:
trackerdict = self.trackerdict
if savefile is None:
savefile = _interactive_load(title='Select a .csv file to save to')
if reindex is None:
if self.cumulativesky is True:
# don't re-index for cumulativesky,
# which has angles for index
reindex = False
else:
reindex = True
if self.cumulativesky is True and reindex is True:
# don't re-index for cumulativesky,
# which has angles for index
print ("\n Warning: For cumulativesky simulations, exporting the "
"TrackerDict requires reindex = False. Setting reindex = "
"False and proceeding")
reindex = False
bifacial_radiance.load._exportTrackerDict(trackerdict,
savefile,
reindex)
def loadtrackerdict(self, trackerdict=None, fileprefix=None):
"""
Use :py:class:`bifacial_radiance.load._loadtrackerdict`
to browse the results directory and load back any results saved in there.
Parameters
----------
trackerdict
fileprefix : str
"""
from bifacial_radiance.load import loadTrackerDict
if trackerdict is None:
trackerdict = self.trackerdict
(trackerdict, totaldict) = loadTrackerDict(trackerdict, fileprefix)
self.Wm2Front = totaldict['Wm2Front']
self.Wm2Back = totaldict['Wm2Back']
def returnOctFiles(self):
"""
Return files in the root directory with `.oct` extension
Returns
-------
oct_files : list
List of .oct files
"""
oct_files = [f for f in os.listdir(self.path) if f.endswith('.oct')]
#self.oct_files = oct_files
return oct_files
def returnMaterialFiles(self, material_path=None):
"""
Return files in the Materials directory with .rad extension
appends materials files to the oconv file list
Parameters
----------
material_path : str
Optional parameter to point to a specific materials directory.
otherwise /materials/ is default
Returns
-------
material_files : list
List of .rad files
"""
if material_path is None:
material_path = 'materials'
material_files = [f for f in os.listdir(os.path.join(self.path,
material_path)) if f.endswith('.rad')]
materialfilelist = [os.path.join(material_path, f) for f in material_files]
self.materialfiles = materialfilelist
return materialfilelist
def setGround(self, material=None, material_file=None):
"""
Use GroundObj constructor class and return a ground object
Parameters
------------
material : numeric or str
If number between 0 and 1 is passed, albedo input is assumed and assigned.
If string is passed with the name of the material desired. e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
self.ground : tuple
self.ground.normval : numeric
Normalized color value
self.ground.ReflAvg : numeric
Average reflectance
"""
if material is None:
try:
if self.metdata.albedo is not None:
material = self.metdata.albedo
print(" Assigned Albedo from metdata.albedo")
except:
pass
self.ground = GroundObj(material, material_file)
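    # Illustrative usage sketch (not part of the original class), assuming a
    # RadianceObj instance named `demo`: the ground can be set from a plain
    # 0-1 albedo value or from a named material in materials/ground.rad
    # (e.g. the bundled 'litesoil'):
    #     demo.setGround(0.2)          # numeric value treated as albedo
    #     demo.setGround('litesoil')   # material searched in ground.rad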
def getEPW(self, lat=None, lon=None, GetAll=False):
"""
Subroutine to download nearest epw files to latitude and longitude provided,
into the directory \EPWs\
based on github/aahoo.
.. warning::
verify=False is required to operate within NREL's network.
To avoid noisy warnings, InsecureRequestWarning is disabled.
Currently this function is not working within NREL's network.
Parameters
----------
lat : decimal
Latitude value used to find closest EPW file.
lon : decimal
Longitude value to find closest EPW file.
GetAll : boolean
Download all available files. Note that no epw file will be loaded into memory
"""
import requests, re
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
hdr = {'User-Agent' : "Magic Browser",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
path_to_save = 'EPWs' # create a directory and write the name of directory here
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
def _returnEPWnames():
''' return a dataframe with the name, lat, lon, url of available files'''
r = requests.get('https://github.com/NREL/EnergyPlus/raw/develop/weather/master.geojson', verify=False)
data = r.json() #metadata for available files
#download lat/lon and url details for each .epw file into a dataframe
df = pd.DataFrame({'url':[], 'lat':[], 'lon':[], 'name':[]})
for location in data['features']:
match = re.search(r'href=[\'"]?([^\'" >]+)', location['properties']['epw'])
if match:
url = match.group(1)
name = url[url.rfind('/') + 1:]
lontemp = location['geometry']['coordinates'][0]
lattemp = location['geometry']['coordinates'][1]
dftemp = pd.DataFrame({'url':[url], 'lat':[lattemp], 'lon':[lontemp], 'name':[name]})
#df = df.append(dftemp, ignore_index=True)
df = pd.concat([df, dftemp], ignore_index=True)
return df
def _findClosestEPW(lat, lon, df):
#locate the record with the nearest lat/lon
errorvec = np.sqrt(np.square(df.lat - lat) + np.square(df.lon - lon))
index = errorvec.idxmin()
url = df['url'][index]
name = df['name'][index]
return url, name
def _downloadEPWfile(url, path_to_save, name):
r = requests.get(url, verify=False, headers=hdr)
if r.ok:
filename = os.path.join(path_to_save, name)
# py2 and 3 compatible: binary write, encode text first
with open(filename, 'wb') as f:
f.write(r.text.encode('ascii', 'ignore'))
print(' ... OK!')
else:
print(' connection error status code: %s' %(r.status_code))
r.raise_for_status()
# Get the list of EPW filenames and lat/lon
df = _returnEPWnames()
# find the closest EPW file to the given lat/lon
if (lat is not None) & (lon is not None) & (GetAll is False):
url, name = _findClosestEPW(lat, lon, df)
# download the EPW file to the local drive.
print('Getting weather file: ' + name)
_downloadEPWfile(url, path_to_save, name)
self.epwfile = os.path.join('EPWs', name)
elif GetAll is True:
if input('Downloading ALL EPW files available. OK? [y/n]') == 'y':
# get all of the EPW files
for index, row in df.iterrows():
print('Getting weather file: ' + row['name'])
_downloadEPWfile(row['url'], path_to_save, row['name'])
self.epwfile = None
else:
print('Nothing returned. Proper usage: epwfile = getEPW(lat,lon)')
self.epwfile = None
return self.epwfile
def readWeatherFile(self, weatherFile=None, starttime=None,
endtime=None, label=None, source=None,
coerce_year=None, tz_convert_val=None):
"""
Read either a EPW or a TMY file, calls the functions
:py:class:`~bifacial_radiance.readTMY` or
:py:class:`~bifacial_radiance.readEPW`
according to the weather file extension.
Parameters
----------
weatherFile : str
File containing the weather information. EPW, TMY or solargis accepted.
starttime : str
Limited start time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
endtime : str
Limited end time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
daydate : str DEPRECATED
For single day in 'MM/DD' or MM_DD format. Now use starttime and
endtime set to the same date.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
source : str
To help identify different types of .csv files. If None, it assumes
it is TMY3-style formatted data. Current options: 'TMY3',
'solargis', 'EPW'
coerce_year : int
Year to coerce weather data to in YYYY format, ie 2021.
If more than one year of data in the weather file, year is NOT coerced.
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
"""
#from datetime import datetime
import warnings
if weatherFile is None:
if hasattr(self,'epwfile'):
weatherFile = self.epwfile
else:
try:
weatherFile = _interactive_load('Select EPW or TMY3 climate file')
except:
raise Exception('Interactive load failed. Tkinter not supported '+
'on this system. Try installing X-Quartz and reloading')
if coerce_year is not None:
coerce_year = int(coerce_year)
if str(coerce_year).__len__() != 4:
warnings.warn('Incorrect coerce_year. Setting to None')
coerce_year = None
def _parseTimes(t, hour, coerce_year):
'''
parse time input t which could be string mm_dd_HH or YYYY-mm-dd_HHMM
or datetime.datetime object. Return pd.datetime object. Define
hour as hour input if not passed directly.
'''
import re
if type(t) == str:
try:
tsplit = re.split('-|_| ', t)
#mm_dd format
if tsplit.__len__() == 2 and t.__len__() == 5:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
tsplit.append(str(hour).rjust(2,'0')+'00')
#mm_dd_hh or YYYY_mm_dd format
elif tsplit.__len__() == 3 :
if tsplit[0].__len__() == 2:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
elif tsplit[0].__len__() == 4:
tsplit.append(str(hour).rjust(2,'0')+'00')
#YYYY-mm-dd_HHMM format
if tsplit.__len__() == 4 and tsplit[0].__len__() == 4:
t_out = pd.to_datetime(''.join(tsplit).ljust(12,'0') )
else:
raise Exception(f'incorrect time string passed {t}. '
'Valid options: mm_dd, mm_dd_HH, '
'mm_dd_HHMM, YYYY-mm-dd_HHMM')
except Exception as e:
# Error for incorrect string passed:
raise(e)
else: #datetime or timestamp
try:
t_out = pd.to_datetime(t)
except pd.errors.ParserError:
print('incorrect time object passed. Valid options: '
'string or datetime.datetime or pd.timeIndex. You '
f'passed {type(t)}.')
return t_out, coerce_year
# end _parseTimes
def _tz_convert(metdata, metadata, tz_convert_val):
"""
convert metdata to a different local timzone. Particularly for
SolarGIS weather files which are returned in UTC by default.
----------
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
Returns: metdata, metadata
"""
import pytz
if (type(tz_convert_val) == int) | (type(tz_convert_val) == float):
metadata['TZ'] = tz_convert_val
metdata = metdata.tz_convert(pytz.FixedOffset(tz_convert_val*60))
return metdata, metadata
# end _tz_convert
if source is None:
if weatherFile[-3:].lower() == 'epw':
source = 'EPW'
else:
print('Warning: CSV file passed for input. Assuming it is TMY3-'+
'style format')
source = 'TMY3'
if label is None:
label = 'right' # EPW and TMY are by default right-labeled.
if source.lower() == 'solargis':
if label is None:
label = 'center'
metdata, metadata = self._readSOLARGIS(weatherFile, label=label)
if source.lower() =='epw':
metdata, metadata = self._readEPW(weatherFile, label=label)
if source.lower() =='tmy3':
metdata, metadata = self._readTMY(weatherFile, label=label)
metdata, metadata = _tz_convert(metdata, metadata, tz_convert_val)
tzinfo = metdata.index.tzinfo
tempMetDatatitle = 'metdata_temp.csv'
# Parse the start and endtime strings.
if starttime is not None:
starttime, coerce_year = _parseTimes(starttime, 1, coerce_year)
starttime = starttime.tz_localize(tzinfo)
if endtime is not None:
endtime, coerce_year = _parseTimes(endtime, 23, coerce_year)
endtime = endtime.tz_localize(tzinfo)
'''
#TODO: do we really need this check?
if coerce_year is not None and starttime is not None:
if coerce_year != starttime.year or coerce_year != endtime.year:
print("Warning: Coerce year does not match requested sampled "+
"date(s)'s years. Setting Coerce year to None.")
coerce_year = None
'''
tmydata_trunc = self._saveTempTMY(metdata, filename=tempMetDatatitle,
starttime=starttime, endtime=endtime,
coerce_year=coerce_year,
label=label)
if tmydata_trunc.__len__() > 0:
self.metdata = MetObj(tmydata_trunc, metadata, label = label)
else:
self.metdata = None
raise Exception('Weather file returned zero points for the '
'starttime / endtime provided')
return self.metdata
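    # Illustrative usage sketch (not part of the original class), assuming a
    # RadianceObj instance named `demo`; coordinates and dates are placeholders.
    # getEPW downloads the nearest EPW file, and the starttime/endtime strings
    # below use the 'mm_dd' form accepted by _parseTimes:
    #     epwfile = demo.getEPW(lat=37.5, lon=-77.6)
    #     metdata = demo.readWeatherFile(epwfile, starttime='06_21',
    #                                    endtime='06_21', coerce_year=2021)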
def _saveTempTMY(self, tmydata, filename=None, starttime=None, endtime=None,
coerce_year=None, label=None):
'''
private function to save part or all of tmydata into /EPWs/ for use
in gencumsky -G mode and return truncated tmydata. Gencumsky 8760
starts with Jan 1, 1AM and ends Dec 31, 2400
starttime: tz-localized pd.TimeIndex
endtime: tz-localized pd.TimeIndex
returns: tmydata_truncated : subset of tmydata based on start & end
'''
if filename is None:
filename = 'temp.csv'
gencumskydata = None
gencumdict = None
if len(tmydata) == 8760:
print("8760 line in WeatherFile. Assuming this is a standard hourly"+
" WeatherFile for the year for purposes of saving Gencumulativesky"+
" temporary weather files in EPW folder.")
if coerce_year is None and starttime is not None:
coerce_year = starttime.year
# SILVANA: If user doesn't pass starttime, and doesn't select
# coerce_year, then do we really need to coerce it?
elif coerce_year is None:
coerce_year = 2021
print(f"Coercing year to {coerce_year}")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# Correcting last index to next year.
tmydata.index.values[-1] = tmydata.index[-1] + pd.DateOffset(year=(coerce_year+1))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
else:
if len(tmydata.index.year.unique()) == 1:
if coerce_year:
# TODO: check why subhourly data still has 0 entries on the next day on _readTMY3
# in the meantime, let's make Silvana's life easy by just deleting 0 entries
tmydata = tmydata[~(tmydata.index.hour == 0)]
print(f"Coercing year to {coerce_year}")
# TODO: this coercing shows a python warning. Turn it off or find another method? bleh.
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
if coerce_year:
print("More than 1 year of data identified. Can't do coercing")
# Check if years are consecutive
l = list(tmydata.index.year.unique())
if l != list(range(min(l), max(l)+1)):
print("Years are not consecutive. Won't be able to use Gencumsky"+
" because who knows what's going on with this data.")
else:
print("Years are consecutive. For Gencumsky, make sure to select"+
" which yearly temporary weather file you want to use"+
" else they will all get accumulated to same hour/day")
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata = tmydata[filterdates] # Reducing years potentially
# Checking if filtering reduced to just 1 year to do the usual saving.
if len(tmydata.index.year.unique()) == 1:
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
gencumdict = [g for n, g in tmydata.groupby(pd.Grouper(freq='Y'))]
for ii in range(0, len(gencumdict)):
gencumskydata = gencumdict[ii]
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
gencumdict[ii] = gencumskydata
gencumskydata = None # clearing so that the dictionary style can be activated.
# Let's save files in EPWs folder for Gencumsky
if gencumskydata is not None:
csvfile = os.path.join('EPWs', filename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile = csvfile
if gencumdict is not None:
self.gencumsky_metfile = []
for ii in range (0, len(gencumdict)):
gencumskydata = gencumdict[ii]
newfilename = filename.split('.')[0]+'_year_'+str(ii)+'.csv'
csvfile = os.path.join('EPWs', newfilename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile.append(csvfile)
return tmydata
def _readTMY(self, tmyfile=None, label = 'right', coerce_year=None):
'''
use pvlib to read in a tmy3 file.
Note: pvlib 0.7 does not currently support sub-hourly files. Until
then, use _readTMYdate() to create the index
Parameters
------------
tmyfile : str
Filename of tmy3 to be read with pvlib.tmy.readtmy3
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce to. Default is 2021.
Returns
-------
metdata - MetObj collected from TMY3 file
'''
def _convertTMYdate(data, meta):
''' requires pvlib 0.8, updated to handle subhourly timestamps '''
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data['Date (MM/DD/YYYY)'])
# shift the time column so that midnite is 00:00 instead of 24:00
shifted_hour = data['Time (HH:MM)'].str[:2].astype(int) % 24
minute = data['Time (HH:MM)'].str[3:].astype(int)
# shift the dates at midnite so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet as fp
except ImportError: # pragma: no cover
fp = None
from .... import dataframe as md
from .... import tensor as mt
from ...datasource.read_csv import DataFrameReadCSV
from ...datasource.read_sql import DataFrameReadSQL
from ...datasource.read_parquet import DataFrameReadParquet
@pytest.mark.parametrize('chunk_size', [2, (2, 3)])
def test_set_index(setup, chunk_size):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=chunk_size)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, df3.execute().fetch())
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, df4.execute().fetch())
expected = df1.set_index('y')
df2.set_index('y', inplace=True)
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
def test_iloc_getitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
result = df3.execute(extra_config={'check_series_name': False}).fetch()
pd.testing.assert_series_equal(
expected, result)
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, df4.execute().fetch())
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, df5.execute().fetch())
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df6.execute().fetch())
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df7.execute().fetch())
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df8.execute().fetch())
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df9.execute().fetch())
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, df10.execute().fetch())
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
assert expected == df11.execute().fetch()
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, df12.execute().fetch())
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, df14.execute().fetch())
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, df13.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
assert series.execute().fetch() == data.iloc[4]
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# test index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)[:3]
pd.testing.assert_index_equal(
index.execute().fetch(), data[:3])
index = md.Index(data, chunk_size=3)[4]
assert index.execute().fetch() == data[4]
index = md.Index(data, chunk_size=3)[[2, 3, 4, 9]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[2, 3, 4, 9]])
index = md.Index(data, chunk_size=3)[[4, 3, 9, 2]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[4, 3, 9, 2]])
index = md.Index(data)[5:]
pd.testing.assert_index_equal(
index.execute().fetch(), data[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
index = md.Index(data)[selection]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
index = md.Index(data)[mt.tensor(selection, chunk_size=4)]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
def test_iloc_setitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
series.execute().fetch(), data)
# test Index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)
with pytest.raises(TypeError):
index[5:] = 4
def test_loc_getitem(setup):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non unique and non monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
raw6 = raw1[:0]
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df6 = md.DataFrame(raw6)
df = df2.loc[3, 'b']
result = df.execute().fetch()
expected = raw2.loc[3, 'b']
assert result == expected
df = df1.loc['a3', 'b']
result = df.execute(extra_config={'check_shape': False}).fetch()
expected = raw1.loc['a3', 'b']
assert result == expected
# test empty list
df = df1.loc[[]]
result = df.execute().fetch()
expected = raw1.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[[]]
result = df.execute().fetch()
expected = raw2.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[1:4, 'b':'d']
result = df.execute().fetch()
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = df.execute().fetch()
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = df.execute().fetch()
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = df.execute().fetch()
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = df.execute().fetch()
expected = raw3.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# 'b' is non-unique, and non-monotonic
df = df4.loc[:, 'b']
result = df.execute().fetch()
expected = raw4.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# label on axis 0
df = df1.loc['a2', :]
result = df.execute().fetch()
expected = raw1.loc['a2', :]
pd.testing.assert_series_equal(result, expected)
# label-based fancy index
df = df2.loc[[3, 0, 1], ['c', 'a', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[3, 0, 1], ['c', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index, asc sorted
df = df2.loc[[0, 1, 3], ['a', 'c', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[0, 1, 3], ['a', 'c', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index in which non-unique exists
selection = rs.randint(2, size=(5,), dtype=bool)
df = df3.loc[selection, ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
df = df3.loc[md.Series(selection), ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index on index
# whose index_value does not have value
df = df1.loc[['a3', 'a1'], ['b', 'a', 'd']]
result = df.execute(extra_config={'check_nsplits': False}).fetch()
expected = raw1.loc[['a3', 'a1'], ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# get timestamp by str
df = df5.loc['20200101']
result = df.execute(extra_config={'check_series_name': False}).fetch(
extra_config={'check_series_name': False})
expected = raw5.loc['20200101']
pd.testing.assert_series_equal(result, expected)
# get timestamp by str, return scalar
df = df5.loc['2020-1-1', 'c']
result = df.execute().fetch()
expected = raw5.loc['2020-1-1', 'c']
assert result == expected
# test empty df
df = df6.loc[[]]
result = df.execute().fetch()
expected = raw6.loc[[]]
pd.testing.assert_frame_equal(result, expected)
def test_dataframe_getitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
data2 = data.copy()
data2.index = pd.date_range('2020-1-1', periods=10)
mdf = md.DataFrame(data2, chunk_size=3)
series1 = df['c2']
pd.testing.assert_series_equal(
series1.execute().fetch(), data['c2'])
series2 = df['c5']
pd.testing.assert_series_equal(
series2.execute().fetch(), data['c5'])
df1 = df[['c1', 'c2', 'c3']]
pd.testing.assert_frame_equal(
df1.execute().fetch(), data[['c1', 'c2', 'c3']])
df2 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df2.execute().fetch(), data[['c3', 'c2', 'c1']])
df3 = df[['c1']]
pd.testing.assert_frame_equal(
df3.execute().fetch(), data[['c1']])
df4 = df[['c3', 'c1', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df4.execute().fetch(), data[['c3', 'c1', 'c2', 'c1']])
df5 = df[np.array(['c1', 'c2', 'c3'])]
pd.testing.assert_frame_equal(
df5.execute().fetch(), data[['c1', 'c2', 'c3']])
df6 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df6.execute().fetch(), data[['c3', 'c2', 'c1']])
df7 = df[1:7:2]
pd.testing.assert_frame_equal(
df7.execute().fetch(), data[1:7:2])
series3 = df['c1'][0]
assert series3.execute().fetch() == data['c1'][0]
df8 = mdf[3:7]
pd.testing.assert_frame_equal(
df8.execute().fetch(), data2[3:7])
df9 = mdf['2020-1-2': '2020-1-5']
pd.testing.assert_frame_equal(
df9.execute().fetch(), data2['2020-1-2': '2020-1-5'])
def test_dataframe_getitem_bool(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data = data.c1 > 0.5
mask = md.Series(mask_data, chunk_size=2)
# getitem by mars series
assert df[mask].execute().fetch().shape == data[mask_data].shape
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by pandas series
pd.testing.assert_frame_equal(
df[mask_data].execute().fetch(), data[mask_data])
# getitem by mars series with alignment but no shuffle
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=range(9, -1, -1))
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by mars series with shuffle alignment
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by mars series with shuffle alignment and extra element
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True, False],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4, 10])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by DataFrame with all bool columns
r = df[df > 0.5]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data > 0.5])
# getitem by tensor mask
r = df[(df['c1'] > 0.5).to_tensor()]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data['c1'] > 0.5])
def test_dataframe_getitem_using_attr(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'key', 'dtypes', 'size'])
df = md.DataFrame(data, chunk_size=2)
series1 = df.c2
pd.testing.assert_series_equal(
series1.execute().fetch(), data.c2)
# accessing column using attribute shouldn't overwrite existing attributes
assert df.key == getattr(getattr(df, '_data'), '_key')
assert df.size == data.size
pd.testing.assert_series_equal(df.dtypes, data.dtypes)
# accessing non-existing attributes should trigger exception
with pytest.raises(AttributeError):
_ = df.zzz # noqa: F841
def test_series_getitem(setup):
data = pd.Series(np.random.rand(10))
series = md.Series(data)
assert series[1].execute().fetch() == data[1]
data = pd.Series(np.random.rand(10), name='a')
series = md.Series(data, chunk_size=4)
for i in range(10):
series1 = series[i]
assert series1.execute().fetch() == data[i]
series2 = series[[0, 1, 2, 3, 4]]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[[0, 1, 2, 3, 4]])
series3 = series[[4, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[[4, 3, 2, 1, 0]])
series4 = series[[1, 2, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[[1, 2, 3, 2, 1, 0]])
# string-indexed Series
index = ['i' + str(i) for i in range(20)]
data = pd.Series(np.random.rand(20), index=index, name='a')
series = md.Series(data, chunk_size=3)
for idx in index:
series1 = series[idx]
assert series1.execute().fetch() == data[idx]
selected = ['i1', 'i2', 'i3', 'i4', 'i5']
series2 = series[selected]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[selected])
selected = ['i4', 'i7', 'i0', 'i1', 'i5']
series3 = series[selected]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[selected])
selected = ['i0', 'i1', 'i5', 'i4', 'i0', 'i1']
series4 = series[selected]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[selected])
selected = ['i0']
series5 = series[selected]
pd.testing.assert_series_equal(
series5.execute().fetch(), data[selected])
data = pd.Series(np.random.rand(10,))
series = md.Series(data, chunk_size=3)
selected = series[:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:2])
selected = series[2:8:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[2:8:2])
data = pd.Series(np.random.rand(9), index=['c' + str(i) for i in range(9)])
series = md.Series(data, chunk_size=3)
selected = series[:'c2']
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:'c2'])
selected = series['c2':'c9']
pd.testing.assert_series_equal(
selected.execute().fetch(), data['c2':'c9'])
def test_head(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.head().execute().fetch(), data.head())
pd.testing.assert_frame_equal(
df.head(3).execute().fetch(), data.head(3))
pd.testing.assert_frame_equal(
df.head(-3).execute().fetch(), data.head(-3))
pd.testing.assert_frame_equal(
df.head(8).execute().fetch(), data.head(8))
pd.testing.assert_frame_equal(
df.head(-8).execute().fetch(), data.head(-8))
pd.testing.assert_frame_equal(
df.head(13).execute().fetch(), data.head(13))
pd.testing.assert_frame_equal(
df.head(-13).execute().fetch(), data.head(-13))
def test_tail(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.tail().execute().fetch(), data.tail())
pd.testing.assert_frame_equal(
df.tail(3).execute().fetch(), data.tail(3))
pd.testing.assert_frame_equal(
df.tail(-3).execute().fetch(), data.tail(-3))
pd.testing.assert_frame_equal(
df.tail(8).execute().fetch(), data.tail(8))
pd.testing.assert_frame_equal(
df.tail(-8).execute().fetch(), data.tail(-8))
pd.testing.assert_frame_equal(
df.tail(13).execute().fetch(), data.tail(13))
pd.testing.assert_frame_equal(
df.tail(-13).execute().fetch(), data.tail(-13))
def test_at(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
data2 = data.copy()
data2.index = np.arange(10)
df2 = md.DataFrame(data2, chunk_size=3)
with pytest.raises(ValueError):
_ = df.at[['i3, i4'], 'c1']
result = df.at['i3', 'c1'].execute().fetch()
assert result == data.at['i3', 'c1']
result = df['c1'].at['i2'].execute().fetch()
assert result == data['c1'].at['i2']
result = df2.at[3, 'c2'].execute().fetch()
assert result == data2.at[3, 'c2']
result = df2.loc[3].at['c2'].execute().fetch()
assert result == data2.loc[3].at['c2']
def test_iat(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
with pytest.raises(ValueError):
_ = df.iat[[1, 2], 3]
result = df.iat[3, 4].execute().fetch()
assert result == data.iat[3, 4]
result = df.iloc[:, 2].iat[3].execute().fetch()
assert result == data.iloc[:, 2].iat[3]
def test_setitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
data2 = np.random.rand(10)
data3 = np.random.rand(10, 2)
df = md.DataFrame(data, chunk_size=3)
df['c3'] = df['c3'] + 1
df['c10'] = 10
df[4] = mt.tensor(data2, chunk_size=4)
df['d1'] = df['c4'].mean()
df['e1'] = data2 * 2
result = df.execute().fetch()
expected = data.copy()
expected['c3'] = expected['c3'] + 1
expected['c10'] = 10
expected[4] = data2
expected['d1'] = data['c4'].mean()
expected['e1'] = data2 * 2
pd.testing.assert_frame_equal(result, expected)
# test set multiple cols with scalar
df = md.DataFrame(data, chunk_size=3)
df[['c0', 'c2']] = 1
df[['c1', 'c10']] = df['c4'].mean()
df[['c11', 'c12']] = mt.tensor(data3, chunk_size=4)
result = df.execute().fetch()
expected = data.copy()
expected[['c0', 'c2']] = 1
expected[['c1', 'c10']] = expected['c4'].mean()
expected[['c11', 'c12']] = data3
pd.testing.assert_frame_equal(result, expected)
# test set multiple rows
df = md.DataFrame(data, chunk_size=3)
df[['c1', 'c4', 'c10']] = df[['c2', 'c3', 'c4']] * 2
result = df.execute().fetch()
expected = data.copy()
expected[['c1', 'c4', 'c10']] = expected[['c2', 'c3', 'c4']] * 2
pd.testing.assert_frame_equal(result, expected)
# test setitem into empty DataFrame
df = md.DataFrame()
df['a'] = md.Series(np.arange(1, 11), chunk_size=3)
pd.testing.assert_index_equal(df.index_value.to_pandas(),
pd.RangeIndex(10))
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
# Boiling
filename = '009Boiling-2018-02-15-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data1 = pd.read_csv(data, delimiter = ',')
filename = '010Boiling-2018-02-16-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data2 = pd.read_csv(data, delimiter = ',')
filename = '023Boiling-2018-03-01.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data3 = pd.read_csv(data, delimiter = ',')
boiling = pd.concat([data1, data2, data3], ignore_index=True)
boiling = boiling.fillna(value=0)
# Candle Burning
filename1 = '001Indoor-Hoover-Candle-2018-02-06-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename1)
data = pd.read_csv(data, delimiter = ',')
candle = data[data['label'] == 5]
candle = candle.reset_index()
candle = candle.fillna(value=0)
# Frying
filename1 = '006Frying-2018-02-07.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename1)
frying_data1 = pd.read_csv(data, delimiter = ',')
filename1 = '007Frying-2018-02-01.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename1)
frying_data2 = pd.read_csv(data, delimiter = ',')
filename1 = '005Frying-Outdoor-2018-02-13-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename1)
frying_data3 = pd.read_csv(data, delimiter = ',')
frying_data3 = frying_data3[frying_data3['label'] == 2]
frying = pd.concat([frying_data1, frying_data2, frying_data3], ignore_index=True)
frying = frying.fillna(value=0)
# Hoovering
filename1 = '001Indoor-Hoover-Candle-2018-02-06-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename1)
data1 = pd.read_csv(data, delimiter = ',')
filename1 = '020Hoover-2018-02-27.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename1)
data2 = pd.read_csv(data, delimiter = ',')
data1 = data1[data1['label'] == 4]
hoovering = pd.concat([data1, data2],ignore_index=True)
hoovering = hoovering.fillna(value=0)
# Smoking
filename1 = '008Smoking_Inside-2018-02-07.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename1)
smoking_inside_data = pd.read_csv(data, delimiter = ',')
smoking_inside_data = smoking_inside_data[smoking_inside_data['bin0'] > 2500]
filename3 = '017Smoking_Inside-2018-02-28.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename3)
smoking_inside_data1 = pd.read_csv(data, delimiter = ',')
smoking_inside_data1 = smoking_inside_data1[smoking_inside_data1['bin0'] > 2500]
smoking = pd.concat([smoking_inside_data, smoking_inside_data1], ignore_index=True)
smoking = smoking.fillna(value=0)
# Spraying
filename = '011Spray-2018-02-16-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data1 = pd.read_csv(data, delimiter = ',')
filename = '018Spray-2018-03-01.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data2 = pd.read_csv(data, delimiter = ',')
filename = '019Spray-2018-02-28.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data3 = pd.read_csv(data, delimiter = ',')
spraying = pd.concat([data1, data2, data3], ignore_index=True)
spraying = spraying.fillna(value=0)
# Indoor Clean
filename = '001Indoor-Hoover-Candle-2018-02-06-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data1 = pd.read_csv(data, delimiter = ',')
data1 = data1[(data1['label'] == 0) & (data1['bin0'] < 100)]
filename = '002Indoor-Outdoor-2018-02-06-labeled.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data2 = pd.read_csv(data, delimiter = ',')
data2 = data2[(data2['label'] == 0) & (data2['bin0'] < 100)]
filename = '013Indoor-Day-2018-02-26.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data3 = pd.read_csv(data, delimiter = ',')
filename = '014Indoor-Night-2018-02-26.csv'
data = os.path.join(os.getcwd(), 'api_vlad/data/new/', filename)
data4 = pd.read_csv(data, delimiter = ',')
indoor = pd.concat([data1, data2, data3, data4], ignore_index=True)
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
import datetime
# from datetime import datetime
dire = '../../data/'
start = datetime.datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import geopandas as gpd
import multiprocessing as mp
import re
from typing import List
from enums import Properties, Organization
from os import path
def __add_features_to_geo_dataframe(df):
df["geometry"] = df.geometry.simplify(tolerance=0.01, preserve_topology=True)
return df
def __add_features_to_huc_gap_dataframe(df):
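    # Decode property values to enum names, zero-pad HUC12 codes, and compute
    # how long each monitoring gap lasted from its Start/Finish timestamps.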
df["PropertyName"] = df.apply(lambda row: Properties(row.PropertyValue).name, axis = 1)
df["HUC12"] = df.apply(lambda row: f"0{row.HUC12_}", axis = 1)
df["Start"] = pd.to_datetime(df["Start"])
df["Finish"] = pd.to_datetime(df["Finish"])
df["Elapsed"] = df.apply(lambda row: row.Finish - row.Start, axis = 1)
return df
def __add_features_to_station_gap_dataframe(df):
df["PropertyName"] = df.apply(lambda row: Properties(row.PropertyValue).name, axis = 1)
df["Start"] = pd.to_datetime(df["Start"])
df["Finish"] = pd.to_datetime(df["Finish"])
df["Elapsed"] = df.apply(lambda row: row.Finish - row.Start, axis = 1)
return df
def __add_features_to_water_dataframe(df):
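    # Map CBP/CMC parameter names onto a shared Property enum, combine Date and
    # Time into a single DateTime column, and tag each row with its source Organization.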
df["Property"] = df.apply(lambda row: int(__get_common_prop(row.ParameterName_CBP, row.ParameterName_CMC).value), axis = 1)
df["DateTime"] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
df["Organization"] = df.apply(lambda row: int(Organization.CMC.value) if row.Database == "CMC" else int(Organization.CBP.value) , axis = 1)
return df
def __create_dataframes():
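    # Build HUC12-level and station-level coverage-gap tables from the water data,
    # attach station/geography metadata, and export the HUC12 gaps to CSV.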
water_df = load_water_dataframe()
geo_df = load_geo_dataframe()
start = min(water_df["DateTime"])
end = max(water_df["DateTime"])
join_df = water_df[["Station", "StationCode", "StationName", "Latitude", "Longitude", "HUC12_", "HUCNAME_", "COUNTY_", "STATE_", "Organization"]]
huc_gaps_df = __create_dataframe_from_gaps(water_df, "HUC12_", geo_df["HUC12"], start, end, __add_features_to_huc_gap_dataframe)
huc_join_df = join_df.groupby(["HUC12_"]).first().reset_index()
huc_gaps_df = pd.merge(huc_gaps_df, huc_join_df, on="HUC12_", how="left")
huc_gaps_df["Organization"] = huc_gaps_df["Organization"].fillna(0)
huc_gaps_df["Organization"] = huc_gaps_df["Organization"].astype(int)
huc_gaps_df = huc_gaps_df.rename(columns={
"HUC12_": "HUC12",
"HUCNAME_": "HUCName",
"STATE_": "State",
"COUNTY_": "County"
})
huc_gaps_df.to_csv("../data/huc12_gaps.csv")
codes = water_df["StationCode"].unique()
codes = [c for c in codes if str(c) != "nan"]
station_gaps_df = __create_dataframe_from_gaps(water_df, "StationCode", codes, start, end, __add_features_to_station_gap_dataframe)
station_join_df = join_df.groupby(["StationCode"]).first().reset_index()
    station_gaps_df = pd.merge(station_gaps_df, station_join_df, on="StationCode", how="left")
import pandas as pd
from app import cache
from model.ynab_api import api
@cache.memoize()
def get_categorized_transactions(budget_id, hierarchy, category_names):
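    # Join raw transactions to the category hierarchy and attach the parent
    # category name for each transaction.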
transactions_df = api.get_transactions(budget_id)
transactions_df = pd.merge(transactions_df, hierarchy,
left_on="category_id",
right_on="category_id",
how="left")
transactions_df = pd.merge(transactions_df, category_names,
left_on="parent_category_id",
right_on="cat_id",
how="left").rename(columns={'cat_name': 'parent_category_name'})
return transactions_df
@cache.memoize()
def get_categorized_budgets(budget_id):
simple_categories, bottom_up_dict, category_names, hierarchy = api.get_simple_categories(budget_id)
month_budgets = api.get_complete_budget_months(budget_id)
month_budgets = pd.merge(month_budgets, hierarchy,
left_on="id",
right_on="category_id",
how="left")
month_budgets = pd.merge(month_budgets, category_names,
left_on="parent_category_id",
right_on="cat_id",
how="left").rename(columns={'cat_name': 'parent_category_name'})
return month_budgets
def get_budget_by_name(name):
budgets = api.get_budgets()
for budget in budgets:
if budget['name'] == name:
return budget
@cache.memoize()
def get_simple_categories(budget_id, unhide=True):
category_groups = api.get_categories(budget_id)
simple_categories = {}
bottom_up_dict = {}
for group in category_groups:
simple_categories[group['id']] = {'name': group['name'], 'sub_categories': {}}
for subcat in group['categories']:
if group['name'] == "Hidden Categories" and unhide:
continue
else:
simple_categories[group['id']]['sub_categories'][subcat['id']] = {'name': subcat['name']}
bottom_up_dict[subcat['id']] = group['id']
if unhide:
for subcat in group['categories']:
if group['name'] == "Hidden Categories":
simple_categories[subcat['original_category_group_id']]['sub_categories'][subcat['id']] = {
'name': subcat['name']}
bottom_up_dict[subcat['id']] = subcat['original_category_group_id']
category_names = pd.DataFrame([[k, v] for d in [
{**{sub_cat: v2['name'] for sub_cat, v2 in v['sub_categories'].items()}, **{cat: v['name']}} for cat, v in
simple_categories.items()] for k, v in d.items()], columns=['cat_id', 'cat_name'])
hierarchy = pd.DataFrame([[k, v] for k, v in bottom_up_dict.items()],
columns=["category_id", "parent_category_id"])
return simple_categories, bottom_up_dict, category_names, hierarchy
@cache.memoize()
def get_sub_transactions(transactions_df, hierarchy, category_names):
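    # Expand "Split SubCategory" transactions into one row per sub-transaction,
    # convert amounts from milliunits, and attach parent-category information.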
sub_trans = []
for i, row in transactions_df[transactions_df.category_name == "Split SubCategory"].iterrows():
df = pd.DataFrame(row.subtransactions)
# print(row.date)
df['date'] = row.date
df['account_name'] = row.account_name
sub_trans.append(df[['id', 'date', 'amount', 'category_id', 'category_name', "account_name"]])
if sub_trans:
sub_trans = pd.concat(sub_trans)
sub_trans['amount'] = sub_trans.amount / 1000
sub_trans = pd.merge(sub_trans, hierarchy, left_on="category_id", right_on="category_id", how="left")
sub_trans = pd.merge(sub_trans, category_names, left_on="parent_category_id", right_on="cat_id",
how="left").rename(columns={'cat_name': 'parent_category_name'})
return sub_trans[
['id', 'date', 'amount', 'category_id', 'category_name', 'parent_category_id', 'parent_category_name',
'account_name']]
return pd.DataFrame(
columns=['id', 'date', 'amount', 'category_id', 'category_name', 'parent_category_id', 'parent_category_name',
'account_name'])
@cache.memoize()
def get_category_transactions(budget_id):
simple_categories, bottom_up_dict, category_names, hierarchy = get_simple_categories(budget_id)
transactions_df = get_categorized_transactions(budget_id, hierarchy, category_names)
sub_transactions = get_sub_transactions(transactions_df, hierarchy, category_names)
category_transactions = pd.concat([transactions_df[['id', 'date', 'amount', 'category_id', 'category_name',
'parent_category_id', 'parent_category_name', 'account_name']],
sub_transactions])
return category_transactions
def get_balance_per_category(month_budgets):
return month_budgets[['month', 'balance', 'name']]
def get_balance_per_account(category_transactions, frequency="M"):
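    # Compute a cumulative running balance per account, resampled to the given
    # frequency and forward-filled through periods with no activity.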
accounts = category_transactions.account_name.unique()
running_balances = []
for account in accounts:
df = category_transactions[category_transactions.account_name == account]
df = df.append(pd.DataFrame([[pd.Timestamp.now(), 0, account]], columns=["date", "amount", "account_name"]),
sort=False, ignore_index=True)
df["running_balance"] = df.amount.cumsum()
        df = df.resample(frequency, on='date')[['running_balance', 'account_name']].agg('last')
df = df.fillna(method='ffill')
        running_balances.append(pd.DataFrame(df))
import networkx as nx
import numpy as np
from cleaning import *
import pandas as pd
from networkx.algorithms.community import greedy_modularity_communities
import os
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
import itertools
def centrality(adj , node_dict, type):
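    # Convert the adjacency matrix to a networkx graph and return the requested
    # centrality measure keyed by the node objects in node_dict.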
G = nx.convert_matrix.from_numpy_matrix(adj)
value_dict = {}
return_dict = {}
if type == 'degree':
value_dict = nx.degree_centrality(G)
if type == 'eigenvector':
value_dict = nx.eigenvector_centrality(G)
if type =='katz':
value_dict = nx.katz_centrality(G)
if type == 'closeness':
value_dict = nx.closeness_centrality(G)
if type == 'betweenness':
value_dict = nx.betweenness_centrality(G)
    for (index1, node), (index2, value) in zip(node_dict.items(), value_dict.items()):
return_dict[node] = value
return return_dict
def all_measures( node_dict, adj, iteration, alpha = 0.9):
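    # Assemble a per-node DataFrame of node status, several centrality measures,
    # PageRank, and local clustering coefficients for the given iteration.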
#pool = mp.Pool(mp.cpu_count())
G = nx.convert_matrix.from_numpy_matrix(adj)
df = pd.DataFrame.from_dict(node_dict, orient = 'index')
df.columns = ['Node']
df['iteration'] = iteration
df['status'] = pd.Series([val.status for val in node_dict.values()])
df['degree'] = pd.Series(nx.degree_centrality(G))
df['eigenvector'] = pd.Series(nx.eigenvector_centrality(G))
df['katz'] = pd.Series(nx.katz_centrality_numpy(G))
#df['closeness'] = pd.Series(nx.closeness_centrality(G))
#df['betweenness'] = pd.Series(nx.betweenness_centrality(G))
df['pagerank'] = pd.Series(nx.pagerank(G, alpha))
df['local_clustering_coefficients'] = pd.Series(nx.clustering(G))
return( df)
def all_measures_master(node_dict_list, adj_list, name ):
    master_df = pd.DataFrame()
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
import numpy as np
import pickle
import time
import datetime
import pandas as pd
import Klines
def compute_features(kline_dictionary, btc_price, cmc_frame):
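    # For every symbol, build rolling log-return, volume, and volatility features
    # over several lookback windows, then attach CoinMarketCap metadata (listing
    # age, supply, rank) and the coin's historical pump count.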
pump_list = pd.read_csv("./data/coin-pump.csv")
rm_key = []
for symbol in kline_dictionary.keys():
kline_dictionary[symbol].pdFrame['ret1'] = np.log(kline_dictionary[symbol].pdFrame.Close) - np.log(kline_dictionary[symbol].pdFrame.Close.shift(1))
kline_dictionary[symbol].pdFrame = pd.merge(kline_dictionary[symbol].pdFrame, btc_price, how='inner', left_index=True, right_index=True)
xs = [1, 3, 12, 24, 36, 48, 60, 72]
ys = [3, 12, 24, 36, 48, 60, 72]
for x in xs:
strx = str(x)
ret = "ret" + strx
volf = "volf" + strx
volbtc = "volbtc" + strx
kline_dictionary[symbol].pdFrame[ret] = kline_dictionary[symbol].pdFrame['ret1'].rolling(x).sum()
kline_dictionary[symbol].pdFrame[volf] = kline_dictionary[symbol].pdFrame['Volume'].rolling(x).sum()
kline_dictionary[symbol].pdFrame[volbtc] = (kline_dictionary[symbol].pdFrame['Volume'] * kline_dictionary[symbol].pdFrame['Close']) / kline_dictionary[symbol].pdFrame['btc_price']
for y in ys:
stry = str(y)
vola = "vola" + stry
volavol = "volavol" + stry
rtvol = "rtvol" + stry
kline_dictionary[symbol].pdFrame[vola] = kline_dictionary[symbol].pdFrame['ret1'].rolling(
                y).std()  # A (x ** 0.5) scaling factor is arguably needed here, but the reference R implementation on GitHub omits it, so it is left out for consistency.
kline_dictionary[symbol].pdFrame[volavol] = kline_dictionary[symbol].pdFrame['volf' + str(y)].rolling(y).std()
kline_dictionary[symbol].pdFrame[rtvol] = kline_dictionary[symbol].pdFrame['volbtc' + str(y)].rolling(y).std()
unpaired_token = symbol#[:-3]
pumps = pump_list.loc[pump_list['Coin'] == unpaired_token].sort_values('Date')
ptr = cmc_frame.loc[unpaired_token]
try:
kline_dictionary[symbol].pdFrame['existence_time'] = ((pd.to_datetime(kline_dictionary[symbol].pdFrame.index) - pd.to_datetime(ptr["date_added"]).replace(tzinfo=None)).days)
except KeyError:
rm_key.append(symbol)
continue
except TypeError:
ptr = cmc_frame.loc[unpaired_token].tail(1).iloc[0]
kline_dictionary[symbol].pdFrame['existence_time'] = ((pd.to_datetime(kline_dictionary[symbol].pdFrame.index) - pd.to_datetime(ptr["date_added"]).replace(tzinfo=None)).days)
kline_dictionary[symbol].pdFrame['market_cap'] = ptr["total_supply"]
kline_dictionary[symbol].pdFrame['coin_rating'] = ptr["cmc_rank"]
try:
kline_dictionary[symbol].pdFrame['pump_count'] = pumps.groupby('Coin').size().iloc[0]
except IndexError:
kline_dictionary[symbol].pdFrame['pump_count'] = 0
kline_dictionary[symbol].pdFrame['last_open_price'] = kline_dictionary[symbol].pdFrame['Open'].shift(1)
kline_dictionary[symbol].pdFrame['symbol'] = unpaired_token
kline_dictionary[symbol].pdFrame['label'] = 0
for symbol in rm_key:
del kline_dictionary[symbol]
return rm_key
def update_features(symbol, temp_df, btc_price, cmc_frame):
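    # Recompute the same rolling features as compute_features for a single
    # symbol's latest kline frame, using the current BTC price and CMC metadata.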
pump_list = pd.read_csv("./data/coin-pump.csv")
xs = [1, 3, 12, 24, 36, 48, 60, 72]
ys = [3, 12, 24, 36, 48, 60, 72]
temp_df['ret1'] = np.log(temp_df.Close) - np.log(temp_df.Close.shift(1))
temp_df['btc_price'] = btc_price
for x in xs:
strx = str(x)
ret = "ret" + strx
volf = "volf" + strx
volbtc = "volbtc" + strx
temp_df[ret] = temp_df['ret1'].rolling(x).sum()
temp_df[volf] = temp_df['Volume'].rolling(x).sum()
temp_df[volbtc] = (temp_df['Volume'] * temp_df['Close']) / temp_df['btc_price']
for y in ys:
stry = str(y)
vola = "vola" + stry
volavol = "volavol" + stry
rtvol = "rtvol" + stry
temp_df[vola] = temp_df['ret1'].rolling(y).std()
temp_df[volavol] = temp_df['volf' + str(y)].rolling(y).std()
temp_df[rtvol] = temp_df['volbtc' + str(y)].rolling(y).std()
unpaired_token = symbol[:-3]
pumps = pump_list.loc[pump_list['Coin'] == unpaired_token].sort_values('Date')
    # Listing age in days, mirroring the calculation in compute_features.
    temp_df['existence_time'] = ((pd.to_datetime(temp_df.index) - pd.to_datetime(cmc_frame.loc[unpaired_token]["date_added"]).replace(tzinfo=None)).days)
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
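# The expected values are consistent with weighting by netWeight from mock_index_positions_data:
# day 1 = 0.6*1 + 0.3*2 + 0.1*2 = 1.4, day 2 = 0.6*2 + 0.3*3 + 0.1*5 = 2.6, and on day 3
# (where MA3 reports no data) the remaining weights appear to be re-normalised:
# (0.6*3 + 0.3*4) / 0.9 ≈ 3.33333.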
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
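# Illustrative sketch (not part of gs_quant): a minimal tenor parser in the spirit of
# tm._to_offset, assuming tenors of the form '<int><d|w|m|y>' as exercised by test_td above.
def _example_tenor_to_offset(tenor: str) -> pd.DateOffset:
    import re
    match = re.fullmatch(r'(\d+)([dwmy])', tenor)
    if match is None:
        raise ValueError(f'invalid tenor: {tenor}')
    unit = {'d': 'days', 'w': 'weeks', 'm': 'months', 'y': 'years'}[match.group(2)]
    return pd.DateOffset(**{unit: int(match.group(1))})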
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
import os
import json
import argparse
import numpy as np
import pandas as pd
from scipy import sparse
from joblib import Parallel, delayed
from temprel.models import relevance
from temprel.rdkit import smiles_to_fingerprint, templates_from_smarts_list
from temprel.evaluate.diversity import diversity
from temprel.evaluate.accuracy import accuracy_by_popularity
from temprel.evaluate.topk_appl import topk_appl_recall_and_precision
from temprel.evaluate.reciprocal_rank import reciprocal_rank_by_popularity
def parse_arguments():
parser = argparse.ArgumentParser(description='Test a Morgan fingerprint template relevance network')
parser.add_argument('--test-smiles', dest='test_smiles', default='data/processed/test.input.smiles.npy')
parser.add_argument('--test-labels', dest='test_labels', default='data/processed/test.labels.classes.npy')
parser.add_argument('--test-appl-labels', dest='test_appl_labels', default='data/processed/test.appl_matrix.npz')
parser.add_argument('--train-labels', dest='train_labels', default='data/processed/train.labels.classes.npy')
parser.add_argument('--templates', dest='templates_path')
parser.add_argument('--topk', dest='topk', type=int, default=100)
parser.add_argument('--fp-length', dest='fp_length', type=int, default=2048)
parser.add_argument('--fp-radius', dest='fp_radius', type=int, default=2)
parser.add_argument('--num-hidden', dest='num_hidden', type=int, default=1)
parser.add_argument('--hidden-size', dest='hidden_size', type=int, default=1024)
parser.add_argument('--num-highway', dest='num_highway', type=int, default=0)
parser.add_argument('--activation', dest='activation', default='relu')
parser.add_argument('--model-weights', dest='model_weights', default=None)
parser.add_argument('--batch-size', dest='batch_size', type=int, default=512)
parser.add_argument('--model-name', dest='model_name', default='baseline')
parser.add_argument('--accuracy', dest='accuracy', action='store_true', default=False)
parser.add_argument('--reciprocal-rank', dest='rr', action='store_true', default=False)
parser.add_argument('--topk_appl', dest='topk_appl', action='store_true', default=False)
parser.add_argument('--diversity', dest='diversity', action='store_true', default=False)
parser.add_argument('--nproc', dest='nproc', type=int, default=1)
return parser.parse_args()
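# Hedged usage sketch: a typical invocation of this script might look like
# python evaluate_relevance.py --templates data/processed/templates.df.json.gz \
#     --model-weights train-log/weights.hdf5 --accuracy --reciprocal-rank --nproc 8
# Only the flag names come from parse_arguments() above; the script name and file paths
# are placeholders, not taken from this repository.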
if __name__ == '__main__':
if not os.path.exists('evaluation'):
os.makedirs('evaluation')
args = parse_arguments()
test_smiles = np.load(args.test_smiles)
test_labels = np.load(args.test_labels)
train_labels = np.load(args.train_labels)
if os.path.exists(args.test_appl_labels):
test_appl_labels = sparse.load_npz(args.test_appl_labels)
test_fps = Parallel(n_jobs=args.nproc, verbose=1)(
delayed(smiles_to_fingerprint)(smi, length=args.fp_length, radius=args.fp_radius) for smi in test_smiles
)
test_fps = np.array(test_fps)
templates = pd.read_json(args.templates_path)
model = relevance(
input_shape=(args.fp_length), output_shape=len(templates), num_hidden=args.num_hidden,
hidden_size=args.hidden_size, activation=args.activation, num_highway=args.num_highway
)
model.load_weights(args.model_weights)
if args.accuracy:
acc = accuracy_by_popularity(model, test_fps, test_labels, train_labels, batch_size=args.batch_size)
pd.DataFrame.from_dict(acc, orient='index', columns=model.metrics_names)
#!/usr/bin/python
import os, math
import pandas as pd
import numpy as np
np.random.seed(42)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
torch.manual_seed(42)
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterSampler
def doc_mean_thres(df):
doc_mean = df.mean()
df_bin = 1.0 * (df.values > doc_mean.values)
df_bin = pd.DataFrame(df_bin, columns=df.columns, index=df.index)
return df_bin
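# Hedged usage sketch (not called anywhere in this script): binarising a toy document-term
# matrix with doc_mean_thres; the toy values below are made up purely for illustration.
def _doc_mean_thres_example():
    toy = pd.DataFrame({"term_a": [0, 2, 4], "term_b": [1, 1, 4]},
                       index=["doc1", "doc2", "doc3"])
    # Column means are 2.0 and 2.0, so only values strictly greater than the mean map to 1.0.
    return doc_mean_thres(toy)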
def load_doc_term_matrix(version=190325, binarize=True):
dtm = pd.read_csv("../../../data/text/dtm_{}.csv.gz".format(version), compression="gzip", index_col=0)
if binarize:
dtm = doc_mean_thres(dtm)
return dtm
def load_coordinates():
atlas_labels = pd.read_csv("../../../data/brain/labels.csv")
activations = pd.read_csv("../../../data/brain/coordinates.csv", index_col=0)
from ..multiagentenv import MultiAgentEnv
import numpy as np
import pandapower as pp
from pandapower import ppException
import pandas as pd
import copy
import os
from collections import namedtuple
from .pf_res_plot import pf_res_plotly
from .voltage_barrier.voltage_barrier_backend import VoltageBarrier
def convert(dictionary):
return namedtuple('GenericDict', dictionary.keys())(**dictionary)
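# Hedged usage note: convert({"episode_limit": 240}) returns a namedtuple-style object, so
# downstream code can read args.episode_limit instead of indexing a dict.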
class ActionSpace(object):
def __init__(self, low, high):
self.low = low
self.high = high
class VoltageControl(MultiAgentEnv):
"""this class is for the environment of distributed active voltage control
it is easy to interact with the environment, e.g.,
state, global_state = env.reset()
for t in range(240):
actions = agents.get_actions() # a vector involving all agents' actions
reward, done, info = env.step(actions)
"""
def __init__(self, kwargs):
"""initialisation
"""
# unpack args
args = kwargs
if isinstance(args, dict):
args = convert(args)
self.args = args
# set the data path
self.data_path = args.data_path
# load the model of power network
self.base_powergrid = self._load_network()
# load data
self.pv_data = self._load_pv_data()
self.active_demand_data = self._load_active_demand_data()
self.reactive_demand_data = self._load_reactive_demand_data()
# define episode and rewards
self.episode_limit = args.episode_limit
self.voltage_loss_type = getattr(args, "voltage_loss", "l1")
self.voltage_weight = getattr(args, "voltage_weight", 1.0)
self.q_weight = getattr(args, "q_weight", 0.1)
self.line_weight = getattr(args, "line_weight", None)
self.dv_dq_weight = getattr(args, "dq_dv_weight", None)
# define constraints and uncertainty
self.v_upper = getattr(args, "v_upper", 1.05)
self.v_lower = getattr(args, "v_lower", 0.95)
self.active_demand_std = self.active_demand_data.values.std(axis=0) / 100.0
self.reactive_demand_std = self.reactive_demand_data.values.std(axis=0) / 100.0
self.pv_std = self.pv_data.values.std(axis=0) / 100.0
self._set_reactive_power_boundary()
# define action space and observation space
self.action_space = ActionSpace(low=-self.args.action_scale+self.args.action_bias, high=self.args.action_scale+self.args.action_bias)
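# Descriptive note: agent actions (reactive power setpoints) are drawn from a symmetric
# interval of width 2 * action_scale centred on action_bias.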
self.history = getattr(args, "history", 1)
self.state_space = getattr(args, "state_space", ["pv", "demand", "reactive", "vm_pu", "va_degree"])
if self.args.mode == "distributed":
self.n_actions = 1
self.n_agents = len(self.base_powergrid.sgen)
elif self.args.mode == "decentralised":
self.n_actions = len(self.base_powergrid.sgen)
self.n_agents = len( set( self.base_powergrid.bus["zone"].to_numpy(copy=True) ) ) - 1 # exclude the main zone
agents_obs, state = self.reset()
self.obs_size = agents_obs[0].shape[0]
self.state_size = state.shape[0]
self.last_v = self.powergrid.res_bus["vm_pu"].sort_index().to_numpy(copy=True)
self.last_q = self.powergrid.sgen["q_mvar"].to_numpy(copy=True)
# initialise voltage barrier function
self.voltage_barrier = VoltageBarrier(self.voltage_loss_type)
self._rendering_initialized = False
def reset(self, reset_time=True):
"""reset the env
"""
# reset the time step, cumulative rewards and obs history
self.steps = 1
self.sum_rewards = 0
if self.history > 1:
self.obs_history = {i: [] for i in range(self.n_agents)}
# reset the power grid
self.powergrid = copy.deepcopy(self.base_powergrid)
solvable = False
while not solvable:
# reset the time stamp
if reset_time:
self._episode_start_hour = self._select_start_hour()
self._episode_start_day = self._select_start_day()
self._episode_start_interval = self._select_start_interval()
# get one episode of data
self.pv_histories = self._get_episode_pv_history()
self.active_demand_histories = self._get_episode_active_demand_history()
self.reactive_demand_histories = self._get_episode_reactive_demand_history()
self._set_demand_and_pv()
# random initialise action
if self.args.reset_action:
self.powergrid.sgen["q_mvar"] = self.get_action()
self.powergrid.sgen["q_mvar"] = self._clip_reactive_power(self.powergrid.sgen["q_mvar"], self.powergrid.sgen["p_mw"])
try:
pp.runpp(self.powergrid)
solvable = True
except ppException:
print ("The power flow for the initialisation of demand and PV cannot be solved.")
print (f"This is the pv: \n{self.powergrid.sgen['p_mw']}")
print (f"This is the q: \n{self.powergrid.sgen['q_mvar']}")
print (f"This is the active demand: \n{self.powergrid.load['p_mw']}")
print (f"This is the reactive demand: \n{self.powergrid.load['q_mvar']}")
print (f"This is the res_bus: \n{self.powergrid.res_bus}")
solvable = False
return self.get_obs(), self.get_state()
def manual_reset(self, day, hour, interval):
"""manual reset the initial date
"""
# reset the time step, cumulative rewards and obs history
self.steps = 1
self.sum_rewards = 0
if self.history > 1:
self.obs_history = {i: [] for i in range(self.n_agents)}
# reset the power grid
self.powergrid = copy.deepcopy(self.base_powergrid)
# reset the time stamp
self._episode_start_hour = hour
self._episode_start_day = day
self._episode_start_interval = interval
solvable = False
while not solvable:
# get one episode of data
self.pv_histories = self._get_episode_pv_history()
self.active_demand_histories = self._get_episode_active_demand_history()
self.reactive_demand_histories = self._get_episode_reactive_demand_history()
self._set_demand_and_pv(add_noise=False)
# random initialise action
if self.args.reset_action:
self.powergrid.sgen["q_mvar"] = self.get_action()
self.powergrid.sgen["q_mvar"] = self._clip_reactive_power(self.powergrid.sgen["q_mvar"], self.powergrid.sgen["p_mw"])
try:
pp.runpp(self.powergrid)
solvable = True
except ppException:
print ("The power flow for the initialisation of demand and PV cannot be solved.")
print (f"This is the pv: \n{self.powergrid.sgen['p_mw']}")
print (f"This is the q: \n{self.powergrid.sgen['q_mvar']}")
print (f"This is the active demand: \n{self.powergrid.load['p_mw']}")
print (f"This is the reactive demand: \n{self.powergrid.load['q_mvar']}")
print (f"This is the res_bus: \n{self.powergrid.res_bus}")
solvable = False
return self.get_obs(), self.get_state()
def step(self, actions, add_noise=True):
"""function for the interaction between agent and the env each time step
"""
last_powergrid = copy.deepcopy(self.powergrid)
# check whether the power balance is unsolvable
solvable = self._take_action(actions)
if solvable:
# get the reward of current actions
reward, info = self._calc_reward()
else:
q_loss = np.mean( np.abs(self.powergrid.sgen["q_mvar"]) )
self.powergrid = last_powergrid
reward, info = self._calc_reward()
reward -= 200.
# keep q_loss
info["destroy"] = 1.
info["totally_controllable_ratio"] = 0.
info["q_loss"] = q_loss
# set the pv and demand for the next time step
self._set_demand_and_pv(add_noise=add_noise)
# terminate if episode_limit is reached
self.steps += 1
self.sum_rewards += reward
if self.steps >= self.episode_limit or not solvable:
terminated = True
else:
terminated = False
if terminated:
print (f"Episode terminated at time: {self.steps} with return: {self.sum_rewards:2.4f}.")
return reward, terminated, info
def get_state(self):
"""return the global state for the power system
the default state: voltage, active power of generators, bus state, load active power, load reactive power
"""
state = []
if "demand" in self.state_space:
state += list(self.powergrid.res_bus["p_mw"].sort_index().to_numpy(copy=True))
state += list(self.powergrid.res_bus["q_mvar"].sort_index().to_numpy(copy=True))
if "pv" in self.state_space:
state += list(self.powergrid.sgen["p_mw"].sort_index().to_numpy(copy=True))
if "reactive" in self.state_space:
state += list(self.powergrid.sgen["q_mvar"].sort_index().to_numpy(copy=True))
if "vm_pu" in self.state_space:
state += list(self.powergrid.res_bus["vm_pu"].sort_index().to_numpy(copy=True))
if "va_degree" in self.state_space:
state += list(self.powergrid.res_bus["va_degree"].sort_index().to_numpy(copy=True))
state = np.array(state)
return state
def get_obs(self):
"""return the obs for each agent in the power system
the default obs: voltage, active power of generators, bus state, load active power, load reactive power
each agent can only observe the state within the zone where it belongs
"""
clusters = self._get_clusters_info()
if self.args.mode == "distributed":
obs_zone_dict = dict()
zone_list = list()
obs_len_list = list()
for i in range(len(self.powergrid.sgen)):
obs = list()
zone_buses, zone, pv, q, sgen_bus = clusters[f"sgen{i}"]
zone_list.append(zone)
if not( zone in obs_zone_dict.keys() ):
if "demand" in self.state_space:
copy_zone_buses = copy.deepcopy(zone_buses)
copy_zone_buses.loc[sgen_bus]["p_mw"] -= pv
copy_zone_buses.loc[sgen_bus]["q_mvar"] -= q
obs += list(copy_zone_buses.loc[:, "p_mw"].to_numpy(copy=True))
obs += list(copy_zone_buses.loc[:, "q_mvar"].to_numpy(copy=True))
if "pv" in self.state_space:
obs.append(pv)
if "reactive" in self.state_space:
obs.append(q)
if "vm_pu" in self.state_space:
obs += list(zone_buses.loc[:, "vm_pu"].to_numpy(copy=True))
if "va_degree" in self.state_space:
# transform the voltage phase to radian
obs += list(zone_buses.loc[:, "va_degree"].to_numpy(copy=True) * np.pi / 180)
obs_zone_dict[zone] = np.array(obs)
obs_len_list.append(obs_zone_dict[zone].shape[0])
agents_obs = list()
obs_max_len = max(obs_len_list)
for zone in zone_list:
obs_zone = obs_zone_dict[zone]
pad_obs_zone = np.concatenate( [obs_zone, np.zeros(obs_max_len - obs_zone.shape[0])], axis=0 )
agents_obs.append(pad_obs_zone)
elif self.args.mode == "decentralised":
obs_len_list = list()
zone_obs_list = list()
for i in range(self.n_agents):
zone_buses, pv, q, sgen_buses = clusters[f"zone{i+1}"]
obs = list()
if "demand" in self.state_space:
copy_zone_buses = copy.deepcopy(zone_buses)
copy_zone_buses.loc[sgen_buses]["p_mw"] -= pv
copy_zone_buses.loc[sgen_buses]["q_mvar"] -= q
obs += list(copy_zone_buses.loc[:, "p_mw"].to_numpy(copy=True))
obs += list(copy_zone_buses.loc[:, "q_mvar"].to_numpy(copy=True))
if "pv" in self.state_space:
obs += list(pv.to_numpy(copy=True))
if "reactive" in self.state_space:
obs += list(q.to_numpy(copy=True))
if "vm_pu" in self.state_space:
obs += list(zone_buses.loc[:, "vm_pu"].to_numpy(copy=True))
if "va_degree" in self.state_space:
obs += list(zone_buses.loc[:, "va_degree"].to_numpy(copy=True) * np.pi / 180)
obs = np.array(obs)
zone_obs_list.append(obs)
obs_len_list.append(obs.shape[0])
agents_obs = []
obs_max_len = max(obs_len_list)
for obs_zone in zone_obs_list:
pad_obs_zone = np.concatenate( [obs_zone, np.zeros(obs_max_len - obs_zone.shape[0])], axis=0 )
agents_obs.append(pad_obs_zone)
if self.history > 1:
agents_obs_ = []
for i, obs in enumerate(agents_obs):
if len(self.obs_history[i]) >= self.history - 1:
obs_ = np.concatenate(self.obs_history[i][-self.history+1:]+[obs], axis=0)
else:
zeros = [np.zeros_like(obs)] * ( self.history - len(self.obs_history[i]) - 1 )
obs_ = self.obs_history[i] + [obs]
obs_ = zeros + obs_
obs_ = np.concatenate(obs_, axis=0)
agents_obs_.append(copy.deepcopy(obs_))
self.obs_history[i].append(copy.deepcopy(obs))
agents_obs = agents_obs_
return agents_obs
def get_obs_agent(self, agent_id):
"""return observation for agent_id
"""
agents_obs = self.get_obs()
return agents_obs[agent_id]
def get_obs_size(self):
"""return the observation size
"""
return self.obs_size
def get_state_size(self):
"""return the state size
"""
return self.state_size
def get_action(self):
"""return the action according to a uniform distribution over [action_lower, action_upper)
"""
rand_action = np.random.uniform(low=self.action_space.low, high=self.action_space.high, size=self.powergrid.sgen["q_mvar"].values.shape)
return rand_action
def get_total_actions(self):
"""return the total number of actions an agent could ever take
"""
return self.n_actions
def get_avail_actions(self):
"""return available actions for all agents
"""
avail_actions = []
for agent_id in range(self.n_agents):
avail_actions.append(self.get_avail_agent_actions(agent_id))
return np.expand_dims(np.array(avail_actions), axis=0)
def get_avail_agent_actions(self, agent_id):
""" return the available actions for agent_id
"""
if self.args.mode == "distributed":
return [1]
elif self.args.mode == "decentralised":
avail_actions = np.zeros(self.n_actions)
zone_sgens = self.base_powergrid.sgen.loc[self.base_powergrid.sgen["name"] == f"zone{agent_id+1}"]
avail_actions[zone_sgens.index] = 1
return avail_actions
def get_num_of_agents(self):
"""return the number of agents
"""
return self.n_agents
def _get_voltage(self):
return self.powergrid.res_bus["vm_pu"].sort_index().to_numpy(copy=True)
def _create_basenet(self, base_net):
"""initilization of power grid
set the pandapower net to use
"""
if base_net is None:
raise Exception("Please provide a base_net configured as pandapower format.")
else:
return base_net
def _select_start_hour(self):
"""select start hour for an episode
"""
return np.random.choice(24)
def _select_start_interval(self):
"""select start interval for an episode
"""
return np.random.choice( 60 // self.time_delta )
def _select_start_day(self):
"""select start day (date) for an episode
"""
pv_data = self.pv_data
pv_days = (pv_data.index[-1] - pv_data.index[0]).days
self.time_delta = (pv_data.index[1] - pv_data.index[0]).seconds // 60
episode_days = ( self.episode_limit // (24 * (60 // self.time_delta) ) ) + 1 # margin
return np.random.choice(pv_days - episode_days)
def _load_network(self):
"""load network
"""
network_path = os.path.join(self.data_path, 'model.p')
base_net = pp.from_pickle(network_path)
return self._create_basenet(base_net)
def _load_pv_data(self):
"""load pv data
the sensor frequency is set to 3 or 15 mins as default
"""
pv_path = os.path.join(self.data_path, 'pv_active.csv')
pv = pd.read_csv(pv_path, index_col=None)
pv.index = pd.to_datetime(pv.iloc[:, 0])
pv.index.name = 'time'
pv = pv.iloc[::1, 1:] * self.args.pv_scale
return pv
def _load_active_demand_data(self):
"""load active demand data
the sensor frequency is set to 3 or 15 mins as default
"""
demand_path = os.path.join(self.data_path, 'load_active.csv')
demand = pd.read_csv(demand_path, index_col=None)
demand.index = pd.to_datetime(demand.iloc[:, 0])
demand.index.name = 'time'
demand = demand.iloc[::1, 1:] * self.args.demand_scale
return demand
def _load_reactive_demand_data(self):
"""load reactive demand data
the sensor frequency is set to 3 min as default
"""
demand_path = os.path.join(self.data_path, 'load_reactive.csv')
demand = | pd.read_csv(demand_path, index_col=None) | pandas.read_csv |
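# --- Illustrative sketch (editor's addition, not part of the original snippet above) ---
# get_obs() above zero-pads each zone's observation vector up to the longest zone
# observation so that every agent receives an equally sized input. A minimal,
# self-contained version of that padding step; the variable names are hypothetical
# and only mirror the pattern used above.
import numpy as np

zone_observations = [np.array([0.98, 1.2, 0.4]), np.array([1.01, 0.7])]  # unequal lengths
obs_max_len = max(obs.shape[0] for obs in zone_observations)
padded = [
    np.concatenate([obs, np.zeros(obs_max_len - obs.shape[0])], axis=0)
    for obs in zone_observations
]
# every element of `padded` now has shape (obs_max_len,)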
import pytest
import numpy as np
import pandas as pd
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils import estimator_checks
from sklego.common import flatten
from sklego.preprocessing import ColumnCapper
from tests.conftest import transformer_checks, general_checks
@pytest.mark.parametrize("test_fn", flatten([
transformer_checks,
general_checks,
# nonmeta_checks
estimator_checks.check_estimators_dtypes,
estimator_checks.check_fit_score_takes_y,
estimator_checks.check_dtype_object,
estimator_checks.check_sample_weights_pandas_series,
estimator_checks.check_sample_weights_list,
estimator_checks.check_sample_weights_invariance,
estimator_checks.check_estimators_fit_returns_self,
estimator_checks.check_complex_data,
estimator_checks.check_estimators_empty_data_messages,
estimator_checks.check_pipeline_consistency,
# ColumnCapper works with nan/inf cells
# estimator_checks.check_estimators_nan_inf,
estimator_checks.check_estimators_overwrite_params,
estimator_checks.check_estimator_sparse_data,
estimator_checks.check_estimators_pickle,
]))
def test_estimator_checks(test_fn):
test_fn(ColumnCapper.__name__, ColumnCapper())
def test_quantile_range():
def expect_type_error(quantile_range):
with pytest.raises(TypeError):
ColumnCapper(quantile_range)
def expect_value_error(quantile_range):
with pytest.raises(ValueError):
ColumnCapper(quantile_range)
# Testing quantile_range type
expect_type_error(quantile_range=1)
expect_type_error(quantile_range='a')
expect_type_error(quantile_range={})
expect_type_error(quantile_range=set())
# Testing quantile_range values
# Invalid type:
expect_type_error(quantile_range=('a', 90))
expect_type_error(quantile_range=(10, 'a'))
# Invalid limits
expect_value_error(quantile_range=(-1, 90))
expect_value_error(quantile_range=(10, 110))
# Invalid order
expect_value_error(quantile_range=(60, 40))
def test_interpolation():
valid_interpolations = ('linear', 'lower', 'higher', 'midpoint', 'nearest')
invalid_interpolations = ('test', 42, None, [], {}, set(), .42)
for interpolation in valid_interpolations:
ColumnCapper(interpolation=interpolation)
for interpolation in invalid_interpolations:
with pytest.raises(ValueError):
ColumnCapper(interpolation=interpolation)
@pytest.fixture()
def valid_df():
return pd.DataFrame({'a': [1, np.nan, 3, 4],
'b': [11, 12, np.inf, 14],
'c': [21, 22, 23, 24]})
def test_X_types_and_transformed_shapes(valid_df):
def expect_value_error(X, X_transform=None):
if X_transform is None:
X_transform = X
with pytest.raises(ValueError):
capper = ColumnCapper().fit(X)
capper.transform(X_transform)
# Fitted and transformed arrays must have the same number of columns
expect_value_error(valid_df, valid_df[['a', 'b']])
invalid_dfs = [
| pd.DataFrame({'a': [np.nan, np.nan, np.nan], 'b': [11, 12, 13]}) | pandas.DataFrame |
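# --- Illustrative sketch (editor's addition) ---
# The tests above exercise sklego's ColumnCapper, which clips each column to the
# percentiles given by quantile_range. A minimal usage sketch assuming the same API
# the tests rely on (fit/transform plus a (lower, upper) percentile tuple); with the
# default discard_infs=False, inf cells should be capped while NaN cells pass through.
import numpy as np
import pandas as pd
from sklego.preprocessing import ColumnCapper

df = pd.DataFrame({'a': [1, np.nan, 3, 4], 'b': [11, 12, np.inf, 14], 'c': [21, 22, 23, 24]})
capper = ColumnCapper(quantile_range=(10, 90))
capped = capper.fit(df).transform(df)
# values below the 10th or above the 90th percentile of each column are clipped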
"""Utility functions for evaluating model outputs."""
import logging
import re
from collections import defaultdict
from pathlib import Path
from typing import Dict, Tuple, Union
import numpy as np
import pandas as pd
from ms3 import Parse
import harmonic_inference.utils.harmonic_constants as hc
import harmonic_inference.utils.harmonic_utils as hu
from harmonic_inference.data.chord import Chord
from harmonic_inference.data.data_types import (
NO_REDUCTION,
TRIAD_REDUCTION,
ChordType,
KeyMode,
PitchType,
)
from harmonic_inference.data.piece import Piece
from harmonic_inference.models.joint_model import State
def get_results_df(
piece: Piece,
state: State,
output_root_type: PitchType,
output_tonic_type: PitchType,
chord_root_type: PitchType,
key_tonic_type: PitchType,
) -> pd.DataFrame:
"""
Build a results DataFrame comparing the state's estimated chords and keys to the piece's ground truth.
Parameters
----------
piece : Piece
The piece, containing the ground truth harmonic structure.
state : State
The state, containing the estimated harmonic structure.
output_root_type : PitchType
The pitch type used for chord roots in the state's output labels.
output_tonic_type : PitchType
The pitch type used for key tonics in the state's output labels.
chord_root_type : PitchType
The pitch type to use for chord roots in the returned results.
key_tonic_type : PitchType
The pitch type to use for key tonics in the returned results.
Returns
-------
results_df : pd.DataFrame
A DataFrame containing the results of the given state, with the given settings.
"""
labels_list = []
gt_chords = piece.get_chords()
gt_changes = piece.get_chord_change_indices()
gt_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(gt_chords, gt_changes, gt_changes[1:]):
chord = chord.to_pitch_type(chord_root_type)
gt_chord_labels[start:end] = chord.get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
gt_chord_labels[gt_changes[-1] :] = (
gt_chords[-1]
.to_pitch_type(chord_root_type)
.get_one_hot_index(relative=False, use_inversion=True, pad=False)
)
chords, changes = state.get_chords()
estimated_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(chords, changes[:-1], changes[1:]):
root, chord_type, inv = hu.get_chord_from_one_hot_index(chord, output_root_type)
root = hu.get_pitch_from_string(
hu.get_pitch_string(root, output_root_type), chord_root_type
)
chord = hu.get_chord_one_hot_index(chord_type, root, chord_root_type, inversion=inv)
estimated_chord_labels[start:end] = chord
gt_keys = piece.get_keys()
gt_changes = piece.get_key_change_input_indices()
gt_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(gt_keys, gt_changes, gt_changes[1:]):
key = key.to_pitch_type(key_tonic_type)
gt_key_labels[start:end] = key.get_one_hot_index()
gt_key_labels[gt_changes[-1] :] = gt_keys[-1].to_pitch_type(key_tonic_type).get_one_hot_index()
keys, changes = state.get_keys()
estimated_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(keys, changes[:-1], changes[1:]):
tonic, mode = hu.get_key_from_one_hot_index(key, output_tonic_type)
tonic = hu.get_pitch_from_string(
hu.get_pitch_string(tonic, output_tonic_type), key_tonic_type
)
key = hu.get_key_one_hot_index(mode, tonic, key_tonic_type)
estimated_key_labels[start:end] = key
chord_label_list = hu.get_chord_label_list(chord_root_type, use_inversions=True)
key_label_list = hu.get_key_label_list(key_tonic_type)
for duration, est_chord_label, gt_chord_label, est_key_label, gt_key_label in zip(
piece.get_duration_cache(),
estimated_chord_labels,
gt_chord_labels,
estimated_key_labels,
gt_key_labels,
):
if duration == 0:
continue
labels_list.append(
{
"gt_key": key_label_list[gt_key_label],
"gt_chord": chord_label_list[gt_chord_label],
"est_key": key_label_list[est_key_label],
"est_chord": chord_label_list[est_chord_label],
"duration": duration,
}
)
return pd.DataFrame(labels_list)
def get_labels_df(piece: Piece, tpc_c: int = hc.TPC_C) -> pd.DataFrame:
"""
Create and return a labels_df for a given Piece, containing all chord and key
information for each segment of the piece, in all formats (TPC and MIDI pitch).
Parameters
----------
piece : Piece
The piece to create a labels_df for.
tpc_c : int
Where C should be in the TPC output.
Returns
-------
labels_df : pd.DataFrame
A labels_df, with the columns:
- chord_root_tpc
- chord_root_midi
- chord_type
- chord_inversion
- chord_suspension_midi
- chord_suspension_tpc
- key_tonic_tpc
- key_tonic_midi
- key_mode
- duration
- mc
- onset_mc
"""
def get_suspension_strings(chord: Chord) -> Tuple[str, str]:
"""
Get the tpc and midi strings for the given chord's suspension and changes.
Parameters
----------
chord : Chord
The chord whose string to return.
Returns
-------
tpc_string : str
A string representing the mapping of altered pitches in the given chord.
Each altered pitch is represented as "orig:new", where orig is the pitch in the default
chord voicing, and "new" is the altered pitch that is actually present. For added
pitches, "orig" is the empty string. "new" can be prefixed with a "+", in which
case this pitch is present in an upper octave. Pitches are represented as TPC,
and multiple alterations are separated by semicolons.
midi_string : str
The same format as tpc_string, but using a MIDI pitch representation.
"""
if chord.suspension is None:
return "", ""
change_mapping = hu.get_added_and_removed_pitches(
chord.root,
chord.chord_type,
chord.suspension,
chord.key_tonic,
chord.key_mode,
)
mappings_midi = []
mappings_tpc = []
for orig, new in change_mapping.items():
if orig == "":
orig_midi = ""
orig_tpc = ""
else:
orig_midi = str(
hu.get_pitch_from_string(
hu.get_pitch_string(int(orig), PitchType.TPC), PitchType.MIDI
)
)
orig_tpc = str(int(orig) - hc.TPC_C + tpc_c)
prefix = ""
if new[0] == "+":
prefix = "+"
new = new[1:]
new_midi = prefix + str(
hu.get_pitch_from_string(
hu.get_pitch_string(int(new), PitchType.TPC), PitchType.MIDI
)
)
new_tpc = prefix + str(int(new) - hc.TPC_C + tpc_c)
mappings_midi.append(f"{orig_midi}:{new_midi}")
mappings_tpc.append(f"{orig_tpc}:{new_tpc}")
return ";".join(mappings_tpc), ";".join(mappings_midi)
labels_list = []
chords = piece.get_chords()
onsets = [note.onset for note in piece.get_inputs()]
chord_changes = piece.get_chord_change_indices()
chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
chord_suspensions_midi = np.full(len(piece.get_inputs()), "", dtype=object)
chord_suspensions_tpc = np.full(len(piece.get_inputs()), "", dtype=object)
for chord, start, end in zip(chords, chord_changes, chord_changes[1:]):
chord_labels[start:end] = chord.get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
tpc_string, midi_string = get_suspension_strings(chord)
chord_suspensions_tpc[start:end] = tpc_string
chord_suspensions_midi[start:end] = midi_string
chord_labels[chord_changes[-1] :] = chords[-1].get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
tpc_string, midi_string = get_suspension_strings(chords[-1])
chord_suspensions_tpc[chord_changes[-1] :] = tpc_string
chord_suspensions_midi[chord_changes[-1] :] = midi_string
keys = piece.get_keys()
key_changes = piece.get_key_change_input_indices()
key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(keys, key_changes, key_changes[1:]):
key_labels[start:end] = key.get_one_hot_index()
key_labels[key_changes[-1] :] = keys[-1].get_one_hot_index()
chord_labels_list = hu.get_chord_from_one_hot_index(
slice(len(hu.get_chord_label_list(PitchType.TPC))), PitchType.TPC
)
key_labels_list = hu.get_key_from_one_hot_index(
slice(len(hu.get_key_label_list(PitchType.TPC))), PitchType.TPC
)
for duration, chord_label, key_label, suspension_tpc, suspension_midi, onset in zip(
piece.get_duration_cache(),
chord_labels,
key_labels,
chord_suspensions_tpc,
chord_suspensions_midi,
onsets,
):
if duration == 0:
continue
root_tpc, chord_type, inversion = chord_labels_list[chord_label]
tonic_tpc, mode = key_labels_list[key_label]
root_midi = hu.get_pitch_from_string(
hu.get_pitch_string(root_tpc, PitchType.TPC), PitchType.MIDI
)
tonic_midi = hu.get_pitch_from_string(
hu.get_pitch_string(tonic_tpc, PitchType.TPC), PitchType.MIDI
)
labels_list.append(
{
"chord_root_tpc": root_tpc - hc.TPC_C + tpc_c,
"chord_root_midi": root_midi,
"chord_type": chord_type,
"chord_inversion": inversion,
"chord_suspension_tpc": suspension_tpc,
"chord_suspension_midi": suspension_midi,
"key_tonic_tpc": tonic_tpc - hc.TPC_C + tpc_c,
"key_tonic_midi": tonic_midi,
"key_mode": mode,
"duration": duration,
"mc": onset[0],
"mn_onset": onset[1],
}
)
return pd.DataFrame(labels_list)
def evaluate_chords(
piece: Piece,
state: State,
pitch_type: PitchType,
use_inversion: bool = True,
reduction: Dict[ChordType, ChordType] = NO_REDUCTION,
) -> float:
"""
Evaluate the piece's estimated chords.
Parameters
----------
piece : Piece
The piece, containing the ground truth harmonic structure.
state : State
The state, containing the estimated harmonic structure.
pitch_type : PitchType
The pitch type used for chord roots.
use_inversion : bool
True to use inversion when checking the chord type. False to ignore inversion.
reduction : Dict[ChordType, ChordType]
A reduction to reduce chord types to another type.
Returns
-------
accuracy : float
The average accuracy of the state's chord estimates for the full duration of
the piece.
"""
gt_chords = piece.get_chords()
gt_changes = piece.get_chord_change_indices()
gt_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(gt_chords, gt_changes, gt_changes[1:]):
gt_labels[start:end] = chord.get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
gt_labels[gt_changes[-1] :] = gt_chords[-1].get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
chords, changes = state.get_chords()
estimated_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(chords, changes[:-1], changes[1:]):
estimated_labels[start:end] = chord
accuracy = 0.0
for duration, est_label, gt_label in zip(
piece.get_duration_cache(),
estimated_labels,
gt_labels,
):
if duration == 0:
continue
gt_root, gt_chord_type, gt_inversion = hu.get_chord_from_one_hot_index(
gt_label, pitch_type, use_inversions=True
)
est_root, est_chord_type, est_inversion = hu.get_chord_from_one_hot_index(
est_label, pitch_type, use_inversions=True
)
distance = get_chord_distance(
gt_root,
gt_chord_type,
gt_inversion,
est_root,
est_chord_type,
est_inversion,
use_inversion=use_inversion,
reduction=reduction,
)
accuracy += (1.0 - distance) * duration
return accuracy / np.sum(piece.get_duration_cache())
def get_chord_distance(
gt_root: int,
gt_chord_type: ChordType,
gt_inversion: int,
est_root: int,
est_chord_type: ChordType,
est_inversion: int,
use_inversion: bool = True,
reduction: Dict[ChordType, ChordType] = NO_REDUCTION,
) -> float:
"""
Get the distance from a ground truth chord to an estimated chord.
Parameters
----------
gt_root : int
The root pitch of the ground truth chord.
gt_chord_type : ChordType
The chord type of the ground truth chord.
gt_inversion : int
The inversion of the ground truth chord.
est_root : int
The root pitch of the estimated chord.
est_chord_type : ChordType
The chord type of the estimated chord.
est_inversion : int
The inversion of the estimated chord.
use_inversion : bool
True to use inversion when checking the chord type. False to ignore inversion.
reduction : Dict[ChordType, ChordType]
A reduction to reduce chord types to another type.
Returns
-------
distance : float
A distance between 0 (completely correct), and 1 (completely incorrect).
"""
gt_chord_type = reduction[gt_chord_type]
est_chord_type = reduction[est_chord_type]
if not use_inversion:
gt_inversion = 0
est_inversion = 0
if gt_root == est_root and gt_chord_type == est_chord_type and gt_inversion == est_inversion:
return 0.0
return 1.0
def evaluate_keys(
piece: Piece,
state: State,
pitch_type: PitchType,
tonic_only: bool = False,
) -> float:
"""
Evaluate the piece's estimated keys.
Parameters
----------
piece : Piece
The piece, containing the ground truth harmonic structure.
state : State
The state, containing the estimated harmonic structure.
pitch_type : PitchType
The pitch type used for key tonics.
tonic_only : bool
True to only evaluate the tonic pitch. False to also take mode into account.
Returns
-------
accuracy : float
The average accuracy of the state's key estimates for the full duration of
the piece.
"""
gt_keys = piece.get_keys()
gt_changes = piece.get_key_change_input_indices()
gt_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(gt_keys, gt_changes, gt_changes[1:]):
gt_labels[start:end] = key.get_one_hot_index()
gt_labels[gt_changes[-1] :] = gt_keys[-1].get_one_hot_index()
keys, changes = state.get_keys()
estimated_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(keys, changes[:-1], changes[1:]):
estimated_labels[start:end] = key
accuracy = 0.0
for duration, est_label, gt_label in zip(
piece.get_duration_cache(),
estimated_labels,
gt_labels,
):
if duration == 0:
continue
gt_tonic, gt_mode = hu.get_key_from_one_hot_index(int(gt_label), pitch_type)
est_tonic, est_mode = hu.get_key_from_one_hot_index(int(est_label), pitch_type)
distance = get_key_distance(
gt_tonic,
gt_mode,
est_tonic,
est_mode,
tonic_only=tonic_only,
)
accuracy += (1.0 - distance) * duration
return accuracy / np.sum(piece.get_duration_cache())
def get_key_distance(
gt_tonic: int,
gt_mode: KeyMode,
est_tonic: int,
est_mode: KeyMode,
tonic_only: bool = False,
) -> float:
"""
Get the distance from one key to another.
Parameters
----------
gt_tonic : int
The tonic pitch of the ground truth key.
gt_mode : KeyMode
The mode of the ground truth key.
est_tonic : int
The tonic pitch of the estimated key.
est_mode : KeyMode
The mode of the estimated key.
tonic_only : bool
True to only evaluate the tonic pitch. False to also take mode into account.
Returns
-------
distance : float
The distance between the estimated and ground truth keys.
"""
if tonic_only:
return 0.0 if gt_tonic == est_tonic else 1.0
return 0.0 if gt_tonic == est_tonic and gt_mode == est_mode else 1.0
def evaluate_chords_and_keys_jointly(
piece: Piece,
state: State,
root_type: PitchType,
tonic_type: PitchType,
use_inversion: bool = True,
chord_reduction: Dict[ChordType, ChordType] = NO_REDUCTION,
tonic_only: bool = False,
) -> float:
"""
Evaluate the state's combined chords and keys.
Parameters
----------
piece : Piece
The piece, containing the ground truth harmonic structure.
state : State
The state, containing the estimated harmonic structure.
root_type : PitchType
The pitch type used for chord roots.
tonic_type : PitchType
The pitch type used for key tonics.
use_inversion : bool
True to use inversion when checking the chord type. False to ignore inversion.
chord_reduction : Dict[ChordType, ChordType]
A reduction to reduce chord types to another type.
tonic_only : bool
True to only evaluate the key's tonic pitch. False to also take mode into account.
Returns
-------
accuracy : float
The average accuracy of the state's joint chord and key estimates for the full
duration of the piece.
"""
gt_chords = piece.get_chords()
gt_changes = piece.get_chord_change_indices()
gt_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(gt_chords, gt_changes, gt_changes[1:]):
gt_chord_labels[start:end] = chord.get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
gt_chord_labels[gt_changes[-1] :] = gt_chords[-1].get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
chords, changes = state.get_chords()
estimated_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(chords, changes[:-1], changes[1:]):
estimated_chord_labels[start:end] = chord
gt_keys = piece.get_keys()
gt_changes = piece.get_key_change_input_indices()
gt_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(gt_keys, gt_changes, gt_changes[1:]):
gt_key_labels[start:end] = key.get_one_hot_index()
gt_key_labels[gt_changes[-1] :] = gt_keys[-1].get_one_hot_index()
keys, changes = state.get_keys()
estimated_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(keys, changes[:-1], changes[1:]):
estimated_key_labels[start:end] = key
accuracy = 0.0
for duration, est_chord_label, gt_chord_label, est_key_label, gt_key_label in zip(
piece.get_duration_cache(),
estimated_chord_labels,
gt_chord_labels,
estimated_key_labels,
gt_key_labels,
):
if duration == 0:
continue
gt_root, gt_chord_type, gt_inversion = hu.get_chord_from_one_hot_index(
gt_chord_label, root_type, use_inversions=True
)
est_root, est_chord_type, est_inversion = hu.get_chord_from_one_hot_index(
est_chord_label, root_type, use_inversions=True
)
chord_distance = get_chord_distance(
gt_root,
gt_chord_type,
gt_inversion,
est_root,
est_chord_type,
est_inversion,
use_inversion=use_inversion,
reduction=chord_reduction,
)
gt_tonic, gt_mode = hu.get_key_from_one_hot_index(int(gt_key_label), tonic_type)
est_tonic, est_mode = hu.get_key_from_one_hot_index(int(est_key_label), tonic_type)
key_distance = get_key_distance(
gt_tonic,
gt_mode,
est_tonic,
est_mode,
tonic_only=tonic_only,
)
similarity = (1.0 - chord_distance) * (1.0 - key_distance)
accuracy += similarity * duration
return accuracy / np.sum(piece.get_duration_cache())
def get_annotation_df(
state: State,
piece: Piece,
root_type: PitchType,
tonic_type: PitchType,
) -> pd.DataFrame:
"""
Get a df containing the labels of the given state.
Parameters
----------
state : State
The state containing harmony annotations.
piece : Piece
The piece which was used as input when creating the given state.
root_type : PitchType
The pitch type to use for chord root labels.
tonic_type : PitchType
The pitch type to use for key tonic annotations.
Returns
-------
annotation_df : pd.DataFrame
A DataFrame containing the harmony annotations from the given state.
"""
labels_list = []
chords, changes = state.get_chords()
estimated_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(chords, changes[:-1], changes[1:]):
estimated_chord_labels[start:end] = chord
keys, changes = state.get_keys()
estimated_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(keys, changes[:-1], changes[1:]):
estimated_key_labels[start:end] = key
chord_label_list = hu.get_chord_label_list(root_type, use_inversions=True)
key_label_list = hu.get_key_label_list(tonic_type)
prev_est_key_string = None
prev_est_chord_string = None
for duration, note, est_chord_label, est_key_label in zip(
piece.get_duration_cache(),
piece.get_inputs(),
estimated_chord_labels,
estimated_key_labels,
):
if duration == 0:
continue
est_chord_string = chord_label_list[est_chord_label]
est_key_string = key_label_list[est_key_label]
# No change in labels
if est_chord_string == prev_est_chord_string and est_key_string == prev_est_key_string:
continue
if est_key_string != prev_est_key_string:
labels_list.append(
{
"label": est_key_string,
"mc": note.onset[0],
"mc_onset": note.mc_onset,
"mn_onset": note.onset[1],
}
)
if est_chord_string != prev_est_chord_string:
labels_list.append(
{
"label": est_chord_string,
"mc": note.onset[0],
"mc_onset": note.mc_onset,
"mn_onset": note.onset[1],
}
)
prev_est_key_string = est_key_string
prev_est_chord_string = est_chord_string
return pd.DataFrame(labels_list)
def get_label_df(
state: State,
piece: Piece,
root_type: PitchType,
tonic_type: PitchType,
) -> pd.DataFrame:
"""
Get a df containing the labels of the given state, color-coded in terms of their accuracy
according to the ground truth harmony in the given piece.
Parameters
----------
state : State
The state, containing the estimated harmonic structure.
piece : Piece
The piece, containing the ground truth harmonic structure.
root_type : PitchType
The pitch type used for chord roots.
tonic_type : PitchType
The pitch type used for key tonics.
Returns
-------
label_df : pd.DataFrame
A DataFrame containing the labels of the given state.
"""
labels_list = []
gt_chord_labels = np.full(len(piece.get_inputs()), -1, dtype=int)
if len(piece.get_chords()) > 0:
gt_chords = piece.get_chords()
gt_changes = piece.get_chord_change_indices()
for chord, start, end in zip(gt_chords, gt_changes, gt_changes[1:]):
gt_chord_labels[start:end] = chord.get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
gt_chord_labels[gt_changes[-1] :] = gt_chords[-1].get_one_hot_index(
relative=False, use_inversion=True, pad=False
)
chords, changes = state.get_chords()
estimated_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for chord, start, end in zip(chords, changes[:-1], changes[1:]):
estimated_chord_labels[start:end] = chord
gt_key_labels = np.full(len(piece.get_inputs()), -1, dtype=int)
if len(piece.get_keys()) > 0:
gt_keys = piece.get_keys()
gt_changes = piece.get_key_change_input_indices()
gt_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(gt_keys, gt_changes, gt_changes[1:]):
gt_key_labels[start:end] = key.get_one_hot_index()
gt_key_labels[gt_changes[-1] :] = gt_keys[-1].get_one_hot_index()
keys, changes = state.get_keys()
estimated_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
for key, start, end in zip(keys, changes[:-1], changes[1:]):
estimated_key_labels[start:end] = key
chord_label_list = hu.get_chord_label_list(root_type, use_inversions=True)
key_label_list = hu.get_key_label_list(tonic_type)
prev_gt_chord_string = None
prev_gt_key_string = None
prev_est_key_string = None
prev_est_chord_string = None
for duration, note, est_chord_label, gt_chord_label, est_key_label, gt_key_label in zip(
piece.get_duration_cache(),
piece.get_inputs(),
estimated_chord_labels,
gt_chord_labels,
estimated_key_labels,
gt_key_labels,
):
if duration == 0:
continue
gt_chord_string = chord_label_list[gt_chord_label]
gt_key_string = key_label_list[gt_key_label]
est_chord_string = chord_label_list[est_chord_label]
est_key_string = key_label_list[est_key_label]
# No change in labels
if (
gt_chord_string == prev_gt_chord_string
and gt_key_string == prev_gt_key_string
and est_chord_string == prev_est_chord_string
and est_key_string == prev_est_key_string
):
continue
if gt_key_string != prev_gt_key_string or est_key_string != prev_est_key_string:
gt_tonic, gt_mode = hu.get_key_from_one_hot_index(int(gt_key_label), tonic_type)
est_tonic, est_mode = hu.get_key_from_one_hot_index(int(est_key_label), tonic_type)
full_key_distance = get_key_distance(
gt_tonic,
gt_mode,
est_tonic,
est_mode,
tonic_only=False,
)
if full_key_distance == 0:
color = "green"
else:
partial_key_distance = get_key_distance(
gt_tonic,
gt_mode,
est_tonic,
est_mode,
tonic_only=True,
)
color = "yellow" if partial_key_distance != 1 else "red"
labels_list.append(
{
"label": est_key_string if est_key_string != prev_est_key_string else "--",
"mc": note.onset[0],
"mc_onset": note.mc_onset,
"mn_onset": note.onset[1],
"color_name": color,
}
)
if gt_chord_string != prev_gt_chord_string or est_chord_string != prev_est_chord_string:
gt_root, gt_chord_type, gt_inversion = hu.get_chord_from_one_hot_index(
gt_chord_label, root_type, use_inversions=True
)
est_root, est_chord_type, est_inversion = hu.get_chord_from_one_hot_index(
est_chord_label, root_type, use_inversions=True
)
full_chord_distance = get_chord_distance(
gt_root,
gt_chord_type,
gt_inversion,
est_root,
est_chord_type,
est_inversion,
use_inversion=True,
reduction=NO_REDUCTION,
)
if full_chord_distance == 0:
color = "green"
else:
partial_chord_distance = get_chord_distance(
gt_root,
gt_chord_type,
gt_inversion,
est_root,
est_chord_type,
est_inversion,
use_inversion=False,
reduction=TRIAD_REDUCTION,
)
color = "yellow" if partial_chord_distance != 1 else "red"
labels_list.append(
{
"label": est_chord_string
if est_chord_string != prev_est_chord_string
else "--",
"mc": note.onset[0],
"mc_onset": note.mc_onset,
"mn_onset": note.onset[1],
"color_name": color,
}
)
prev_gt_key_string = gt_key_string
prev_gt_chord_string = gt_chord_string
prev_est_key_string = est_key_string
prev_est_chord_string = est_chord_string
return | pd.DataFrame(labels_list) | pandas.DataFrame |
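# --- Illustrative sketch (editor's addition) ---
# The evaluate_* functions above all reduce to the same duration-weighted accuracy:
# each onset contributes (1 - distance) weighted by its duration, zero-duration
# entries are skipped, and the total is normalised by the summed duration.
import numpy as np

durations = np.array([1.0, 0.5, 0.0, 2.5])   # a zero duration is skipped, as above
distances = np.array([0.0, 1.0, 0.0, 0.0])   # 0 = correct label, 1 = incorrect

mask = durations > 0
accuracy = np.sum((1.0 - distances[mask]) * durations[mask]) / np.sum(durations[mask])
# here: (1*1.0 + 0*0.5 + 1*2.5) / 4.0 = 0.875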
import re
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas.util.testing as tm
@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]])
def test_duplicated_with_misspelled_column_name(subset):
# GH 19730
df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
msg = re.escape("Index(['a'], dtype='object')")
with pytest.raises(KeyError, match=msg):
df.duplicated(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes():
# gh-21524
# Given the wide dataframe with a lot of columns
# with different (important!) values
data = {
"col_{0:02d}".format(i): np.random.randint(0, 1000, 30000) for i in range(100)
}
df = DataFrame(data).T
result = df.duplicated()
# Then duplicates produce the bool Series as a result and don't fail during
# calculation. Actual values doesn't matter here, though usually it's all
# False in this case
assert isinstance(result, Series)
assert result.dtype == np.bool
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, False, True])),
("last", Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True])),
],
)
def test_duplicated_keep(keep, expected):
df = DataFrame({"A": [0, 1, 1, 2, 0], "B": ["a", "b", "b", "c", "a"]})
result = df.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal")
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, False, True])),
("last", Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True])),
],
)
def test_duplicated_nan_none(keep, expected):
df = DataFrame({"C": [np.nan, 3, 3, None, np.nan]}, dtype=object)
result = df.duplicated(keep=keep)
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
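# --- Illustrative sketch (editor's addition) ---
# The parametrised expectations above correspond directly to the three `keep`
# modes of DataFrame.duplicated. Reproducing one case by hand:
import pandas as pd

df = pd.DataFrame({"A": [0, 1, 1, 2, 0], "B": ["a", "b", "b", "c", "a"]})
print(df.duplicated(keep="first").tolist())  # [False, False, True, False, True]
print(df.duplicated(keep="last").tolist())   # [True, True, False, False, False]
print(df.duplicated(keep=False).tolist())    # [True, True, True, False, True]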
import cPickle
import os
import sys
import scipy as sc
import operator
import numpy as np
import pandas as pd
from scipy import sparse
import xgboost as xgb
from sklearn import model_selection, preprocessing, ensemble
from sklearn.metrics import log_loss
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from bs4 import BeautifulSoup
#reload(sys)
#sys.setdefaultencoding('utf8')
#r = re.compile(r"\s")
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import nltk
from scipy.stats import boxcox
from sklearn.decomposition import TruncatedSVD
import datetime as dt
from nltk.stem.porter import *
import gc
import math
from collections import Counter
nfold = 5
nbag = 10
with open("../pickle03.pkl", "rb") as f:
(train_df,test_df,train_y,features_to_use,features_to_use_ln,ntrain,test_df_listing_id) = cPickle.load( f)
train_test = pd.concat((train_df, test_df), axis=0).reset_index(drop=True)
###############Model Build and Predict
param = {}
param['objective'] = 'multi:softprob'
param['eta'] = 0.01
param['max_depth'] = 6
param['silent'] = 1
param['num_class'] = 3
param['eval_metric'] = "mlogloss"
param['min_child_weight'] = 1
param['subsample'] = .9
param['colsample_bytree'] = .8
param['seed'] = 12345
### Ftrs+Desc Ftrs+Ftr Count Vec
features_to_use_ln=[
'listing_id','Zero_building_id', 'Zero_Ftr','Zero_description', 'num_description_words','ratio_description_words', 'num_photos', 'num_features', 'top_1_manager', 'top_2_manager','top_5_manager', 'top_10_manager', 'top_15_manager','top_20_manager', 'top_25_manager', 'top_30_manager','top_50_manager', 'bottom_10_manager', 'bottom_20_manager','bottom_30_manager', 'top_1_building', 'top_2_building','top_5_building', 'top_10_building', 'top_15_building','top_20_building', 'top_25_building', 'top_30_building','top_50_building', 'bottom_10_building', 'bottom_20_building','bottom_30_building', 'top_1_add', 'top_2_add', 'top_5_add','top_10_add', 'top_15_add', 'top_20_add', 'top_25_add','top_30_add', 'top_50_add', 'bottom_10_add', 'bottom_20_add','bottom_30_add',
##LOG Price variant
'lg_price','per_bed_price','per_bath_price','per_bed_price_dev','per_bath_price_dev', #'lg_price_rnd',
##BoxCox Price variant
#'bc_price','per_bed_price_bc','per_bath_price_bc','per_bed_price_dev_bc','per_bath_price_dev_bc',#bc_price_rnd,
###label encoding
u'building_id', u'created',u'display_address', u'manager_id', u'street_address','created_year', 'created_month','created_day', 'created_hour', 'created_weekday', 'created_wd','bed_bath','street', 'avenue', 'east', 'west', 'north','south', 'other_address', 'bathrooms_cat', 'bedroom_cat','lat_cat','lon_cat', #'lat_cat_rnd','lon_cat_rnd'#,
'per_bed_bath_price','bedPerBath','bedBathDiff','bedBathSum','bedsPerc','per_bed_price_rat','per_bath_price_rat','manager_id_interest_level_high0','building_id_interest_level_high0','manager_id_interest_level_medium0','building_id_interest_level_medium0'
]
cv_scores = []
bow = CountVectorizer(stop_words='english', max_features=100, ngram_range=(1,1),min_df=2, max_df=.85)
bow.fit(train_test["features_2"])
oob_valpred = np.zeros((train_df.shape[0],3))
oob_tstpred = np.zeros((test_df.shape[0],3))
i=0
with open("../xgb_lblenc_ftrcntvecraw_newftr_lgprice.pkl", "rb") as f:
(x1,y1) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_lgprice.pkl", "rb") as f:
(x2,y2) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_deskpi_lgprice.pkl", "rb") as f:
(x3,y3) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_bcprice.pkl", "rb") as f:
(x4,y4) = cPickle.load( f)
with open("../xgb_cntenc_ftrcntvec200_lnprice.pkl", "rb") as f:
(x5,y5) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_deskpi_rnd_bcprice.pkl", "rb") as f:
(x6,y6) = cPickle.load( f)
with open("../xgb_tgtenc_ftrcntvec200_bcprice.pkl", "rb") as f:
(x7,y7) = cPickle.load( f)
with open("../xgb_rnkenc_ftrcntvec200_bcprice.pkl", "rb") as f:
(x8,y8) = cPickle.load( f)
with open("../xgb_reg_rmse_lblenc_ftrcntvec200_lgprice.pkl", "rb") as f:
(x9,y9) = cPickle.load( f)
with open("../xgb_poi_lblenc_ftrcntvec200_lgprice.pkl", "rb") as f:
(x10,y10) = cPickle.load( f)
with open("../xgb_reg_rmse_lblenc_ftrcntvec200_lgprice_newftr.pkl", "rb") as f:
(x11,y11) = cPickle.load( f)
with open("../xgb_poi_lblenc_ftrcntvec200_lgprice_newftr.pkl", "rb") as f:
(x12,y12) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_desctfidf_descmtr_lgprice.pkl", "rb") as f:
(x16,y16) = cPickle.load( f)
with open("../et_lblenc_ftrcntvecraw_newftr_bcprice_100.pkl", "rb") as f:
(x13,y13) = cPickle.load( f)
with open("../xgb_restack_l2_50ftr_pred.pkl", "rb") as f:
(x14,y14) = cPickle.load( f)
with open("../xgb_restack_l2_regre50_pred.pkl", "rb") as f:
(x18,y18) = cPickle.load( f)
with open("../xgb_restack_l2_woftr_wolisting_rnd_pred.pkl", "rb") as f:
(x19,y19) = cPickle.load( f)
with open("../xgb_restack_l2_50ftr_rnd_pred.pkl", "rb") as f:
(x20,y20) = cPickle.load( f)
with open("../keras_minMax_targetenc_200Ftr.pkl", "rb") as f:
(x21,y21) = cPickle.load( f)
with open("../keras_minMax_cnt_50Ftr.pkl", "rb") as f:
(x22,y22) = cPickle.load( f)
with open("../keras_regre_minMax_targetenc_200Ftr.pkl", "rb") as f:
(x23,y23) = cPickle.load( f)
with open("../et-lbl-ftr-cvecraw-newftr-bc-10.pkl", "rb") as f:
(x24,y24) = cPickle.load( f)
with open("../ada_lblenc_ftrcntvecraw_newftr_bcprice_50.pkl", "rb") as f:
(x15,y15) = cPickle.load( f)
test_df2 = np.hstack((x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x16,x13,x14,x18,x19,x20,x21,x22,x23,x24,x15,test_df[features_to_use_ln].values))
train_df2 = np.hstack((y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y16,y13,y14,y18,y19,y20,y21[:49352,:],y22[:49352,:],y23[:49352,:],y24,y15,train_df[features_to_use_ln].values))
for x in np.arange(nbag):
kf = model_selection.KFold(n_splits=nfold, shuffle=True, random_state=12345*x)
for dev_index, val_index in kf.split(range(train_y.shape[0])):
dev_X, val_X = train_df2[dev_index,:], train_df2[val_index,:]
dev_y, val_y = train_y[dev_index], train_y[val_index]
tr_sparse_2 = bow.transform(train_df.loc[dev_index,"features_2"])
val_sparse_2 = bow.transform(train_df.loc[val_index,"features_2"])
te_sparse_2 = bow.transform(test_df["features_2"])
train_X2 = sparse.hstack([dev_X,tr_sparse_2]).tocsr()#,tr_sparse_d
val_X2 = sparse.hstack([val_X,val_sparse_2]).tocsr()#,val_sparse_d
test_X2 = sparse.hstack([test_df2, te_sparse_2]).tocsr()
print(train_X2.shape)
print(test_X2.shape)
num_rounds =10000
plst = list(param.items())
xgtrain = xgb.DMatrix(train_X2, label=dev_y)
xgval = xgb.DMatrix(val_X2, label=val_y)
xgtest = xgb.DMatrix(test_X2)
watchlist = [ (xgtrain,'train'), (xgval, 'val') ]
model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=50)
best_iteration = model.best_iteration+1
model = xgb.train(plst, xgtrain, best_iteration, watchlist, early_stopping_rounds=50)
preds = model.predict(xgval)
oob_valpred[val_index,...] += preds
cv_scores.append(log_loss(val_y, preds))
print(cv_scores)
print(np.mean(cv_scores))
print(np.std(cv_scores))
predtst = model.predict(xgtest)
oob_tstpred += predtst
oob_valpred /=nbag
oob_tstpred /= (nfold*nbag)
out_df = pd.DataFrame(oob_tstpred)#
out_df.columns = ["high", "medium", "low"]
out_df["listing_id"] = test_df_listing_id
out_df.to_csv("../xgb_restack_l2_pred.csv", index=False)
#######################
############Keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers.advanced_activations import PReLU
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dense, Dropout, Activation, Merge, Reshape
from keras.layers.embeddings import Embedding
def nn_model4():
model = Sequential()
model.add(Dense(100, input_dim = train_X2.shape[1], init = 'uniform'))#500
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.3))#.2
model.add(Dense(100, init = 'uniform'))#400
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.3))#.2
model.add(Dense(3, init='zero'))
model.add(Activation('softmax'))##
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam')
return(model)
with open("../xgb_lblenc_ftrcntvecraw_newftr_lgprice.pkl", "rb") as f:
(x1,y1) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_lgprice.pkl", "rb") as f:
(x2,y2) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_deskpi_lgprice.pkl", "rb") as f:
(x3,y3) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_bcprice.pkl", "rb") as f:
(x4,y4) = cPickle.load( f)
with open("../xgb_cntenc_ftrcntvec200_lnprice.pkl", "rb") as f:
(x5,y5) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_deskpi_rnd_bcprice.pkl", "rb") as f:
(x6,y6) = cPickle.load( f)
with open("../xgb_tgtenc_ftrcntvec200_bcprice.pkl", "rb") as f:
(x7,y7) = cPickle.load( f)
with open("../xgb_rnkenc_ftrcntvec200_bcprice.pkl", "rb") as f:
(x8,y8) = cPickle.load( f)
with open("../xgb_reg_rmse_lblenc_ftrcntvec200_lgprice.pkl", "rb") as f:
(x9,y9) = cPickle.load( f)
with open("../xgb_poi_lblenc_ftrcntvec200_lgprice.pkl", "rb") as f:
(x10,y10) = cPickle.load( f)
with open("../xgb_reg_rmse_lblenc_ftrcntvec200_lgprice_newftr.pkl", "rb") as f:
(x11,y11) = cPickle.load( f)
with open("../xgb_poi_lblenc_ftrcntvec200_lgprice_newftr.pkl", "rb") as f:
(x12,y12) = cPickle.load( f)
with open("../xgb_lblenc_ftrcntvec200_desctfidf_descmtr_lgprice.pkl", "rb") as f:
(x16,y16) = cPickle.load( f)
with open("../et_lblenc_ftrcntvecraw_newftr_bcprice_100.pkl", "rb") as f:
(x13,y13) = cPickle.load( f)
with open("../xgb_restack_l2_50ftr_pred.pkl", "rb") as f:
(x14,y14) = cPickle.load( f)
with open("../xgb_restack_l2_regre50_pred.pkl", "rb") as f:
(x18,y18) = cPickle.load( f)
with open("../xgb_restack_l2_woftr_wolisting_rnd_pred.pkl", "rb") as f:
(x19,y19) = cPickle.load( f)
with open("../xgb_restack_l2_50ftr_rnd_pred.pkl", "rb") as f:
(x20,y20) = cPickle.load( f)
with open("../keras_minMax_targetenc_200Ftr.pkl", "rb") as f:
(x21,y21) = cPickle.load( f)
with open("../keras_minMax_cnt_50Ftr.pkl", "rb") as f:
(x22,y22) = cPickle.load( f)
with open("../keras_regre_minMax_targetenc_200Ftr.pkl", "rb") as f:
(x23,y23) = cPickle.load( f)
with open("../et-lbl-ftr-cvecraw-newftr-bc-10.pkl", "rb") as f:
(x24,y24) = cPickle.load( f)
with open("../ada_lblenc_ftrcntvecraw_newftr_bcprice_50.pkl", "rb") as f:
(x15,y15) = cPickle.load( f)
with open("../xgb_lblenc_lgprice_fewFTR.pkl", "rb") as f:
(x25,y25) = cPickle.load( f)
with open("../xgb_few_ftrs.pkl", "rb") as f:
(x26,y26) = cPickle.load( f)
with open("../xgb_listing_id.pkl", "rb") as f:
(x27,y27) = cPickle.load( f)
with open("../xgb_ftr_desc.pkl", "rb") as f:
(x28,y28) = cPickle.load( f)
test_df2 = np.hstack((x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x16,x13,x14,x18,x19,x20,x21,x22,x23,x24,x15,x25,x26,x27,x28))
train_df2 = np.hstack((y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y16,y13,y14,y18,y19,y20,y21[:49352,:],y22[:49352,:],y23[:49352,:],y24,y15,y25,y26,y27,y28))
cv_scores = []
oob_valpred = np.zeros((train_df.shape[0],3))
oob_tstpred = np.zeros((test_df.shape[0],3))
train_y2 = np_utils.to_categorical(train_y, 3)
for x in np.arange(nbag):
kf = model_selection.KFold(n_splits=nfold, shuffle=True, random_state=12345*x)
for dev_index, val_index in kf.split(range(train_y.shape[0])):
train_X2, val_X2 = train_df2[dev_index,:], train_df2[val_index,:]
dev_y, val_y = train_y2[dev_index], train_y2[val_index]
test_X2 = test_df2.copy()
print(train_X2.shape)
model = nn_model4()
earlyStopping=EarlyStopping(monitor='val_loss', patience=50, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath="./weights2XXLK.hdf5", verbose=1, save_best_only=True)
fit = model.fit(train_X2, dev_y,
nb_epoch = 10000,
validation_data=(val_X2, val_y),
verbose = 1,callbacks=[earlyStopping,checkpointer]
)
print("loading weights")
model.load_weights("./weights2XXLK.hdf5")
print("predicting..")
preds = model.predict(val_X2)#[:,0]
oob_valpred[val_index,...] += preds
cv_scores.append(log_loss(val_y, preds))
print(cv_scores)
print(np.mean(cv_scores))
print(np.std(cv_scores))
predtst = (model.predict(test_X2))#[:,0]
oob_tstpred += predtst
oob_valpred /= nbag
oob_tstpred /= (nfold*nbag)
out_df = pd.DataFrame(oob_tstpred)
out_df.columns = ["high", "medium", "low"]
out_df["listing_id"] = test_df_listing_id
out_df.to_csv("../keras_L2.csv", index=False)
with open("../keras_L2.pkl", "wb") as f:
cPickle.dump((oob_tstpred,oob_valpred), f, -1)
###Old Score
#[0.52305209635321348, 0.51907342921080069, 0.52102132207204954, 0.5201797693216722, 0.51651091318463827]
#0.519967506028
#0.00216414827934
#New Score
#[0.5228894522984826, 0.51887473053048139, 0.52087177150944586, 0.52010859504893847, 0.51494352591063364]
#0.51953761506
#0.00264143428707
############Combine
testIdSTCKNET = pd.read_csv("../stacknet/test_stacknet.csv",usecols=[0],header=None)
out_df3 = pd.read_csv("../stacknet/sigma_stack_pred_restack.csv",header=None)#../stacknet/submission_0.538820662797.csv -non restacking
out_df3 = | pd.concat([testIdSTCKNET,out_df3],axis=1) | pandas.concat |
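# --- Illustrative sketch (editor's addition) ---
# The stacking loops above accumulate out-of-fold predictions over nbag bagging
# rounds and nfold folds: each training row is predicted once per round (divide by
# nbag), while the test set is predicted in every fold of every round (divide by
# nfold*nbag). The bookkeeping in isolation, with a constant stand-in for model.predict:
import numpy as np
from sklearn.model_selection import KFold

nbag, nfold, n_train, n_test, n_class = 2, 5, 100, 20, 3
oob_val = np.zeros((n_train, n_class))
oob_tst = np.zeros((n_test, n_class))
for bag in range(nbag):
    kf = KFold(n_splits=nfold, shuffle=True, random_state=12345 * bag)
    for dev_idx, val_idx in kf.split(np.arange(n_train)):
        val_pred = np.full((len(val_idx), n_class), 1.0 / n_class)  # stand-in for model.predict
        tst_pred = np.full((n_test, n_class), 1.0 / n_class)
        oob_val[val_idx] += val_pred
        oob_tst += tst_pred
oob_val /= nbag
oob_tst /= nfold * nbag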
import pandas as pd
import pandas_profiling as pdp
def read_csv_to_df(file_name):
try:
df = | pd.read_csv(file_name) | pandas.read_csv |
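# --- Illustrative sketch (editor's addition) ---
# The helper above only reads the CSV; a typical next step with pandas_profiling
# (imported as pdp above) is to build an HTML report. ProfileReport and to_file are
# part of the pandas_profiling API, but exact signatures vary between versions and
# the file name below is hypothetical.
import pandas as pd
import pandas_profiling as pdp

df = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "a", "b"]})
report = pdp.ProfileReport(df, title="Data profile")
report.to_file("data_profile.html")  # writes the report as HTML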
#!/usr/bin/env python
# coding: utf-8
# ## Topic: Apriori algorithm, generate and print out all the association rules.
#
# #### Name: <NAME>
#
# #### Subject: CS 634 Data mining
#
# ## Loading Libraries
# In[1]:
#importing all libraries
import pandas as pd
import numpy as np
import csv
from itertools import combinations
# ## Defining functions
# In[2]:
# For loading data and creating a list of items
def data_prep(file):
df = | pd.read_csv(file) | pandas.read_csv |
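# --- Illustrative sketch (editor's addition) ---
# Apriori's core step is counting the support of candidate itemsets and keeping only
# those above a minimum-support threshold. A minimal version of that counting, using
# the combinations import above; the transaction data here is made up.
from itertools import combinations

transactions = [{"milk", "bread"}, {"milk", "butter"}, {"milk", "bread", "butter"}]
min_support = 2 / 3
items = sorted({item for t in transactions for item in t})

def support(itemset):
    # fraction of transactions that contain every item of the itemset
    return sum(itemset <= t for t in transactions) / len(transactions)

frequent_pairs = [
    set(pair) for pair in combinations(items, 2) if support(set(pair)) >= min_support
]
# both {'bread', 'milk'} and {'butter', 'milk'} meet the 2/3 threshold here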
#######
# Here we'll make a scatter plot with fake data that is
# intentionally denser on the left, with overlapping data points.
# We'll use Selection Data to uncover the difference.
######
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import numpy as np
import pandas as pd
import json
app = dash.Dash()
# create x and y arrays
np.random.seed(10)
x1 = np.linspace(0.1,5,50)
x2 = np.linspace(5.1,10,50)
y = np.random.randint(0,50,50)
# create three "half DataFrames"
df1 = pd.DataFrame({'x': x1, 'y': y})
df2 = pd.DataFrame({'x': x1, 'y': y})
df3 = pd.DataFrame({'x': x2, 'y': y})
# combine them into one DataFrame
df = | pd.concat([df1,df2,df3]) | pandas.concat |
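# --- Illustrative sketch (editor's addition) ---
# The header comment above says Selection Data will be used to compare the dense and
# sparse regions. Building on the app, df and imports defined above, a minimal layout
# plus a callback wired to the graph's selectedData property could look like this;
# the component ids ('plot', 'selection') are hypothetical.
app.layout = html.Div([
    dcc.Graph(
        id='plot',
        figure={
            'data': [go.Scatter(x=df['x'], y=df['y'], mode='markers')],
            'layout': go.Layout(title='Scatter plot', hovermode='closest')
        }
    ),
    html.Pre(id='selection')
])

@app.callback(Output('selection', 'children'), [Input('plot', 'selectedData')])
def show_selected(selectedData):
    # selectedData is None until a box or lasso selection is made on the graph
    return json.dumps(selectedData, indent=2)

if __name__ == '__main__':
    app.run_server()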
import torch
import pandas as pd
from datetime import datetime
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from utils import haversine_np, get_perp
from ml import FFNet
class ETAInf:
def __init__(self, path_to_model, path_to_features, path_to_embs):
self.model = FFNet(152, False)
self.model.load_state_dict(torch.load(path_to_model))
self.nodes = np.array(pd.read_csv(path_to_embs, index_col = 0))
self.wfile = path_to_features
self.model.eval()
def preprocess_route(self, route, points, st_edge_cors, fin_edge_cors):
print('st_edge_cors', st_edge_cors)
start_perp = get_perp(st_edge_cors[0][0], st_edge_cors[0][1], st_edge_cors[1][0], st_edge_cors[1][1], points.start_lat, points.start_lon)
start_perp = start_perp if start_perp is not None else [st_edge_cors[0][0], st_edge_cors[0][1]]
dist_to_a = haversine_np(start_perp[1], start_perp[0], points.start_lon, points.start_lat)
start_point_meters = haversine_np(start_perp[1], start_perp[0], st_edge_cors[0][1], st_edge_cors[0][0])
start_point_part = start_point_meters / haversine_np(st_edge_cors[0][1], st_edge_cors[0][0], st_edge_cors[1][1], st_edge_cors[1][0])
end_perp = get_perp(fin_edge_cors[0][0], fin_edge_cors[0][1], fin_edge_cors[1][0], fin_edge_cors[1][1], points.end_lat, points.end_lon)
end_perp = end_perp if end_perp is not None else [fin_edge_cors[1][0], fin_edge_cors[1][1]]
dist_to_b = haversine_np(end_perp[1], end_perp[0], points.start_lon, points.start_lat)
finish_point_meters = haversine_np(end_perp[1], end_perp[0], fin_edge_cors[1][1], fin_edge_cors[1][0])
finish_point_part = finish_point_meters / haversine_np(fin_edge_cors[0][1], fin_edge_cors[0][0], fin_edge_cors[1][1], fin_edge_cors[1][0])
stat_data = pd.DataFrame({"dist_to_b": [dist_to_b], "dist_to_a": [dist_to_a], "start_point_meters": [start_point_meters], "finish_point_meters": [finish_point_meters], "start_point_part":[start_point_part], "finish_point_part": [finish_point_part]})
weather = | pd.read_csv(self.wfile, delimiter=";") | pandas.read_csv |
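# --- Illustrative sketch (editor's addition) ---
# haversine_np is imported from a local utils module whose source is not shown here.
# The calls above (haversine_np(lon1, lat1, lon2, lat2)) suggest the standard
# great-circle distance; a typical vectorised implementation looks like the sketch
# below, but this is an assumption about the real helper, not its actual source.
import numpy as np

def haversine_np_sketch(lon1, lat1, lon2, lat2, radius_m=6371000.0):
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2
    return 2.0 * radius_m * np.arcsin(np.sqrt(a))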
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
        pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
        tup1 = (per.year, per.month, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
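    # In the conversion tests below, the optional second argument to asfreq
    # ("S"/"E", case-insensitive) selects the sub-period at the start or the
    # end of the original span.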
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
        ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
    tendulkar = getPlayerData(35320,dir="../",file="tendulkar.csv",type="batting",
                              homeOrAway=[1,2],result=[1,2,4])
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
    # Get number of 4s and runs scored
x4s = pd.to_numeric(df['4s'])
runs = pd.to_numeric(df['Runs'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
    runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
    b=poly.fit_transform(np.array([[50]]))
    c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
    b=poly.fit_transform(np.array([[100]]))
    c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
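# A minimal usage sketch for batsman4s, kept as a comment so that importing this
# module has no plotting side effects (the CSV name below is hypothetical and
# assumes a file previously produced by getPlayerData()):
#
#   batsman4s("tendulkar.csv", "A Batsman")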
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
    df = clean(file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
    This function computes the average runs scored on different grounds and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the Average runs against different opposition played by batsman
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation='vertical')
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost(tendulkarsp,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
    won = playersp[playersp['result'] == 1].copy()
    lost = playersp[(playersp['result']==2) | (playersp['result']==4)].copy()
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + "-" + "- Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
# retrieve the file path of a data file installed with cricketr
batsmanCumulativeAverageRuns(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
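    # Note: this is equivalent to runs.expanding().mean(); the explicit
    # cumsum()/count form is kept to mirror the original implementation.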
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanDismissals
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanDismissals(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = pd.DataFrame(df1)
df2.columns=['Count']
df3=df2.reset_index(inplace=False)
# Plot a pie chart
plt.pie(df3['Count'], labels=df3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanMeanStrikeRate
# This function plots the Mean Strike Rate of the batsman against Runs scored as a continuous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMeanStrikeRate(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
    runs = pd.to_numeric(batsman['Runs'])
# Copyright (c) 2020, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core import DataFrame, Series
from cudf.tests.utils import (
INTEGER_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
def test_series_replace():
a1 = np.array([0, 1, 2, 3, 4])
# Numerical
a2 = np.array([5, 1, 2, 3, 4])
sr1 = Series(a1)
sr2 = sr1.replace(0, 5)
assert_eq(a2, sr2.to_array())
# Categorical
psr3 = pd.Series(["one", "two", "three"], dtype="category")
psr4 = psr3.replace("one", "two")
sr3 = Series.from_pandas(psr3)
sr4 = sr3.replace("one", "two")
assert_eq(psr4, sr4)
psr5 = psr3.replace("one", "five")
sr5 = sr3.replace("one", "five")
assert_eq(psr5, sr5)
# List input
a6 = np.array([5, 6, 2, 3, 4])
sr6 = sr1.replace([0, 1], [5, 6])
assert_eq(a6, sr6.to_array())
with pytest.raises(TypeError):
sr1.replace([0, 1], [5.5, 6.5])
# Series input
a8 = np.array([5, 5, 5, 3, 4])
sr8 = sr1.replace(sr1[:3], 5)
assert_eq(a8, sr8.to_array())
# large input containing null
sr9 = Series(list(range(400)) + [None])
sr10 = sr9.replace([22, 323, 27, 0], None)
assert sr10.null_count == 5
assert len(sr10.to_array()) == (401 - 5)
sr11 = sr9.replace([22, 323, 27, 0], -1)
assert sr11.null_count == 1
assert len(sr11.to_array()) == (401 - 1)
# large input not containing nulls
sr9 = sr9.fillna(-11)
sr12 = sr9.replace([22, 323, 27, 0], None)
assert sr12.null_count == 4
assert len(sr12.to_array()) == (401 - 4)
sr13 = sr9.replace([22, 323, 27, 0], -1)
assert sr13.null_count == 0
assert len(sr13.to_array()) == 401
def test_series_replace_with_nulls():
a1 = np.array([0, 1, 2, 3, 4])
# Numerical
a2 = np.array([-10, 1, 2, 3, 4])
sr1 = Series(a1)
sr2 = sr1.replace(0, None).fillna(-10)
assert_eq(a2, sr2.to_array())
# List input
a6 = np.array([-10, 6, 2, 3, 4])
sr6 = sr1.replace([0, 1], [None, 6]).fillna(-10)
assert_eq(a6, sr6.to_array())
sr1 = Series([0, 1, 2, 3, 4, None])
with pytest.raises(TypeError):
sr1.replace([0, 1], [5.5, 6.5]).fillna(-10)
# Series input
a8 = np.array([-10, -10, -10, 3, 4, -10])
sr8 = sr1.replace(sr1[:3], None).fillna(-10)
assert_eq(a8, sr8.to_array())
a9 = np.array([-10, 6, 2, 3, 4, -10])
sr9 = sr1.replace([0, 1], [None, 6]).fillna(-10)
assert_eq(a9, sr9.to_array())
def test_dataframe_replace():
# numerical
pdf1 = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, 3]})
gdf1 = DataFrame.from_pandas(pdf1)
pdf2 = pdf1.replace(0, 4)
gdf2 = gdf1.replace(0, 4)
assert_eq(gdf2, pdf2)
# categorical
pdf4 = pd.DataFrame(
{"a": ["one", "two", "three"], "b": ["one", "two", "three"]},
dtype="category",
)
gdf4 = DataFrame.from_pandas(pdf4)
pdf5 = pdf4.replace("two", "three")
gdf5 = gdf4.replace("two", "three")
assert_eq(gdf5, pdf5)
# list input
pdf6 = pdf1.replace([0, 1], [4, 5])
gdf6 = gdf1.replace([0, 1], [4, 5])
assert_eq(gdf6, pdf6)
pdf7 = pdf1.replace([0, 1], 4)
gdf7 = gdf1.replace([0, 1], 4)
assert_eq(gdf7, pdf7)
# dict input:
pdf8 = pdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
gdf8 = gdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
assert_eq(gdf8, pdf8)
pdf9 = pdf1.replace({"a": 0}, {"a": 4})
gdf9 = gdf1.replace({"a": 0}, {"a": 4})
assert_eq(gdf9, pdf9)
def test_dataframe_replace_with_nulls():
# numerical
pdf1 = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, 3]})
gdf1 = DataFrame.from_pandas(pdf1)
pdf2 = pdf1.replace(0, 4)
gdf2 = gdf1.replace(0, None).fillna(4)
assert_eq(gdf2, pdf2)
# list input
pdf6 = pdf1.replace([0, 1], [4, 5])
gdf6 = gdf1.replace([0, 1], [4, None]).fillna(5)
assert_eq(gdf6, pdf6)
pdf7 = pdf1.replace([0, 1], 4)
gdf7 = gdf1.replace([0, 1], None).fillna(4)
assert_eq(gdf7, pdf7)
# dict input:
pdf8 = pdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
gdf8 = gdf1.replace({"a": 0, "b": 0}, {"a": None, "b": 5}).fillna(4)
assert_eq(gdf8, pdf8)
gdf1 = DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, None]})
gdf9 = gdf1.replace([0, 1], [4, 5]).fillna(3)
assert_eq(gdf9, pdf6)
def test_replace_strings():
pdf = pd.Series(["a", "b", "c", "d"])
gdf = Series(["a", "b", "c", "d"])
assert_eq(pdf.replace("a", "e"), gdf.replace("a", "e"))
@pytest.mark.parametrize(
"psr",
[
pd.Series([0, 1, None, 2, None], dtype=pd.Int8Dtype()),
pd.Series([0, 1, np.nan, 2, np.nan]),
],
)
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("fill_value", [10, pd.Series([10, 20, 30, 40, 50])])
@pytest.mark.parametrize("inplace", [True, False])
def test_series_fillna_numerical(psr, data_dtype, fill_value, inplace):
test_psr = psr.copy(deep=True)
# TODO: These tests should use Pandas' nullable int type
# when we support a recent enough version of Pandas
# https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
if np.dtype(data_dtype).kind not in ("f") and test_psr.dtype.kind == "i":
test_psr = test_psr.astype(
cudf.utils.dtypes.cudf_dtypes_to_pandas_dtypes[
np.dtype(data_dtype)
]
)
gsr = cudf.from_pandas(test_psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = test_psr.fillna(fill_value, inplace=inplace)
actual = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = test_psr
actual = gsr
# TODO: Remove check_dtype when we have support
# to compare with pandas nullable dtypes
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize(
"data",
[
[1, None, None, 2, 3, 4],
[None, None, 1, 2, None, 3, 4],
[1, 2, None, 3, 4, None, None],
],
)
@pytest.mark.parametrize("container", [pd.Series, pd.DataFrame])
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("method", ["ffill", "bfill"])
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_method_numerical(data, container, data_dtype, method, inplace):
if container == pd.DataFrame:
data = {"a": data, "b": data, "c": data}
pdata = container(data)
if np.dtype(data_dtype).kind not in ("f"):
data_dtype = cudf.utils.dtypes.cudf_dtypes_to_pandas_dtypes[
np.dtype(data_dtype)
]
pdata = pdata.astype(data_dtype)
# Explicitly using nans_as_nulls=True
gdata = cudf.from_pandas(pdata, nan_as_null=True)
expected = pdata.fillna(method=method, inplace=inplace)
actual = gdata.fillna(method=method, inplace=inplace)
if inplace:
expected = pdata
actual = gdata
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize(
"psr",
[
pd.Series(["a", "b", "a", None, "c", None], dtype="category"),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["q", "r", "z", "a", "b", "c"],
),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["x", "t", "p", "q", "r", "z"],
),
pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"),
pd.Series(
[None, None, None, None, None, None, "a", "b", "c"],
dtype="category",
),
],
)
@pytest.mark.parametrize(
"fill_value",
[
"c",
pd.Series(["c", "c", "c", "c", "c", "a"], dtype="category"),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["x", "t", "p", "q", "r", "z"],
),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["q", "r", "z", "a", "b", "c"],
),
pd.Series(["a", "b", "a", None, "c", None], dtype="category"),
pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_categorical(psr, fill_value, inplace):
gsr = Series.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = psr
got = gsr
assert_eq(expected, got)
@pytest.mark.parametrize(
"psr",
[
pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y")),
pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"],
),
],
)
@pytest.mark.parametrize(
"fill_value",
[
pd.Timestamp("2010-01-02"),
pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y"))
+ pd.Timedelta("1d"),
pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"],
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_datetime(psr, fill_value, inplace):
gsr = cudf.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
got = gsr
expected = psr
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
# Categorical
pd.Categorical([1, 2, None, None, 3, 4]),
pd.Categorical([None, None, 1, None, 3, 4]),
pd.Categorical([1, 2, None, 3, 4, None, None]),
pd.Categorical(["1", "20", None, None, "3", "40"]),
pd.Categorical([None, None, "10", None, "30", "4"]),
pd.Categorical(["1", "20", None, "30", "4", None, None]),
# Datetime
np.array(
[
"2020-01-01 08:00:00",
"2020-01-01 09:00:00",
None,
"2020-01-01 10:00:00",
None,
"2020-01-01 10:00:00",
],
dtype="datetime64[ns]",
),
np.array(
[
None,
None,
"2020-01-01 09:00:00",
"2020-01-01 10:00:00",
None,
"2020-01-01 10:00:00",
],
dtype="datetime64[ns]",
),
np.array(
[
"2020-01-01 09:00:00",
None,
None,
"2020-01-01 10:00:00",
None,
None,
],
dtype="datetime64[ns]",
),
# Timedelta
        np.array(
            [10, 100, 1000, None, None, 10, 100, 1000], dtype="timedelta64[ns]"
        ),
        np.array(
            [None, None, 10, None, 1000, 100, 10], dtype="timedelta64[ns]"
        ),
        np.array(
            [10, 100, None, None, 1000, None, None], dtype="timedelta64[ns]"
        ),
# String
np.array(
["10", "100", "1000", None, None, "10", "100", "1000"],
dtype="object",
),
np.array(
[None, None, "1000", None, "10", "100", "10"], dtype="object"
),
np.array(
["10", "100", None, None, "1000", None, None], dtype="object"
),
],
)
@pytest.mark.parametrize("container", [pd.Series, pd.DataFrame])
@pytest.mark.parametrize("method", ["ffill", "bfill"])
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_method_fixed_width_non_num(data, container, method, inplace):
if container == pd.DataFrame:
data = {"a": data, "b": data, "c": data}
pdata = container(data)
# Explicitly using nans_as_nulls=True
gdata = cudf.from_pandas(pdata, nan_as_null=True)
expected = pdata.fillna(method=method, inplace=inplace)
actual = gdata.fillna(method=method, inplace=inplace)
if inplace:
expected = pdata
actual = gdata
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, None], "b": [None, None, 5]}),
pd.DataFrame(
{"a": [1, 2, None], "b": [None, None, 5]}, index=["a", "p", "z"]
),
],
)
@pytest.mark.parametrize(
"value",
[
10,
pd.Series([10, 20, 30]),
pd.Series([3, 4, 5]),
pd.Series([10, 20, 30], index=["z", "a", "p"]),
{"a": 5, "b": pd.Series([3, 4, 5])},
{"a": 5001},
{"b": pd.Series([11, 22, 33], index=["a", "p", "z"])},
{"a": 5, "b": pd.Series([3, 4, 5], index=["a", "p", "z"])},
{"c": 100},
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_dataframe(df, value, inplace):
pdf = df.copy(deep=True)
gdf = DataFrame.from_pandas(pdf)
fill_value_pd = value
if isinstance(fill_value_pd, (pd.Series, pd.DataFrame)):
fill_value_cudf = cudf.from_pandas(fill_value_pd)
elif isinstance(fill_value_pd, dict):
fill_value_cudf = {}
for key in fill_value_pd:
temp_val = fill_value_pd[key]
if isinstance(temp_val, pd.Series):
temp_val = cudf.from_pandas(temp_val)
fill_value_cudf[key] = temp_val
else:
fill_value_cudf = value
expect = pdf.fillna(fill_value_pd, inplace=inplace)
got = gdf.fillna(fill_value_cudf, inplace=inplace)
if inplace:
got = gdf
expect = pdf
assert_eq(expect, got)
@pytest.mark.parametrize(
"psr",
[
pd.Series(["a", "b", "c", "d"]),
pd.Series([None] * 4, dtype="object"),
pd.Series(["z", None, "z", None]),
pd.Series(["x", "y", None, None, None]),
pd.Series([None, None, None, "i", "P"]),
],
)
@pytest.mark.parametrize(
"fill_value",
[
"a",
pd.Series(["a", "b", "c", "d"]),
pd.Series(["z", None, "z", None]),
pd.Series([None] * 4, dtype="object"),
pd.Series(["x", "y", None, None, None]),
pd.Series([None, None, None, "i", "P"]),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_string(psr, fill_value, inplace):
gsr = cudf.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = psr
got = gsr
assert_eq(expected, got)
@pytest.mark.parametrize("data_dtype", INTEGER_TYPES)
def test_series_fillna_invalid_dtype(data_dtype):
gdf = Series([1, 2, None, 3], dtype=data_dtype)
fill_value = 2.5
with pytest.raises(TypeError) as raises:
gdf.fillna(fill_value)
raises.match(
f"Cannot safely cast non-equivalent"
f" {type(fill_value).__name__} to {gdf.dtype.type.__name__}"
)
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("fill_value", [100, 100.0, 128.5])
def test_series_where(data_dtype, fill_value):
psr = pd.Series(list(range(10)), dtype=data_dtype)
sr = Series.from_pandas(psr)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr > 0, fill_value)
else:
# Cast back to original dtype as pandas automatically upcasts
expect = psr.where(psr > 0, fill_value).astype(psr.dtype)
got = sr.where(sr > 0, fill_value)
assert_eq(expect, got)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr < 0, fill_value)
else:
expect = psr.where(psr < 0, fill_value).astype(psr.dtype)
got = sr.where(sr < 0, fill_value)
assert_eq(expect, got)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr == 0, fill_value)
else:
expect = psr.where(psr == 0, fill_value).astype(psr.dtype)
got = sr.where(sr == 0, fill_value)
assert_eq(expect, got)
@pytest.mark.parametrize("fill_value", [100, 100.0, 100.5])
def test_series_with_nulls_where(fill_value):
psr = pd.Series([None] * 3 + list(range(5)))
sr = Series.from_pandas(psr)
expect = psr.where(psr > 0, fill_value)
got = sr.where(sr > 0, fill_value)
assert_eq(expect, got)
expect = psr.where(psr < 0, fill_value)
got = sr.where(sr < 0, fill_value)
assert_eq(expect, got)
expect = psr.where(psr == 0, fill_value)
got = sr.where(sr == 0, fill_value)
assert_eq(expect, got)
@pytest.mark.parametrize("fill_value", [[888, 999]])
def test_dataframe_with_nulls_where_with_scalars(fill_value):
pdf = pd.DataFrame(
{
"A": [-1, 2, -3, None, 5, 6, -7, 0],
"B": [4, -2, 3, None, 7, 6, 8, 0],
}
)
gdf = DataFrame.from_pandas(pdf)
expect = pdf.where(pdf % 3 == 0, fill_value)
got = gdf.where(gdf % 3 == 0, fill_value)
assert_eq(expect, got)
def test_dataframe_with_different_types():
# Testing for int and float
pdf = pd.DataFrame(
{"A": [111, 22, 31, 410, 56], "B": [-10.12, 121.2, 45.7, 98.4, 87.6]}
)
gdf = DataFrame.from_pandas(pdf)
expect = pdf.where(pdf > 50, -pdf)
got = gdf.where(gdf > 50, -gdf)
assert_eq(expect, got)
# Testing for string
    pdf = pd.DataFrame({"A": ["a", "bc", "cde", "fghi"]})
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Representation Probing """
from typing import Optional, Tuple, Union
from absl import app
from absl import logging
from datasets import DatasetDict
from datasets import load_from_disk
from einops import rearrange
from einops import repeat
import jax
import jax.numpy as jnp
import jax.random as jr
import numpy as np
import pandas as pd
from pandas import DataFrame
import toolz.curried as T
from tqdm import trange
import tree
from probing._src.configurable import configurable
from probing._src.constants import COLORS
from probing.representations import data
from probing.representations import models
@configurable
def repr_probing( # pytype: disable=annotation-type-mismatch
repr_ds: Optional[str] = None,
preds_path: Optional[str] = None,
results_path: Optional[str] = None,
seed: int = 12345,
nb_seeds: int = 5,
nb_points: int = 10,
batch_size: int = 64,
n_training_steps: int = 4000,
max_parallel: int = -1,
log_freq: int = 0,
max_batch_size: int = 1024,
ds_fits_in_vram: bool = True,
learning_rate: float = 1e-4,
    hidden_sizes: Tuple[int, ...] = (512, 512),
validation_split: str = 'validation',
) -> Tuple[DataFrame, DataFrame]:
"""Run representation probing.
Depending on the representation size, we may need to do jobs in smaller
batches.
Args:
seed: Random seed
nb_seeds: Number of random seeds per point
nb_points: Number of point to run along the curve
batch_size: Batch size for each model.
n_training_steps: Number of training steps.
max_parallel: Maximum number of models that can be trained in parallel.
log_freq: Logging frequency
max_batch_size: Maximum batch size to use during evaluation.
learning_rate: Learning rate
hidden_sizes: Size of each hidden layer.
    repr_ds: Directory containing a HF dataset with representations.
    preds_path: Path to store predictions.
    results_path: Path to store results in.
ds_fits_in_vram: predicate indicating if the dataset fits in VRAM. This
should only be set as a last resort, max_parallel is much faster.
    validation_split: Split to use for calculating validation metrics. This
      should be `validation` or `test`.
"""
if not isinstance(repr_ds, DatasetDict):
repr_ds = load_from_disk(repr_ds)
if validation_split == 'train':
raise ValueError(
'validation split cannot be train, choose one of "validation" or "test".'
)
if validation_split == "test":
logging.warning('received validation_split="test".')
jobs = data.generate_jobs(
repr_ds['train'],
nb_seeds=nb_seeds,
nb_points=nb_points,
seed=seed,
)
# configure chex compile assertions
chex_expect_num_compile = 1
if len(jobs) % max_parallel != 0:
logging.warning(
        'the # of jobs (%d) should be divisible by max_parallel (%d), otherwise '
'jax will have to recompile every step for the last set of models.',
len(jobs), max_parallel)
chex_expect_num_compile = 2
val_ds = repr_ds[validation_split]
# Create RNGs
# Initialise the model's parameters and the optimiser's state.
# each initialization uses a different rng.
n_models = len(jobs)
rng = jr.PRNGKey(seed)
rngs = jr.split(rng, n_models)
rngs, init_rngs, data_rngs = zip(*[jr.split(rng, 3) for rng in rngs])
  train_ds = repr_ds['train']
# build models.
# Depending on the representation size, we may need to do jobs in smaller
# batches, however, we will maintain the same functions throughout.
# only the parameter sets need get reset.
input_shape = np.shape(train_ds[0]['hidden_states'])
n_classes = len(train_ds[0]['label'])
init_fn, update_fn, metrics_fn = models.build_models(
input_shape,
hidden_sizes,
batch_size=batch_size,
n_classes=n_classes,
learning_rate=learning_rate)
# create train iter
train_iter = data.jax_multi_iterator(
train_ds,
batch_size,
ds_fits_in_vram=ds_fits_in_vram,
max_traces=chex_expect_num_compile,
)
# add vmaps
update_fn = jax.vmap(update_fn)
# validation function uses the same data for all models.
valid_fn = jax.vmap(metrics_fn, in_axes=(0, None))
evaluate = models.evaluate(valid_fn, val_ds, max_batch_size)
# Create inner loop --->
inner_loop = _repr_curve_inner(train_iter, init_fn, update_fn, evaluate,
log_freq, n_training_steps)
# zip up the rngs into jobs and partition s.t. < max_parallel
inner_jobs = list(zip(jobs, rngs, init_rngs, data_rngs))
if max_parallel > 0:
inner_jobs = T.partition_all(max_parallel, inner_jobs)
else:
inner_jobs = [inner_jobs]
records, preds = zip(*T.map(inner_loop, inner_jobs))
df = _format_predictions(val_ds, preds, jobs)
# store results
results = _generate_results(records)
  df_result = pd.DataFrame.from_records(results)
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from sklearn.pipeline import Pipeline
from hcrystalball.feature_extraction import HolidayTransformer
@pytest.mark.parametrize(
"X_y_with_freq, country_code, country_code_column, country_code_column_value, extected_error",
[
("series_with_freq_D", "DE", None, None, None),
("series_with_freq_D", None, "holiday_col", "DE", None),
("series_with_freq_M", "DE", None, None, ValueError), # not daily freq
("series_with_freq_Q", "DE", None, None, ValueError), # not daily freq
("series_with_freq_Y", "DE", None, None, ValueError), # not daily freq
(
"series_with_freq_D",
None,
"holiday_colsssss",
"DE",
KeyError,
), # there needs to be holiday_col in X
(
"series_with_freq_D",
None,
None,
None,
ValueError,
), # needs to have country_code or country_code_column
(
"series_with_freq_D",
"LALA",
"LALA",
None,
ValueError,
), # cannot have country_code and country_code_column in the same time
(
"series_with_freq_D",
"LALA",
None,
None,
ValueError,
), # country_code needs to be proper country
(
"series_with_freq_D",
None,
"holiday_col",
"Lala",
ValueError,
), # country_code needs to be proper country
],
indirect=["X_y_with_freq"],
)
def test_holiday_transformer_inputs(
X_y_with_freq,
country_code,
country_code_column,
country_code_column_value,
extected_error,
):
X, _ = X_y_with_freq
if extected_error is not None:
with pytest.raises(extected_error):
holiday_transformer = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
)
if country_code_column:
X["holiday_col"] = country_code_column_value
holiday_transformer.fit_transform(X)
else:
holiday_transformer = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
)
if country_code_column:
X[country_code_column] = country_code_column_value
holiday_transformer.fit_transform(X)
if country_code_column:
assert holiday_transformer.get_params()["country_code"] is None
@pytest.mark.parametrize(
"country_code, country_code_column, country_code_column_value, exp_col_name",
[
("CZ", None, None, "_holiday_CZ"),
(None, "holiday_col", "CZ", "_holiday_holiday_col"),
],
)
def test_holiday_transformer_transform(
country_code, country_code_column, country_code_column_value, exp_col_name
):
expected = {exp_col_name: ["Labour Day", "", "", "", "", "", "", "Liberation Day", "", ""]}
X = pd.DataFrame(index=pd.date_range(start="2019-05-01", periods=10))
    df_expected = pd.DataFrame(expected, index=X.index)
import datetime as dt
import numpy as np
import pathlib
import pandas as pd
from functools import partial
from .deprecations import deprecated_kwargs
from . import utils
from copy import deepcopy
from collections import OrderedDict
from collections.abc import Iterable
from openpyxl import load_workbook
from openpyxl.cell.cell import get_column_letter
from openpyxl.xml.functions import fromstring, QName
from openpyxl.utils import cell
from styleframe.container import Container
from styleframe.series import Series
from styleframe.styler import Styler, ColorScaleConditionalFormatRule
try:
pd_timestamp = pd.Timestamp
except AttributeError:
pd_timestamp = pd.tslib.Timestamp
class StyleFrame:
"""
A wrapper class that wraps a :class:`pandas.DataFrame` object and represent a stylized dataframe.
Stores container objects that have values and styles that will be applied to excel
:param obj: Any object that pandas' dataframe can be initialized with: an existing dataframe, a dictionary,
a list of dictionaries or another StyleFrame.
:param styler_obj: Will be used as the default style of all cells.
:type styler_obj: :class:`.Styler`
"""
P_FACTOR = 1.3
A_FACTOR = 13
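    # P_FACTOR and A_FACTOR are heuristic scale/offset constants, presumably
    # applied when auto-fitting column widths for the Excel output.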
def __init__(self, obj, styler_obj=None):
from_another_styleframe = False
from_pandas_dataframe = False
if styler_obj and not isinstance(styler_obj, Styler):
raise TypeError('styler_obj must be {}, got {} instead.'.format(Styler.__name__, type(styler_obj).__name__))
if isinstance(obj, pd.DataFrame):
from_pandas_dataframe = True
if obj.empty:
self.data_df = deepcopy(obj)
else:
self.data_df = obj.applymap(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, pd.Series):
self.data_df = obj.apply(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, (dict, list)):
self.data_df = pd.DataFrame(obj).applymap(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, StyleFrame):
self.data_df = deepcopy(obj.data_df)
from_another_styleframe = True
else:
raise TypeError("{} __init__ doesn't support {}".format(type(self).__name__, type(obj).__name__))
self.data_df.columns = [Container(col, deepcopy(styler_obj)) if not isinstance(col, Container) else deepcopy(col)
for col in self.data_df.columns]
self.data_df.index = [Container(index, deepcopy(styler_obj)) if not isinstance(index, Container) else deepcopy(index)
for index in self.data_df.index]
if from_pandas_dataframe:
self.data_df.index.name = obj.index.name
self._columns_width = obj._columns_width if from_another_styleframe else OrderedDict()
self._rows_height = obj._rows_height if from_another_styleframe else OrderedDict()
self._has_custom_headers_style = obj._has_custom_headers_style if from_another_styleframe else False
self._cond_formatting = []
self._default_style = styler_obj or Styler()
self._index_header_style = obj._index_header_style if from_another_styleframe else self._default_style
self._known_attrs = {'at': self.data_df.at,
'loc': self.data_df.loc,
'iloc': self.data_df.iloc,
'applymap': self.data_df.applymap,
'groupby': self.data_df.groupby,
'index': self.data_df.index,
'fillna': self.data_df.fillna}
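    # A minimal usage sketch (illustrative only; the column names and output
    # path are hypothetical):
    #
    #   sf = StyleFrame({'a': [1, 2], 'b': [3, 4]}, styler_obj=Styler(bold=True))
    #   ew = sf.to_excel('output.xlsx')
    #   ew.save()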
def __str__(self):
return str(self.data_df)
def __len__(self):
return len(self.data_df)
def __getitem__(self, item):
if isinstance(item, pd.Series):
return self.data_df.__getitem__(item).index
if isinstance(item, list):
return StyleFrame(self.data_df.__getitem__(item))
return Series(self.data_df.__getitem__(item))
def __setitem__(self, key, value):
if isinstance(value, (Iterable, pd.Series)):
self.data_df.__setitem__(Container(key), list(map(Container, value)))
else:
self.data_df.__setitem__(Container(key), Container(value))
def __delitem__(self, item):
return self.data_df.__delitem__(item)
def __getattr__(self, attr):
if attr in self.data_df.columns:
return self.data_df[attr]
try:
return self._known_attrs[attr]
except KeyError:
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
@property
def columns(self):
return self.data_df.columns
@columns.setter
def columns(self, columns):
self.data_df.columns = [col if isinstance(col, Container) else Container(value=col)
for col in columns]
def _get_column_as_letter(self, sheet, column_to_convert, startcol=0):
col = column_to_convert.value if isinstance(column_to_convert, Container) else column_to_convert
if not isinstance(col, (int, str)):
raise TypeError("column must be an index, column letter or column name")
column_as_letter = None
if col in self.data_df.columns: # column name
column_index = self.data_df.columns.get_loc(col) + startcol + 1 # worksheet columns index start from 1
column_as_letter = cell.get_column_letter(column_index)
# column index
elif isinstance(col, int) and col >= 1:
column_as_letter = cell.get_column_letter(startcol + col)
# assuming we got column letter
elif isinstance(col, str) and col <= get_column_letter(sheet.max_column):
column_as_letter = col
if column_as_letter is None or cell.column_index_from_string(column_as_letter) > sheet.max_column:
raise IndexError("column: %s is out of columns range." % column_to_convert)
return column_as_letter
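    # For example, with startcol=0: a value equal to the first column's name resolves to 'A',
    # the integer 2 resolves to 'B', and a string such as 'C' is returned as-is as long as it
    # does not lie beyond the sheet's last column.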
@classmethod
def read_excel(cls, path, sheet_name=0, read_style=False, use_openpyxl_styles=False,
read_comments=False, **kwargs):
"""
Creates a StyleFrame object from an existing Excel.
.. note:: :meth:`read_excel` also accepts all arguments that :func:`pandas.read_excel` accepts as kwargs.
:param str path: The path to the Excel file to read.
:param sheetname:
.. deprecated:: 1.6
Use ``sheet_name`` instead.
.. versionchanged:: 4.0
Removed
        :param sheet_name: The sheet name to read. If an integer is provided then it will be used as a zero-based
            sheet index. Default is 0.
:type sheet_name: str or int
:param bool read_style: If ``True`` the sheet's style will be loaded to the returned StyleFrame object.
:param bool use_openpyxl_styles: If ``True`` (and `read_style` is also ``True``) then the styles in the returned
StyleFrame object will be Openpyxl's style objects. If ``False``, the styles will be :class:`.Styler` objects.
.. note:: Using ``use_openpyxl_styles=False`` is useful if you are going to filter columns or rows by style, for example:
::
sf = sf[[col for col in sf.columns if col.style.font == utils.fonts.arial]]
:param bool read_comments: If ``True`` (and `read_style` is also ``True``) cells' comments will be loaded to the returned StyleFrame object. Note
that reading comments without reading styles is currently not supported.
:return: StyleFrame object
:rtype: :class:`StyleFrame`
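        A minimal usage sketch (the path and sheet name below are placeholders)::

            sf = StyleFrame.read_excel('report.xlsx', sheet_name='Sheet1', read_style=True)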
"""
def _get_scheme_colors_from_excel(wb):
xlmns = 'http://schemas.openxmlformats.org/drawingml/2006/main'
if wb.loaded_theme is None:
return []
root = fromstring(wb.loaded_theme)
theme_element = root.find(QName(xlmns, 'themeElements').text)
color_schemes = theme_element.findall(QName(xlmns, 'clrScheme').text)
colors = []
for colorScheme in color_schemes:
for tag in ['lt1', 'dk1', 'lt2', 'dk2', 'accent1', 'accent2', 'accent3', 'accent4', 'accent5', 'accent6']:
accent = list(colorScheme.find(QName(xlmns, tag).text))[0]
if 'window' in accent.attrib['val']:
colors.append(accent.attrib['lastClr'])
else:
colors.append(accent.attrib['val'])
return colors
def _get_style_object(sheet, theme_colors, row, column):
cell = sheet.cell(row=row, column=column)
if use_openpyxl_styles:
return cell
else:
return Styler.from_openpyxl_style(cell, theme_colors,
read_comments and cell.comment)
def _read_style():
wb = load_workbook(path)
if isinstance(sheet_name, str):
sheet = wb[sheet_name]
elif isinstance(sheet_name, int):
sheet = wb.worksheets[sheet_name]
else:
raise TypeError("'sheet_name' must be a string or int, got {} instead".format(type(sheet_name)))
theme_colors = _get_scheme_colors_from_excel(wb)
# Set the headers row height
if header_arg is not None:
headers_row_idx = header_arg + 1
sf._rows_height[headers_row_idx] = sheet.row_dimensions[headers_row_idx].height
get_style_object = partial(_get_style_object, sheet=sheet, theme_colors=theme_colors)
for col_index, col_name in enumerate(sf.columns):
col_index_in_excel = col_index + 1
if col_index_in_excel == excel_index_col:
for row_index, sf_index in enumerate(sf.index, start=2):
sf_index.style = get_style_object(row=row_index, column=col_index_in_excel)
col_index_in_excel += 1 # Move next to excel indices column
sf.columns[col_index].style = get_style_object(row=1, column=col_index_in_excel)
for row_index, sf_index in enumerate(sf.index, start=start_row_index):
sf.at[sf_index, col_name].style = get_style_object(row=row_index, column=col_index_in_excel)
sf._rows_height[row_index] = sheet.row_dimensions[row_index].height
sf._columns_width[col_name] = sheet.column_dimensions[sf._get_column_as_letter(sheet, col_name)].width
header_arg = kwargs.get('header', 0)
if read_style and isinstance(header_arg, Iterable):
raise ValueError('Not supporting multiple index columns with read style.')
if header_arg is None:
start_row_index = 1
else:
start_row_index = header_arg + 2
index_col = kwargs.get('index_col')
excel_index_col = index_col + 1 if index_col is not None else None
if read_style and isinstance(excel_index_col, Iterable):
raise ValueError('Not supporting multiple index columns with read style.')
sf = cls(pd.read_excel(path, sheet_name, **kwargs))
if read_style:
_read_style()
sf._has_custom_headers_style = True
return sf
@classmethod
def read_excel_as_template(cls, path, df, use_df_boundaries=False, **kwargs):
"""
.. versionadded:: 3.0.1
        Create a StyleFrame object from an Excel template, filled with the data of the given DataFrame.
.. note:: :meth:`read_excel_as_template` also accepts all arguments that :meth:`read_excel` accepts as kwargs except for ``read_style`` which must be ``True``.
:param str path: The path to the Excel file to read.
:param df: The data to apply to the given template.
:type df: :class:`pandas.DataFrame`
:param bool use_df_boundaries: If ``True`` the template will be cut according to the boundaries of the given DataFrame.
:return: StyleFrame object
:rtype: :class:`StyleFrame`
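        A minimal usage sketch (the template path and data below are placeholders)::

            df = pd.DataFrame({'sales': [100, 200]})
            sf = StyleFrame.read_excel_as_template('template.xlsx', df, use_df_boundaries=True)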
"""
sf = cls.read_excel(path=path, read_style=True, **kwargs)
num_of_rows, num_of_cols = len(df.index), len(df.columns)
template_num_of_rows, template_num_of_cols = len(sf.index), len(sf.columns)
num_of_cols_to_copy_with_style = min(num_of_cols, template_num_of_cols)
num_of_rows_to_copy_with_style = min(num_of_rows, template_num_of_rows)
for col_index in range(num_of_cols_to_copy_with_style):
for row_index in range(num_of_rows_to_copy_with_style):
sf.iloc[row_index, col_index].value = df.iloc[row_index, col_index]
# Insert extra data in cases where the df is larger than the template.
for extra_col in df.columns[template_num_of_cols:]:
sf[extra_col] = df[extra_col][:template_num_of_rows]
for row_index in df.index[template_num_of_rows:]:
sf_index = Container(value=row_index)
sf.loc[sf_index] = list(map(Container, df.loc[row_index]))
sf.rename({sf.columns[col_index].value: df_col
for col_index, df_col in enumerate(df.columns)},
inplace=True)
if use_df_boundaries:
sf.data_df = sf.data_df.iloc[:num_of_rows, :num_of_cols]
rows_height = OrderedDict()
rows_height_range = range(num_of_rows)
for i, (k, v) in enumerate(sf._rows_height.items()):
if i in rows_height_range:
rows_height[k] = v
sf._rows_height = rows_height
columns_width = OrderedDict()
columns_width_range = range(num_of_cols)
for i, (k, v) in enumerate(sf._columns_width.items()):
if i in columns_width_range:
columns_width[k] = v
sf._columns_width = columns_width
return sf
# noinspection PyPep8Naming
@classmethod
def ExcelWriter(cls, path, **kwargs):
"""
A shortcut for :class:`pandas.ExcelWriter`, and accepts any argument it accepts except for ``engine``
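        A usage sketch (the output path is a placeholder)::

            writer = StyleFrame.ExcelWriter('styled_output.xlsx')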
"""
if 'engine' in kwargs:
raise ValueError('`engine` argument for StyleFrame.ExcelWriter can not be set')
        return pd.ExcelWriter(path, engine='openpyxl', **kwargs)
import pandas as pd
import pickle
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
import numpy as np
import datetime as dt
from LDA import remove_stopwords, lemmatization, make_bigrams, sent_to_words
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# LOAD CLUSTERING MODEL
with open("data/cluster_model.pkl", "rb") as f:
cluster_model = pickle.load(f)
# LOAD LDA MODEL
lda_model = gensim.models.LdaModel.load('data/LDA/lda.model')
id2word = corpora.Dictionary.load('data/LDA/lda.model.id2word')
def get_interests():
"""
Load the raw interest csv file.
:return: The full interest.csv file in pandas dataframe
"""
interest = pd.read_csv('data/interest.csv')
return(interest)
def get_posts():
"""
Load the raw posts csv file.
:return: The full posts.csv file in pandas dataframe
"""
posts = pd.read_csv('data/posts.csv')
return(posts)
def get_users():
"""
Load the raw users csv file.
:return: The full users.csv file in pandas dataframe
"""
users = pd.read_csv('data/users.csv')
return(users)
def filter_posts(uid,date):
"""
Returns posts that have been filtered to be before a given date and aren't owned by the user
:param uid (str): user-id to filter by
:param date (str): date value to filter by
    :return: pandas dataframe of posts made before the given date and not owned by the user
"""
posts = get_posts()
posts = posts[posts['uid'] != uid]
posts = posts[posts['post_time'] < date]
return posts
def get_user_data(uid):
"""
Returns the selected user account information
:param uid (str): user-id
:return: single-row pandas dataframe of user account information
"""
users = get_users()
user = users[users['uid'] == uid].reset_index(drop=True)
return user
def get_user_interest(uid):
"""
Returns the selected user interest information
:param uid (str): user-id
:return: single-row pandas dataframe of user interest information
"""
interests = get_interests()
interest = interests[interests['uid'] == uid].reset_index(drop=True)
return interest
def cluster_user(uid):
"""
    Returns the cluster ID assigned to the selected user by the clustering model
    :param uid (str): user-id
    :return: single integer cluster ID
"""
# Load needed data for user
users = get_user_data(uid)
interests = get_user_interest(uid)
# Create Age Buckets for clustering
users['date'] = pd.to_datetime(users['dob'], format='%d/%m/%Y', errors='coerce')
users['age'] = dt.datetime.now() - users['date']
users['age'] = (users['age']).dt.days
users['age'] = users['age']/365
users['age_cat'] = np.where(users['age']<20,1,
np.where((users['age']>=20) & (users['age']<25),2,
np.where((users['age']>=25) & (users['age']<30),3,
np.where((users['age']>=30) & (users['age']<35),4,
np.where((users['age']>=35) & (users['age']<40),5,
np.where((users['age']>=40) & (users['age']<45),6,
np.where((users['age']>=45) & (users['age']<50),7,
np.where((users['age']>=50) & (users['age']<55),8,
np.where((users['age']>=55) & (users['age']<60),9,
np.where((users['age']>=60) & (users['age']<65),10,11))))))))))
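    # The nested np.where above is roughly equivalent to binning with pd.cut (sketch, assuming
    # ages are non-negative and non-null):
    #     bins = [0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, np.inf]
    #     users['age_cat'] = pd.cut(users['age'], bins=bins, labels=range(1, 12), right=False)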
user_age = users[['uid', 'age_cat']]
    user = pd.merge(users, interests, left_on='uid', right_on='uid', how='left')
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, hour=23,
minute=59, second=59)
self.assertEqual(ival_W.asfreq('A'), ival_W_to_A)
self.assertEqual(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
self.assertEqual(ival_W.asfreq('Q'), ival_W_to_Q)
self.assertEqual(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
self.assertEqual(ival_W.asfreq('M'), ival_W_to_M)
self.assertEqual(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
self.assertEqual(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
self.assertEqual(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
self.assertEqual(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
self.assertEqual(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
self.assertEqual(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
self.assertEqual(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
self.assertEqual(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
self.assertEqual(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
self.assertEqual(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
self.assertEqual(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
self.assertEqual(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
self.assertEqual(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
self.assertEqual(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
self.assertEqual(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
self.assertEqual(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
self.assertEqual(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
self.assertEqual(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
self.assertEqual(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
self.assertEqual(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
self.assertEqual(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
self.assertEqual(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
self.assertEqual(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
self.assertEqual(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
self.assertEqual(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
self.assertEqual(ival_W.asfreq('W'), ival_W)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
ival_W.asfreq('WK')
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=1)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-SAT', year=2007, month=1, day=6)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-FRI', year=2007, month=1, day=5)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-THU', year=2007, month=1, day=4)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-WED', year=2007, month=1, day=3)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-TUE', year=2007, month=1, day=2)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-MON', year=2007, month=1, day=1)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_B.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B.asfreq('D'), ival_B_to_D)
self.assertEqual(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
self.assertEqual(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
self.assertEqual(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
self.assertEqual(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
self.assertEqual(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
self.assertEqual(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
self.assertEqual(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_D.asfreq('A'), ival_D_to_A)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
self.assertEqual(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
self.assertEqual(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
self.assertEqual(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
self.assertEqual(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
self.assertEqual(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
self.assertEqual(ival_D.asfreq('M'), ival_D_to_M)
self.assertEqual(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
self.assertEqual(ival_D.asfreq('W'), ival_D_to_W)
self.assertEqual(ival_D_end_of_week.asfreq('W'), ival_D_to_W)
self.assertEqual(ival_D_friday.asfreq('B'), ival_B_friday)
self.assertEqual(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
self.assertEqual(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
self.assertEqual(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
self.assertEqual(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
self.assertEqual(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
self.assertEqual(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
self.assertEqual(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
self.assertEqual(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
self.assertEqual(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
self.assertEqual(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
self.assertEqual(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=59, second=59)
self.assertEqual(ival_H.asfreq('A'), ival_H_to_A)
self.assertEqual(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
self.assertEqual(ival_H.asfreq('Q'), ival_H_to_Q)
self.assertEqual(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
self.assertEqual(ival_H.asfreq('M'), ival_H_to_M)
self.assertEqual(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
self.assertEqual(ival_H.asfreq('W'), ival_H_to_W)
self.assertEqual(ival_H_end_of_week.asfreq('W'), ival_H_to_W)
self.assertEqual(ival_H.asfreq('D'), ival_H_to_D)
self.assertEqual(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
self.assertEqual(ival_H.asfreq('B'), ival_H_to_B)
self.assertEqual(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
self.assertEqual(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
self.assertEqual(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
self.assertEqual(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
self.assertEqual(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
self.assertEqual(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
        # frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=59)
self.assertEqual(ival_T.asfreq('A'), ival_T_to_A)
self.assertEqual(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
self.assertEqual(ival_T.asfreq('Q'), ival_T_to_Q)
self.assertEqual(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
self.assertEqual(ival_T.asfreq('M'), ival_T_to_M)
self.assertEqual(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
self.assertEqual(ival_T.asfreq('W'), ival_T_to_W)
self.assertEqual(ival_T_end_of_week.asfreq('W'), ival_T_to_W)
self.assertEqual(ival_T.asfreq('D'), ival_T_to_D)
self.assertEqual(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
self.assertEqual(ival_T.asfreq('B'), ival_T_to_B)
self.assertEqual(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
self.assertEqual(ival_T.asfreq('H'), ival_T_to_H)
self.assertEqual(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
self.assertEqual(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
self.assertEqual(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
self.assertEqual(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
        # frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1, hour=0, minute=0,
second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
        ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
import numpy as np
import pandas as pd
import xarray as xr
import typing as tp
NdType = tp.Union[np.ndarray, pd.DataFrame, xr.DataArray, pd.Series]
NdTupleType = tp.Union[
tp.Tuple[NdType],
tp.Tuple[NdType, NdType],
tp.Tuple[NdType, NdType, NdType],
tp.Tuple[NdType, NdType, NdType, NdType],
]
XR_TIME_DIMENSION = "time"
def nd_universal_adapter(d1_function, nd_args: NdTupleType, plain_args: tuple) -> NdType:
if isinstance(nd_args[0], np.ndarray):
return nd_np_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], pd.DataFrame):
return nd_pd_df_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], pd.Series):
return nd_pd_s_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], xr.DataArray):
return nd_xr_da_adapter(d1_function, nd_args, plain_args)
raise Exception("unsupported")
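# Usage sketch: apply a 1-D function along the last axis of any supported container. Here
# `d1_sma(arr, n)` is a hypothetical user-supplied function (not part of this module) that
# computes a simple moving average over a 1-D numpy array:
#     prices = pd.DataFrame(np.random.rand(100, 5))
#     smoothed = nd_universal_adapter(d1_sma, (prices,), (20,))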
def nd_np_adapter(d1_function, nd_args: tp.Tuple[np.ndarray], plain_args: tuple) -> np.ndarray:
shape = nd_args[0].shape
if len(shape) == 1:
args = nd_args + plain_args
return d1_function(*args)
nd_args_2d = tuple(a.reshape(-1, shape[-1]) for a in nd_args)
result2d = np.empty_like(nd_args_2d[0], )
for i in range(nd_args_2d[0].shape[0]):
slices = tuple(a[i] for a in nd_args_2d)
args = slices + plain_args
result2d[i] = d1_function(*args)
return result2d.reshape(shape)
def nd_pd_df_adapter(d1_function, nd_args: tp.Tuple[pd.DataFrame], plain_args: tuple) -> pd.DataFrame:
np_nd_args = tuple(a.to_numpy().transpose() for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.DataFrame(np_result, columns=nd_args[0].columns, index=nd_args[0].index)
def nd_pd_s_adapter(d1_function, nd_args: tp.Tuple[pd.Series], plain_args: tuple) -> pd.Series:
np_nd_args = tuple(a.to_numpy() for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.Series(np_result, nd_args[0].index)
def nd_xr_da_adapter(d1_function, nd_args: tp.Tuple[xr.DataArray], plain_args: tuple) -> xr.DataArray:
origin_dims = nd_args[0].dims
transpose_dims = tuple(i for i in origin_dims if i != XR_TIME_DIMENSION) + (XR_TIME_DIMENSION,)
np_nd_args = tuple(a.transpose(*transpose_dims).values for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
return xr.DataArray(np_result, dims=transpose_dims, coords=nd_args[0].coords).transpose(*origin_dims)
def nd_to_1d_universal_adapter(np_function, nd_args: NdTupleType, plain_args: tuple) -> NdType:
if isinstance(nd_args[0], np.ndarray):
return nd_to_1d_np_adapter(nd_args, plain_args)
if isinstance(nd_args[0], pd.DataFrame):
return nd_to_1d_pd_df_adapter(np_function, nd_args, plain_args)
if isinstance(nd_args[0], xr.DataArray):
return nd_to_1d_xr_da_adapter(np_function, nd_args, plain_args)
raise Exception("unsupported")
def nd_to_1d_np_adapter(np_function, nd_args: tp.Tuple[np.ndarray], plain_args: tuple) -> np.ndarray:
args = nd_args + plain_args
return np_function(*args)
def nd_to_1d_pd_df_adapter(np_function, nd_args: tp.Tuple[pd.DataFrame], plain_args: tuple) -> pd.Series:
np_nd_args = tuple(a.to_numpy().transpose() for a in nd_args)
np_result = nd_to_1d_np_adapter(np_function, np_nd_args, plain_args)
np_result = np_result.transpose()
    return pd.Series(np_result, index=nd_args[0].index)
"""Module containg test of chela modules"""
import sys
sys.path.append('chela/')
import chela
import pandas as pd
import numpy as np
import pytest
@pytest.mark.parametrize('basic_check_formula',
[
'',
'1',
'123H',
'Al9o2',
'3O',
'?Ge',
'Mn!',
'O#F',
])
class TestBasicFormula:
"""Tests for basic check formula as function and as method.
Test if the check_formula detect void formulas,incorrect characters,formulas starting with numbers, only numbers.
"""
def test_basic_check_formula(self,basic_check_formula):
with pytest.raises(ValueError):
chela.basic_check_formula(basic_check_formula)
def test_first_part_check_formula(self,basic_check_formula):
with pytest.raises(ValueError):
chela.check_formula(basic_check_formula)
def test_pandas_ext_check_formula(self,basic_check_formula):
with pytest.raises(ValueError):
pd.DataFrame().chela.check_formula(basic_check_formula)
@pytest.mark.parametrize('basic_check_formula',
[
'H',
'Al9',
'OH',
'Ge',
'Mn91Al1',
'OFH',
'Mn42.3Al63.1Fe21.0'
])
class TestValidBasicFormula:
    """Tests for basic check formula as function and as method.
    Test that valid formulas do not raise and that the checks return a falsy value.
    """
def test_basic_check_formula(self,basic_check_formula):
assert not chela.basic_check_formula(basic_check_formula)
def test_first_part_check_formula(self,basic_check_formula):
assert not chela.check_formula(basic_check_formula)
def test_pandas_ext_check_formula(self,basic_check_formula):
assert not pd.DataFrame().chela.check_formula(basic_check_formula)
@pytest.mark.parametrize('advanced_check_formula',
[
'H0',
'H2O0',
'Xu',
'Yoyo2',
'HH',
'HOFO',
'N2H6N2',
])
class TestAdvancedFormula:
"""Tests for advanced check formula as function and as method.
Test if the check_formula detect 0 quantity, inexistent atomic symbols, repeated elements.
"""
def test_advanced_check_formula(self,advanced_check_formula):
with pytest.raises(ValueError):
chela.advanced_check_formula(advanced_check_formula)
def test_second_part_check_formula(self,advanced_check_formula):
with pytest.raises(ValueError):
chela.check_formula(advanced_check_formula)
def test_pandas_ext_check_formula(self,advanced_check_formula):
with pytest.raises(ValueError):
pd.DataFrame().chela.check_formula(advanced_check_formula)
@pytest.mark.parametrize('advanced_check_formula',
[
'H',
'Al9',
'OH',
'Ge',
'Mn91Al1',
'OFH',
'Mn42.3Al63.1Fe21.0'
])
class TestValidAdvancedFormula:
    """Tests for advanced check formula as function and as method.
    Test that valid formulas do not raise and that the checks return a falsy value.
    """
def test_advanced_check_formula(self,advanced_check_formula):
assert not chela.advanced_check_formula(advanced_check_formula)
def test_second_part_check_formula(self,advanced_check_formula):
assert not chela.check_formula(advanced_check_formula)
def test_pandas_ext_check_formula(self,advanced_check_formula):
assert not pd.DataFrame().chela.check_formula(advanced_check_formula)
@pytest.mark.parametrize('string_formula,dict_formula',
[
('H',{'H':1}),
('H2O',{'H':2,'O':1}),
('OH',{'H':1,'O':1}),
('NH3',{'N':1,'H':3}),
('Al2O3',{'Al':2,'O':3}),
('CaCO3',{'Ca':1,'C':1,'O':3}),
('Na2CO3',{'Na':2,'C':1,'O':3}),
('Al63.0Fe20.1Mn10.2',{'Al':63.0,'Fe':20.1,'Mn':10.2}),
])
class TestStringToDict:
"""Test the correctness of the conversion from string to dictionary"""
def test_from_string_to_dict(self,string_formula,dict_formula):
assert chela.from_string_to_dict(string_formula) == dict_formula
def test_pandas_ext_from_string_to_dict(self,string_formula,dict_formula):
        assert pd.DataFrame().chela.from_string_to_dict(string_formula) == dict_formula
import warnings
import numpy as np
import pandas as pd
import re
import string
@pd.api.extensions.register_dataframe_accessor('zookeeper')
class ZooKeeper:
def __init__(self, pandas_obj):
# validate and assign object
self._validate(pandas_obj)
self._obj = pandas_obj
# define incorporated modules - columns consisting of others will not have the dtype changed
self._INCORPORATED_MODULES = ['builtins', 'numpy', 'pandas']
# define a possible list of null values
self._NULL_VALS = [None, np.nan, 'np.nan', 'nan', np.inf, 'np.inf', 'inf', -np.inf, '-np.inf', '', 'n/a', 'na',
'N/A', 'NA', 'unknown', 'unk', 'UNKNOWN', 'UNK']
# assign dtypes and limits
# boolean
BOOL_STRINGS_TRUE = ['t', 'true', 'yes', 'on']
BOOL_STRINGS_FALSE = ['f', 'false', 'no', 'off']
        self._BOOL_MAP_DICT = {**{i: True for i in BOOL_STRINGS_TRUE}, **{i: False for i in BOOL_STRINGS_FALSE}}
        self._DTYPE_BOOL_BASE = np.bool_
self._DTYPE_BOOL_NULLABLE = pd.BooleanDtype()
# unsigned integers - base and nullable
self._DTYPES_UINT_BASE = [np.uint8, np.uint16, np.uint32, np.uint64]
self._DTYPES_UINT_NULLABLE = [pd.UInt8Dtype(), pd.UInt16Dtype(), pd.UInt32Dtype(), pd.UInt64Dtype()]
self._LIMIT_LOW_UINT = [np.iinfo(i).min for i in self._DTYPES_UINT_BASE]
self._LIMIT_HIGH_UINT = [np.iinfo(i).max for i in self._DTYPES_UINT_BASE]
# signed integers - base and nullable
self._DTYPES_INT_BASE = [np.int8, np.int16, np.int32, np.int64]
self._DTYPES_INT_NULLABLE = [pd.Int8Dtype(), pd.Int16Dtype(), pd.Int32Dtype(), pd.Int64Dtype()]
self._LIMIT_LOW_INT = [np.iinfo(i).min for i in self._DTYPES_INT_BASE]
self._LIMIT_HIGH_INT = [np.iinfo(i).max for i in self._DTYPES_INT_BASE]
# floats - nullable by default
self._DTYPES_FLOAT = [np.float16, np.float32, np.float64]
# datetime - nullable by default
self._DTYPE_DATETIME = np.datetime64
# string
self._DTYPE_STRING = pd.StringDtype()
# categorical - nullable by default
self._DTYPE_CATEGORICAL = pd.CategoricalDtype()
@staticmethod
def _validate(obj):
# any necessary validations here (raise AttributeErrors, etc)
# todo check isinstance(df, pd.DataFrame) and/or df.empty?
pass
# todo add other methods
"""
automate data profiling
- pandas_profiling
- missingo
- any others?
unit handling
- column unit attributes
- unit conversion
- column descriptions
automate machine learning pre-processing
- imputation
- scaling
- encoding
"""
def simplify_columns(self):
# todo add any other needed simplifications
# get columns
cols = self._obj.columns.astype('str')
# replace punctuation and whitespace with underscore
chars = re.escape(string.punctuation)
cols = [re.sub(r'[' + chars + ']', '_', col) for col in cols]
cols = ['_'.join(col.split('\n')) for col in cols]
        cols = [re.sub(r'\s+', '_', col) for col in cols]
# drop multiple underscores to a single one
cols = [re.sub('_+', '_', col) for col in cols]
# remove trailing or leading underscores
cols = [col[1:] if col[0] == '_' else col for col in cols]
cols = [col[:-1] if col[-1] == '_' else col for col in cols]
# convert to lower case
cols = [col.lower() for col in cols]
# reassign column names
self._obj.columns = cols
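    # Usage sketch via the registered accessor namespace (illustrative column names):
    #     df = pd.DataFrame({'First Name!': ['a'], 'Total $ (USD)': [1.0]})
    #     df.zookeeper.simplify_columns()   # columns become ['first_name', 'total_usd']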
def _minimize_memory_col_int(self, col):
# get range of values
val_min = self._obj[col].min()
val_max = self._obj[col].max()
# check whether signed or unsigned
bool_signed = val_min < 0
# check for null values
bool_null = np.any(pd.isna(self._obj[col]))
# get conversion lists
if bool_signed:
val_bins_lower = self._LIMIT_LOW_INT
val_bins_upper = self._LIMIT_HIGH_INT
if bool_null:
val_dtypes = self._DTYPES_INT_NULLABLE
else:
val_dtypes = self._DTYPES_INT_BASE
else:
val_bins_lower = self._LIMIT_LOW_UINT
val_bins_upper = self._LIMIT_HIGH_UINT
if bool_null:
val_dtypes = self._DTYPES_UINT_NULLABLE
else:
val_dtypes = self._DTYPES_UINT_BASE
# apply conversions
idx = max(np.where(np.array(val_bins_lower) <= val_min)[0][0],
np.where(np.array(val_bins_upper) >= val_max)[0][0])
self._obj[col] = self._obj[col].astype(val_dtypes[idx])
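    # For example, a column whose non-null values all fall in [0, 255] is downcast to np.uint8 when
    # it has no missing values, or to the nullable pd.UInt8Dtype() when pd.NA values are present.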
def _minimize_memory_col_float(self, col, tol):
if np.sum(self._obj[col] - self._obj[col].apply(lambda x: round(x, 0))) == 0:
# check if they are actually integers (no decimal values)
self._minimize_memory_col_int(col)
else:
# find the smallest float dtype that has an error less than the tolerance
for i_dtype in self._DTYPES_FLOAT:
if np.abs(self._obj[col] - self._obj[col].astype(i_dtype)).max() <= tol:
self._obj[col] = self._obj[col].astype(i_dtype)
break
def reduce_memory_usage(self, tol_float=1E-6, category_fraction=0.5, drop_null_cols=True, drop_null_rows=True,
reset_index=False, print_reduction=False, print_warnings=True):
# get the starting memory usage - optional because it can add significant overhead to run time
if print_reduction:
mem_start = self._obj.memory_usage(deep=True).values.sum()
# null value handling
# apply conversions for null values
self._obj.replace(self._NULL_VALS, pd.NA, inplace=True)
# drop null columns and rows
if drop_null_cols:
self._obj.dropna(axis=1, how='all', inplace=True)
if drop_null_rows:
self._obj.dropna(axis=0, how='all', inplace=True)
# replace boolean-like strings with booleans
self._obj.replace(self._BOOL_MAP_DICT, inplace=True)
# loop by column to predict value
for i_col, i_dtype in self._obj.dtypes.to_dict().items():
            # skip if column is full of nulls and wasn't dropped
if not drop_null_cols:
if np.all(pd.isna(self._obj[i_col])):
continue
# get non-null values and the unique modules
vals_not_null = self._obj.loc[pd.notna(self._obj[i_col]), i_col].values
modules = np.unique([type(val).__module__.split('.')[0] for val in vals_not_null])
# skip if col contains non-supported modules
if np.any([val not in self._INCORPORATED_MODULES for val in modules]):
continue
# check if any null values are present
null_vals_present = np.any(pd.isna(self._obj[i_col]))
# check and assign dtypes
# todo add option to coerce small number of values and still proceed with dtype application
if pd.isna(pd.to_numeric(vals_not_null, errors='coerce')).sum() == 0:
# numeric dtype
self._obj[i_col] = pd.to_numeric(self._obj[i_col], errors='coerce')
vals_not_null = self._obj.loc[pd.notna(self._obj[i_col]), i_col].values
# check if bool, int, or float
if np.all(np.logical_or(vals_not_null == 0, vals_not_null == 1)):
# boolean
if null_vals_present:
self._obj[i_col] = self._obj[i_col].astype(self._DTYPE_BOOL_NULLABLE)
else:
self._obj[i_col] = self._obj[i_col].astype(self._DTYPE_BOOL_BASE)
else:
# apply float, will use int if possible
self._minimize_memory_col_float(i_col, tol_float)
elif pd.isna(pd.to_datetime(vals_not_null, errors='coerce')).sum() == 0:
# datetime
# todo add option to split datetime into year col, month col, and day col
self._obj[i_col] = pd.to_datetime(self._obj[i_col], errors='coerce')
else:
# get types
val_types = np.unique([str(val.__class__).split("'")[1] for val in vals_not_null])
# check if there are any non-string iterables
bool_iters = np.any([False if (str(val.__class__).split("'")[1] == 'str') else
(True if hasattr(val, '__iter__') else False) for val in vals_not_null])
# check if any are strings
if 'str' in val_types and ((len(val_types) == 1) or not bool_iters):
# convert to strings
if len(val_types) != 1:
                        self._obj.loc[pd.notna(self._obj[i_col]), i_col] = self._obj.loc[
                            pd.notna(self._obj[i_col]), i_col].astype(str)
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from MergeDataFrameToTable import MergeDFToTable
import numpy as np
import pandas as pd
from sklearn import cluster
spark = SparkSession.builder.enableHiveSupport().getOrCreate()
"""
outbound_data
"""
#----------------------------
oub_line = spark.sql("""select *
from dsc_dws.dws_dsc_wh_ou_daily_kpi_sum
where operation_day between '""" + start_date + """' and '""" + end_date + """'
and ou_code = 'CN-298' """)
oub_line.show(10, False)
june_hpwh = oub_line.toPandas()
june_hpwh.dropna(axis = 1, how = 'all', inplace = True)
# june_hpwh.columns = june_hpwh.columns.to_series().str.slice(32).values
#----------------------------
df_shipped_qty = june_hpwh.groupby(['inc_day'])['shipped_qty'].sum()
alg1 = cluster.MiniBatchKMeans(n_clusters = 3, random_state = 707)
hist1 = alg1.fit(df_shipped_qty.to_numpy().reshape(-1,1))
df_shipped_qty = pd.DataFrame(df_shipped_qty).reset_index()
df_shipped_qty['cluster_centers'] = alg1.labels_
cl_1 = pd.concat([pd.DataFrame(hist1.cluster_centers_), pd.Series(np.arange(0,3))], axis = 1)
cl_1.columns = ['cluster_values', 'cluster_centers']
df_shipped_qty = df_shipped_qty.merge(cl_1, on = 'cluster_centers', how = 'inner')
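# cl_1 maps each MiniBatchKMeans label (0-2) to its centroid value, so after the merge every day
# carries both its cluster label and that cluster's centre shipped quantity.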
#----------------------------
"""
inbound_data
"""
inb_line = spark.sql("""select *
from dsc_dwd.dwd_wh_dsc_inbound_line_dtl_di
where inc_day between '20210601' and '20210720'
and src = 'scale'
and wms_warehouse_id = 'HPI_WH'""")
june_hpwh2 = inb_line.toPandas()
june_hpwh2.dropna(axis = 1, how = 'all', inplace = True)
june_hpwh2.columns = june_hpwh2.columns.to_series().str.slice(31).values
rec_qty = (june_hpwh2.groupby(['inc_day'])['receive_qty'].sum()).to_numpy()
hist2 = alg1.fit(rec_qty.reshape(-1,1))
cl_2 = pd.concat([pd.DataFrame(hist2.cluster_centers_), pd.Series(np.arange(0,3))], axis = 1)
cl_2.columns = ['cluster_values', 'cluster_centers']
df_receive_qty = pd.concat([pd.DataFrame(june_hpwh2.groupby(['inc_day'])['receive_qty'].sum()).reset_index(),
pd.Series(hist2.labels_)], axis = 1, ignore_index = True)
df_receive_qty.columns = ['inc_day', 'inbound_ttl_qty', 'cluster_centers']
df_receive_qty = df_receive_qty.merge(cl_2, on = 'cluster_centers', how = 'inner')
#----------------------------
"""
hr_data
# select * from
# dsc_dwd.dwd_hr_dsc_working_hour_dtl_di
# where inc_day between '20210601' and '20210720'
# and ou_code = 'CN-298'
"""
# hr data
hr = spark.sql("""select *
from dsc_dwd.dwd_hr_dsc_working_hour_dtl_di
where inc_day between '20210601' and '20210720'
and ou_code = 'CN-298'""")
june_hr = hr.toPandas()
june_hr.dropna(axis = 1, how = 'all', inplace = True)
# june_hr.columns = june_hr.columns.to_series().str.slice(31).values
june_hr['working_date'] = june_hr['working_date'].str.slice(0, -9).values
june_hr_hc = june_hr[june_hr['working_hours'] != 0].groupby(['working_date'])['emp_name'].count()
june_hr_new = june_hr.groupby(['working_date'])['working_hours'].sum()
# weekend adjustment.
# june_hr_new = june_hr_new[june_hr.groupby(['working_date'])['working_hours'].sum() != 0 ]
june_hr_hc = pd.DataFrame(june_hr_hc).reset_index()
june_hr_hc = june_hr_hc[june_hr_hc['working_date'].notna()]  # assumption: keep only rows that have a working date
june_hr_new = pd.DataFrame(june_hr_new).reset_index()
june_hr_new = june_hr_new[june_hr_new['working_date'].notna()]  # assumption: keep only rows that have a working date
# alg1 = KMeans(n_clusters = 3)
hist3 = alg1.fit(june_hr_new['working_hours'].to_numpy().reshape(-1,1))
hr_cluster_centers = pd.DataFrame(hist3.cluster_centers_)
hr_cluster_centers['cc_hr'] = [0,1,2]
june_hr_new['cc_hr'] = hist3.labels_
june_hr_ttl = june_hr_new.merge(june_hr_hc, on = 'working_date', how = 'left').fillna(0)
june_hr_ttl['working_date'] = [int(i.replace('-', '')) for i in pd.to_datetime(june_hr_ttl['working_date']).astype(str)]
# values = pd.concat([june_hr_ttl.groupby(['cc_hr'])['working_hours'].std(),
# june_hr_ttl.groupby(['cc_hr'])['working_hours'].median(),
# june_hr_ttl.groupby(['cc_hr'])['working_hours'].mean()], axis = 1).reset_index()
# values.columns = ['cc_hr','std_cc_hr', 'median_cc_hr', 'mean_cc_hr']
# june_hr_ttl = june_hr_ttl.merge(values, on = 'cc_hr', how = 'left')
june_hr_ttl = june_hr_ttl.merge(hr_cluster_centers, on = 'cc_hr', how = 'left')
"""
concat
"""
full = june_hr_ttl.merge(df_receive_qty, left_on = 'working_date', right_on = 'inc_day', how = 'left').fillna(0)
full = full.merge(df_shipped_qty, left_on = 'working_date', right_on = 'inc_day', how = 'left',suffixes=('_x', '_y')).fillna(0)
full = full.drop(['inc_day_x', 'inc_day_y'], axis = 1)
full = pd.DataFrame(full)
"""Create figure 7 for paper."""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import matplotlib.ticker as ticker
# Import global path and file variables
from settings import *
# read result of the same metric
# select the result on the two rows which represent the identified fixing group
path = MORRIS_DATA_DIR
f_default = np.append([0, 0.1, 0.4, 0.5], np.linspace(0.2, 0.3, 11))
f_default.sort()
f_default = [str(round(i, 2)) for i in f_default]
f_default[0] = '0.0'
names = ['mae', 'var', 'ppmc', 'mae_upper', 'var_upper',
'ppmc_upper', 'mae_lower', 'var_lower', 'ppmc_lower']
df = {}
for fn in names:
df[fn] = pd.DataFrame(columns=['group1', 'group2'], index=f_default)
for val in f_default:
f_read = pd.read_csv('{}{}{}{}{}'.format(path, val, '/', fn, '.csv'))
df[fn].loc[val, 'group1'] = f_read.loc[15, 'result_90']
df[fn].loc[val, 'group2'] = f_read.loc[12, 'result_90']
# transform df from dict into dataframe with multiple columns
df = pd.concat(df, axis=1)
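# pd.concat on a dict with axis=1 keys the columns by metric name, giving df a two-level column
# index of (metric, group) indexed by the tested default values.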
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
class TestFrameMulDiv(object):
"""Tests for DataFrame multiplication and division"""
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
result = df % df
tm.assert_frame_equal(result, expected)
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values % df.values
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_int(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df % 0
expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') % 0
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_series_does_not_commute(self):
# GH#3590, modulo as ints
# not commutative with series
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser % df
res2 = df % ser
assert not res.fillna(0).equals(res2.fillna(0))
# ------------------------------------------------------------------
# Division By Zero
def test_df_div_zero_df(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / df
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
tm.assert_frame_equal(result, expected)
def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
with np.errstate(all='ignore'):
arr = df.values.astype('float') / df.values
result = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_df_div_zero_int(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / 0
expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
expected.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') / 0
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_div_zero_series_does_not_commute(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser / df
res2 = df / ser
assert not res.fillna(0).equals(res2.fillna(0))
class TestFrameArithmetic(object):
@pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
strict=True)
def test_df_sub_datetime64_not_ns(self):
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
assert dt64.dtype == 'datetime64[D]'
res = df - dt64
expected = pd.DataFrame([pd.Timedelta(days=0), pd.Timedelta(days=1),
pd.Timedelta(days=2)])
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize('data', [
[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT],
['x', 'y', 1]])
@pytest.mark.parametrize('dtype', [None, object])
def test_df_radd_str_invalid(self, dtype, data):
df = pd.DataFrame(data, dtype=dtype)
with pytest.raises(TypeError):
'foo_' + df
@pytest.mark.parametrize('dtype', [None, object])
def test_df_with_dtype_radd_int(self, dtype):
df = pd.DataFrame([1, 2, 3], dtype=dtype)
expected = pd.DataFrame([2, 3, 4], dtype=dtype)
result = 1 + df
tm.assert_frame_equal(result, expected)
result = df + 1
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_df_with_dtype_radd_nan(self, dtype):
df = pd.DataFrame([1, 2, 3], dtype=dtype)
expected = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
result = np.nan + df
tm.assert_frame_equal(result, expected)
result = df + np.nan
tm.assert_frame_equal(result, expected)
def test_df_radd_str(self):
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))
tm.assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
class TestPeriodFrameArithmetic(object):
def test_ops_frame_period(self):
# GH 13043
df = pd.DataFrame({'A': [pd.Period('2015-01', freq='M'),
pd.Period('2015-02', freq='M')],
'B': [pd.Period('2014-01', freq='M'),
pd.Period('2014-02', freq='M')]})
assert df['A'].dtype == object
assert df['B'].dtype == object
p = pd.Period('2015-03', freq='M')
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame({'A': np.array([2 * off, 1 * off], dtype=object),
'B': np.array([14 * off, 13 * off], dtype=object)})
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'),
pd.Period('2015-06', freq='M')],
'B': [pd.Period('2015-05', freq='M'),
pd.Period('2015-06', freq='M')]})
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators_Original.ipynb (unless otherwise specified).
__all__ = ['racdiv', 'pasi', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hh40inc', 'hh60inc', 'hh75inc',
'hhchpov', 'hhm75', 'hhpov', 'hhs', 'hsdipl', 'lesshs', 'male', 'nilf', 'othrcom', 'p2more', 'pubtran',
'age5', 'age24', 'age64', 'age18', 'age65', 'affordm', 'affordr', 'bahigher', 'carpool', 'drvalone',
'hh25inc', 'mhhi', 'nohhint', 'novhcl', 'paa', 'ppac', 'phisp', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav29', 'trav45', 'trav44', 'unempl', 'unempr', 'walked']
# Cell
#File: racdiv.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B02001 - Race
# Universe: Total Population
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def racdiv( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
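# Helper summary: getColName returns the first column whose name contains the given ACS
# variable code (e.g. '003' matches 'B02001_003E_...'), getColByName returns that column,
# addKey copies it into an output frame, nullIfEqual returns the row-wise sum of two columns
# (0 when the sum is 0, despite the name), and sumInts sums the numeric columns.
# The same helpers are redefined in each indicator function in this module.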
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B02001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df_hisp = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
df_hisp = df_hisp.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df_hisp = df_hisp.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino'] = df_hisp['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['African-American%'] = df[ 'B02001_003E_Total_Black_or_African_American_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['White%'] = df[ 'B02001_002E_Total_White_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['American Indian%'] = df[ 'B02001_004E_Total_American_Indian_and_Alaska_Native_alone' ]/ df[ 'B02001_001E_Total' ] * 100
df1['Asian%'] = df[ 'B02001_005E_Total_Asian_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['Native Hawaii/Pac Islander%'] = df[ 'B02001_006E_Total_Native_Hawaiian_and_Other_Pacific_Islander_alone'] / df[ 'B02001_001E_Total' ] * 100
df1['Hisp %'] = df['B03002_012E_Total_Hispanic_or_Latino'] / df[ 'B02001_001E_Total' ] * 100
# =1-(POWER(%AA/100,2)+POWER(%White/100,2)+POWER(%AmerInd/100,2)+POWER(%Asian/100,2) + POWER(%NativeAm/100,2))*(POWER(%Hispanic/100,2) + POWER(1-(%Hispanic/100),2))
df1['Diversity_index'] = ( 1- (
( df1['African-American%'] /100 )**2
+( df1['White%'] /100 )**2
+( df1['American Indian%'] /100 )**2
+( df1['Asian%'] /100 )**2
+( df1['Native Hawaii/Pac Islander%'] /100 )**2
)*(
( df1['Hisp %'] /100 )**2
+(1-( df1['Hisp %'] /100) )**2
) ) * 100
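# Illustration of the formula: a CSA that is 100% one race with 0% Hispanic residents
# scores 0 (1 - 1*1), while one split 50/50 between two races with 0% Hispanic residents
# scores 50 (1 - 0.5*1); higher values indicate a more diverse population mix.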
return df1['Diversity_index']
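# Example usage (hypothetical year / file layout): with cleaned tract files such as
# AcsDataClean/B02001_5y17_est.csv and AcsDataClean/B03002_5y17_est.csv in place,
# the 2017 diversity index per CSA could be computed as:
# racdiv_17 = racdiv('17')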
# Cell
#File: pasi.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def pasi( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['Asian%NH'] = df[ 'B03002_006E_Total_Not_Hispanic_or_Latino_Asian_alone' ]/ tot * 100
return df1['Asian%NH']
# Cell
#File: elheat.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator
#input: Year
#output:
import pandas as pd
import glob
def elheat( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_004E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <elheat_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_004E','B25040_001E'])
)
update vital_signs.data
set elheat = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: empl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Employed Indicator
#input: Year
#output:
import pandas as pd
import glob
def empl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64
#/
#nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <empl_14> */ --
WITH tbl AS (
select csa,
( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY[ 'B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_007E','B23001_014E','B23001_021E','B23001_028E','B23001_035E','B23001_042E','B23001_049E','B23001_056E','B23001_063E','B23001_070E','B23001_093E','B23001_100E','B23001_107E','B23001_114E','B23001_121E','B23001_128E','B23001_135E','B23001_142E','B23001_149E','B23001_156E'])
)
update vital_signs.data
set empl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: fam.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def fam( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
# Delete Unassigned--Jail
df = df[df.index != 'Unassigned--Jail']
# Move Baltimore City to the bottom of the index
bc = df.loc[ 'Baltimore City' ]
df = df.drop( 'Baltimore City' )
df.loc[ 'Baltimore City' ] = bc
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
return df1['18Under']
# Cell
#File: female.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def female( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheLadies'] = df[ 'B01001_026E_Total_Female' ]
return df1['onlyTheLadies']
# Cell
#File: femhhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: male, hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def femhhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
str19 = rootStr + ',_no_spouse_present'
femhh = str17 if year == '17' else str19 if year == '19' else str16
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
df1['FemaleHH'] = df[ femhh ] / df['B11005_002E_Total_Households_with_one_or_more_people_under_18_years'] * 100
df1['FamHHChildrenUnder18'] = df['B11005_003E_Total_Households_with_one_or_more_people_under_18_years_Family_households']
df1['FamHHChildrenOver18'] = df['B11005_012E_Total_Households_with_no_people_under_18_years_Family_households']
df1['FamHH'] = df1['FamHHChildrenOver18'] + df1['FamHHChildrenUnder18']
return df1['FemaleHH']
# Cell
#File: heatgas.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Gas Indicator
#input: Year
#output:
import pandas as pd
import glob
def heatgas( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_002E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <heatgas_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_002E','B25040_001E'])
)
update vital_signs.data
set heatgas = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hh40inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 25K-40K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh40inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 006
key = getColName(df, '006')
val = getColByName(df, '006')
fi[key] = val
# append into that dataframe col 007
key = getColName(df, '007')
val = getColByName(df, '007')
fi[key] = val
# append into that dataframe col 008
key = getColName(df, '008')
val = getColByName(df, '008')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
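# B19001_006E-B19001_008E are the standard ACS brackets covering $25,000-$39,999;
# their sum is divided by total households (B19001_001E).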
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh40inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_006E','B19001_007E','B19001_008E','B19001_001E'])
)
UPDATE vital_signs.data
set hh40inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh60inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income $40,000-$59,999 Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh60inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 009
key = getColName(df, '009')
val = getColByName(df, '009')
fi[key] = val
# append into that dataframe col 010
key = getColName(df, '010')
val = getColByName(df, '010')
fi[key] = val
# append into that dataframe col 011
key = getColName(df, '011')
val = getColByName(df, '011')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
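# B19001_009E-B19001_011E are the standard ACS brackets covering $40,000-$59,999;
# their sum is divided by total households (B19001_001E).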
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh60inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_009E','B19001_010E','B19001_011E','B19001_001E'])
)
UPDATE vital_signs.data
set hh60inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh75inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income $60,000-$74,999 Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh75inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 012
key = getColName(df, '012')
val = getColByName(df, '012')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
# B19001_012E ($60,000-$74,999) divided by B19001_001E (total households)
return fi.apply(lambda x: ( x[fi.columns[1] ] / x[fi.columns[0]])*100, axis=1)
"""
/* hh75inc */ --
WITH tbl AS (
select csa,
( value[1] / value[2] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_012E','B19001_001E'])
)
UPDATE vital_signs.data
set hh75inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hhchpov.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B17001 - POVERTY STATUS IN THE PAST 12 MONTHS BY SEX BY AGE
# Universe: Population for whom poverty status is determined
#purpose: Produce Percent of Children (Under 18 Years) Living Below the Poverty Line Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhchpov( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B17001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
denominators = addKey(df, denominators, col)
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] #Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
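# The citywide value is pulled directly from ACS subject table S1701 (poverty status) via the
# Census API; S1701_C03_002E is assumed to be the percent of the under-18 population below the
# poverty line for Baltimore City (state 24, county 510).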
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S1701_C03_002E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi.loc['Baltimore City', 'final'] = float(table.loc[1, table.columns[1]])  # avoid chained-assignment
return fi['final']
"""
/* <hhchpov_14> */
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12])
/ nullif(
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14] + value[15] + value[16] + value[17] + value[18] + value[19] + value[20] + value[21] + value[22] + value[23] + value[24] ),
0)
) * 100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B17001_004E','B17001_005E','B17001_006E','B17001_007E','B17001_008E','B17001_009E','B17001_018E','B17001_019E','B17001_020E','B17001_021E','B17001_022E','B17001_023E','B17001_033E','B17001_034E','B17001_035E','B17001_036E','B17001_037E','B17001_038E','B17001_047E','B17001_048E','B17001_049E','B17001_050E','B17001_051E','B17001_052E'])
)
update vital_signs.data
set hhchpov = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hhm75.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income Over 75K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhm75( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append total households (001) and the income brackets below $75,000 (002-012);
# keep this order because Step 3 below indexes fi.columns positionally
columns = ['001', '002', '003', '004', '005', '006', '007', '008', '009', '010', '011', '012']
for col in columns:
fi[getColName(df, col)] = getColByName(df, col)
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
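# Households earning $75,000 and above = total households (001) minus the brackets covering
# incomes below $75,000 (002-012), expressed as a percent of total households.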
return fi.apply(lambda x: ( ( x[fi.columns[0]]-( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ]+ x[fi.columns[4] ]+ x[fi.columns[5] ]+ x[fi.columns[6] ]+ x[fi.columns[7] ]+ x[fi.columns[8] ]+ x[fi.columns[9] ]+ x[fi.columns[10] ]+ x[fi.columns[11] ] ) ) / x[fi.columns[0]])*100, axis=1)
# Cell
#File: hhpov.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B17017 - Household Poverty, Uses Table B17017 which includes V
# Poverty Status in the Past 12 Months by Household Type by Age of Householder (Universe = households)
#purpose: Produce Household Poverty Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhpov( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B17017*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 003
key = getColName(df, '003')
val = getColByName(df, '003')
fi[key] = val
# append into that dataframe col 032
key = getColName(df, '032')
val = getColByName(df, '032')
fi[key] = val
# construct the denominator, returns 0 iff the other two rows are equal.
fi['denominator'] = nullIfEqual( df, '003', '032')
# Delete Rows where the 'denominator' column is 0
fi = fi[fi['denominator'] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: (x[fi.columns[0]] / x['denominator'])*100, axis=1)
# Cell
#File: hhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def hhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['tot'] = df[ 'B11005_001E_Total' ]
return df1['tot']
# Cell
#File: hsdipl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With High School Diploma and Some College or Associates Degree
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def hsdipl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_003E','B06009_004E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_003E','B06009_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( ( value[1] + value[2] ) / nullif(value[3],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <hsdipl_14> */ --
WITH tbl AS (
select csa,
( ( value[1] + value[2] ) / nullif(value[3],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_003E','B06009_004E','B06009_001E'])
)
update vital_signs.data
set hsdipl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: lesshs.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With Less Than a High School Diploma or GED Indicator
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def lesshs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_002E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <lesshs_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_002E','B06009_001E'])
)
update vital_signs.data
set lesshs = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: male.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def male( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheFellas'] = df[ 'B01001_002E_Total_Male' ]
return df1['onlyTheFellas']
# Cell
#File: nilf.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Not in Labor Force Indicator
#input: Year
#output:
import pandas as pd
import glob
def nilf( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64
# /
# nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <nilf_14> */ --
WITH tbl AS (
select csa,
( (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014', ARRAY['B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_009E','B23001_016E','B23001_023E','B23001_030E','B23001_037E','B23001_044E','B23001_051E','B23001_058E','B23001_065E','B23001_072E','B23001_095E','B23001_102E','B23001_109E','B23001_116E','B23001_123E','B23001_130E','B23001_137E','B23001_144E','B23001_151E','B23001_158E'])
)
update vital_signs.data
set nilf = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: othrcom.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 years and over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population Using Other Means to Commute to Work (Taxi, Motorcycle, Bicycle, Other) Indicator
#input: Year
#output:
import pandas as pd
import glob
def othrcom( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_041E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_041E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[3] / nullif((value[1]-value[2]),0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
# e.g. 100 - (6.7 + 59.8 + 9.2 + 18.4 + 3.7) = 2.2
# 100 - (walked + drvalone + carpool + pubtran + workfromhome (S0801_C01_013E))
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_010E,S0801_C01_003E,S0801_C01_004E,S0801_C01_009E,S0801_C01_013E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
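# The Census API returns a JSON array of arrays: row 0 carries the requested variable names and row 1
# carries the values for Baltimore City (state 24, county 510), which is why values are read from
# table.loc[1, ...]. Columns 1-5 follow the order of the variables in the URL and are used here as
# walked, drove alone, carpooled, public transit, and worked from home respectively.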
walked = float(table.loc[1, table.columns[1]] )
drvalone = float(table.loc[1, table.columns[2]] )
carpool = float(table.loc[1, table.columns[3]] )
pubtran = float(table.loc[1, table.columns[4]] )
workfromhome = float(table.loc[1, table.columns[5]] )
fi['final']['Baltimore City'] = 100 - ( walked + drvalone + carpool + pubtran + workfromhome )
return fi['final']
"""
/* <othrcom_14> */ --
WITH tbl AS (
select csa,
( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08101_001E','B08101_049E','B08101_041E'])
)
update vital_signs.data
set othrcom = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: p2more.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def p2more( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Reference the Hispanic or Latino total column as a sanity check that it is present (the bare lookup raises a KeyError if it is missing; the value itself is not used in this indicator)
df['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['TwoOrMore%NH'] = df['B03002_009E_Total_Not_Hispanic_or_Latino_Two_or_more_races'] / tot * 100
return df1['TwoOrMore%NH']
# Cell
#File: pubtran.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 Years and Over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population that Uses Public Transportation to Get to Work Indicator
#input: Year
#output:
import pandas as pd
import glob
def pubtran( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_025E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_025E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[3] / nullif((value[1]-value[2]),0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_009E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return fi['final']
""" /* <pubtran_14> */ --
WITH tbl AS (
select csa,
( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08101_001E','B08101_049E','B08101_025E'])
)
update vital_signs.data
set pubtran = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: age5.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age5( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# Under 5
df1['under_5'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ]
+ df[ 'B01001_027E_Total_Female_Under_5_years' ]
) / total * 100
return df1['under_5']
# Cell
#File: age24.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age24( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['eighteen_to_24'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ]
+ df[ 'B01001_008E_Total_Male_20_years' ]
+ df[ 'B01001_009E_Total_Male_21_years' ]
+ df[ 'B01001_010E_Total_Male_22_to_24_years' ]
+ df[ 'B01001_031E_Total_Female_18_and_19_years' ]
+ df[ 'B01001_032E_Total_Female_20_years' ]
+ df[ 'B01001_033E_Total_Female_21_years' ]
+ df[ 'B01001_034E_Total_Female_22_to_24_years' ]
) / total * 100
return df1['eighteen_to_24']
# Cell
#File: age64.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age64( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['twentyfive_to_64'] = ( df[ 'B01001_011E_Total_Male_25_to_29_years' ]
+ df[ 'B01001_012E_Total_Male_30_to_34_years' ]
+ df[ 'B01001_013E_Total_Male_35_to_39_years' ]
+ df[ 'B01001_014E_Total_Male_40_to_44_years' ]
+ df[ 'B01001_015E_Total_Male_45_to_49_years' ]
+ df[ 'B01001_016E_Total_Male_50_to_54_years' ]
+ df[ 'B01001_017E_Total_Male_55_to_59_years' ]
+ df[ 'B01001_018E_Total_Male_60_and_61_years' ]
+ df[ 'B01001_019E_Total_Male_62_to_64_years' ]
+ df[ 'B01001_035E_Total_Female_25_to_29_years' ]
+ df[ 'B01001_036E_Total_Female_30_to_34_years' ]
+ df[ 'B01001_037E_Total_Female_35_to_39_years' ]
+ df[ 'B01001_038E_Total_Female_40_to_44_years' ]
+ df[ 'B01001_039E_Total_Female_45_to_49_years' ]
+ df[ 'B01001_040E_Total_Female_50_to_54_years' ]
+ df[ 'B01001_041E_Total_Female_55_to_59_years' ]
+ df[ 'B01001_042E_Total_Female_60_and_61_years' ]
+ df[ 'B01001_043E_Total_Female_62_to_64_years' ]
) / total * 100
return df1['twentyfive_to_64']
# Cell
#File: age18.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age18( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['five_to_17'] = ( df[ 'B01001_004E_Total_Male_5_to_9_years' ]
+ df[ 'B01001_005E_Total_Male_10_to_14_years' ]
+ df[ 'B01001_006E_Total_Male_15_to_17_years' ]
+ df[ 'B01001_028E_Total_Female_5_to_9_years' ]
+ df[ 'B01001_029E_Total_Female_10_to_14_years' ]
+ df[ 'B01001_030E_Total_Female_15_to_17_years' ]
) / total * 100
return df1['five_to_17']
# Cell
#File: age65.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age65( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['sixtyfive_and_up'] = ( df[ 'B01001_020E_Total_Male_65_and_66_years' ]
+ df[ 'B01001_021E_Total_Male_67_to_69_years' ]
+ df[ 'B01001_022E_Total_Male_70_to_74_years' ]
+ df[ 'B01001_023E_Total_Male_75_to_79_years' ]
+ df[ 'B01001_024E_Total_Male_80_to_84_years' ]
+ df[ 'B01001_025E_Total_Male_85_years_and_over' ]
+ df[ 'B01001_044E_Total_Female_65_and_66_years' ]
+ df[ 'B01001_045E_Total_Female_67_to_69_years' ]
+ df[ 'B01001_046E_Total_Female_70_to_74_years' ]
+ df[ 'B01001_047E_Total_Female_75_to_79_years' ]
+ df[ 'B01001_048E_Total_Female_80_to_84_years' ]
+ df[ 'B01001_049E_Total_Female_85_years_and_over' ]
) / total * 100
return df1['sixtyfive_and_up']
# Cell
#File: affordm.py
#Author: <NAME>
#Date: 1/25/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25091 - MORTGAGE STATUS BY SELECTED MONTHLY OWNER COSTS AS A PERCENTAGE OF HOUSEHOLD INCOME IN THE PAST 12 MONTHS
# Universe: Owner-occupied housing units
# Table Creates:
#purpose: Produce Housing and Community Development - Affordability Index - Mortgage Indicator
#input: Year
#output:
import pandas as pd
import glob
def affordm( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25091*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25091_008E','B25091_009E','B25091_010E','B25091_011E','B25091_002E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25091_008E','B25091_009E','B25091_010E','B25091_011E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25091_002E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
WITH tbl AS (
select csa,
( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25091_008E','B25091_009E','B25091_010E','B25091_011E','B25091_002E'])
)
update vital_signs.data
set affordm = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: affordr.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25070 - GROSS RENT AS A PERCENTAGE OF HOUSEHOLD INCOME IN THE PAST 12 MONTHS
# Universe: Renter-occupied housing units
#purpose: Produce Housing and Community Development - Affordability Index - Rent Indicator
#input: Year
#output:
import pandas as pd
import glob
def affordr( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25070*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25070_007E','B25070_008E','B25070_009E','B25070_010E','B25070_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25070_007E','B25070_008E','B25070_009E','B25070_010E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25070_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
WITH tbl AS (
select csa,
( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25070_007E','B25070_008E','B25070_009E','B25070_010E','B25070_001E'])
)
update vital_signs.data
set affordr = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: bahigher.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) with a Bachelor's Degree or Above
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def bahigher( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_005E','B06009_006E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_005E','B06009_006E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( ( value[1] + value[2] ) / nullif(value[3],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <hsdipl_14> */ --
WITH tbl AS (
select csa,
( ( value[1] + value[2] ) / nullif(value[3],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_003E','B06009_004E','B06009_001E'])
)
update vital_signs.data
set hsdipl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
B06009_004E
label "Estimate!!Total!!Some college or associate's degree"
B06009_003E
label "Estimate!!Total!!High school graduate (includes equivalency)"
B06009_002E
label "Estimate!!Total!!Less than high school graduate"
B06009_001E
label "Estimate!!Total"
B06009_005E
label "Estimate!!Total!!Bachelor's degree"
B06009_006E
label "Estimate!!Total!!Graduate or professional degree"
"""
# Cell
#File: carpool.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 Years and Over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population that Carpool to Work Indicator
#input: Year
#output:
import pandas as pd
import glob
def carpool( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_017E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_017E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[3] / (value[1]-value[2]) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_004E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return fi['final']
"""
WITH tbl AS (
select csa,
( value[3] / nullif( (value[1]-value[2]) ,0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B08101_001E','B08101_049E','B08101_017E'])
)
update vital_signs.data
set carpool = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2013';
"""
# Cell
#File: drvalone.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 Years and Over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population that Drove Alone to Work Indicator
#input: Year
#output:
import pandas as pd
import glob
def drvalone( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_009E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_009E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[3] / nullif((value[1]-value[2]),0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_003E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return fi['final']
"""
/* <drvalone_13> */ --
WITH tbl AS (
select csa,
( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B08101_001E','B08101_049E','B08101_009E'])
)
update vital_signs.data
set drvalone = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2013';
"""
# Cell
#File: hh25inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income Under 25K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh25inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe: col 001 is total households; cols 002-005 are the income brackets below $25,000
fi = pd.DataFrame()
for col in ['001', '002', '003', '004', '005']:
    key = getColName(df, col)
    val = getColByName(df, col)
    fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ]+ x[fi.columns[4] ] ) / x[fi.columns[0]])*100, axis=1)
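# Minimal sketch of the Step 3 arithmetic on made-up numbers (same column order as built above:
# total households first, then the four brackets below $25,000):
#
# toy = pd.DataFrame({'B19001_001E': [1000], 'B19001_002E': [100], 'B19001_003E': [50],
#                     'B19001_004E': [75], 'B19001_005E': [25]}, index=['Example CSA'])
# (toy.iloc[:, 1:].sum(axis=1) / toy.iloc[:, 0]) * 100   # -> 25.0 (percent of households under $25k)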
# Cell
#File: mhhi.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2016 INFLATION-ADJUSTED DOLLARS)
# Universe: Households
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Median Household Income Indicator
#input: Year
#output:
import pandas as pd
import glob
def mhhi( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
info = pd.DataFrame(
[
['B19001_002E', 0, 10000],
['B19001_003E', 10000, 4999 ],
['B19001_004E', 15000, 4999 ],
['B19001_005E', 20000, 4999 ],
['B19001_006E', 25000, 4999 ],
['B19001_007E', 30000, 4999],
['B19001_008E', 35000, 4999 ],
['B19001_009E', 40000, 4999 ],
['B19001_010E', 45000, 4999 ],
['B19001_011E', 50000, 9999 ],
['B19001_012E', 60000, 14999],
['B19001_013E', 75000, 24999 ],
['B19001_014E', 100000, 24999 ],
['B19001_015E', 125000, 24999 ],
['B19001_016E', 150000, 49000 ],
['B19001_017E', 200000, 1000000000000000000000000 ],
],
columns=['variable', 'lower', 'range']
)
# Final Dataframe
data_table = pd.DataFrame()
for index, row in info.iterrows():
#print(row['variable'], row['lower'], row['range'])
data_table = addKey(df, data_table, row['variable'])
# create a table of the accumulating total across the columns from left to right for each csa.
temp_table = data_table.cumsum(axis=1)
# get the CSA midpoint by dividing the last column of the cumulative totals (the grand total) by two
temp_table['midpoint'] = (temp_table.iloc[ : , -1 :] /2) # V3
temp_table['midpoint_index'] = False
temp_table['midpoint_index_value'] = False # Z3
temp_table['midpoint_index_lower'] = False # W3
temp_table['midpoint_index_range'] = False # X3
temp_table['midpoint_index_minus_one_cumulative_sum'] = False #Y3
# step 3 - csa_agg3: get the midpoint index by "when midpoint > agg[1] and midpoint <= agg[2] then 2"
# Get CSA Midpoint Index using the breakpoints in our info table.
# For each CSA
for index, row in temp_table.iterrows():
# Get the index of the first column where our midpoint is greater than the columns value.
# Do not use the temp columns (we just created)
midpoint = row['midpoint']
midpoint_index = 0
for column in row.iloc[:-6]:
# set midpoint index to the column with the highest value possible that is under midpoint
if( midpoint >= int(column) ):
# print (str(column) + ' - ' + str(midpoint))
temp_table.loc[ index, 'midpoint_index' ] = midpoint_index +1
midpoint_index += 1
temp_table = temp_table.drop('Unassigned--Jail')
for index, row in temp_table.iterrows():
temp_table.loc[ index, 'midpoint_index_value' ] = data_table.loc[ index, data_table.columns[row['midpoint_index']] ]
temp_table.loc[ index, 'midpoint_index_lower' ] = info.loc[ row['midpoint_index'] ]['lower']
temp_table.loc[ index, 'midpoint_index_range' ] = info.loc[ row['midpoint_index'] ]['range']
temp_table.loc[ index, 'midpoint_index_minus_one_cumulative_sum'] = row[ row['midpoint_index']-1 ]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# Calculation = midpoint_lower::numeric + (midpoint_range::numeric * ((midpoint - midpoint_upto_agg) / nullif(midpoint_total, 0)))
# Calculation = W3 + X3 * ((V3 - Y3) / Z3)
# V3 -> midpoint of households == total households / 2
# W3 -> lower limit of the income range containing the midpoint == row['lower']
# X3 -> width of the interval containing the median == row['range']
# Z3 -> number of households within the interval containing the median
# Y3 -> cumulative frequency up to, but NOT including, the median interval
#~~~~~~~~~~~~~~~
temp_table['final'] = temp_table['midpoint_index_lower']+temp_table['midpoint_index_range']*((temp_table['midpoint']-temp_table['midpoint_index_minus_one_cumulative_sum'])/temp_table['midpoint_index_value'])
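# Worked example of the interpolation above with made-up numbers: a CSA with 1,000 households has a
# midpoint of 500; if the cumulative count first reaches 500 inside the $40,000-$44,999 bracket
# (lower = 40000, range = 4999), that bracket holds 120 households and 450 households fall below it,
# so the estimated median is 40000 + 4999 * ((500 - 450) / 120) ≈ 42,083.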
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S1901_C01_012E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
temp_table['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return temp_table['final']
"""
/* <mmhhi_14> */ --
with tbl_csa as (
select a.*,b.count from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B19001_002E','B19001_003E','B19001_004E','B19001_005E','B19001_006E','B19001_007E','B19001_008E','B19001_009E','B19001_010E','B19001_011E','B19001_012E','B19001_013E','B19001_014E','B19001_015E','B19001_016E','B19001_017E','B19013_001E'])
a left join (select csa,count(*) as count from vital_signs.tracts group by csa) b
on a.csa = b.csa
),
info as (
select 'B19001_002E' as variable, 0 as lower, 10000 as range
union all select 'B19001_003E' as variable, 10000 as lower, 4999 as range
union all select 'B19001_004E' as variable, 15000 as lower, 4999 as range
union all select 'B19001_005E' as variable, 20000 as lower, 4999 as range
union all select 'B19001_006E' as variable, 25000 as lower, 4999 as range
union all select 'B19001_007E' as variable, 30000 as lower, 4999 as range
union all select 'B19001_008E' as variable, 35000 as lower, 4999 as range
union all select 'B19001_009E' as variable, 40000 as lower, 4999 as range
union all select 'B19001_010E' as variable, 45000 as lower, 4999 as range
union all select 'B19001_011E' as variable, 50000 as lower, 9999 as range
union all select 'B19001_012E' as variable, 60000 as lower, 14999 as range
union all select 'B19001_013E' as variable, 75000 as lower, 24999 as range
union all select 'B19001_014E' as variable, 100000 as lower, 24999 as range
union all select 'B19001_015E' as variable, 125000 as lower, 24999 as range
union all select 'B19001_016E' as variable, 150000 as lower, 49000 as range
union all select 'B19001_017E' as variable, 200000 as lower, null as range
),
csa_agg as (
select csa,value as total,count,
ARRAY[
(value[1]),
(value[1] + value[2]),
(value[1] + value[2] + value[3]),
(value[1] + value[2] + value[3] + value[4]),
(value[1] + value[2] + value[3] + value[4] + value[5]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14] + value[15]),
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14] + value[15] + value[16])
] as agg,
value[17] as median,
variable from tbl_csa
),
csa_agg2 as (
select csa,count,median,total,agg,variable,
agg[16]/2::numeric as midpoint from csa_agg
),
csa_agg3 as (
select csa,count,median,total,agg,variable,midpoint,
(case
when midpoint <= agg[1] then 1
when midpoint > agg[1] and midpoint <= agg[2] then 2
when midpoint > agg[2] and midpoint <= agg[3] then 3
when midpoint > agg[3] and midpoint <= agg[4] then 4
when midpoint > agg[4] and midpoint <= agg[5] then 5
when midpoint > agg[5] and midpoint <= agg[6] then 6
when midpoint > agg[6] and midpoint <= agg[7] then 7
when midpoint > agg[7] and midpoint <= agg[8] then 8
when midpoint > agg[8] and midpoint <= agg[9] then 9
when midpoint > agg[9] and midpoint <= agg[10] then 10
when midpoint > agg[10] and midpoint <= agg[11] then 11
when midpoint > agg[11] and midpoint <= agg[12] then 12
when midpoint > agg[12] and midpoint <= agg[13] then 13
when midpoint > agg[13] and midpoint <= agg[14] then 14
when midpoint > agg[14] and midpoint <= agg[15] then 15
when midpoint > agg[15] and midpoint <= agg[16] then 16
when midpoint > agg[16] then 17
end) as midpoint_idx from csa_agg2
),
csa_agg4 as (
select csa,count,median,total,agg,variable,midpoint,midpoint_idx,
total[midpoint_idx] as midpoint_total,
(case
when (midpoint_idx - 1) = 0 then 0
else total[(midpoint_idx - 1)]
end) as midpoint_upto_total,
agg[midpoint_idx] as midpoint_agg, (case when (midpoint_idx - 1) = 0 then 0 else agg[(midpoint_idx - 1)] end) as midpoint_upto_agg,
variable[midpoint_idx] as midpoint_variable
from csa_agg3
),
csa_agg5 as (
select a.*,b.lower as midpoint_lower, b.range as midpoint_range from
csa_agg4 a left join info b on a.midpoint_variable = b.variable
),
tbl as (
select (CASE
when count = 1 OR csa = 'Baltimore City'
then median
else
(midpoint_lower::numeric +
(midpoint_range::numeric * (
(midpoint - midpoint_upto_agg) / nullif(midpoint_total,0)
)
)
)
END) as result,csa
from csa_agg5
)
UPDATE vital_signs.data
set mhhi = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: nohhint.py
#Author: <NAME>
#Date: 1/25/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B28011 - INTERNET SUBSCRIPTIONS IN HOUSEHOLD
# Universe: Households
#purpose: Produce Percent of Households with No Internet Access Indicator
#input: Year
#output:
import pandas as pd
import glob
def nohhint( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B28011*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B28011_001E', 'B28011_008E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B28011_008E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B28011_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[2] / nullif(value[1],0) )*100, with value[1] = B28011_001E (total households) and value[2] = B28011_008E (no internet access)
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
WITH tbl AS (
select csa,
( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25091_008E','B25091_009E','B25091_010E','B25091_011E','B25091_002E'])
)
update vital_signs.data
set affordm = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: novhcl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08201 - HOUSEHOLD SIZE BY VEHICLES AVAILABLE
# Universe: Households
#purpose: Produce Sustainability - Percent of Households with No Vehicles Available Indicator
#input: Year
#output:
import pandas as pd
import glob
def novhcl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08201*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08201_002E','B08201_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08201_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08201_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[1]/ nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <novhcl_14> */ --
WITH tbl AS (
select csa,
( value[1]/ nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08201_002E','B08201_001E'])
)
update vital_signs.data
set novhcl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: paa.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def paa( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Reference the Hispanic or Latino total column as a sanity check that it is present (the bare lookup raises a KeyError if it is missing; the value itself is not used in this indicator)
df['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['African-American%NH'] = df[ 'B03002_004E_Total_Not_Hispanic_or_Latino_Black_or_African_American_alone' ]/ tot * 100
return df1['African-American%NH']
# Cell
#File: ppac.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def ppac( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Reference the Hispanic or Latino total column as a sanity check that it is present (the bare lookup raises a KeyError if it is missing; the value itself is not used in this indicator)
df['B03002_012E_Total_Hispanic_or_Latino']
df1 = | pd.DataFrame() | pandas.DataFrame |
from IPython import get_ipython
get_ipython().magic('reset -sf')
import os, shutil
import re
import csv
from utils import bigrams, trigram, replace_collocation
from tika import parser
import timeit
import pandas as pd
from nltk.stem import PorterStemmer
import numpy as np
import pickle
import random
from scipy import sparse
import itertools
from scipy.io import savemat, loadmat
import string
from sklearn.feature_extraction.text import CountVectorizer
from gensim.test.utils import datapath
from gensim.models.word2vec import Text8Corpus
from gensim.models.phrases import Phrases
# from gensim.models.phrases import ENGLISH_CONNECTOR_WORDS
import matplotlib
import matplotlib.pyplot as plt
import random
random.seed(2019)
TRANSCRIPT_PATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/collection/python/output/transcript_raw_text")
BB_PATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/collection/python/output/bluebook_raw_text")
STATEMENT_PATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/derivation/python/output/statements_text_extraction.csv")
OUTPATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/etm/data")
SPEAKER_PATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/analysis/python/output")
ECONDATA = os.path.expanduser("~/Dropbox/MPCounterfactual/src/economic_data")
def generate_rawtranscripts():
raw_doc = os.listdir(TRANSCRIPT_PATH) # as above
filelist = sorted(raw_doc) # sort the raw transcript text files in order
onlyfiles = [f for f in filelist if os.path.isfile(os.path.join(TRANSCRIPT_PATH, f))] # keep if in correct dir
raw_text = pd.DataFrame([]) # empty dataframe
start = timeit.default_timer()
for i, file in enumerate(filelist):
#print('Document {} of {}: {}'.format(i, len(filelist), file))
with open(os.path.join(TRANSCRIPT_PATH, file), 'r') as inf:
parsed = inf.read()
try:
pre = re.compile("Transcript\s?of\s?(?:the:?)?\s?Federal\s?Open\s?Market\s?Committee",re.IGNORECASE)
parsed = re.split(pre,parsed)[1]
except:
try:
pre = re.compile("Transcript\s?of\s?(?:Telephone:?)?\s?Conference\s?Call",re.IGNORECASE)
parsed = re.split(pre,parsed)[1]
except:
print("No split")
parsed = parsed
interjections = re.split('\nMR. |\nMS. |\nCHAIRMAN |\nVICE CHAIRMAN ', parsed) # split the entire string by the names (looking for MR, MS, Chairman or Vice Chairman)
temp_df = pd.DataFrame(columns=['Date', 'Speaker', 'content']) # create a temporary dataframe
interjections = [interjection.replace('\n', ' ') for interjection in
interjections] # replace \n linebreaks with spaces
temp = [re.split('(^\S*)', interjection.lstrip()) for interjection in
interjections] # split off the first token (the speaker name); splitting on a period alone fails occasionally when the tag ends in other punctuation or whitespace
speaker = []
content = []
for interjection in temp:
speaker.append(interjection[1].strip(string.punctuation))
content.append(interjection[2])
temp_df['Speaker'] = speaker
temp_df['content'] = content # save interjections
temp_df['Date'] = file[:10]
raw_text = pd.concat([raw_text, temp_df], axis=0)
end = timeit.default_timer()
#raw_text.to_excel(os.path.join(CACHE_PATH,'raw_text.xlsx')) # save as raw_text.xlsx
print("Transcripts processed. Time: {}".format(end - start))
docs = raw_text.groupby('Date')['content'].sum().to_list()
return docs,raw_text
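# Minimal sketch of the speaker split used above, on a made-up snippet rather than an actual transcript:
#
# toy = "\nCHAIRMAN GREENSPAN. Good morning.\nMR. KOHN. Thank you, Mr. Chairman."
# re.split('\nMR. |\nMS. |\nCHAIRMAN |\nVICE CHAIRMAN ', toy)
# # -> ['', 'GREENSPAN. Good morning.', 'KOHN. Thank you, Mr. Chairman.']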
def digit_count(line,digits=10):
numbers = sum(c.isdigit() for c in line)
boo = numbers> digits
return boo
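# Example: digit_count("1994 1995 1996 1997") -> True (16 digit characters > 10), while
# digit_count("real gdp growth slowed in q4") -> False; used below to drop residual numeric table rows.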
def preprocess_longdocs():
MONTHS = ["january", "february", "march", "april", "may", "june", "july", "august", "september", "october", "november", "december"]
short_months = re.compile("(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) ?\. ?")
nums3 = re.compile("^\(?[\+|\-]?\s?\d+\.?\,?\d*\.?\d*\)?")
nums2 = re.compile('^[- \d\.,p\*†]+$')
nums = re.compile('^ ?(\w )+\w?$')
num = re.compile('^-?\d+[\.,]?\d*[\.,]?\d+$')
#sum_suffix = re.compile('( -?\d+\.?\d*[\w%]?){2,}$')
num_sep = re.compile('[-\d,\.]+ --- [-\d,\.]+ .*[\d%]$')
rd = re.compile('\n\n *recent ?.?d ?evelc?op?me?nts?.? *1?\n.?[\(\n]')
pre = re.compile('ocr process and were not checked or corrected by staff',re.MULTILINE)
ir = re.compile('\n(for immediate release)|(for release at \d:\d+).*\n')
qiv = re.compile('^\(?(\d* ?\-)? ?qiv')
conf = re.compile('^confidential\s*\(fr\)')
last = re.compile("^content[\\n]?\s?last[\\n]?\s?modified[\\n]?\s?\d+\/\d+\/\d+")
files = [file for file in sorted(os.listdir(BB_PATH)) if file!=".DS_Store"]
docs = []
docdates = []
for fidx, infile in enumerate(files):
if os.path.isfile(os.path.join(BB_PATH, infile)):
#print("{}\t{}".format(fidx, infile))
with open(os.path.join(BB_PATH, infile), 'r') as inf:
content = inf.read()
try:
content = re.split(rd, content.lower())[1]
except:
content = content.lower()
try:
content = re.split(pre, content)[1]
except:
content = content
docdates.append(infile[:10])
newlines = []
#content = re.sub(r'-\n\s*', '', content) # remove '-' from words that are split at newlines (very low precision, high recall) not done currently
for idx,line in enumerate(content.split('\n')):
#print(idx)
#if idx%1000==999:
#print(idx)
line=line.strip().strip(" *")
line = re.sub("___+", "", line)
line = re.sub("p - preliminar", "", line)
if not (len(line) < 3 or nums2.match(line.strip("*").strip()) is not None
or nums.match(line.strip("*").strip()) is not None
or (num.match(line.split()[0].strip("*").strip()) is not None and num.match(line.split()[-1].strip("*").strip()) is not None)
or line.lower().strip() in MONTHS
or re.search(short_months, line.lower())
or re.search(conf, line.strip())
or re.search(num_sep, line.lower())
or re.search(nums3, line.strip())
or re.search(qiv, line)
or re.search(last, line)
or digit_count(line)):
newlines.append(line)
docs.append(' '.join(newlines))
print("Bluebooks processed")
return docs,docdates
def contains_punctuation(w):
return any(char in string.punctuation for char in w)
def contains_numeric(w):
return any(char.isdigit() for char in w)
def remove_empty(in_docs,docindex=[], doctype=""):
newdocs = [doc for doc in in_docs if doc!=[]]
newindex = []
try:
newindex = [docindex[docidx] for docidx,doc in enumerate(in_docs) if doc!=[]]
except:
print("No index given")
if len(newdocs) != len(newindex):
raise Exception("Length of index and data does not match")
if doctype == "test":
# Remove test documents with length=1, filtering the index on the same
# positions so documents and index entries stay aligned
keep = [docidx for docidx, doc in enumerate(newdocs) if len(doc) > 1]
newdocs = [newdocs[docidx] for docidx in keep]
try:
newindex = [newindex[docidx] for docidx in keep]
except:
print("No index given")
if len(newdocs) != len(newindex):
raise Exception("Length of index and data does not match")
return newdocs,newindex
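# Quick illustration of remove_empty with made-up values (a sketch, not part of the pipeline):
#   remove_empty([[1, 2], [], [3]], [10, 11, 12])
# drops the empty middle document and returns ([[1, 2], [3]], [10, 12]), keeping the
# index entries aligned with the surviving documents.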
def data_preprocess(docs,docindex,data,DATASET,max_df,min_df):
print("\n"+"*"*80)
init_docs = docs.to_list()
# Create count vectorizer
print('counting document frequency of words...')
cvectorizer = CountVectorizer(min_df=min_df, max_df=max_df, stop_words=None)
cvz = cvectorizer.fit_transform(init_docs).sign()
# Get vocabulary
print('building the vocabulary...')
sum_counts = cvz.sum(axis=0)
v_size = sum_counts.shape[1]
sum_counts_np = np.zeros(v_size, dtype=int)
for v in range(v_size):
sum_counts_np[v] = sum_counts[0,v]
word2id = dict([(w, cvectorizer.vocabulary_.get(w)) for w in cvectorizer.vocabulary_])
id2word = dict([(cvectorizer.vocabulary_.get(w), w) for w in cvectorizer.vocabulary_])
del cvectorizer
print('distinct tokens in corpus (before vectorizer filtering): {}'.format(len(set(" ".join(init_docs).split()))))
# Sort elements in vocabulary
idx_sort = np.argsort(sum_counts_np)
vocab_aux = [id2word[idx_sort[cc]] for cc in range(v_size)]
# Filter out stopwords (if any)
print('vocabulary size after removing stopwords from list: {}'.format(len(vocab_aux)))
# Create dictionary and inverse dictionary
vocab = vocab_aux
del vocab_aux
word2id = dict([(w, j) for j, w in enumerate(vocab)])
id2word = dict([(j, w) for j, w in enumerate(vocab)])
# Split in train/test/valid
print('tokenizing documents and splitting into train/test/valid...')
num_docs = cvz.shape[0]
trSize = int(np.floor(0.85*num_docs))
tsSize = int(np.floor(0.10*num_docs))
vaSize = int(num_docs - trSize - tsSize)
del cvz
idx_permute = np.random.permutation(num_docs).astype(int)
# Remove words not in train_data
vocab = list(set([w for idx_d in range(trSize) for w in init_docs[idx_permute[idx_d]].split() if w in word2id]))
word2id = dict([(w, j) for j, w in enumerate(vocab)])
id2word = dict([(j, w) for j, w in enumerate(vocab)])
print(' vocabulary after removing words not in train: {}'.format(len(vocab)))
docs_all = [[word2id[w] for w in init_docs[idx_d].split() if w in word2id] for idx_d in range(num_docs)]
docs_tr = [[word2id[w] for w in init_docs[idx_permute[idx_d]].split() if w in word2id] for idx_d in range(trSize)]
docs_ts = [[word2id[w] for w in init_docs[idx_permute[idx_d+trSize]].split() if w in word2id] for idx_d in range(tsSize)]
docs_va = [[word2id[w] for w in init_docs[idx_permute[idx_d+trSize+tsSize]].split() if w in word2id] for idx_d in range(vaSize)]
print(' number of documents: {} [this should be equal to {}]'.format(len(docs_all), num_docs))
print(' number of documents (train): {} [this should be equal to {}]'.format(len(docs_tr), trSize))
print(' number of documents (test): {} [this should be equal to {}]'.format(len(docs_ts), tsSize))
print(' number of documents (valid): {} [this should be equal to {}]'.format(len(docs_va), vaSize))
idx_all = list(range(num_docs))  # docs_all above is built in original (unpermuted) order
idx_tr = [idx_permute[idx_d] for idx_d in range(trSize)]
idx_ts = [idx_permute[idx_d] for idx_d in range(tsSize)]
idx_va = [idx_permute[idx_d] for idx_d in range(vaSize)]
# Remove empty documents
print('removing empty documents...')
docs_all,idx_all = remove_empty(docs_all, idx_all)
docs_tr,idx_tr = remove_empty(docs_tr, idx_tr)
docs_ts,idx_ts = remove_empty(docs_ts, idx_ts, doctype = "test")
docs_va,idx_va = remove_empty(docs_va, idx_va)
# Split test set in 2 halves
print('splitting test documents in 2 halves...')
docs_ts_h1 = [[w for i,w in enumerate(doc) if i<=len(doc)/2.0-1] for doc in docs_ts]
docs_ts_h2 = [[w for i,w in enumerate(doc) if i>len(doc)/2.0-1] for doc in docs_ts]
idx_ts_h1 = list(idx_ts)
idx_ts_h2 = list(idx_ts)
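# Note (interpretation, not stated elsewhere in the script): splitting each test document's
# tokens into two halves follows the ETM-style document-completion setup, where one half is
# used to infer a document's topic proportions and the other half to evaluate them.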
# Getting lists of words and doc_indices
print('creating lists of words...')
def create_list_words(in_docs):
return [x for y in in_docs for x in y]
words_all = create_list_words(docs_all)
words_tr = create_list_words(docs_tr)
words_ts = create_list_words(docs_ts)
words_ts_h1 = create_list_words(docs_ts_h1)
words_ts_h2 = create_list_words(docs_ts_h2)
words_va = create_list_words(docs_va)
print(' len(words_all): ', len(words_all))
print(' len(words_tr): ', len(words_tr))
print(' len(words_ts): ', len(words_ts))
print(' len(words_ts_h1): ', len(words_ts_h1))
print(' len(words_ts_h2): ', len(words_ts_h2))
print(' len(words_va): ', len(words_va))
# Get doc indices
print('getting doc indices...')
def create_doc_indices(in_docs):
aux = [[j for i in range(len(doc))] for j, doc in enumerate(in_docs)]
return [int(x) for y in aux for x in y]
doc_indices_all = create_doc_indices(docs_all)
doc_indices_tr = create_doc_indices(docs_tr)
doc_indices_ts = create_doc_indices(docs_ts)
doc_indices_ts_h1 = create_doc_indices(docs_ts_h1)
doc_indices_ts_h2 = create_doc_indices(docs_ts_h2)
doc_indices_va = create_doc_indices(docs_va)
print(' len(np.unique(doc_indices_all)): {} [this should be {}]'.format(len(np.unique(doc_indices_all)), len(docs_all)))
print(' len(np.unique(doc_indices_tr)): {} [this should be {}]'.format(len(np.unique(doc_indices_tr)), len(docs_tr)))
print(' len(np.unique(doc_indices_ts)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts)), len(docs_ts)))
print(' len(np.unique(doc_indices_ts_h1)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts_h1)), len(docs_ts_h1)))
print(' len(np.unique(doc_indices_ts_h2)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts_h2)), len(docs_ts_h2)))
print(' len(np.unique(doc_indices_va)): {} [this should be {}]'.format(len(np.unique(doc_indices_va)), len(docs_va)))
# Number of documents in each set
n_docs_all = len(docs_all)
n_docs_tr = len(docs_tr)
n_docs_ts = len(docs_ts)
n_docs_ts_h1 = len(docs_ts_h1)
n_docs_ts_h2 = len(docs_ts_h2)
n_docs_va = len(docs_va)
# Remove unused variables
del docs_tr
del docs_ts
del docs_ts_h1
del docs_ts_h2
del docs_va
# Save vocabulary to file
path_save = f'{OUTPATH}/{DATASET}/'
if not os.path.isdir(path_save):
os.system('mkdir -p ' + path_save)
#print("money" in vocab)
with open(path_save + 'vocab.pkl', 'wb') as f:
pickle.dump(vocab, f)
# Create covariates
if not isinstance(data, type(None)):
print('create covariates...')
data_all = data.iloc[idx_all]
data_tr = data.iloc[idx_tr]
data_ts = data.iloc[idx_ts]
data_va = data.iloc[idx_va]
data_ts_h1 = data.iloc[idx_ts_h1]
data_ts_h2 = data.iloc[idx_ts_h2]
data_all.to_pickle(f"{path_save}/data_all.pkl")
data_tr.to_pickle(f"{path_save}/data_tr.pkl")
data_ts.to_pickle(f"{path_save}/data_ts.pkl")
data_va.to_pickle(f"{path_save}/data_va.pkl")
data_ts_h1.to_pickle(f"{path_save}/data_ts_h1.pkl")
data_ts_h2.to_pickle(f"{path_save}/data_ts_h2.pkl")
# Create bow representation
print('creating bow representation...')
def create_bow(doc_indices, words, n_docs, vocab_size):
return sparse.coo_matrix(([1]*len(doc_indices),(doc_indices, words)), shape=(n_docs, vocab_size)).tocsr()
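# Illustration with hypothetical values: create_bow([0, 0, 1], [5, 7, 5], n_docs=2, vocab_size=10)
# builds a 2x10 CSR matrix with ones at (0, 5), (0, 7) and (1, 5) -- one row per document,
# one column per vocabulary term.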
bow_all = create_bow(doc_indices_all, words_all, n_docs_all, len(vocab))
bow_tr = create_bow(doc_indices_tr, words_tr, n_docs_tr, len(vocab))
bow_ts = create_bow(doc_indices_ts, words_ts, n_docs_ts, len(vocab))
bow_ts_h1 = create_bow(doc_indices_ts_h1, words_ts_h1, n_docs_ts_h1, len(vocab))
bow_ts_h2 = create_bow(doc_indices_ts_h2, words_ts_h2, n_docs_ts_h2, len(vocab))
bow_va = create_bow(doc_indices_va, words_va, n_docs_va, len(vocab))
del words_tr
del words_ts
del words_ts_h1
del words_ts_h2
del words_va
del doc_indices_tr
del doc_indices_ts
del doc_indices_ts_h1
del doc_indices_ts_h2
del doc_indices_va
del vocab
# Split bow into token/value pairs
print('splitting bow into token/value pairs and saving to disk...')
def split_bow(bow_in, n_docs):
indices = [[w for w in bow_in[doc,:].indices] for doc in range(n_docs)]
counts = [[c for c in bow_in[doc,:].data] for doc in range(n_docs)]
return indices, counts
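# Continuing the hypothetical 2x10 example above, split_bow returns tokens [[5, 7], [5]] and
# counts [[1, 1], [1]] -- per-document lists of nonzero column indices and their counts,
# which is the layout that savemat stores below.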
bow_all_tokens, bow_all_counts = split_bow(bow_all, n_docs_all)
savemat(path_save + 'bow_all_tokens.mat', {'tokens': bow_all_tokens}, do_compression=True)
savemat(path_save + 'bow_all_counts.mat', {'counts': bow_all_counts}, do_compression=True)
del bow_all
del bow_all_tokens
del bow_all_counts
bow_tr_tokens, bow_tr_counts = split_bow(bow_tr, n_docs_tr)
savemat(path_save + 'bow_tr_tokens.mat', {'tokens': bow_tr_tokens}, do_compression=True)
savemat(path_save + 'bow_tr_counts.mat', {'counts': bow_tr_counts}, do_compression=True)
del bow_tr
del bow_tr_tokens
del bow_tr_counts
bow_ts_tokens, bow_ts_counts = split_bow(bow_ts, n_docs_ts)
savemat(path_save + 'bow_ts_tokens.mat', {'tokens': bow_ts_tokens}, do_compression=True)
savemat(path_save + 'bow_ts_counts.mat', {'counts': bow_ts_counts}, do_compression=True)
del bow_ts
del bow_ts_tokens
del bow_ts_counts
bow_ts_h1_tokens, bow_ts_h1_counts = split_bow(bow_ts_h1, n_docs_ts_h1)
savemat(path_save + 'bow_ts_h1_tokens.mat', {'tokens': bow_ts_h1_tokens}, do_compression=True)
savemat(path_save + 'bow_ts_h1_counts.mat', {'counts': bow_ts_h1_counts}, do_compression=True)
del bow_ts_h1
del bow_ts_h1_tokens
del bow_ts_h1_counts
bow_ts_h2_tokens, bow_ts_h2_counts = split_bow(bow_ts_h2, n_docs_ts_h2)
savemat(path_save + 'bow_ts_h2_tokens.mat', {'tokens': bow_ts_h2_tokens}, do_compression=True)
savemat(path_save + 'bow_ts_h2_counts.mat', {'counts': bow_ts_h2_counts}, do_compression=True)
del bow_ts_h2
del bow_ts_h2_tokens
del bow_ts_h2_counts
bow_va_tokens, bow_va_counts = split_bow(bow_va, n_docs_va)
savemat(path_save + 'bow_va_tokens.mat', {'tokens': bow_va_tokens}, do_compression=True)
savemat(path_save + 'bow_va_counts.mat', {'counts': bow_va_counts}, do_compression=True)
del bow_va
del bow_va_tokens
del bow_va_counts
print(f'{DATASET} ready !!')
print('*' * 80)
def statements_raw():
data = pd.read_csv(STATEMENT_PATH)
docs = data["statement"].to_list()
docdates = data["meeting_end_date"].to_list()
print("Statements processed")
return docs,docdates
def word_count(df,name):
df["date"] = pd.to_datetime(df["date"])
df["year"] = df["date"].dt.year
df_year = df.groupby(['year'])["content"].agg(lambda x: ' '.join(x)).reset_index()["content"]
df_year_date = df.groupby(['year'])["content"].agg(lambda x: ' '.join(x)).reset_index()['year'].to_list()
df_year = data_clean(df_year,df_year.index)
# produce doc stats
token_counts_yr = [len(doc) for doc in df_year["cleaned_content"].to_list()]
fig, ax = plt.subplots()
ax.plot(df_year_date,token_counts_yr)
plt.savefig(f"output/{name}_yearly.png")
df_date = df.groupby(['date'])["content"].agg(lambda x: ' '.join(x)).reset_index()["content"]
df_date_date = df.groupby(['date'])["content"].agg(lambda x: ' '.join(x)).reset_index()['date'].to_list()
df_date = data_clean(df_date,df_date.index)
# produce doc stats
token_counts = [len(doc) for doc in df_date["cleaned_content"].to_list()]
del ax
del fig
fig, ax = plt.subplots()
df_date_date = [np.datetime64(dat) for dat in df_date_date]
ax.plot(df_date_date,token_counts)
plt.savefig(f"output/{name}_mymeeting.png")
plt.suptitle(name)
return df_year, token_counts_yr
def build_embdata(max_df,min_df,phrase_itera,threshold,DATASET):
bb_docs,docdates = preprocess_longdocs()
bb_df = pd.DataFrame(zip(docdates,bb_docs),columns=["date","content"])
word_count(bb_df,"bluebook")
transcript_docs,raw_text = generate_rawtranscripts()
transcripts_pr = raw_text.groupby(['Date'])["content"].agg(lambda x: ' '.join(x)).reset_index()["content"].to_list()
transcripts_dates = raw_text.groupby(['Date'])["content"].agg(lambda x: ' '.join(x)).reset_index()['Date'].to_list()
transcript_df = pd.DataFrame(zip(transcripts_dates,transcripts_pr),columns=["date","content"])
word_count(transcript_df,"transcript")
statement_docs,docdates = statements_raw()
statement_df = pd.DataFrame(zip(docdates,statement_docs),columns=["date","content"])
word_count(statement_df,"statements")
if not os.path.exists(f"{OUTPATH}/{DATASET}"):
os.makedirs(f"{OUTPATH}/{DATASET}")
docs = bb_docs + transcript_docs + statement_docs
df_cleancontent = data_clean(pd.Series(docs))
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# daily frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
#!/usr/bin/python
import sys, getopt
import os
import pandas as pd
import numpy as np
import pyquaternion as pyq
from pyquaternion import Quaternion
from scipy import signal
from scipy.spatial.transform import Slerp
from scipy.spatial.transform import Rotation as R
def main(argv):
inputfile = ''
calfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:c:o:",["ifile=", "cfile=","ofile="])
except getopt.GetoptError:
print('test.py -i <inputfile> -c <calfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test.py -i <inputfile> -c <calfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-c", "--ifile"):
calfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
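# Example invocation (hypothetical script and file names):
#   python joint_angles.py -i trial01_joints.csv -c standing_calibration.csv -o joint_angles_out.csv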
# Creating Functions
def orientation_matrix(q0, q1, q2, q3):
# based on https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
r11 = 2 * (q0 ** 2 + q1 ** 2) - 1
r12 = 2 * (q1 * q2 - q0 * q3)
r13 = 2 * (q1 * q3 + q0 * q2)
r21 = 2 * (q1 * q2 + q0 * q3)
r22 = 2 * (q0 ** 2 + q2 ** 2) - 1
r23 = 2 * (q2 * q3 - q0 * q1)
r31 = 2 * (q1 * q3 - q0 * q2)
r32 = 2 * (q2 * q3 + q0 * q1)
r33 = 2 * (q0 ** 2 + q3 ** 2) - 1
return r11, r12, r13, r21, r22, r23, r31, r32, r33
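# Sanity-check sketch (not executed here; assumes pyquaternion semantics): for a unit
# quaternion the matrix above should match pyquaternion's own conversion, e.g.
#   q = Quaternion(axis=[1, 0, 0], angle=np.pi / 2)
#   np.allclose(np.array(orientation_matrix(*q.elements)).reshape(3, 3), q.rotation_matrix)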
def compute_relative_orientation(seg, cal):
'''
Calculating the relative orientation between two matrices. This is used for the initial normalization
procedure using the standing calibration
'''
R_11 = np.array([])
R_12 = np.array([])
R_13 = np.array([])
R_21 = np.array([])
R_22 = np.array([])
R_23 = np.array([])
R_31 = np.array([])
R_32 = np.array([])
R_33 = np.array([])
for i in range(seg.shape[0]):
segment = np.asmatrix([
[np.array(seg['o11'])[i], np.array(seg['o12'])[i], np.array(seg['o13'])[i]],
[np.array(seg['o21'])[i], np.array(seg['o22'])[i], np.array(seg['o23'])[i]],
[np.array(seg['o31'])[i], np.array(seg['o32'])[i], np.array(seg['o33'])[i]]
])
segment_cal = np.asmatrix([
[np.array(cal['o11'])[i], np.array(cal['o12'])[i], np.array(cal['o13'])[i]],
[np.array(cal['o21'])[i], np.array(cal['o22'])[i], np.array(cal['o23'])[i]],
[np.array(cal['o31'])[i], np.array(cal['o32'])[i], np.array(cal['o33'])[i]]
])
# normalization
r = np.matmul(segment, segment_cal.T)
new_orientations = np.asarray(r).reshape(-1)
R_11 = np.append(R_11, new_orientations[0])
R_12 = np.append(R_12, new_orientations[1])
R_13 = np.append(R_13, new_orientations[2])
R_21 = np.append(R_21, new_orientations[3])
R_22 = np.append(R_22, new_orientations[4])
R_23 = np.append(R_23, new_orientations[5])
R_31 = np.append(R_31, new_orientations[6])
R_32 = np.append(R_32, new_orientations[7])
R_33 = np.append(R_33, new_orientations[8])
return R_11, R_12, R_13, R_21, R_22, R_23, R_31, R_32, R_33
def compute_joint_angle(df, child, parent):
c = df[df[' jointType'] == child]
p = df[df[' jointType'] == parent]
ml = np.array([])
ap = np.array([])
v = np.array([])
# Compute Rotation Matrix Components
for i in range(c.shape[0]):
segment = np.asmatrix([
[np.array(c['n_o11'])[i], np.array(c['n_o12'])[i], np.array(c['n_o13'])[i]],
[np.array(c['n_o21'])[i], np.array(c['n_o22'])[i], np.array(c['n_o23'])[i]],
[np.array(c['n_o31'])[i], np.array(c['n_o32'])[i], np.array(c['n_o33'])[i]]
])
reference_segment = np.asmatrix([
[np.array(p['n_o11'])[i], np.array(p['n_o12'])[i], np.array(p['n_o13'])[i]],
[np.array(p['n_o21'])[i], np.array(p['n_o22'])[i], np.array(p['n_o23'])[i]],
[np.array(p['n_o31'])[i], np.array(p['n_o32'])[i], np.array(p['n_o33'])[i]]
])
# transformation of segment to reference segment
r = np.matmul(reference_segment.T, segment)
# decomposition to Euler angles
rotations = R.from_matrix(r).as_euler('xyz', degrees=True)
ml = np.append(ml, rotations[0])
ap = np.append(ap, rotations[1])
v = np.append(v, rotations[2])
return ml, ap, v
def resample_df(d, new_freq=30, method='linear'):
# Resamples data at 30Hz unless otherwise specified
joints_without_quats = [3, 15, 19, 21, 22, 23, 24]
resampled_df = pd.DataFrame(
columns=['# timestamp', ' jointType', ' orientation.X', ' orientation.Y', ' orientation.Z',
' orientation.W', ' position.X', ' position.Y', ' position.Z'])
new_df = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 26 16:16:58 2018
@author: rugantio
"""
import pandas as pd
df = pd.read_csv('./exploit.csv')
df['date'] = pd.to_datetime(df['date'])
import abc
import time, random
import pandas as pd
import os
import numpy as np
import benchutils as utils
import knowledgebases
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_selection import RFE, VarianceThreshold
from sklearn import preprocessing
class FeatureSelectorFactory():
"""Singleton class.
The implementation is wrapped in a nested private class that Sphinx does not render, so have a look at the descriptions in the source code.
Creates a feature selector object based on a given name.
New feature selection approaches must be registered here.
Names for feature selectors must follow to a particular scheme, with keywords separated by _:
- first keyword is the actual selector name
- if needed, second keyword is the knowledge base
- if needed, third keyword is the (traditional) approach to be combined
Examples:
- Traditional Approaches have only one keyword, e.g. InfoGain or ANOVA
- LassoPenalty_KEGG provides KEGG information to the LassoPenalty feature selection approach
- Weighted_KEGG_InfoGain --> Factory creates an instance of KBweightedSelector which uses KEGG as knowledge base and InfoGain as traditional selector.
While the focus here lies on the combination of traditional approaches with prior biological knowledge, it is theoretically possible to use ANY selector object for combination that inherits from :class:`FeatureSelector`.
:param config: configuration parameters for UMLS web service as specified in config file.
:type config: dict
"""
class __FeatureSelectorFactory():
def createFeatureSelector(self, name):
"""Create selector from a given name.
Separates creation process into (traditional) approaches (only one keyword), approaches requiring a knowledge base, and approaches requiring both a knowledge base and another selector, e.g. a traditional one.
:param name: selector name following the naming conventions: first keyword is the actual selector name, second keyword is the knowledge base, third keyword another selector to combine. Keywords must be separated by "_". Example: Weighted_KEGG_InfoGain
:type name: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
parts = name.split("_")
if len(parts) == 1:
return self.createTraditionalSelector(name)
elif len(parts) == 2:
return self.createIntegrativeSelector(parts[0], parts[1])
elif len(parts) == 3:
return self.createCombinedSelector(parts[0], parts[1], parts[2])
utils.logError("ERROR: The provided selector name does not correspond to the expected format. "
"A selector name should consist of one or more keywords separated by _. "
"The first keyword is the actual approach (e.g. weighted, or a traditional approach), "
"the second keyword corresponds to a knowledge base to use (e.g. KEGG),"
"the third keyword corresponds to a traditional selector to use (e.g. when using a modifying or combining approach")
exit()
def createTraditionalSelector(self, selectorName):
"""Creates a (traditional) selector (without a knowledge base) from a given name.
Register new implementations of a (traditional) selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
if selectorName == "Random":
return RandomSelector()
if selectorName == "VB-FS":
return VarianceSelector()
if selectorName == "Variance":
return Variance2Selector()
if selectorName == "ANOVA":
return AnovaSelector()
if selectorName == "mRMR":
return MRMRSelector()
if selectorName == "SVMpRFE":
return SVMRFESelector()
# RUN WEKA FEATURE SELECTION AS SELECTED
if selectorName == "InfoGain":
return InfoGainSelector()
if selectorName == "ReliefF":
return ReliefFSelector()
#if "-RFE" in selectorName or "-SFS" in selectorName: -- SFS is currently disabled because sometimes the coef_ param is missing and error is thrown
if "-RFE" in selectorName:
return WrapperSelector(selectorName)
if selectorName == "Lasso":
return LassoSelector()
if selectorName == "RandomForest":
return RandomForestSelector()
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createIntegrativeSelector(self, selectorName, kb):
"""Creates a feature selector using a knowledge base from the given selector and knowledge base names.
Register new implementations of prior-knowledge selectors that do not require a (traditional) selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "NetworkActivity":
featuremapper = PathwayActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "CorgsNetworkActivity":
featuremapper = CORGSActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "LassoPenalty":
return LassoPenalty(knowledgebase)
if selectorName == "KBonly":
return KbSelector(knowledgebase)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createCombinedSelector(self, selectorName, trad, kb):
"""Creates a feature selector that combines a knowledge base and another feature selector based on the given names.
Register new implementations of a prior knowledge selector that requires another selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param trad: name of the (traditional) feature selector.
:type trad: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
tradSelector = self.createTraditionalSelector(trad)
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "Postfilter":
return PostFilterSelector(knowledgebase, tradSelector)
if selectorName == "Prefilter":
return PreFilterSelector(knowledgebase, tradSelector)
if selectorName == "Extension":
return ExtensionSelector(knowledgebase, tradSelector)
if selectorName == "Weighted":
return KBweightedSelector(knowledgebase, tradSelector)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
instance = None
def __init__(self):
if not FeatureSelectorFactory.instance:
FeatureSelectorFactory.instance = FeatureSelectorFactory.__FeatureSelectorFactory()
def __getattr__(self, name):
return getattr(self.instance, name)
class FeatureSelector:
"""Abstract super class for feature selection functionality.
Every feature selection class has to inherit from this class and implement its :meth:`FeatureSelector.selectFeatures` method and - if necessary - its :meth:`FeatureSelector.setParams` method.
Once created, feature selection can be triggered by first setting parameters (input, output, etc) as needed with :meth:`FeatureSelector.setParams`.
The actual feature selection is triggered by invoking :meth:`FeatureSelector.selectFeatures`.
:param input: absolute path to input dataset.
:type input: str
:param output: absolute path to output directory (where the ranking will be stored).
:type output: str
:param dataset: the dataset for which to select features. Will be loaded dynamically based on self.input at first usage.
:type dataset: :class:`pandas.DataFrame`
:param dataConfig: config parameters for input data set.
:type dataConfig: dict
:param name: selector name
:type name: str
"""
def __init__(self, name):
self.input = None
self.output = None
self.dataset = None
self.loggingDir = None
self.dataConfig = utils.getConfig("Dataset")
self.setTimeLogs(utils.createTimeLog())
self.enableLogFlush()
self.name = name
super().__init__()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Invoke feature selection functionality in this method when implementing a new selector
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getTimeLogs(self):
"""Gets all logs for this selector.
:return: dataframe of logged events containing start/end time, duration, and a short description.
:rtype: :class:`pandas.DataFrame`
"""
return self.timeLogs
def setTimeLogs(self, newTimeLogs):
"""Overwrites the current logs with new ones.
:param newTimeLogs: new dataframe of logged events containing start/end time, duration, and a short description.
:type newTimeLogs: :class:`pandas.DataFrame`
"""
self.timeLogs = newTimeLogs
def disableLogFlush(self):
"""Disables log flushing (i.e., writing the log to a separate file) of the selector at the end of feature selection.
This is needed when a :class:`CombiningSelector` uses a second selector and wants to prevent that selector from writing its own log file, which could otherwise overwrite the logs of another selector with the same name.
"""
self.enableLogFlush = False
def enableLogFlush(self):
"""Enables log flushing, i.e. writing the logs to a separate file at the end of feature selection.
"""
self.enableLogFlush = True
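# Note: this assignment reuses the method's own name, so after the first call (from
# __init__) the boolean instance attribute shadows enableLogFlush() on the instance;
# selectFeatures() later reads self.enableLogFlush as this flag.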
def getName(self):
"""Gets the selector's name.
:return: selector name.
:rtype: str
"""
return self.name
def getData(self):
"""Gets the labeled dataset from which to select features.
:return: dataframe containing the dataset with class labels.
:rtype: :class:`pandas.DataFrame`
"""
if self.dataset is None:
self.dataset = pd.read_csv(self.input, index_col=0)
return self.dataset
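# Implied input layout (inferred from the accessors below, not documented elsewhere): a CSV
# whose first column is the sample index and which contains a "classLabel" column plus one
# column per feature.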
def getUnlabeledData(self):
"""Gets the dataset without labels.
:return: dataframe containing the dataset without class labels.
:rtype: :class:`pandas.DataFrame`
"""
dataset = self.getData()
return dataset.loc[:, dataset.columns != "classLabel"]
def getFeatures(self):
"""Gets features from the dataset.
:return: list of features.
:rtype: list of str
"""
return self.getData().columns[1:]
def getUniqueLabels(self):
"""Gets the unique class labels available in the dataset.
:return: list of distinct class labels.
:rtype: list of str
"""
return list(set(self.getLabels()))
def getLabels(self):
"""Gets the labels in the data set.
:return: all labels from the dataset.
:rtype: list of str
"""
return list(self.getData()["classLabel"])
def setParams(self, inputPath, outputDir, loggingDir):
"""Sets parameters for the feature selection run: path to the input datast and path to the output directory.
:param inputPath: absolute path to the input file containing the dataset for analysis.
:type inputPath: str
:param outputDir: absolute path to the output directory (where to store the ranking)
:type outputDir: str
:param loggingDir: absolute path to the logging directory (where to store log files)
:type loggingDir: str
"""
self.input = inputPath
self.output = outputDir
self.loggingDir = loggingDir
def writeRankingToFile(self, ranking, outputFile, index = False):
"""Writes a given ranking to a specified file.
:param ranking: dataframe with the ranking.
:type ranking: :class:`pandas.DataFrame`
:param outputFile: absolute path of the file where ranking will be stored.
:type outputFile: str
:param index: whether to write the dataframe's index or not.
:type index: bool, default False
"""
if not ranking.empty:
ranking.to_csv(outputFile, index = index, sep = "\t")
else:
#make sure to write at least the header if the dataframe is empty
with open(outputFile, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
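#--- illustrative note (not part of the original module) ---
#The selectors assume a labeled CSV layout like the sketch below: the sample
#identifier in the first column (used as index), the class label in a column
#named "classLabel", and all remaining columns as features. All names and
#values here are made-up examples.
#
#   ,classLabel,geneA,geneB,geneC
#   sample1,tumor,0.12,3.40,1.10
#   sample2,normal,0.02,2.90,0.70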
class PythonSelector(FeatureSelector):
"""Abstract.
Inherit from this class when implementing a feature selector using any of scikit-learn's functionality.
Because selector invocation, input preprocessing, and output postprocessing are typically (nearly) identical for such implementations, this class already encapsulates them.
Instead of implementing :meth:`PythonSelector.selectFeatures`, implement :meth:`PythonSelector.runSelector`.
"""
def __init__(self, name):
super().__init__(name)
@abc.abstractmethod
def runSelector(self, data, labels):
"""Abstract - implement this method when inheriting from this class.
Runs the actual feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
pass
def selectFeatures(self):
"""Executes the feature selection procedure.
Prepares the input data set to match scikit-learn's expected formats and postprocesses the output to create a ranking.
:return: absolute path to the output ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
data, labels = self.prepareInput()
selector = self.runSelector(data, labels)
self.prepareOutput(outputFile, data, selector)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
def prepareInput(self):
"""Prepares the input data set before running any of scikit-learn's selectors.
Removes the labels from the input data set and encodes the labels in numbers.
:return: dataset (without labels) and labels encoded in numbers.
:rtype: :class:`pandas.DataFrame` and list of int
"""
start = time.time()
labels = self.getLabels()
data = self.getUnlabeledData()
le = preprocessing.LabelEncoder()
numeric_labels = le.fit_transform(labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Input Preparation")
return data, numeric_labels
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.scores_
ranking = ranking.sort_values(by='score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
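#--- illustrative sketch (not part of the original module) ---
#A minimal PythonSelector subclass showing the runSelector() contract: only the
#scikit-learn call is implemented here, while PythonSelector.selectFeatures()
#handles input preparation and ranking output. The class name is made up, and
#mutual_info_classif is assumed to be imported from sklearn.feature_selection.
class MutualInformationSelector(PythonSelector):
    def __init__(self):
        super().__init__("MutualInformation")
    def runSelector(self, data, labels):
        #score every feature; prepareOutput() will read selector.scores_
        selector = SelectKBest(mutual_info_classif, k="all")
        selector.fit(data, labels)
        return selector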
class RSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param rConfig: config parameters to execute R code.
:type rConfig: dict
"""
def __init__(self, name):
self.rConfig = utils.getConfig("R")
self.scriptName = "FS_" + name + ".R"
super().__init__(name)
@abc.abstractmethod
def createParams(self, filename):
"""Abstract.
Implement this method to set the parameters your R script requires.
:param filename: absolute path of the output file.
:type filename: str
:return: list of parameters to use for R code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external R code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
params = self.createParams(outputFile)
utils.runRCommand(self.rConfig, self.scriptName , params)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
class JavaSelector(FeatureSelector):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param javaConfig: config parameters to execute java code.
:type javaConfig: dict
"""
def __init__(self, name):
self.javaConfig = utils.getConfig("Java")
super().__init__(name)
@abc.abstractmethod
def createParams(self):
"""Abstract.
Implement this method to set the parameters your java code requires.
:return: list of parameters to use for java code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external java code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.name + ".csv"
params = self.createParams()
utils.runJavaCommand(self.javaConfig, "/WEKA_FeatureSelector.jar", params)
output_filepath = self.output + filename
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return output_filepath
############################### PRIOR KNOWLEDGE SELECTORS ###############################
class PriorKnowledgeSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Super class for all prior knowledge approaches.
If you want to implement your own prior knowledge approach that uses a knowledge base (but neither a second selector nor a network approach), inherit from this class.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param alternativeSearchTerms: list of alternative search terms to use for querying the knowledge base.
:type alternativeSearchTerms: list of str
"""
def __init__(self, name, knowledgebase):
self.knowledgebase = knowledgebase
super().__init__(name)
self.alternativeSearchTerms = self.collectAlternativeSearchTerms()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def collectAlternativeSearchTerms(self):
"""Gets all alternative search terms that were specified in the config file and put them into a list.
:return: list of alternative search terms to use for querying the knowledge base.
:rtype: list of str
"""
alternativeTerms = self.dataConfig["alternativeSearchTerms"].split(" ")
searchTerms = []
for term in alternativeTerms:
searchTerms.append(term.replace("_", " "))
return searchTerms
def getSearchTerms(self):
"""Gets all search terms to use for querying a knowledge base.
Search terms that will be used are a) the class labels in the dataset, and b) the alternative search terms that were specified in the config file.
:return: list of search terms to use for querying the knowledge base.
:rtype: list of str
"""
searchTerms = list(self.getUniqueLabels())
searchTerms.extend(self.alternativeSearchTerms)
return searchTerms
def getName(self):
"""Returns the full name (including applied knowledge base) of this selector.
:return: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
#selector class for modifying integrative approaches
class CombiningSelector(PriorKnowledgeSelector):
"""Super class for prior knoweldge approaches that use a knowledge base AND combine it with any kind of selector, e.g. a traditional approach.
Inherit from this class if you want to implement a feature selector that requires both a knowledge base and another selector, e.g. because it combines information from both.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param tradApproach: any feature selector implementation to use internally, e.g. a traditional approach like ANOVA
:type tradApproach: :class:`FeatureSelector`
"""
def __init__(self, name, knowledgebase, tradApproach):
self.tradSelector = tradApproach
self.tradSelector.disableLogFlush()
super().__init__(name, knowledgebase)
self.tradSelector.setTimeLogs(self.timeLogs)
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method as desired when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getName(self):
"""Returns the full name (including applied knowledge base and feature selector) of this selector.
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.tradSelector.getName() + "_" + self.knowledgebase.getName()
def getExternalGenes(self):
"""Gets all genes related to the provided search terms from the knowledge base.
:returns: list of gene names.
:rtype: list of str
"""
start = time.time()
externalGenes = self.knowledgebase.getRelevantGenes(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Getting External Genes")
return externalGenes
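#--- illustrative sketch (not part of the original module) ---
#A hypothetical CombiningSelector subclass showing one way to combine both
#information sources: run the internal traditional selector, then double the
#scores of features that the knowledge base also reports. It assumes the driver
#has called setParams() on this selector and on its internal selector, and that
#the traditional ranking file is tab-separated with attributeName/score columns
#(the format produced by writeRankingToFile()).
class BoostingSelector(CombiningSelector):
    def selectFeatures(self):
        tradRankingFile = self.tradSelector.selectFeatures()
        ranking = pd.read_csv(tradRankingFile, sep="\t")
        #boost features that are also reported by the knowledge base
        externalGenes = set(self.getExternalGenes())
        boosted = ranking["attributeName"].isin(externalGenes)
        ranking.loc[boosted, "score"] = ranking.loc[boosted, "score"] * 2
        ranking = ranking.sort_values(by="score", ascending=False)
        outputFile = self.output + self.getName() + ".csv"
        self.writeRankingToFile(ranking, outputFile)
        return outputFile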
class NetworkSelector(PriorKnowledgeSelector):
"""Abstract.
Inherit from this class if you want to implement a new network approach that actually conducts feature EXTRACTION, i.e. maps the original data set onto pathway/subnetwork features.
Instead of :meth:`FeatureSelector.selectFeatures` implement :meth:`NetworkSelector.selectPathways` when inheriting from this class.
Instances of :class:`NetworkSelector` and inheriting classes also require a :class:`PathwayMapper` object that transfers the dataset to the new feature space.
Custom implementations thus need to implement a) a selection strategy to select pathways and b) a mapping strategy to compute new feature values for the selected pathways.
:param featureMapper: feature mapping object that transfers the feature space.
:type featureMapper: :class:`FeatureMapper` or inheriting class
"""
def __init__(self, name, knowledgebase, featuremapper):
self.featureMapper = featuremapper
super().__init__(name, knowledgebase)
@abc.abstractmethod
def selectPathways(self, pathways):
"""Selects the pathways that will become the new features of the data set.
Implement this method (instead of :meth:`FeatureSelector.selectFeatures`) when inheriting from this class.
:param pathways: dict of pathways (pathway names as keys) to select from.
:type pathways: dict
:returns: pathway ranking as dataframe
:rtype: :class:`pandas.DataFrame`
"""
pass
def writeMappedFile(self, mapped_data, fileprefix):
"""Writes the mapped dataset with new feature values to the same directory as the original file is located (it will be automatically processed then).
:param mapped_data: dataframe containing the dataset with mapped feature space.
:type mapped_data: :class:`pandas.DataFrame`
:param fileprefix: prefix of the file name, e.g. the directory path
:type fileprefix: str
:return: absolute path of the file name to store the mapped data set.
:rtype: str
"""
mapped_filepath = fileprefix + "_" + self.getName() + ".csv"
mapped_data.to_csv(mapped_filepath)
return mapped_filepath
def getName(self):
"""Gets the selector name (including the knowledge base).
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
def filterPathways(self, pathways):
filtered_pathways = {}
for pathwayName in pathways:
genes = pathways[pathwayName].nodes_by_label.keys()
#check if there is an overlap between the pathway and data set genes
existingGenes = list(set(self.getFeatures()) & set(genes))
if len(existingGenes) > 0:
filtered_pathways[pathwayName] = pathways[pathwayName]
else:
utils.logWarning("WARNING: No genes of pathway " + pathwayName + " found in dataset. Pathway will not be considered")
return filtered_pathways
def selectFeatures(self):
"""Instead of selecting existing features, instances of :class:`NetworkSelector` select pathways or submodules as features.
For that, it first queries its knowledge base for pathways.
It then selects the top k pathways (strategy to be implemented in :meth:`NetworkSelector.selectPathways`) and subsequently maps the dataset to its new feature space.
The mapping will be conducted by an object of :class:`PathwayMapper` or inheriting classes.
If a second dataset for cross-validation is available, the feature space of this dataset will also be transformed.
:returns: absolute path to the pathway ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
overallstart = time.time()
pathways = self.knowledgebase.getRelevantPathways(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, end, "Get Pathways")
#filter pathways to only those that contain at least one gene from the data set
pathways = self.filterPathways(pathways)
start = time.time()
pathwayRanking = self.selectPathways(pathways)
outputFile = self.output + self.getName() + ".csv"
self.writeRankingToFile(pathwayRanking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Pathway Selection")
pathwayNames = pathwayRanking["attributeName"]
start = time.time()
mapped_data = self.featureMapper.mapFeatures(self.getData(), pathways)
fileprefix = os.path.splitext(self.input)[0]
mapped_filepath = self.writeMappedFile(mapped_data, fileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Mapping")
#if crossvalidation is enabled, we also have to map the crossvalidation file
if (utils.getConfigBoolean("Evaluation", "enableCrossEvaluation")):
start = time.time()
#we need to get the cross validation file that had been moved into the intermediate folder
crossValidationPath = utils.getConfigValue("General", "crossVal_preprocessing") + "ready/"
crossValidationFile = utils.getConfigValue("Evaluation", "crossEvaluationData")
crossValFilename = os.path.basename(crossValidationFile)
crossValFilepath = crossValidationPath + crossValFilename
crossValData = pd.read_csv(crossValFilepath, index_col=0)
mapped_crossValData = self.featureMapper.mapFeatures(crossValData, pathways)
crossvalFileprefix = os.path.splitext(crossValFilepath)[0]
crossval_mapped_filepath = self.writeMappedFile(mapped_crossValData, crossvalFileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "CrossValidation Feature Mapping")
overallend = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, overallend, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
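#--- illustrative sketch (not part of the original module) ---
#A minimal selectPathways() strategy for a hypothetical NetworkSelector
#subclass: score each pathway by how many of its genes occur in the dataset and
#return the ranking in the attributeName/score layout expected by
#writeRankingToFile(). The class name is made up for illustration.
class OverlapNetworkSelector(NetworkSelector):
    def selectPathways(self, pathways):
        scores = []
        for pathwayName, pathway in pathways.items():
            genes = set(pathway.nodes_by_label.keys())
            #overlap between pathway genes and dataset features
            scores.append((pathwayName, len(set(self.getFeatures()) & genes)))
        ranking = pd.DataFrame(scores, columns=["attributeName", "score"])
        return ranking.sort_values(by="score", ascending=False)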
############################### FILTER ###############################
class RandomSelector(FeatureSelector):
"""Baseline Selector: Randomly selects any features.
"""
def __init__(self):
super().__init__("Random")
def selectFeatures(self):
"""Randomly select any features from the feature space.
Assigns a score of 0.0 to every feature
:returns: absolute path to the ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outFilename = self.output + filename
#randomly pick any features
with open(self.input, 'r') as infile:
header = infile.readline().rstrip().split(",")
max_index = len(header)
min_index = 2
shuffled_indices = random.sample(range(min_index, max_index), max_index - 2)
with open(outFilename, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
for i in shuffled_indices:
line = "\"" + header[i] + "\"\t\"0.0000\"\n"
outfile.write(line)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outFilename
class AnovaSelector(PythonSelector):
"""Runs ANOVA feature selection using scikit-learn implementation
"""
def __init__(self):
super().__init__("ANOVA")
def runSelector(self, data, labels):
"""Runs the ANOVA feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
start = time.time()
#setting k to "all" returns all features
selector = SelectKBest(f_classif, k="all")
selector.fit_transform(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "ANOVA")
return selector
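#--- illustrative usage sketch (not part of the original module) ---
#Typical driver flow implied by the docstrings above: configure a selector via
#setParams() and call selectFeatures(), which writes the ranking file and
#returns its path. All paths below are made-up examples.
#
#   anova = AnovaSelector()
#   anova.setParams("/data/dataset.csv", "/results/", "/logs/")
#   rankingFile = anova.selectFeatures()   #e.g. "/results/ANOVA.csv"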
class Variance2Selector(PythonSelector):
"""Runs variance-based feature selection using scikit-learn.
"""
def __init__(self):
super().__init__("Variance")
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
We need to override this method because the variance selector stores its values in ``variances_`` rather than ``scores_``.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
import io
import json
import glob
import os
from PIL import Image
import xml.etree.ElementTree as ET
import tensorflow as tf
import numpy as np
import cv2
import pandas as pd
class DataAnnotator(object):
def __init__(self, classes):
self.classes = classes # array of class labels
def list_to_csv(self, annotations, outfile):
columns = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(annotations, columns=columns)
import unittest
import pandas as pd
from runpandarun.dataset import Dataset, RESAMPLE_METHODS, RESAMPLE_INTERVALS
from runpandarun.store import Datastore
from runpandarun.storage import Storage
class Test(unittest.TestCase):
def setUp(self):
self.store = Datastore('./example/config.yml')
def test_init(self):
store = self.store
self.assertIn('datastore-testdata', repr(store))
self.assertIn('datastore-testdata', repr(store._storage))
self.assertIn('datastore-testdata', store._storage.backend.get_base_path())
def test_store(self):
store = self.store
self.assertIsInstance(store._storage, Storage)
# updating
store.update()
self.assertIsNotNone(store.last_update)
self.assertIsNotNone(store.last_complete_update)
last_complete_update = store.last_complete_update
store.update()
self.assertGreater(store.last_complete_update, last_complete_update)
def test_store_datasets(self):
store = self.store
self.assertIsInstance(store.datasets, list)
self.assertIsInstance([d for d in store], list)
self.assertEqual(4, len(store.datasets))
dataset = store.datasets[0]
self.assertEqual(getattr(store, 'my_dataset'), dataset)
dataset = store.datasets[1]
self.assertEqual(getattr(store, 'a_local_csv'), dataset)
def test_datasets(self):
dataset = self.store.datasets[0]
self.assertIsInstance(dataset, Dataset)
def test_df(self):
df = self.store.datasets[0].get_df()
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual('id', df.index.name)
def test_json(self):
ds = self.store.a_local_json
self.assertTrue(ds.config.dt_index)
df = ds.get_df()
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual('date', df.index.name)
def test_dtindex(self):
df = self.store.a_local_csv.get_df()
self.assertIsInstance(df.index, pd.DatetimeIndex)
def test_resampling(self):
ds = self.store.a_local_csv
for interval in RESAMPLE_INTERVALS.keys():
resample = getattr(ds, interval, None)
self.assertIsNotNone(resample)
for method in RESAMPLE_METHODS.keys():
func = getattr(resample, method, None)
self.assertIsNotNone(func)
self.assertTrue(callable(func))
if interval == 'yearly':
df = func()
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df), len(df.index.year.unique()))
if method == 'count':
self.assertEqual(df.shape[1], 1)
self.assertEqual(list(df.columns), ['count'])
def test_combine_long(self):
df1 = self.store.a_local_csv.get_df()
df2 = self.store.a_local_json.get_df()
combined = self.store.combined
self.assertSetEqual(set(combined.columns), set(df1.columns))
self.assertEqual(len(df1) + len(df2), len(combined))
self.assertTrue(combined.equals(pd.concat([df1, df2])))
__author__ = '<NAME>'
import zipfile
import re
import pandas as pd
import numpy as np
from .. import data_helper as dh
def test_lahman_download(data_dir):
"""Verify the Lahman Data was downloaded, unzipped and reogranized."""
lahman_dir = data_dir / 'lahman'
raw_dir = lahman_dir / 'raw'
wrangled_dir = lahman_dir / 'wrangled'
assert lahman_dir.is_dir()
assert wrangled_dir.is_dir()
assert raw_dir.is_dir()
# 2 directories and 1 file
assert len(list(lahman_dir.iterdir())) == 3
# zip from master branch of https://github.com/chadwickbureau/baseballdatabank
zipfilename = raw_dir.joinpath('baseballdatabank-master.zip')
assert zipfilename.is_file()
zipped = zipfile.ZipFile(zipfilename)
zip_core_files = [file for file in zipped.namelist()
if file.startswith('baseballdatabank-master/core/') and
file.endswith('.csv')]
# each csv file in the zipfile should be in raw_dir
assert len(list(raw_dir.glob('*.csv'))) == len(zip_core_files)
def test_retrosheet_download(data_dir):
"""Verify the Retrosheet data was downloaded and and unzipped."""
retrosheet_dir = data_dir / 'retrosheet'
raw_dir = retrosheet_dir / 'raw'
wrangled_dir = retrosheet_dir / 'wrangled'
assert retrosheet_dir.is_dir()
assert wrangled_dir.is_dir()
assert raw_dir.is_dir()
teams = raw_dir.glob('TEAM*')
years = sorted([team.name[4:] for team in teams])
for year in years:
zipdata = raw_dir.joinpath(f'{year}eve.zip')
assert zipdata.exists()
# should be same number of files in raw_dir as in zipfile
files = [file for file in raw_dir.glob(f'*{year}*') if not file.name.endswith('.zip')]
zipped = zipfile.ZipFile(zipdata)
assert len(files) == len(zipped.namelist())
def test_download_years(batting):
"""Verify the Retrosheet years 1974 through 2019 inclusive were downloaded.
The data consistency tests have accuracy bounds tested on these years only!"""
assert (batting['year'].agg(['min', 'max']) == (1974, 2019)).all()
assert batting['year'].nunique() == (2019 - 1974) + 1
def test_lahman_people_pkey(lahman_people):
"""Verify the Lahman People primary and foreign keys."""
assert dh.is_unique(lahman_people, ['player_id']) # lahman player id
assert dh.is_unique(lahman_people, ['retro_id'], ignore_null=True) # retrosheet player id
def test_lahman_fielding_pkey(lahman_fielding):
"""Verfiy the Lahman Fielding primary keys."""
assert dh.is_unique(lahman_fielding, ['player_id', 'year', 'stint', 'pos'])
def test_lahman_batting_pkey(lahman_batting):
"""Verify the Lahman Batting primary key."""
assert dh.is_unique(lahman_batting, ['player_id', 'year', 'stint'])
def test_lahman_pitching_pkey(lahman_pitching):
"""Verify the Lahman Pitching primary key."""
assert dh.is_unique(lahman_pitching, ['player_id', 'year', 'stint'])
def test_lahman_salaries_pkey(data_dir):
"""Verify the Lahman Salaries primary key."""
filename = data_dir / 'lahman' / 'wrangled' / 'salaries.csv'
# check for duplicate IDs
salaries = dh.from_csv_with_types(filename)
assert dh.is_unique(salaries, ['player_id', 'year', 'team_id'])
def test_lahman_teams_pkey(lahman_teams):
"""Verify the Lahman Teams primary key."""
assert dh.is_unique(lahman_teams, ['team_id', 'year']) # lahman team_id
assert dh.is_unique(lahman_teams, ['team_id_retro', 'year']) # retrosheet team_id
def test_lahman_parks_pkey(data_dir):
"""Verify the Lahman Parks primary key."""
filename = data_dir / 'lahman' / 'wrangled' / 'parks.csv'
# check for duplicate IDs
parks = dh.from_csv_with_types(filename)
assert dh.is_unique(parks, ['park_key'])
# park_name is not unique
# assert dh.is_unique(parks, ['park_name'])
def test_game_id(team_game):
"""Verify 1st 3 characters of game_id are the team batting last."""
filt = team_game['bat_last'] == False
team_game['home_team_id'] = team_game['team_id']
team_game.loc[filt, 'home_team_id'] = team_game.loc[filt, 'opponent_team_id']
assert (team_game['game_id'].str[:3] == team_game['home_team_id']).all()
def test_batting_flags(batting):
"""Verify the batting flags are 0 or 1.
g means in the game in the specified role.
For example, g_pr means in the game as a pinch runner."""
flag_cols = [
'g',
'g_dh',
'g_ph',
'g_pr'
]
assert batting[flag_cols].min().min() == 0
assert batting[flag_cols].max().max() == 1
def test_pitching_flags(pitching):
"""Verify the pitching flags are 0 or 1.
For example:
gs means the pitcher started the game
gf means the pitcher finished the game"""
flag_cols = [
'g',
'gs',
'cg',
'sho',
'gf',
'w',
'l',
'sv'
]
assert pitching[flag_cols].min().min() == 0
assert pitching[flag_cols].max().max() == 1
def test_fielding_flags(fielding):
"""Verify the fielding flags are either 0 or 1."""
flag_cols = [
'g',
'gs'
]
assert fielding[flag_cols].min().min() == 0
assert fielding[flag_cols].max().max() == 1
def test_batting_pkey(batting):
"""Verify the Retrosheet batting primary key."""
assert dh.is_unique(batting, ['player_id', 'game_id'])
def test_pitching_pkey(pitching):
"""Verify the Retrosheet pitching primary key."""
assert dh.is_unique(pitching, ['player_id', 'game_id'])
def test_fielding_pkey(fielding):
"""Verify the Retrosheet fielding primary key."""
assert dh.is_unique(fielding, ['player_id', 'game_id', 'pos'])
def test_team_game_pkey(team_game):
"""Verify the Retrosheet team_game primary key."""
assert dh.is_unique(team_game, ['team_id', 'game_id'])
def test_game_pkey(game):
"""Verify the Retrosheet game primary key."""
assert dh.is_unique(game, ['game_id'])
def test_lahman_retro_batting_data(batting, lahman_batting):
"""Compare Aggregated Lahman batting data to Aggregated Retrosheet batting data"""
# columns in common -- these are the columns to compare
b_cols = set(batting.columns) & set(lahman_batting.columns)
b_cols -= {'player_id', 'team_id', 'year'}
# there are 17 columns in common
assert len(b_cols) == 17
l_batting = lahman_batting[b_cols]
r_batting = batting[b_cols]
l_sums = l_batting.agg('sum').astype(int)
l_sums.sort_index(inplace=True)
r_sums = r_batting.agg('sum').astype(int)
r_sums.sort_index(inplace=True)
# verify all 17 batting attributes
# are within plus/minus 0.01% of each other when summed
assert (np.abs(1.0 - (l_sums / r_sums)) < .0001).all()
def test_lahman_retro_pitching_data(pitching, lahman_pitching):
"""Compare Aggregated Lahman pitching data to Aggregated Retrosheet pitching data"""
# columns in common -- these are the columns to compare
p_cols = set(lahman_pitching.columns) & set(pitching.columns)
p_cols -= {'player_id', 'team_id', 'year'}
# there are 21 columns in common
assert len(p_cols) == 21
l_pitching = lahman_pitching[p_cols]
r_pitching = pitching[p_cols]
l_sums = l_pitching.agg('sum').astype(int)
l_sums.sort_index(inplace=True)
r_sums = r_pitching.agg('sum').astype(int)
r_sums.sort_index(inplace=True)
# verify all values are within plus/minus 0.06% of each other
assert (np.abs(1.0 - (l_sums / r_sums)) < .0006).all()
def test_lahman_retro_fielding_data(fielding, lahman_fielding):
"""Compare Aggregated Lahman fielding per position data to
Aggregated Retrosheet fielding per position data."""
# find the common columns
f_cols = set(lahman_fielding.columns) & set(fielding.columns)
f_cols -= {'player_id', 'pos', 'team_id', 'year'}
f_cols = list(f_cols)
# work-around for Pandas 1.0.1 bugs
# sum does not up-cast for nullable integer types
# select_dtypes does not distinguish between nullable and non-nullable int types
idx = lahman_fielding[f_cols].dtypes.isin([pd.UInt8Dtype(), pd.UInt16Dtype()])
for col in lahman_fielding[f_cols].columns[idx]:
lahman_fielding[col] = lahman_fielding[col].astype('Int32')
l_sums = lahman_fielding.groupby('pos')[f_cols].agg('sum')
l_sums.sort_index(inplace=True)
# there are 7 fielding attributes and 7 fielding positions in Lahman
assert l_sums.shape == (7, 7)
r_sums = fielding.groupby('pos')[f_cols].agg('sum').astype('int')
# Lahman uses OF for sum of LF, CF, RF
r_sums.loc['OF'] = r_sums.loc['LF'] + r_sums.loc['CF'] + r_sums.loc['RF']
r_sums = r_sums.drop(['LF', 'CF', 'RF'])
r_sums.sort_index(inplace=True)
# there are now 7 fielding attributes and 7 fielding positions in Retrosheet sums
assert r_sums.shape == (7, 7)
# the indexes and columns should now be the same
assert l_sums.index.equals(r_sums.index)
assert l_sums.columns.equals(r_sums.columns)
filt = fielding['pos'].isin(['LF', 'CF', 'RF'])
r_of = fielding[filt]
# account for outfielders who played more than 1 outfield position in the same game
total_dups = r_of.duplicated(subset=['player_id', 'game_id'], keep=False).sum()
counted_dups = r_of.duplicated(subset=['player_id', 'game_id'], keep='first').sum()
r_sums.loc['OF', 'g'] -= (total_dups - counted_dups)
rel_accuracy = l_sums / r_sums
# relative accuracy is within 0.8% for all 49 aggregated values
assert (np.abs(1.0 - rel_accuracy) < 0.008).all().all()
def test_batting_team_game_data(batting, team_game):
"""Verify Retrosheet batting aggregated by (game_id, team_id)
is the same as team_game batting stats."""
exclude = ['game_id', 'team_id', 'player_id', 'game_start', 'year']
cols = set(batting.columns) & set(team_game.columns) - set(exclude)
cols = list(cols)
assert len(cols) == 17
b = batting[['game_id', 'team_id'] + cols].groupby(['game_id', 'team_id']).agg('sum')
b = b.reset_index().sort_index()
tg = team_game[['game_id', 'team_id'] + cols].sort_values(
['game_id', 'team_id']).reset_index(drop=True)
assert b.equals(tg)
def test_pitching_team_game_data(pitching, team_game):
"""Verify Retrosheet batting aggregated by (game_id, team_id)
is the same as team_game pitching stats
This shows that the two Retrosheet parsers are consistent with one another."""
cols = ['wp', 'bk', 'er']
p = pitching[['game_id', 'team_id'] + cols].groupby(['game_id', 'team_id']).agg('sum')
p = p.reset_index().sort_index()
tg = team_game[['game_id', 'team_id'] + cols].sort_values(
['game_id', 'team_id']).reset_index(drop=True)
assert p.equals(tg)
def test_fielding_team_game_data(fielding, team_game):
"""Verify Retrosheet fielding aggregated by (game_id, team_id)
is the same a team_game fielding stats
This shows that the two Retrosheet parsers are consistent with one another."""
cols = ['a', 'e', 'po', 'pb']
f = fielding[['game_id', 'team_id'] + cols].groupby(['game_id', 'team_id']).agg('sum')
f = f.reset_index().sort_index()
tg = team_game[['game_id', 'team_id'] + cols].sort_values(
['game_id', 'team_id']).reset_index(drop=True)
assert f.equals(tg)
def test_batting_lahman_game_data(batting, lahman_teams):
"""Verify Retrosheet batting aggregated by (year, team_id_lahman)
is the same as Lahman_teams.
This shows that Retrosheet batting and Lahman Teams are consistent with each other."""
# Add team_id_lahman
retro_batting = pd.merge(batting, lahman_teams[['team_id', 'year', 'team_id_retro']],
left_on=['year', 'team_id'],
right_on=['year', 'team_id_retro'],
how='inner', suffixes=['_retrosheet', '_lahman'])
# team_id_retro is now the same as team_id_retrosheet
retro_batting.drop('team_id_retro', axis=1, inplace=True)
pkey = ['year', 'team_id']
compare_cols = set(lahman_teams.columns) & set(retro_batting.columns) - set(pkey)
compare_cols -= {'g'} # cannot sum g by player per team to get g per team
compare_cols -= {'sb', 'cs'} # these stats are close, but don't tie out as well as others
compare_cols = list(compare_cols)
assert len(compare_cols) == 10
retro_batting_sums = retro_batting.groupby(['year', 'team_id_lahman'])[compare_cols].sum().astype('int')
retro_batting_sums.sort_index(inplace=True)
year_min, year_max = retro_batting['year'].aggregate(['min', 'max'])
year_filt = (lahman_teams['year'] >= year_min) & (lahman_teams['year'] <= year_max)
l_teams = lahman_teams.loc[year_filt, pkey + compare_cols]
l_teams = l_teams.set_index(pkey).sort_index()
# verify all 12880 values are within 0.5% of each other
assert np.abs(1.0 - (l_teams / retro_batting_sums)).max().max() < 0.005
def test_attendance_values(game):
"""Verify attendance has plausible values."""
# There was one baseball game in which the public was not allowed to attend.
# This is considered null rather than 0, as people wanted to attend, but were not allowed.
# https://www.baseball-reference.com/boxes/BAL/BAL201504290.shtml
assert game['attendance'].min() > 0
def test_temperature_values(game):
"""Verify temperature has plausible values."""
# http://chadwick.sourceforge.net/doc/cwgame.html#cwtools-cwgame-temperature
assert game['temperature'].min() > 0
def test_wind_speed_values(game):
"""Verify wind speed has plausible values."""
assert game['wind_speed'].min() >= 0
def test_wind_direction_values(game):
"""Verfiy wind direction is in known category."""
# http://chadwick.sourceforge.net/doc/cwgame.html#cwtools-cwgame-winddirection
valid_values = ['to_lf', 'to_cf', 'to_rf', 'l_to_r', 'from_lf', 'from_cf',
'from_rf', 'r_to_l']
assert game['wind_direction'].dropna().isin(valid_values).all()
def test_field_condition_values(game):
"""Verify field condition is in known category."""
# http://chadwick.sourceforge.net/doc/cwgame.html#cwtools-cwgame-fieldcondition
valid_values = ['soaked', 'wet', 'damp', 'dry']
assert game['field_condition'].dropna().isin(valid_values).all()
def test_precip_type_values(game):
"""Verify precipition type is in known category."""
# http://chadwick.sourceforge.net/doc/cwgame.html#cwtools-cwgame-precipitation
valid_values = ['none', 'drizzle', 'showers', 'rain', 'snow']
assert game['precip_type'].dropna().isin(valid_values).all()
def test_sky_condition_values(game):
"""Verify sky condition is in known category."""
# http://chadwick.sourceforge.net/doc/cwgame.html#cwtools-cwgame-sky
valid_values = ['sunny', 'cloudy', 'overcast', 'night', 'dome']
assert game['sky_condition'].dropna().isin(valid_values).all()
def test_game_length_values(game):
"""Verify number of outs is consistent with number of innings."""
outs = game['outs_ct']
inns = game['inn_ct']
# this is defined by the rules of baseball
assert ((5 * inns <= outs) & (outs <= 6 * inns)).all()
def test_game_length_minute_values(game):
"""Verify game length per out is plausible."""
outs = game['outs_ct']
mins = game['minutes_game_ct']
mins_per_out = mins / outs
# these bounds should be wide enough to encompass any future game
assert mins_per_out.min() > 1 and mins_per_out.max() < 6
def test_retro_lahman_batting_players(batting, lahman_people, lahman_batting):
"""Verify all Retrosheet batters are in Lahman batting"""
lahman_batters = pd.merge(lahman_batting['player_id'], lahman_people[['player_id', 'retro_id']])
r_batters = set(batting['player_id'].unique())
l_batters = set(lahman_batters['retro_id'].unique())
assert r_batters == l_batters
def test_retro_lahman_fielding_players(fielding, lahman_people, lahman_fielding):
"""Verify all Retrosheet fielders are in Lahman fielding"""
lahman_fielders = pd.merge(lahman_fielding['player_id'], lahman_people[['player_id', 'retro_id']])
r_fielders = set(fielding['player_id'].unique())
l_fielders = set(lahman_fielders['retro_id'].unique())
# There is one Retrosheet fielder not in Lahman fielding
assert len(r_fielders - l_fielders) == 1
assert len(l_fielders - r_fielders) == 0
missing_fielder = f'{(r_fielders - l_fielders).pop()}'
missing = fielding.query(f'player_id == "{missing_fielder}"')
# The missing fielder had zero fielding total chances.
assert missing['tc'].sum() == 0
# The missing fielder was on the field for no outs.
assert missing['inn_outs'].sum() == 0
def test_retro_lahman_pitching_players(pitching, lahman_pitching, lahman_people):
"""Verify all Retrosheet pitchers are in Lahman pitchers"""
lahman_pitchers = pd.merge(lahman_pitching['player_id'], lahman_people[['player_id', 'retro_id']])
r_pitchers = set(pitching['player_id'].unique())
l_pitchers = set(lahman_pitchers['retro_id'].unique())
assert r_pitchers == l_pitchers
def test_retro_lahman_player_ids(batting, lahman_people):
"""Verify the inverse of Lahman player_id to Retrosheet player_id mapping is valid.
In other words, each Retrosheet player_id is mapped to exactly one Lahman player_id.
Other tests verify that Retrosheet player_ids and Lahman player_ids are unique.
Note: every player who was in a game, has a Retrosheet batting record even if
they had no plate appearances."""
retro_players = pd.Series(batting['player_id'].unique(), name='player_id')
# use an inner join to verify that the mapping is one-to-one and onto
mapping = lahman_people[['player_id', 'retro_id']].merge(
retro_players, how='inner',
left_on=['retro_id'],
right_on=['player_id'],
suffixes=('_lahman', '_retro'))
assert len(retro_players) == len(mapping)
def test_retro_lahman_team_ids(team_game, lahman_teams):
"""Verify the inverse of the Lahman <team_id> to Retroshett <team_id> mapping is valid.
A <team_id> is (team_id, year).
The logic is analogous test_retro_lahman_player_ids() above."""
# create a Retrosheet dataframe having just the unique <team_id> values
retro_team_ids = team_game[['team_id', 'year']].copy()
retro_team_ids = retro_team_ids.drop_duplicates(subset=['team_id', 'year'])
# use an inner join to verify that the mapping is one-to-one and onto
mapping = lahman_teams.merge(retro_team_ids, how='inner',
left_on=['team_id_retro', 'year'],
right_on=['team_id', 'year'])
assert len(retro_team_ids) == len(mapping)
def test_retro_pitching_batting(pitching, batting):
"""Verify Retrosheet batting stats == pitching stats (allowed)"""
exclude = ['game_id', 'team_id', 'player_id', 'g', 'game_start', 'year']
cols = set(pitching.columns) & set(batting.columns) - set(exclude)
cols = list(cols)
assert len(cols) == 16
# sum over all pitchers over all years
p = pitching[cols].agg('sum')
# sum over all batters over all years
b = batting[cols].agg('sum')
# Retrosheet is completely consistent
assert p.equals(b)
def test_lahman_pitching_batting(lahman_pitching, lahman_batting):
"""Verify Lahman batting stats == pitching stats (allowed)"""
exclude = ['lg_id', 'player_id', 'stint', 'team_id', 'year', 'g']
cols = set(lahman_pitching.columns) & set(lahman_batting.columns)
cols -= set(exclude)
assert len(cols) == 10
# sum over all pitchers over all years
p = lahman_pitching[cols].agg('sum')
# sum over all batters over all years
b = lahman_batting[cols].agg('sum')
# the biggest difference is less than 0.01%
assert np.abs(1.0 - p / b).max() < 0.0001
def test_lahman_batting_teams(lahman_batting, lahman_teams):
"""Verify Lahman batting aggregated to the team level matches Lahman teams."""
exclude = ['lg_id', 'team_id', 'year', 'g']
key = ['team_id', 'year']
cols = set(lahman_batting.columns) & set(lahman_teams.columns) - set(exclude)
cols = list(cols)
assert len(cols) == 12
# work-around for Pandas 1.0.1 bugs
# sum does not up-cast for nullable integer types
# select_dtypes does not distinguish between nullable and non-nullable int types
idx = lahman_batting[cols].dtypes.isin([pd.UInt8Dtype(), pd.UInt16Dtype()])
for col in lahman_batting[cols].columns[idx]:
lahman_batting[col] = lahman_batting[col].astype('Int32')
idx = lahman_teams[cols].dtypes.isin([pd.UInt8Dtype(), pd.UInt16Dtype()])
# apply the same work-around to the teams frame
for col in lahman_teams[cols].columns[idx]:
    lahman_teams[col] = lahman_teams[col].astype('Int32')
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
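    # A rough illustration of these rules (not part of the original suite):
    #   pd.period_range("2000-01-01", periods=3, freq="D") + 1   # shifts each period by one day
    #   pi2 - pi1                                                 # Index of Day offsets when freqs match
    #   pi1 + pi2                                                 # raises TypeError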
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# a tick-like frequency with n != 1 (2D here). A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
@pytest.mark.parametrize(
"other",
[
np.array(["NaT"] * 9, dtype="m8[ns]"),
TimedeltaArray._from_sequence(["NaT"] * 9),
],
)
def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other):
# FIXME: DataFrame fails because when operating column-wise
# timedelta64 entries become NaT and are treated like datetimes
box = box_df_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
# ---------------------------------------------------------------
# Unsorted
def test_parr_add_sub_index(self):
# Check that PeriodArray defers to Index on arithmetic ops
pi = pd.period_range("2000-12-31", periods=3)
parr = pi.array
result = parr - pi
expected = pi - pi
tm.assert_index_equal(result, expected)
class TestPeriodSeriesArithmetic:
def test_ops_series_timedelta(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
expected = pd.Series(
[pd.Period("2015-01-02", freq="D"), pd.Period("2015-01-03", freq="D")],
name="xxx",
)
result = ser + pd.Timedelta("1 days")
tm.assert_series_equal(result, expected)
result = pd.Timedelta("1 days") + ser
tm.assert_series_equal(result, expected)
result = ser + pd.tseries.offsets.Day()
tm.assert_series_equal(result, expected)
result = pd.tseries.offsets.Day() + ser
tm.assert_series_equal(result, expected)
def test_ops_series_period(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
per = pd.Period("2015-01-10", freq="D")
off = per.freq
# dtype will be object because of original dtype
expected = pd.Series([9 * off, 8 * off], name="xxx", dtype=object)
tm.assert_series_equal(per - ser, expected)
tm.assert_series_equal(ser - per, -1 * expected)
s2 = pd.Series(
[pd.Period("2015-01-05", freq="D"), pd.Period("2015-01-04", freq="D")],
name="xxx",
)
assert s2.dtype == "Period[D]"
expected = pd.Series([4 * off, 2 * off], name="xxx", dtype=object)
tm.assert_series_equal(s2 - ser, expected)
tm.assert_series_equal(ser - s2, -1 * expected)
class TestPeriodIndexSeriesMethods:
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
tm.assert_equal(result, expected)
ser = pd.Series(values)
result = func(ser)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
expected = PeriodIndex(
["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx"
)
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period("2011-01", freq="M")
off = idx.freq
exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx")
tm.assert_index_equal(result, exp)
result = Period("2011-01", freq="M") - idx
exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx")
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize("ng", ["str", 1.5])
@pytest.mark.parametrize(
"func",
[
lambda obj, ng: obj + ng,
lambda obj, ng: ng + obj,
lambda obj, ng: obj - ng,
lambda obj, ng: ng - obj,
lambda obj, ng: np.add(obj, ng),
lambda obj, ng: np.add(ng, obj),
lambda obj, ng: np.subtract(obj, ng),
lambda obj, ng: np.subtract(ng, obj),
],
)
def test_parr_ops_errors(self, ng, func, box_with_array):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
obj = tm.box_expected(idx, box_with_array)
msg = (
r"unsupported operand type\(s\)|can only concatenate|"
r"must be str|object to str implicitly"
)
with pytest.raises(TypeError, match=msg):
func(obj, ng)
def test_pi_ops_nat(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
expected = PeriodIndex(
["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx"
)
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx"
)
expected = PeriodIndex(
["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx"
)
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(
["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"
)
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(
["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx"
)
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(
["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx"
)
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(
["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx"
)
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
freq="D",
name="idx",
)
f = lambda x: x + pd.offsets.Day()
exp = PeriodIndex(
["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"],
freq="D",
name="idx",
)
self._check(idx, f, exp)
f = lambda x: x + pd.offsets.Day(2)
exp = PeriodIndex(
["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"],
freq="D",
name="idx",
)
self._check(idx, f, exp)
f = lambda x: x - pd.offsets.Day(2)
exp = PeriodIndex(
["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"],
freq="D",
name="idx",
)
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
freq="D",
name="idx",
)
ser = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
for obj in [idx, ser]:
msg = r"Input has different freq=2H from Period.*?\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
obj + pd.offsets.Hour(2)
with pytest.raises(IncompatibleFrequency, match=msg):
pd.offsets.Hour(2) + obj
msg = r"Input has different freq=-2H from Period.*?\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
obj - pd.offsets.Hour(2)
def test_pi_sub_period(self):
# GH#13071
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
result = idx - pd.Period("2012-01", freq="M")
off = idx.freq
exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx")
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period("2012-01", freq="M"))
tm.assert_index_equal(result, exp)
#!/home/sunnymarkliu/softwares/anaconda3/bin/python
# _*_ coding: utf-8 _*_
"""
@author: SunnyMarkLiu
@time : 17-12-22 7:23 PM
"""
from __future__ import absolute_import, division, print_function
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
# remove warnings
import warnings
warnings.filterwarnings('ignore')
import datetime
import numpy as np
import pandas as pd
from pypinyin import lazy_pinyin
from sklearn.preprocessing import LabelEncoder
from conf.configure import Configure
from utils import data_utils
from tqdm import tqdm
def check_last_time_order_info(uid, userid_grouped, flag, check_name, last_time=1):
""" 最近的一次交易的具体信息 check_name """
if flag == 0:
return -1
df = userid_grouped[uid]
if df.shape[0] < last_time:
return -1
else:
return df.iloc[-last_time][check_name]
def pre_days_order_count(uid, userid_grouped, flag, days):
""" 往前 days 的 order 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['days_from_now'] < days]
return df.shape[0]
def pre_days_checkname_diff_count(uid, userid_grouped, flag, days, check_name):
""" 往前 days 的 order 的不同 check_name 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['days_from_now'] < days]
if df.shape[0] == 0:
return 0
else:
return len(df[check_name].unique())
def year_order_count(uid, userid_grouped, flag, year):
""" 2016年的 order 的不同 check_name 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
return df.shape[0]
def year_checkname_diff_count(uid, userid_grouped, flag, year, check_name):
""" year 的 order 的不同 check_name 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
if df.shape[0] == 0:
return 0
else:
return len(df[check_name].unique())
def year_order_month_count(uid, userid_grouped, flag, year):
""" 每年去了几个月份 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
if df.shape[0] == 0:
return 0
else:
return len(df['order_month'].unique())
def year_order_month_most(uid, userid_grouped, flag, year):
""" 每年一个月去的最多的次数 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
df = df.groupby(['order_month']).count()['orderTime'].reset_index()
if df.shape[0] == 0:
return 0
else:
return df['orderTime'].max()
def year_most_order_month(uid, userid_grouped, flag, year):
""" 每年去的最多次数的月份 """
if flag == 0:
return -1
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
df = df.groupby(['order_month']).count()['orderTime'].reset_index()
if df.shape[0] == 0:
return -1
else:
return df.sort_values(by='orderTime', ascending=False)['order_month'].values[0]
def year_good_order_count(uid, userid_grouped, flag, year):
""" 每年精品订单数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
return sum(df['orderType'])
def last_time_checkname_ratio(uid, userid_grouped, flag, check_name):
""" 最后一次 checkname 的占比 """
if flag == 0:
return 0
df = userid_grouped[uid]
last_check_name = df.iloc[-1][check_name]
last_count = df[check_name].tolist().count(last_check_name)
return 1.0 * last_count / df.shape[0]
def build_order_history_features(df, history):
features = pd.DataFrame({'userid': df['userid']})
df_ids = history['userid'].unique()
userid_grouped = dict(list(history.groupby('userid')))
# Flag each user: 1 if the userid appears in the order history, 0 otherwise
features['has_history_flag'] = features['userid'].map(lambda uid: uid in df_ids).astype(int)
print("基本特征")
# build_order_history_features2 函数中提交提取,冗余
# 最近的一次交易的 orderType
# features['last_time_orderType'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'orderType', 1), axis=1)
# orderType of the second most recent order
# features['last_2_time_orderType'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'orderType', 2), axis=1)
# features['last_3_time_orderType'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'orderType',3), axis=1)
# days_from_now of the second most recent order
# features['last_2_time_days_from_now'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'days_from_now', 2), axis=1)
# features['last_3_time_days_from_now'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'days_from_now', 3), axis=1)
# The most recent order's days_from_now, order_year, order_month, order_day, order_weekofyear, order_weekday
features['last_time_days_from_now'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'days_from_now'), axis=1)
features['last_time_order_year'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_year'), axis=1)
features['last_time_order_month'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_month'), axis=1)
features['last_time_order_day'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_day'), axis=1)
features['last_time_order_weekofyear'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekofyear'), axis=1)
features['last_time_order_weekday'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekday'), axis=1)
features['last_time_continent'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'continent'), axis=1)
features['last_time_country'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'country'), axis=1)
features['last_time_city'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'city'), axis=1)
print("计数特征")
# 往前 90days 的计数特征
features['pre_90days_order_count'] = features.apply(lambda row: pre_days_order_count(row['userid'], userid_grouped, row['has_history_flag'], 90), axis=1)
features['pre_90days_order_continent_count'] = features.apply(lambda row: pre_days_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 90, 'continent'), axis=1)
features['pre_90days_order_country_count'] = features.apply(lambda row: pre_days_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 90, 'country'), axis=1)
features['pre_90days_order_city_count'] = features.apply(lambda row: pre_days_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 90, 'city'), axis=1)
features['2016_order_count'] = features.apply(lambda row: year_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
features['2017_order_count'] = features.apply(lambda row: year_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
# features['order_count_diff'] = features['2016_order_count'] - features['2017_order_count']
# features['2016_order_continent_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2016, 'continent'), axis=1)
# features['2016_order_country_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2016, 'country'), axis=1)
# features['2016_order_city_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2016, 'city'), axis=1)
features['2017_order_continent_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2017, 'continent'), axis=1)
features['2017_order_country_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2017, 'country'), axis=1)
features['2017_order_city_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2017, 'city'), axis=1)
# Whether the user has orders in both 2016 and 2017
features['both_year_has_order'] = features.apply(lambda row: (row['2016_order_count'] > 0) & (row['2017_order_count'] > 0), axis=1).astype(int)
# Number of distinct months with orders per year
features['2016_order_month_count'] = features.apply(lambda row: year_order_month_count(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
features['2017_order_month_count'] = features.apply(lambda row: year_order_month_count(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
# Maximum number of orders in any single month per year
# features['2016_order_month_most'] = features.apply(lambda row: year_order_month_most(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
# features['2017_most_order_month'] = features.apply(lambda row: year_order_month_most(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
# Month with the most orders per year
# features['2016_most_order_month'] = features.apply(lambda row: year_most_order_month(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
# features['2017_most_order_month'] = features.apply(lambda row: year_most_order_month(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
print('Ratio features')
# Total orders, premium orders, and premium-order ratio per user
features['2016_good_order_count'] = features.apply(lambda row: year_good_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
features['2016_good_order_ratio'] = features.apply(lambda row: row['2016_good_order_count'] / row['2016_order_count'] if row['2016_order_count'] != 0 else 0, axis=1)
features['2017_good_order_count'] = features.apply(lambda row: year_good_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
features['2017_good_order_ratio'] = features.apply(lambda row: row['2017_good_order_count'] / row['2017_order_count'] if row['2017_order_count'] != 0 else 0, axis=1)
features['total_order_count'] = features['2016_order_count'] + features['2017_order_count']
features['total_good_order_count'] = features['2016_good_order_count'] + features['2017_good_order_count']
features['total_good_order_ratio'] = features.apply(lambda row: row['total_good_order_count'] / row['total_order_count'] if row['total_order_count'] != 0 else 0, axis=1)
# has_good_order is a strong feature!!
features['has_good_order'] = (features['total_good_order_ratio'] > 0).astype(int)
features.drop(['2016_good_order_count', '2017_good_order_count', 'total_order_count', 'total_good_order_count'], axis=1, inplace=True)
# CV gets slightly worse with these, by less than 0.0001
# print("Share of the last order's check_name")  # (untested)
# features['last_time_order_year_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_year'), axis=1)
# features['last_time_order_month_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_month'), axis=1)
# features['last_time_order_day_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_day'), axis=1)
# features['last_time_order_weekofyear_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekofyear'), axis=1)
# features['last_time_order_weekday_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekday'), axis=1)
# features['last_time_continent_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'continent'), axis=1)
# features['last_time_country_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'country'), axis=1)
# features['last_time_city_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'city'), axis=1)
return features
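# Hedged usage sketch (file and variable names below are placeholders, not from the original script):
# history = pd.read_csv('orderHistory_train.csv')
# history = build_time_category_encode(history)
# train_features = build_order_history_features(train_df, history)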
def order_last_num(order):
""" 按时间倒序对订单排序 """
users = list(set(order['userid']))
order_c = order.copy()
order_c['order_number'] = 1
for i in tqdm(range(len(users))):
slit_df = order_c[order_c['userid'] == users[i]]
order_c.loc[slit_df.index, 'order_number'] = range(slit_df.shape[0],0,-1)
return order_c
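# Assuming each user's rows are already in chronological order (which the loop above relies on),
# an equivalent vectorized alternative would be:
# order_c['order_number'] = order_c.groupby('userid').cumcount(ascending=False) + 1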
def days_since_prior_order(order):
""" 用户两次订单之间的时间间隔 """
users = list(set(order['userid']))
order_c = order.copy()
order_c['days_since_prior_order'] = np.nan
for i in tqdm(range(len(users))):
slit_df = order_c[order_c['userid'] == users[i]]
time_shift = slit_df['orderTime'].shift(1)
time_series = pd.Series(slit_df['orderTime'].values - time_shift.values).map(lambda x: x/np.timedelta64(1, 's'))/(24*3600.0)
order_c.loc[slit_df.index, 'days_since_prior_order'] = time_series.values
return order_c
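# Assuming orderTime is already a datetime64 column, the same per-user gap can be computed
# without the Python loop:
# order_c['days_since_prior_order'] = (
#     order_c.groupby('userid')['orderTime'].diff().dt.total_seconds() / (24 * 3600.0)
# )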
def build_time_category_encode(history):
history['orderTime'] = pd.to_datetime(history['orderTime'], unit='s')
### keras playground 2.0 LSTM (univariate)###
import os
import pickle
import pandas as pd
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, BatchNormalization, Dropout, \
LeakyReLU, LSTM
from keras import losses
from keras import backend as K
import matplotlib.pyplot as plt
from sklearn import preprocessing
# set working dir
os.chdir("C:/Users/peterpiontek/Google Drive/tensor-flow-state/tensor-flow-state")
# import homebrew
# directories
datadir = "./data/"
plotdir = "./plots/"
# read data
df = pd.read_pickle(datadir + '3months_weather.pkl')
# create train and test df
train_data = df[df['datetime'] < '2019-08-01']
test_data = df[df['datetime'] > '2019-07-31']
# define features and target
features = ['minute_sine', 'minute_cosine', 'hour_sine', 'hour_cosine', 'monday',
'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday',
'weekend', 'holiday', 'speed', 'windspeed_avg', 'windspeed_max',
'temperature', 'sunduration', 'sunradiation', 'precipitationduration',
'precipitation', 'airpressure', 'relativehumidity', 'mist', 'rain',
'snow', 'storm', 'ice']
target = ['flow']
# create train and test sets
X_train, y_train, X_test, y_test = train_data[features], train_data[target], \
test_data[features],test_data[target]
# rescale features
mm_xscaler = preprocessing.MinMaxScaler()
X_train_minmax = mm_xscaler.fit_transform(X_train)
X_test_minmax = mm_xscaler.transform(X_test)
# rescale target
mm_yscaler = preprocessing.MinMaxScaler()
y_train_minmax = mm_yscaler.fit_transform(y_train)
y_test_minmax = mm_yscaler.transform(y_test)
# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
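# Quick self-check of the windowing logic (illustration only; the _demo_ names are not part of
# the original script): each n_steps-long window is paired with the value that follows it.
_demo_X, _demo_y = split_sequence(np.array([10, 20, 30, 40, 50]), 3)
# _demo_X -> [[10, 20, 30], [20, 30, 40]], _demo_y -> [40, 50]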
# choose a number of time steps
n_steps = 5
# SHAPE TRAIN DATA
# split into samples
X, y = split_sequence(y_train_minmax, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
# SHAPE TEST DATA
# split into samples
Xt, yt = split_sequence(y_test_minmax, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
Xt = Xt.reshape((Xt.shape[0], Xt.shape[1], n_features))
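# Sanity check (added illustration, not from the original script): inputs are now 3-D
# [samples, timesteps, features], which is what the LSTM layers below expect.
assert X.shape[1:] == (n_steps, n_features)
assert Xt.shape[1:] == (n_steps, n_features)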
### SET UP MODEL ###
model = Sequential([
LSTM(128, return_sequences=True, input_shape=(n_steps, n_features)),
# LeakyReLU(alpha=0.1),
Activation('relu'),
LSTM(128),
Activation('relu'),
Dense(1),
# Activation('linear')
])
# summarize model to get an overview
model.summary()
# rmse func
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true)))
# compile model
model.compile(optimizer = 'adam', loss = 'mse', metrics = [rmse])
## fit model to training features and target(s)
history = model.fit(X, y, epochs=3, verbose=1)
# predict
preds = pd.DataFrame()
# predict and add directly to new df
preds['prediction'] = mm_yscaler.inverse_transform(model.predict(Xt)).ravel()
# reset index of y_test so it can be recombined with predict df, then add it
y_test.reset_index(inplace=True, drop=True)
preds['y_true'] = y_test['flow']
# calculate difference
preds['difference'] = preds.prediction - preds.y_true
RMSE = ((preds.y_true - preds.prediction) ** 2).mean() ** 0.5
print("Test data RMSE:", RMSE)
# "predict" train set
preds_train = pd.DataFrame()
import collections
import os
import geopandas as gpd
import numpy as np
import pandas as pd
import requests
from datetime import datetime, timedelta
from typing import Tuple, Dict, Union
import pytz
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils import hydro_utils
from hydrodataset.utils.hydro_utils import (
is_any_elem_in_a_lst,
unzip_nested_zip,
hydro_logger,
download_one_zip,
download_small_file,
)
class Gages(DataSourceBase):
def __init__(self, data_path, download=False):
super().__init__(data_path)
self.data_source_description = self.set_data_source_describe()
if download:
self.download_data_source()
self.gages_sites = self.read_site_info()
def get_name(self):
return "GAGES"
def get_constant_cols(self) -> np.array:
"""all readable attrs in GAGES-II"""
dir_gage_attr = self.data_source_description["GAGES_ATTR_DIR"]
var_desc_file = os.path.join(dir_gage_attr, "variable_descriptions.txt")
var_desc = pd.read_csv(var_desc_file)
return var_desc["VARIABLE_NAME"].values
def get_relevant_cols(self):
return np.array(["dayl", "prcp", "srad", "swe", "tmax", "tmin", "vp"])
def get_target_cols(self):
return np.array(["usgsFlow"])
def get_other_cols(self) -> dict:
return {
"FDC": {"time_range": ["1980-01-01", "2000-01-01"], "quantile_num": 100}
}
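    # Hedged usage sketch (the path and gauge id are placeholders, not from the original code):
    # gages = Gages("/path/to/gages-ii", download=False)
    # fdc = gages.read_other_cols(
    #     object_ids=["01013500"],
    #     other_cols={"FDC": {"time_range": ["1980-01-01", "2000-01-01"], "quantile_num": 100}},
    # )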
def set_data_source_describe(self):
gages_db = self.data_source_dir
# region shapefiles
gage_region_dir = os.path.join(
gages_db,
"boundaries_shapefiles_by_aggeco",
"boundaries-shapefiles-by-aggeco",
)
gages_regions = [
"bas_ref_all",
"bas_nonref_CntlPlains",
"bas_nonref_EastHghlnds",
"bas_nonref_MxWdShld",
"bas_nonref_NorthEast",
"bas_nonref_SECstPlain",
"bas_nonref_SEPlains",
"bas_nonref_WestMnts",
"bas_nonref_WestPlains",
"bas_nonref_WestXeric",
]
# point shapefile
gagesii_points_file = os.path.join(
gages_db, "gagesII_9322_point_shapefile", "gagesII_9322_sept30_2011.shp"
)
# config of flow data
flow_dir = os.path.join(gages_db, "gages_streamflow", "gages_streamflow")
# forcing
forcing_dir = os.path.join(gages_db, "basin_mean_forcing", "basin_mean_forcing")
forcing_types = ["daymet"]
# attr
attr_dir = os.path.join(
gages_db, "basinchar_and_report_sept_2011", "spreadsheets-in-csv-format"
)
gauge_id_file = os.path.join(attr_dir, "conterm_basinid.txt")
download_url_lst = [
"https://water.usgs.gov/GIS/dsdl/basinchar_and_report_sept_2011.zip",
"https://water.usgs.gov/GIS/dsdl/gagesII_9322_point_shapefile.zip",
"https://water.usgs.gov/GIS/dsdl/boundaries_shapefiles_by_aggeco.zip",
"https://www.sciencebase.gov/catalog/file/get/59692a64e4b0d1f9f05fbd39",
]
usgs_streamflow_url = "https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no={}&referred_module=sw&period=&begin_date={}-{}-{}&end_date={}-{}-{}"
# GAGES-II time series data_source dir
gagests_dir = os.path.join(gages_db, "59692a64e4b0d1f9f05f")
population_file = os.path.join(
gagests_dir,
"Dataset8_Population-Housing",
"Dataset8_Population-Housing",
"PopulationHousing.txt",
)
wateruse_file = os.path.join(
gagests_dir,
"Dataset10_WaterUse",
"Dataset10_WaterUse",
"WaterUse_1985-2010.txt",
)
return collections.OrderedDict(
GAGES_DIR=gages_db,
GAGES_FLOW_DIR=flow_dir,
GAGES_FORCING_DIR=forcing_dir,
GAGES_FORCING_TYPE=forcing_types,
GAGES_ATTR_DIR=attr_dir,
GAGES_GAUGE_FILE=gauge_id_file,
GAGES_DOWNLOAD_URL_LST=download_url_lst,
GAGES_REGIONS_SHP_DIR=gage_region_dir,
GAGES_REGION_LIST=gages_regions,
GAGES_POINT_SHP_FILE=gagesii_points_file,
GAGES_POPULATION_FILE=population_file,
GAGES_WATERUSE_FILE=wateruse_file,
USGS_FLOW_URL=usgs_streamflow_url,
)
def read_other_cols(self, object_ids=None, other_cols=None, **kwargs) -> dict:
# TODO: not finished yet
out_dict = {}
for key, value in other_cols.items():
if key == "FDC":
assert "time_range" in value.keys()
if "quantile_num" in value.keys():
quantile_num = value["quantile_num"]
out = cal_fdc(
self.read_target_cols(
object_ids, value["time_range"], "usgsFlow"
),
quantile_num=quantile_num,
)
else:
out = cal_fdc(
self.read_target_cols(
object_ids, value["time_range"], "usgsFlow"
)
)
else:
raise NotImplementedError("No such item yet!")
out_dict[key] = out
return out_dict
def read_attr_all(self, gages_ids: Union[list, np.ndarray]):
"""
read all attr data for some sites in GAGES-II
TODO: this is not the same as the CAMELS version, where read_attr_all has no "gages_ids" parameter
Parameters
----------
gages_ids : Union[list, np.ndarray]
gages sites' ids
Returns
-------
ndarray
all attr data for gages_ids
"""
dir_gage_attr = self.data_source_description["GAGES_ATTR_DIR"]
f_dict = dict() # factorize dict
# each key-value pair for atts in a file (list)
var_dict = dict()
# all attrs
var_lst = list()
out_lst = list()
# read all attrs
var_des = pd.read_csv(
os.path.join(dir_gage_attr, "variable_descriptions.txt"), sep=","
)
var_des_map_values = var_des["VARIABLE_TYPE"].tolist()
for i in range(len(var_des)):
var_des_map_values[i] = var_des_map_values[i].lower()
# sort by type
key_lst = list(set(var_des_map_values))
key_lst.sort(key=var_des_map_values.index)
# remove x_region_names
key_lst.remove("x_region_names")
for key in key_lst:
# in "spreadsheets-in-csv-format" directory, the name of "flow_record" file is conterm_flowrec.txt
if key == "flow_record":
key = "flowrec"
data_file = os.path.join(dir_gage_attr, "conterm_" + key + ".txt")
# remove some unused attrs in bas_classif
if key == "bas_classif":
# https://stackoverflow.com/questions/22216076/unicodedecodeerror-utf8-codec-cant-decode-byte-0xa5-in-position-0-invalid-s
data_temp = pd.read_csv(
data_file,
sep=",",
dtype={"STAID": str},
usecols=range(0, 4),
encoding="unicode_escape",
)
else:
data_temp = pd.read_csv(data_file, sep=",", dtype={"STAID": str})
if key == "flowrec":
# remove final column which is nan
data_temp = data_temp.iloc[:, range(0, data_temp.shape[1] - 1)]
# all attrs in files
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
k = 0
n_gage = len(gages_ids)
out_temp = np.full(
[n_gage, len(var_lst_temp)], np.nan
) # 1d: sites, 2d: attrs in current data_file
# sites intersection; ind2 is the index of sites in the conterm_ files, used to fill out_temp
range1 = gages_ids
range2 = data_temp.iloc[:, 0].astype(str).tolist()
assert all(x < y for x, y in zip(range2, range2[1:]))
# Note the ordering of station ids! Some id_lst_all are not sorted, so don't use np.intersect1d
ind2 = [range2.index(tmp) for tmp in range1]
for field in var_lst_temp:
if is_string_dtype(data_temp[field]): # str vars -> categorical vars
value, ref = pd.factorize(data_temp.loc[ind2, field], sort=True)
out_temp[:, k] = value
f_dict[field] = ref.tolist()
elif is_numeric_dtype(data_temp[field]):
out_temp[:, k] = data_temp.loc[ind2, field].values
k = k + 1
out_lst.append(out_temp)
out = np.concatenate(out_lst, 1)
return out, var_lst, var_dict, f_dict
def read_constant_cols(
self, object_ids=None, constant_cols: list = None, **kwargs
) -> np.array:
"""
read some attrs of some sites
Parameters
----------
object_ids : [type], optional
sites_ids, by default None
constant_cols : list, optional
attrs' names, by default None
Returns
-------
np.array
attr data for object_ids
"""
# assert all(x < y for x, y in zip(object_ids, object_ids[1:]))
attr_all, var_lst_all, var_dict, f_dict = self.read_attr_all(object_ids)
ind_var = list()
for var in constant_cols:
ind_var.append(var_lst_all.index(var))
out = attr_all[:, ind_var]
return out
def read_attr_origin(self, gages_ids, attr_lst) -> np.ndarray:
"""
this function read the attrs data in GAGES-II but not transform them to int when they are str
Parameters
----------
gages_ids : [type]
[description]
attr_lst : [type]
[description]
Returns
-------
np.ndarray
the first dim is types of attrs, and the second one is sites
"""
dir_gage_attr = self.data_source_description["GAGES_ATTR_DIR"]
var_des = pd.read_csv(
os.path.join(dir_gage_attr, "variable_descriptions.txt"), sep=","
)
var_des_map_values = var_des["VARIABLE_TYPE"].tolist()
for i in range(len(var_des)):
var_des_map_values[i] = var_des_map_values[i].lower()
key_lst = list(set(var_des_map_values))
key_lst.sort(key=var_des_map_values.index)
key_lst.remove("x_region_names")
out_lst = []
for i in range(len(attr_lst)):
out_lst.append([])
range1 = gages_ids
gage_id_file = self.data_source_description["GAGES_GAUGE_FILE"]
data_all = pd.read_csv(gage_id_file, sep=",", dtype={0: str})
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right's index, oddly
expected.index = result.index
# time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that both trades & quotes are already sorted on time
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
import os
import numpy as np
import pandas as pd
import datetime as dt
import exchange_calendars
from dataclasses import dataclass, fields
from dateutil.tz import tzlocal, gettz
from dateutil.tz.tz import tzfile
from trader.common.logging_helper import setup_logging
logging = setup_logging(module_name='data')
from pandas.core.base import PandasObject
from arctic import Arctic, TICK_STORE, VERSION_STORE
from arctic.date import DateRange, string_to_daterange
from arctic.tickstore.tickstore import TickStore
from arctic.store.version_store import VersionStore
from arctic.exceptions import NoDataFoundException
from aioreactive.subject import AsyncMultiSubject
from aioreactive import AsyncObserver
from expression.system.disposable import Disposable, AsyncDisposable
from typing import Tuple, List, Optional, Dict, TypeVar, Generic, Type, Union, cast, Set
from durations import Duration
from exchange_calendars import ExchangeCalendar
from pandas import DatetimeIndex
from ib_insync.contract import Contract, ContractDetails
from ib_insync.objects import BarData, RealTimeBar
from trader.common.helpers import dateify, daily_close, daily_open, market_hours, get_contract_from_csv, symbol_to_contract
from trader.data.contract_metadata import ContractMetadata
from trader.data.data_access import Data, SecurityDefinition, TickData, DictData
from trader.data.universe import Universe, UniverseAccessor
from trader.listeners.ibaiorx import IBAIORx
from trader.listeners.ib_history_worker import IBHistoryWorker, WhatToShow
from ib_insync.ib import IB
class SecurityDataStream(AsyncMultiSubject[pd.DataFrame]):
def __init__(
self,
security: SecurityDefinition,
bar_size: str,
date_range: DateRange,
existing_data: Optional[pd.DataFrame] = None):
super().__init__()
self.security = security
self.columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'average', 'bar_count', 'bar_size']
self.date_range: DateRange
self.df: pd.DataFrame = pd.DataFrame([], columns=self.columns)
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from datetime import time
from os.path import abspath, dirname, join
from unittest import TestCase
import typing
import re
import functools
import itertools
import pathlib
from collections import abc
import pytest
import numpy as np
import pandas as pd
import pandas.testing as tm
from pandas import Timedelta, read_csv
from parameterized import parameterized
import pytz
from pytz import UTC
from toolz import concat
from exchange_calendars import get_calendar
from exchange_calendars.calendar_utils import (
ExchangeCalendarDispatcher,
_default_calendar_aliases,
_default_calendar_factories,
)
from exchange_calendars.errors import (
CalendarNameCollision,
InvalidCalendarName,
NoSessionsError,
)
from exchange_calendars.exchange_calendar import ExchangeCalendar, days_at_time
from .test_utils import T
class FakeCalendar(ExchangeCalendar):
name = "DMY"
tz = "Asia/Ulaanbaatar"
open_times = ((None, time(11, 13)),)
close_times = ((None, time(11, 49)),)
class CalendarRegistrationTestCase(TestCase):
def setup_method(self, method):
self.dummy_cal_type = FakeCalendar
self.dispatcher = ExchangeCalendarDispatcher({}, {}, {})
def teardown_method(self, method):
self.dispatcher.clear_calendars()
def test_register_calendar(self):
# Build a fake calendar
dummy_cal = self.dummy_cal_type()
# Try to register and retrieve the calendar
self.dispatcher.register_calendar("DMY", dummy_cal)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(dummy_cal, retr_cal)
# Try to register again, expecting a name collision
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
# Deregister the calendar and ensure that it is removed
self.dispatcher.deregister_calendar("DMY")
with self.assertRaises(InvalidCalendarName):
self.dispatcher.get_calendar("DMY")
def test_register_calendar_type(self):
self.dispatcher.register_calendar_type("DMY", self.dummy_cal_type)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(self.dummy_cal_type, type(retr_cal))
def test_both_places_are_checked(self):
dummy_cal = self.dummy_cal_type()
# if instance is registered, can't register type with same name
self.dispatcher.register_calendar("DMY", dummy_cal)
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
self.dispatcher.deregister_calendar("DMY")
# if type is registered, can't register instance with same name
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
def test_force_registration(self):
self.dispatcher.register_calendar("DMY", self.dummy_cal_type())
first_dummy = self.dispatcher.get_calendar("DMY")
# force-register a new instance
self.dispatcher.register_calendar("DMY", self.dummy_cal_type(), force=True)
second_dummy = self.dispatcher.get_calendar("DMY")
self.assertNotEqual(first_dummy, second_dummy)
class DefaultsTestCase(TestCase):
def test_default_calendars(self):
dispatcher = ExchangeCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
# These are ordered aliases first, so that we can deregister the
# canonical factories when we're done with them, and we'll be done with
# them after they've been used by all aliases and by canonical name.
for name in concat([_default_calendar_aliases, _default_calendar_factories]):
self.assertIsNotNone(
dispatcher.get_calendar(name), "get_calendar(%r) returned None" % name
)
dispatcher.deregister_calendar(name)
class DaysAtTimeTestCase(TestCase):
@parameterized.expand(
[
# NYSE standard day
(
"2016-07-19",
0,
time(9, 31),
pytz.timezone("America/New_York"),
"2016-07-19 9:31",
),
# CME standard day
(
"2016-07-19",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2016-07-18 17:01",
),
# CME day after DST start
(
"2004-04-05",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2004-04-04 17:01",
),
# ICE day after DST start
(
"1990-04-02",
-1,
time(19, 1),
pytz.timezone("America/Chicago"),
"1990-04-01 19:01",
),
]
)
def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert(UTC)
self.assertEqual(result, expected)
class ExchangeCalendarTestBase(object):
# Override in subclasses.
answer_key_filename = None
calendar_class = None
# Affects test_start_bound. Should be set to earliest date for which
# calendar can be instantiated, or None if no start bound.
START_BOUND: pd.Timestamp | None = None
# Affects test_end_bound. Should be set to latest date for which
# calendar can be instantiated, or None if no end bound.
END_BOUND: pd.Timestamp | None = None
# Affects tests that care about the empty periods between sessions. Should
# be set to False for 24/7 calendars.
GAPS_BETWEEN_SESSIONS = True
# Affects tests that care about early closes. Should be set to False for
# calendars that don't have any early closes.
HAVE_EARLY_CLOSES = True
# Affects tests that care about late opens. Since most do not, defaulting
# to False.
HAVE_LATE_OPENS = False
# Affects test_for_breaks. True if one or more calendar sessions has a
# break.
HAVE_BREAKS = False
# Affects test_session_has_break.
SESSION_WITH_BREAK = None # None if no session has a break
SESSION_WITHOUT_BREAK = T("2011-06-15") # None if all sessions have breaks
# Affects test_sanity_check_session_lengths. Should be set to the largest
# number of hours that ever appear in a single session.
MAX_SESSION_HOURS = 0
# Affects test_minute_index_to_session_labels.
# Change these if the start/end dates of your test suite don't contain the
# defaults.
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp("2011-01-04", tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2011-04-04", tz=UTC)
# Affects tests around daylight savings. If possible, should contain two
# dates that are not both in the same daylight savings regime.
DAYLIGHT_SAVINGS_DATES = ["2004-04-05", "2004-11-01"]
# Affects test_start_end. Change these if your calendar start/end
# dates between 2010-01-03 and 2010-01-10 don't match the defaults.
TEST_START_END_FIRST = pd.Timestamp("2010-01-03", tz=UTC)
TEST_START_END_LAST = pd.Timestamp("2010-01-10", tz=UTC)
TEST_START_END_EXPECTED_FIRST = pd.Timestamp("2010-01-04", tz=UTC)
TEST_START_END_EXPECTED_LAST = pd.Timestamp("2010-01-08", tz=UTC)
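# A minimal subclass sketch (hypothetical class and file names) overrides the
# knobs above, e.g.:
#   class XNYSCalendarTestCase(ExchangeCalendarTestBase, TestCase):
#       answer_key_filename = "nyse"            # resources/nyse.csv
#       calendar_class = XNYSExchangeCalendar   # hypothetical import
#       MAX_SESSION_HOURS = 6.5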
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
"./resources",
filename + ".csv",
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
# dtype kwarg causes read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz=UTC),
)
@classmethod
def setup_class(cls):
cls.answers = cls.load_answer_key(cls.answer_key_filename)
cls.start_date = cls.answers.index[0]
cls.end_date = cls.answers.index[-1]
cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
cls.one_minute = pd.Timedelta(1, "T")
cls.one_hour = pd.Timedelta(1, "H")
cls.one_day = pd.Timedelta(1, "D")
cls.today = pd.Timestamp.now(tz="UTC").floor("D")
@classmethod
def teardown_class(cls):
cls.calendar = None
cls.answers = None
def test_bound_start(self):
if self.START_BOUND is not None:
cal = self.calendar_class(self.START_BOUND, self.today)
self.assertIsInstance(cal, ExchangeCalendar)
start = self.START_BOUND - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
self.calendar_class(start, self.today)
else:
# verify no bound imposed
cal = self.calendar_class(pd.Timestamp("1902-01-01", tz="UTC"), self.today)
self.assertIsInstance(cal, ExchangeCalendar)
def test_bound_end(self):
if self.END_BOUND is not None:
cal = self.calendar_class(self.today, self.END_BOUND)
self.assertIsInstance(cal, ExchangeCalendar)
end = self.END_BOUND + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
self.calendar_class(self.today, end)
else:
# verify no bound imposed
cal = self.calendar_class(self.today, pd.Timestamp("2050-01-01", tz="UTC"))
self.assertIsInstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.MAX_SESSION_HOURS)
def test_calculated_against_csv(self):
tm.assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_adhoc_holidays_specification(self):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(self.calendar.adhoc_holidays)
assert dti.tz is None
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
m = self.calendar.is_open_on_minute
for market_minute in self.answers.market_open[1:]:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(m(market_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(m(pre_market, _parse=False))
for market_minute in self.answers.market_close[:-1]:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(m(close_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(m(post_market, _parse=False))
def _verify_minute(
self,
calendar,
minute,
next_open_answer,
prev_open_answer,
next_close_answer,
prev_close_answer,
):
next_open = calendar.next_open(minute, _parse=False)
self.assertEqual(next_open, next_open_answer)
prev_open = self.calendar.previous_open(minute, _parse=False)
self.assertEqual(prev_open, prev_open_answer)
next_close = self.calendar.next_close(minute, _parse=False)
self.assertEqual(next_close, next_close_answer)
prev_close = self.calendar.previous_close(minute, _parse=False)
self.assertEqual(prev_close, prev_close_answer)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
opens = self.answers.market_open.iloc[1:-2]
closes = self.answers.market_close.iloc[1:-2]
previous_opens = self.answers.market_open.iloc[:-1]
previous_closes = self.answers.market_close.iloc[:-1]
next_opens = self.answers.market_open.iloc[2:]
next_closes = self.answers.market_close.iloc[2:]
for (
open_minute,
close_minute,
previous_open,
previous_close,
next_open,
next_close,
) in zip(
opens, closes, previous_opens, previous_closes, next_opens, next_closes
):
minute_before_open = open_minute - self.one_minute
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
minute_before_open,
open_minute,
previous_open,
close_minute,
previous_close,
)
# open minute
self._verify_minute(
self.calendar,
open_minute,
next_open,
previous_open,
close_minute,
previous_close,
)
# second minute of session
self._verify_minute(
self.calendar,
open_minute + self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# minute before the close
self._verify_minute(
self.calendar,
close_minute - self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# the close
self._verify_minute(
self.calendar,
close_minute,
next_open,
open_minute,
next_close,
previous_close,
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
close_minute + self.one_minute,
next_open,
open_minute,
next_close,
close_minute,
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2], self.calendar.next_minute(minute, _parse=False)
)
self.assertEqual(
all_minutes[idx], self.calendar.previous_minute(minute, _parse=False)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open, _parse=False),
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close, _parse=False),
)
def test_date_to_session_label(self):
m = self.calendar.date_to_session_label
sessions = self.answers.index[:30] # first 30 sessions
# test for error if request session prior to first calendar session.
date = self.answers.index[0] - self.one_day
error_msg = (
"Cannot get a session label prior to the first calendar"
f" session ('{self.answers.index[0]}'). Consider passing"
" `direction` as 'next'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "previous", _parse=False)
# direction as "previous"
dates = pd.date_range(sessions[0], sessions[-1], freq="D")
last_session = None
for date in dates:
session_label = m(date, "previous", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# direction as "next"
last_session = None
for date in dates.sort_values(ascending=False):
session_label = m(date, "next", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# test for error if request session after last calendar session.
date = self.answers.index[-1] + self.one_day
error_msg = (
"Cannot get a session label later than the last calendar"
f" session ('{self.answers.index[-1]}'). Consider passing"
" `direction` as 'previous'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "next", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
not_sessions = dates[~dates.isin(sessions)][:5]
for not_session in not_sessions:
error_msg = (
f"`date` '{not_session}' does not represent a session. Consider"
" passing a `direction`."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "none", _parse=False)
# test default behaviour
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, _parse=False)
# non-valid direction (can only be thrown if gaps between sessions)
error_msg = (
"'not a direction' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "not a direction", _parse=False)
def test_minute_to_session_label(self):
m = self.calendar.minute_to_session_label
# minute is prior to first session's open
minute_before_first_open = self.answers.iloc[0].market_open - self.one_minute
session_label = self.answers.index[0]
minutes_that_resolve_to_this_session = [
m(minute_before_first_open, _parse=False),
m(minute_before_first_open, direction="next", _parse=False),
]
unique_session_labels = set(minutes_that_resolve_to_this_session)
self.assertTrue(len(unique_session_labels) == 1)
self.assertIn(session_label, unique_session_labels)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="previous", _parse=False)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="none", _parse=False)
# minute is between first session's open and last session's close
for idx, (session_label, open_minute, close_minute, _, _) in enumerate(
self.answers.iloc[1:-2].itertuples(name=None)
):
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.index[idx + 2]
previous_session_label = self.answers.index[idx]
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
m(open_minute, _parse=False),
m(open_minute, direction="next", _parse=False),
m(open_minute, direction="previous", _parse=False),
m(open_minute, direction="none", _parse=False),
m(hour_into_session, _parse=False),
m(hour_into_session, direction="next", _parse=False),
m(hour_into_session, direction="previous", _parse=False),
m(hour_into_session, direction="none", _parse=False),
m(close_minute),
m(close_minute, direction="next", _parse=False),
m(close_minute, direction="previous", _parse=False),
m(close_minute, direction="none", _parse=False),
session_label,
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
m(minute_before_session, _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_before_session, direction="next", _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_after_session, direction="previous", _parse=False)
)
self.assertTrue(
all(
x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session
)
)
minutes_that_resolve_to_next_session = [
m(minute_after_session, _parse=False),
m(minute_after_session, direction="next", _parse=False),
next_session_label,
]
self.assertTrue(
all(
x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session
)
)
self.assertEqual(
m(minute_before_session, direction="previous", _parse=False),
previous_session_label,
)
if self.GAPS_BETWEEN_SESSIONS:
# Make sure we use the cache correctly
minutes_that_resolve_to_different_sessions = [
m(minute_after_session, direction="next", _parse=False),
m(minute_after_session, direction="previous", _parse=False),
m(minute_after_session, direction="next", _parse=False),
]
self.assertEqual(
minutes_that_resolve_to_different_sessions,
[next_session_label, session_label, next_session_label],
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
m(open_minute, "asdf", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
m(minute_before_session, direction="none", _parse=False)
# minute is later than last session's close
minute_after_last_close = self.answers.iloc[-1].market_close + self.one_minute
session_label = self.answers.index[-1]
minute_that_resolves_to_session_label = m(
minute_after_last_close, direction="previous", _parse=False
)
self.assertEqual(session_label, minute_that_resolves_to_session_label)
with self.assertRaises(ValueError):
m(minute_after_last_close, _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="next", _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="none", _parse=False)
@parameterized.expand(
[
(1, 0),
(2, 0),
(2, 1),
]
)
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
self.MINUTE_INDEX_TO_SESSION_LABELS_START,
self.MINUTE_INDEX_TO_SESSION_LABELS_END,
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
pd.DatetimeIndex(minutes.map(self.calendar.minute_to_session_label)),
self.calendar.minute_index_to_session_labels(minutes),
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label, _parse=False)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label, _parse=False),
session_labels[idx + 1],
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label, _parse=False),
session_labels[idx - 1],
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label, _parse=False)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
_open, _close = self.calendar.open_and_close_for_session(full_session_label)
_break_start, _break_end = self.calendar.break_start_and_end_for_session(
full_session_label
)
if not pd.isnull(_break_start):
constructed_minutes = np.concatenate(
[
pd.date_range(start=_open, end=_break_start, freq="min"),
pd.date_range(start=_break_end, end=_close, freq="min"),
]
)
else:
constructed_minutes = pd.date_range(start=_open, end=_close, freq="min")
np.testing.assert_array_equal(
minutes,
constructed_minutes,
)
# early close period
if self.HAVE_EARLY_CLOSES:
early_close_session_label = self.calendar.early_closes[0]
minutes_for_early_close = self.calendar.minutes_for_session(
early_close_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
early_close_session_label
)
np.testing.assert_array_equal(
minutes_for_early_close,
pd.date_range(start=_open, end=_close, freq="min"),
)
# late open period
if self.HAVE_LATE_OPENS:
late_open_session_label = self.calendar.late_opens[0]
minutes_for_late_open = self.calendar.minutes_for_session(
late_open_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
late_open_session_label
)
np.testing.assert_array_equal(
minutes_for_late_open,
pd.date_range(start=_open, end=_close, freq="min"),
)
def test_sessions_in_range(self):
# pick two sessions
session_count = len(self.calendar.schedule.index)
first_idx = session_count // 3
second_idx = 2 * first_idx
first_session_label = self.calendar.schedule.index[first_idx]
second_session_label = self.calendar.schedule.index[second_idx]
answer_key = self.calendar.schedule.index[first_idx : second_idx + 1]
rtrn = self.calendar.sessions_in_range(
first_session_label, second_session_label, _parse=False
)
np.testing.assert_array_equal(answer_key, rtrn)
def get_session_block(self):
"""
Get an "interesting" range of three sessions in a row. By default this
tries to find and return a (full session, early close session, full
session) block.
"""
if not self.HAVE_EARLY_CLOSES:
# If we don't have any early closes, just return a "random" chunk
# of three sessions.
return self.calendar.all_sessions[10:13]
shortened_session = self.calendar.early_closes[0]
shortened_session_idx = self.calendar.schedule.index.get_loc(shortened_session)
session_before = self.calendar.schedule.index[shortened_session_idx - 1]
session_after = self.calendar.schedule.index[shortened_session_idx + 1]
return [session_before, shortened_session, session_after]
def test_minutes_in_range(self):
sessions = self.get_session_block()
first_open, first_close = self.calendar.open_and_close_for_session(sessions[0])
minute_before_first_open = first_open - self.one_minute
middle_open, middle_close = self.calendar.open_and_close_for_session(
sessions[1]
)
last_open, last_close = self.calendar.open_and_close_for_session(sessions[-1])
minute_after_last_close = last_close + self.one_minute
# get all the minutes between first_open and last_close
minutes1 = self.calendar.minutes_in_range(first_open, last_close, _parse=False)
minutes2 = self.calendar.minutes_in_range(
minute_before_first_open, minute_after_last_close, _parse=False
)
if self.GAPS_BETWEEN_SESSIONS:
np.testing.assert_array_equal(minutes1, minutes2)
else:
# if no gaps, then minutes2 should have 2 extra minutes
np.testing.assert_array_equal(minutes1, minutes2[1:-1])
# manually construct the minutes
(
first_break_start,
first_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[0])
(
middle_break_start,
middle_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[1])
(
last_break_start,
last_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[-1])
intervals = [
(first_open, first_break_start, first_break_end, first_close),
(middle_open, middle_break_start, middle_break_end, middle_close),
(last_open, last_break_start, last_break_end, last_close),
]
all_minutes = []
for _open, _break_start, _break_end, _close in intervals:
if pd.isnull(_break_start):
all_minutes.append(
pd.date_range(start=_open, end=_close, freq="min"),
)
else:
all_minutes.append(
pd.date_range(start=_open, end=_break_start, freq="min"),
)
all_minutes.append(
pd.date_range(start=_break_end, end=_close, freq="min"),
)
all_minutes = np.concatenate(all_minutes)
np.testing.assert_array_equal(all_minutes, minutes1)
def test_minutes_for_sessions_in_range(self):
sessions = self.get_session_block()
minutes = self.calendar.minutes_for_sessions_in_range(sessions[0], sessions[-1])
# do it manually
session0_minutes = self.calendar.minutes_for_session(sessions[0])
session1_minutes = self.calendar.minutes_for_session(sessions[1])
session2_minutes = self.calendar.minutes_for_session(sessions[2])
concatenated_minutes = np.concatenate(
[session0_minutes.values, session1_minutes.values, session2_minutes.values]
)
np.testing.assert_array_equal(concatenated_minutes, minutes.values)
def test_sessions_window(self):
sessions = self.get_session_block()
np.testing.assert_array_equal(
self.calendar.sessions_window(sessions[0], len(sessions) - 1, _parse=False),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
np.testing.assert_array_equal(
self.calendar.sessions_window(
sessions[-1], -1 * (len(sessions) - 1), _parse=False
),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
def test_session_distance(self):
sessions = self.get_session_block()
forward_distance = self.calendar.session_distance(
sessions[0],
sessions[-1],
_parse=False,
)
self.assertEqual(forward_distance, len(sessions))
backward_distance = self.calendar.session_distance(
sessions[-1],
sessions[0],
_parse=False,
)
self.assertEqual(backward_distance, -len(sessions))
one_day_distance = self.calendar.session_distance(
sessions[0],
sessions[0],
_parse=False,
)
self.assertEqual(one_day_distance, 1)
def test_open_and_close_for_session(self):
for session_label, open_answer, close_answer, _, _ in self.answers.itertuples(
name=None
):
found_open, found_close = self.calendar.open_and_close_for_session(
session_label, _parse=False
)
# Test that the methods for just session open and close produce the
# same values as the method for getting both.
alt_open = self.calendar.session_open(session_label, _parse=False)
self.assertEqual(alt_open, found_open)
alt_close = self.calendar.session_close(session_label, _parse=False)
self.assertEqual(alt_close, found_close)
self.assertEqual(open_answer, found_open)
self.assertEqual(close_answer, found_close)
def test_session_opens_in_range(self):
found_opens = self.calendar.session_opens_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_opens.index.freq = None
tm.assert_series_equal(found_opens, self.answers["market_open"])
def test_session_closes_in_range(self):
found_closes = self.calendar.session_closes_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_closes.index.freq = None
tm.assert_series_equal(found_closes, self.answers["market_close"])
def test_daylight_savings(self):
# 2004 daylight savings switches:
# Sunday 2004-04-04 and Sunday 2004-10-31
# make sure there's no weirdness around calculating the next day's
# session's open time.
m = dict(self.calendar.open_times)
m[pd.Timestamp.min] = m.pop(None)
open_times = pd.Series(m)
for date in self.DAYLIGHT_SAVINGS_DATES:
next_day = pd.Timestamp(date, tz=UTC)
open_date = next_day + Timedelta(days=self.calendar.open_offset)
the_open = self.calendar.schedule.loc[next_day].market_open
localized_open = the_open.tz_localize(UTC).tz_convert(self.calendar.tz)
self.assertEqual(
(open_date.year, open_date.month, open_date.day),
(localized_open.year, localized_open.month, localized_open.day),
)
open_ix = open_times.index.searchsorted(pd.Timestamp(date), side="right")
if open_ix == len(open_times):
open_ix -= 1
self.assertEqual(open_times.iloc[open_ix].hour, localized_open.hour)
self.assertEqual(open_times.iloc[open_ix].minute, localized_open.minute)
def test_start_end(self):
"""
Check ExchangeCalendar with defined start/end dates.
"""
calendar = self.calendar_class(
start=self.TEST_START_END_FIRST,
end=self.TEST_START_END_LAST,
)
self.assertEqual(
calendar.first_trading_session,
self.TEST_START_END_EXPECTED_FIRST,
)
self.assertEqual(
calendar.last_trading_session,
self.TEST_START_END_EXPECTED_LAST,
)
def test_has_breaks(self):
has_breaks = self.calendar.has_breaks()
self.assertEqual(has_breaks, self.HAVE_BREAKS)
def test_session_has_break(self):
if self.SESSION_WITHOUT_BREAK is not None:
self.assertFalse(
self.calendar.session_has_break(self.SESSION_WITHOUT_BREAK)
)
if self.SESSION_WITH_BREAK is not None:
self.assertTrue(self.calendar.session_has_break(self.SESSION_WITH_BREAK))
# TODO remove this class when all calendars are migrated. No longer required as
# `minute_index_to_session_labels` comprehensively tested under new suite.
class OpenDetectionTestCase(TestCase):
# This is an extra set of unit tests that were added during a rewrite of
# `minute_index_to_session_labels` to ensure that the existing
# calendar-generic test suite correctly covered edge cases around
# non-market minutes.
def test_detect_non_market_minutes(self):
cal = get_calendar("NYSE")
# NOTE: This test is here instead of being on the base class for all
# calendars because some of our calendars are 24/7, which means there
# aren't any non-market minutes to find.
day0 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-03", tz=UTC),
pd.Timestamp("2013-07-03", tz=UTC),
)
for minute in day0:
self.assertTrue(cal.is_open_on_minute(minute))
day1 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-05", tz=UTC),
pd.Timestamp("2013-07-05", tz=UTC),
)
for minute in day1:
self.assertTrue(cal.is_open_on_minute(minute))
def NYSE_timestamp(s):
return pd.Timestamp(s, tz="America/New_York").tz_convert(UTC)
non_market = [
# After close.
NYSE_timestamp("2013-07-03 16:01"),
# Holiday.
NYSE_timestamp("2013-07-04 10:00"),
# Before open.
NYSE_timestamp("2013-07-05 9:29"),
]
for minute in non_market:
self.assertFalse(cal.is_open_on_minute(minute), minute)
input_ = pd.to_datetime(
np.hstack([day0.values, minute.asm8, day1.values]),
utc=True,
)
with self.assertRaises(ValueError) as e:
cal.minute_index_to_session_labels(input_)
exc_str = str(e.exception)
self.assertIn("First Bad Minute: {}".format(minute), exc_str)
# TODO remove this class when all calendars are migrated. No longer required as
# this case is handled by new test base internally.
class NoDSTExchangeCalendarTestBase(ExchangeCalendarTestBase):
def test_daylight_savings(self):
"""
Several countries in Africa / Asia do not observe DST
so we need to skip over this test for those markets
"""
pass
def get_csv(name: str) -> pd.DataFrame:
"""Get csv file as DataFrame for given calendar `name`."""
filename = name.replace("/", "-").lower() + ".csv"
path = pathlib.Path(__file__).parent.joinpath("resources", filename)
df = pd.read_csv(
path,
index_col=0,
parse_dates=[0, 1, 2, 3, 4],
infer_datetime_format=True,
)
df.index = df.index.tz_localize("UTC")
for col in df:
df[col] = df[col].dt.tz_localize("UTC")
return df
class Answers:
"""Inputs and expected output for testing a given calendar and side.
Inputs and expected outputs are provided by public instance methods and
properties. These either read directly from the corresponding .csv file
or are evaluated from the .csv file contents. NB Properties / methods
MUST NOT make evaluations by way of repeating the code of the
ExchangeCalendar method they are intended to test!
Parameters
----------
calendar_name
Canonical name of calendar for which answer info is required. For
example, 'XNYS'.
side {'both', 'left', 'right', 'neither'}
Side of sessions to treat as trading minutes.
"""
ONE_MIN = pd.Timedelta(1, "T")
TWO_MIN = pd.Timedelta(2, "T")
ONE_DAY = pd.Timedelta(1, "D")
LEFT_SIDES = ["left", "both"]
RIGHT_SIDES = ["right", "both"]
def __init__(
self,
calendar_name: str,
side: str,
):
self._name = calendar_name.upper()
self._side = side
# --- Exposed constructor arguments ---
@property
def name(self) -> str:
"""Name of corresponding calendar."""
return self._name
@property
def side(self) -> str:
"""Side of calendar for which answers valid."""
return self._side
# --- Properties read (indirectly) from csv file ---
@functools.lru_cache(maxsize=4)
def _answers(self) -> pd.DataFrame:
return get_csv(self.name)
@property
def answers(self) -> pd.DataFrame:
"""Answers as correspoding csv."""
return self._answers()
@property
def sessions(self) -> pd.DatetimeIndex:
"""Session labels."""
return self.answers.index
@property
def opens(self) -> pd.Series:
"""Market open time for each session."""
return self.answers.market_open
@property
def closes(self) -> pd.Series:
"""Market close time for each session."""
return self.answers.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time for each session."""
return self.answers.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time for each session."""
return self.answers.break_end
# --- get and helper methods ---
def get_next_session(self, session: pd.Timestamp) -> pd.Timestamp:
"""Get session that immediately follows `session`."""
assert (
session != self.last_session
), "Cannot get session later than last answers' session."
idx = self.sessions.get_loc(session) + 1
return self.sessions[idx]
def session_has_break(self, session: pd.Timestamp) -> bool:
"""Query if `session` has a break."""
return session in self.sessions_with_break
@staticmethod
def get_sessions_sample(sessions: pd.DatetimeIndex):
"""Return sample of given `sessions`.
Sample includes:
All sessions within first two years of `sessions`.
All sessions within last two years of `sessions`.
All sessions falling:
within first 3 days of any month.
from 28th of any month.
from 14th through 16th of any month.
"""
if sessions.empty:
return sessions
mask = (
(sessions < sessions[0] + pd.DateOffset(years=2))
| (sessions > sessions[-1] - pd.DateOffset(years=2))
| (sessions.day <= 3)
| (sessions.day >= 28)
| (14 <= sessions.day) & (sessions.day <= 16)
)
return sessions[mask]
def get_sessions_minutes(
self, start: pd.Timestamp, end: pd.Timestamp | int = 1
) -> pd.DatetimeIndex:
"""Get trading minutes for 1 or more consecutive sessions.
Parameters
----------
start
Session from which to get trading minutes.
end
            Session through which to get trading minutes. Can be passed as:
                pd.Timestamp: return will include trading minutes for the
                    `end` session.
                int: the number of consecutive sessions, inclusive of
                    `start`, for which trading minutes are required.
                    Default is 1, so that by default only trading minutes
                    of the `start` session are returned.
"""
idx = self.sessions.get_loc(start)
stop = idx + end if isinstance(end, int) else self.sessions.get_loc(end) + 1
indexer = slice(idx, stop)
dtis = []
for first, last, last_am, first_pm in zip(
self.first_minutes[indexer],
self.last_minutes[indexer],
self.last_am_minutes[indexer],
self.first_pm_minutes[indexer],
):
if pd.isna(last_am):
dtis.append(pd.date_range(first, last, freq="T"))
else:
dtis.append(pd.date_range(first, last_am, freq="T"))
dtis.append(pd.date_range(first_pm, last, freq="T"))
return dtis[0].union_many(dtis[1:])
# --- Evaluated general calendar properties ---
@functools.lru_cache(maxsize=4)
def _has_a_session_with_break(self) -> pd.DatetimeIndex:
return self.break_starts.notna().any()
@property
def has_a_session_with_break(self) -> bool:
"""Does any session of answers have a break."""
return self._has_a_session_with_break()
@property
def has_a_session_without_break(self) -> bool:
"""Does any session of answers not have a break."""
return self.break_starts.isna().any()
# --- Evaluated properties for first and last sessions ---
@property
def first_session(self) -> pd.Timestamp:
"""First session covered by answers."""
return self.sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last session covered by answers."""
return self.sessions[-1]
@property
def sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last sessions covered by answers."""
return self.first_session, self.last_session
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of first session covered by answers."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of last session covered by answers."""
return self.closes[-1]
@property
def first_trading_minute(self) -> pd.Timestamp:
open_ = self.first_session_open
return open_ if self.side in self.LEFT_SIDES else open_ + self.ONE_MIN
@property
def last_trading_minute(self) -> pd.Timestamp:
close = self.last_session_close
return close if self.side in self.RIGHT_SIDES else close - self.ONE_MIN
@property
def trading_minutes_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last trading minutes covered by answers."""
return self.first_trading_minute, self.last_trading_minute
# --- out-of-bounds properties ---
@property
def minute_too_early(self) -> pd.Timestamp:
"""Minute earlier than first trading minute."""
return self.first_trading_minute - self.ONE_MIN
@property
def minute_too_late(self) -> pd.Timestamp:
"""Minute later than last trading minute."""
return self.last_trading_minute + self.ONE_MIN
@property
def session_too_early(self) -> pd.Timestamp:
"""Date earlier than first session."""
return self.first_session - self.ONE_DAY
@property
def session_too_late(self) -> pd.Timestamp:
"""Date later than last session."""
return self.last_session + self.ONE_DAY
# --- Evaluated properties covering every session. ---
@functools.lru_cache(maxsize=4)
def _first_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.opens.copy()
else:
minutes = self.opens + self.ONE_MIN
minutes.name = "first_minutes"
return minutes
@property
def first_minutes(self) -> pd.Series:
"""First trading minute of each session (UTC)."""
return self._first_minutes()
@property
def first_minutes_plus_one(self) -> pd.Series:
"""First trading minute of each session plus one minute."""
return self.first_minutes + self.ONE_MIN
@property
def first_minutes_less_one(self) -> pd.Series:
"""First trading minute of each session less one minute."""
return self.first_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.closes.copy()
else:
minutes = self.closes - self.ONE_MIN
minutes.name = "last_minutes"
return minutes
@property
def last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._last_minutes()
@property
def last_minutes_plus_one(self) -> pd.Series:
"""Last trading minute of each session plus one minute."""
return self.last_minutes + self.ONE_MIN
@property
def last_minutes_less_one(self) -> pd.Series:
"""Last trading minute of each session less one minute."""
return self.last_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_am_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.break_starts.copy()
else:
minutes = self.break_starts - self.ONE_MIN
minutes.name = "last_am_minutes"
return minutes
@property
def last_am_minutes(self) -> pd.Series:
"""Last pre-break trading minute of each session.
NaT if session does not have a break.
"""
return self._last_am_minutes()
@property
def last_am_minutes_plus_one(self) -> pd.Series:
"""Last pre-break trading minute of each session plus one minute."""
return self.last_am_minutes + self.ONE_MIN
@property
def last_am_minutes_less_one(self) -> pd.Series:
"""Last pre-break trading minute of each session less one minute."""
return self.last_am_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _first_pm_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.break_ends.copy()
else:
minutes = self.break_ends + self.ONE_MIN
minutes.name = "first_pm_minutes"
return minutes
@property
def first_pm_minutes(self) -> pd.Series:
"""First post-break trading minute of each session.
NaT if session does not have a break.
"""
return self._first_pm_minutes()
@property
def first_pm_minutes_plus_one(self) -> pd.Series:
"""First post-break trading minute of each session plus one minute."""
return self.first_pm_minutes + self.ONE_MIN
@property
def first_pm_minutes_less_one(self) -> pd.Series:
"""First post-break trading minute of each session less one minute."""
return self.first_pm_minutes - self.ONE_MIN
# --- Evaluated session sets and ranges that meet a specific condition ---
@property
def _mask_breaks(self) -> pd.Series:
return self.break_starts.notna()
@functools.lru_cache(maxsize=4)
def _sessions_with_break(self) -> pd.DatetimeIndex:
return self.sessions[self._mask_breaks]
@property
def sessions_with_break(self) -> pd.DatetimeIndex:
return self._sessions_with_break()
@functools.lru_cache(maxsize=4)
def _sessions_without_break(self) -> pd.DatetimeIndex:
return self.sessions[~self._mask_breaks]
@property
def sessions_without_break(self) -> pd.DatetimeIndex:
return self._sessions_without_break()
@property
def sessions_without_break_run(self) -> pd.DatetimeIndex:
"""Longest run of consecutive sessions without a break."""
s = self.break_starts.isna()
if s.empty:
return pd.DatetimeIndex([], tz="UTC")
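        # Cumulatively counting the break sessions gives every run of
        # consecutive no-break sessions a common group id; the largest
        # group is therefore the longest run.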
trues_grouped = (~s).cumsum()[s]
group_sizes = trues_grouped.value_counts()
max_run_size = group_sizes.max()
max_run_group_id = group_sizes[group_sizes == max_run_size].index[0]
run_without_break = trues_grouped[trues_grouped == max_run_group_id].index
return run_without_break
@property
def sessions_without_break_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest session range that does not include a session with a break.
Returns None if all sessions have a break.
"""
sessions = self.sessions_without_break_run
if sessions.empty:
return None
return sessions[0], sessions[-1]
@property
def _mask_sessions_without_gap_after(self) -> pd.Series:
if self.side == "neither":
            # will always have a gap after if neither open nor close is a
            # trading minute (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if next open is one minute after previous close
closes_plus_min = self.closes + pd.Timedelta(1, "T")
return self.opens.shift(-1) == closes_plus_min
else:
return self.opens.shift(-1) == self.closes
@property
def _mask_sessions_without_gap_before(self) -> pd.Series:
if self.side == "neither":
            # will always have a gap before if neither open nor close is a
            # trading minute (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if previous close is one minute before next open
opens_minus_one = self.opens - pd.Timedelta(1, "T")
return self.closes.shift(1) == opens_minus_one
else:
return self.closes.shift(1) == self.opens
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[mask][:-1]
@property
def sessions_without_gap_after(self) -> pd.DatetimeIndex:
"""Sessions not followed by a non-trading minute.
Rather, sessions immediately followed by first trading minute of
next session.
"""
return self._sessions_without_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[~mask][:-1]
@property
def sessions_with_gap_after(self) -> pd.DatetimeIndex:
"""Sessions followed by a non-trading minute."""
return self._sessions_with_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[mask][1:]
@property
def sessions_without_gap_before(self) -> pd.DatetimeIndex:
"""Sessions not preceeded by a non-trading minute.
Rather, sessions immediately preceeded by last trading minute of
previous session.
"""
return self._sessions_without_gap_before()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[~mask][1:]
@property
def sessions_with_gap_before(self) -> pd.DatetimeIndex:
"""Sessions preceeded by a non-trading minute."""
return self._sessions_with_gap_before()
# times are changing...
@functools.lru_cache(maxsize=16)
def _get_sessions_with_times_different_to_next_session(
self,
column: str, # typing.Literal["opens", "closes", "break_starts", "break_ends"]
) -> list[pd.DatetimeIndex]:
"""For a given answers column, get session labels where time differs
from time of next session.
Where `column` is a break time ("break_starts" or "break_ends"), return
will not include sessions when next session has a different `has_break`
status. For example, if session_0 has a break and session_1 does not have
a break, or vice versa, then session_0 will not be included to return. For
sessions followed by a session with a different `has_break` status, see
`_get_sessions_with_has_break_different_to_next_session`.
Returns
-------
        list of pd.DatetimeIndex
[0] sessions with earlier next session
[1] sessions with later next session
"""
# column takes string to allow lru_cache (Series not hashable)
is_break_col = column[0] == "b"
column_ = getattr(self, column)
if is_break_col:
if column_.isna().all():
return [pd.DatetimeIndex([], tz="UTC")] * 4
column_ = column_.fillna(method="ffill").fillna(method="bfill")
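        # Work modulo 24h so that only changes in the time of day register,
        # regardless of how many days separate consecutive sessions; a
        # remainder above 12h means the next session's time is earlier.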
diff = (column_.shift(-1) - column_)[:-1]
remainder = diff % pd.Timedelta(hours=24)
mask = remainder != pd.Timedelta(0)
sessions = self.sessions[:-1][mask]
next_session_earlier_mask = remainder[mask] > pd.Timedelta(hours=12)
next_session_earlier = sessions[next_session_earlier_mask]
next_session_later = sessions[~next_session_earlier_mask]
if is_break_col:
mask = next_session_earlier.isin(self.sessions_without_break)
next_session_earlier = next_session_earlier.drop(next_session_earlier[mask])
mask = next_session_later.isin(self.sessions_without_break)
next_session_later = next_session_later.drop(next_session_later[mask])
return [next_session_earlier, next_session_later]
@property
def _sessions_with_opens_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("opens")
@property
def _sessions_with_closes_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("closes")
@property
def _sessions_with_break_start_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_starts")
@property
def _sessions_with_break_end_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_ends")
@property
def sessions_next_open_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[0]
@property
def sessions_next_open_later(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[1]
@property
def sessions_next_open_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_earlier.union(self.sessions_next_open_later)
@property
def sessions_next_close_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[0]
@property
def sessions_next_close_later(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[1]
@property
def sessions_next_close_different(self) -> pd.DatetimeIndex:
return self.sessions_next_close_earlier.union(self.sessions_next_close_later)
@property
def sessions_next_break_start_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[0]
@property
def sessions_next_break_start_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[1]
@property
def sessions_next_break_start_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_start_earlier
later = self.sessions_next_break_start_later
return earlier.union(later)
@property
def sessions_next_break_end_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[0]
@property
def sessions_next_break_end_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[1]
@property
def sessions_next_break_end_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_end_earlier
later = self.sessions_next_break_end_later
return earlier.union(later)
@functools.lru_cache(maxsize=4)
def _get_sessions_with_has_break_different_to_next_session(
self,
) -> tuple[pd.DatetimeIndex, pd.DatetimeIndex]:
"""Get sessions with 'has_break' different to next session.
Returns
-------
tuple[pd.DatetimeIndex, pd.DatetimeIndex]
[0] Sessions that have a break and are immediately followed by
a session which does not have a break.
[1] Sessions that do not have a break and are immediately
followed by a session which does have a break.
"""
mask = (self.break_starts.notna() & self.break_starts.shift(-1).isna())[:-1]
sessions_with_break_next_session_without_break = self.sessions[:-1][mask]
mask = (self.break_starts.isna() & self.break_starts.shift(-1).notna())[:-1]
sessions_without_break_next_session_with_break = self.sessions[:-1][mask]
return (
sessions_with_break_next_session_without_break,
sessions_without_break_next_session_with_break,
)
@property
def sessions_with_break_next_session_without_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[0]
@property
def sessions_without_break_next_session_with_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[1]
@functools.lru_cache(maxsize=4)
def _sessions_next_time_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_different.union_many(
[
self.sessions_next_close_different,
self.sessions_next_break_start_different,
self.sessions_next_break_end_different,
self.sessions_with_break_next_session_without_break,
self.sessions_without_break_next_session_with_break,
]
)
@property
def sessions_next_time_different(self) -> pd.DatetimeIndex:
"""Sessions where next session has a different time for any column.
Includes sessions where next session has a different `has_break`
status.
"""
return self._sessions_next_time_different()
# session blocks...
def _create_changing_times_session_block(
self, session: pd.Timestamp
) -> pd.DatetimeIndex:
"""Create block of sessions with changing times.
Given a `session` known to have at least one time (open, close,
break_start or break_end) different from the next session, returns
a block of consecutive sessions ending with the first session after
`session` that has the same times as the session that immediately
        precedes it (i.e. the last two sessions of the block will have the
same times), or the last calendar session.
"""
start_idx = self.sessions.get_loc(session)
end_idx = start_idx + 1
while self.sessions[end_idx] in self.sessions_next_time_different:
end_idx += 1
end_idx += 2 # +1 to include session with same times, +1 to serve as end index
return self.sessions[start_idx:end_idx]
def _get_normal_session_block(self) -> pd.DatetimeIndex:
"""Block of 3 sessions with unchanged timings."""
start_idx = len(self.sessions) // 3
end_idx = start_idx + 21
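        # Scan up to 21 consecutive sessions, starting roughly a third of the
        # way into the calendar, for three sessions whose open/close/break
        # times all match (NaT breaks compare as equal).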
for i in range(start_idx, end_idx):
times_1 = self.answers.iloc[i].dt.time
times_2 = self.answers.iloc[i + 1].dt.time
times_3 = self.answers.iloc[i + 2].dt.time
one_and_two_equal = (times_1 == times_2) | (times_1.isna() & times_2.isna())
one_and_three_equal = (times_1 == times_3) | (
times_1.isna() & times_3.isna()
)
if (one_and_two_equal & one_and_three_equal).all():
break
assert i < (end_idx - 1), "Unable to evaluate a normal session block!"
return self.sessions[i : i + 3]
def _get_session_block(
self, from_session_of: pd.DatetimeIndex, to_session_of: pd.DatetimeIndex
) -> pd.DatetimeIndex:
"""Get session block with bounds defined by sessions of given indexes.
Block will start with middle session of `from_session_of`.
Block will run to the nearest subsequent session of `to_session_of`
        (or `self.last_session` if this comes first). Block will end with
        the session that immediately follows this session.
"""
i = len(from_session_of) // 2
start_session = from_session_of[i]
start_idx = self.sessions.get_loc(start_session)
end_idx = start_idx + 1
end_session = self.sessions[end_idx]
while end_session not in to_session_of and end_session != self.last_session:
end_idx += 1
end_session = self.sessions[end_idx]
return self.sessions[start_idx : end_idx + 2]
@functools.lru_cache(maxsize=4)
def _session_blocks(self) -> dict[str, pd.DatetimeIndex]:
blocks = {}
blocks["normal"] = self._get_normal_session_block()
blocks["first_three"] = self.sessions[:3]
blocks["last_three"] = self.sessions[-3:]
# blocks here include where:
# session 1 has at least one different time from session 0
# session 0 has a break and session 1 does not (and vice versa)
sessions_indexes = (
("next_open_earlier", self.sessions_next_open_earlier),
("next_open_later", self.sessions_next_open_later),
("next_close_earlier", self.sessions_next_close_earlier),
("next_close_later", self.sessions_next_close_later),
("next_break_start_earlier", self.sessions_next_break_start_earlier),
("next_break_start_later", self.sessions_next_break_start_later),
("next_break_end_earlier", self.sessions_next_break_end_earlier),
("next_break_end_later", self.sessions_next_break_end_later),
(
"with_break_to_without_break",
self.sessions_with_break_next_session_without_break,
),
(
"without_break_to_with_break",
self.sessions_without_break_next_session_with_break,
),
)
for name, index in sessions_indexes:
if index.empty:
blocks[name] = pd.DatetimeIndex([], tz="UTC")
else:
session = index[0]
blocks[name] = self._create_changing_times_session_block(session)
# blocks here move from session with gap to session without gap and vice versa
if (not self.sessions_with_gap_after.empty) and (
not self.sessions_without_gap_after.empty
):
without_gap_to_with_gap = self._get_session_block(
self.sessions_without_gap_after, self.sessions_with_gap_after
)
with_gap_to_without_gap = self._get_session_block(
self.sessions_with_gap_after, self.sessions_without_gap_after
)
else:
without_gap_to_with_gap = pd.DatetimeIndex([], tz="UTC")
with_gap_to_without_gap = pd.DatetimeIndex([], tz="UTC")
blocks["without_gap_to_with_gap"] = without_gap_to_with_gap
blocks["with_gap_to_without_gap"] = with_gap_to_without_gap
# blocks that adjoin or contain a non_session date
follows_non_session = pd.DatetimeIndex([], tz="UTC")
preceeds_non_session = pd.DatetimeIndex([], tz="UTC")
contains_non_session = pd.DatetimeIndex([], tz="UTC")
if len(self.non_sessions) > 1:
diff = self.non_sessions[1:] - self.non_sessions[:-1]
mask = diff != pd.Timedelta(
1, "D"
) # non_session dates followed by a session
valid_non_sessions = self.non_sessions[:-1][mask]
if len(valid_non_sessions) > 1:
slce = self.sessions.slice_indexer(
valid_non_sessions[0], valid_non_sessions[1]
)
sessions_between_non_sessions = self.sessions[slce]
block_length = min(2, len(sessions_between_non_sessions))
follows_non_session = sessions_between_non_sessions[:block_length]
preceeds_non_session = sessions_between_non_sessions[-block_length:]
# take session before and session after non-session
contains_non_session = self.sessions[slce.stop - 1 : slce.stop + 1]
blocks["follows_non_session"] = follows_non_session
blocks["preceeds_non_session"] = preceeds_non_session
blocks["contains_non_session"] = contains_non_session
return blocks
@property
def session_blocks(self) -> dict[str, pd.DatetimeIndex]:
"""Dictionary of session blocks of a particular behaviour.
A block comprises either a single session or multiple contiguous
sessions.
Keys:
"normal" - three sessions with unchanging timings.
"first_three" - answers' first three sessions.
"last_three" - answers's last three sessions.
"next_open_earlier" - session 1 open is earlier than session 0
open.
"next_open_later" - session 1 open is later than session 0
open.
"next_close_earlier" - session 1 close is earlier than session
0 close.
"next_close_later" - session 1 close is later than session 0
close.
"next_break_start_earlier" - session 1 break_start is earlier
than session 0 break_start.
"next_break_start_later" - session 1 break_start is later than
session 0 break_start.
"next_break_end_earlier" - session 1 break_end is earlier than
session 0 break_end.
"next_break_end_later" - session 1 break_end is later than
session 0 break_end.
"with_break_to_without_break" - session 0 has a break, session
1 does not have a break.
"without_break_to_with_break" - session 0 does not have a
break, session 1 does have a break.
"without_gap_to_with_gap" - session 0 is not followed by a
gap, session -2 is followed by a gap, session -1 is
preceeded by a gap.
"with_gap_to_without_gap" - session 0 is followed by a gap,
session -2 is not followed by a gap, session -1 is not
preceeded by a gap.
"follows_non_session" - one or two sessions where session 0
is preceeded by a date that is a non-session.
"follows_non_session" - one or two sessions where session -1
is followed by a date that is a non-session.
"contains_non_session" = two sessions with at least one
non-session date in between.
If no such session block exists for any key then value will take an
empty DatetimeIndex (UTC).
"""
return self._session_blocks()
def session_block_generator(self) -> abc.Iterator[tuple[str, pd.DatetimeIndex]]:
"""Generator of session blocks of a particular behaviour."""
for name, block in self.session_blocks.items():
if not block.empty:
yield (name, block)
@functools.lru_cache(maxsize=4)
def _session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
d = {}
for name, block in self.session_blocks.items():
if block.empty:
d[name] = pd.DatetimeIndex([], tz="UTC")
continue
d[name] = self.get_sessions_minutes(block[0], len(block))
return d
@property
def session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
"""Trading minutes for each `session_block`.
Key:
Session block name as documented to `session_blocks`.
Value:
Trading minutes of corresponding session block.
"""
return self._session_block_minutes()
@property
def sessions_sample(self) -> pd.DatetimeIndex:
"""Sample of normal and unusual sessions.
Sample comprises set of sessions of all `session_blocks` (see
`session_blocks` doc). In this way sample includes at least one
        sample of every identified unique circumstance.
"""
dtis = list(self.session_blocks.values())
return dtis[0].union_many(dtis[1:])
# non-sessions...
@functools.lru_cache(maxsize=4)
def _non_sessions(self) -> pd.DatetimeIndex:
all_dates = pd.date_range(
start=self.first_session, end=self.last_session, freq="D"
)
return all_dates.difference(self.sessions)
@property
def non_sessions(self) -> pd.DatetimeIndex:
"""Dates (UTC midnight) within answers range that are not sessions."""
return self._non_sessions()
@property
def sessions_range_defined_by_non_sessions(
self,
    ) -> tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex] | None:
"""Range containing sessions although defined with non-sessions.
Returns
-------
        tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex]:
[0] tuple[pd.Timestamp, pd.Timestamp]:
[0] range start as non-session date.
[1] range end as non-session date.
[1] pd.DatetimeIndex:
Sessions in range.
"""
non_sessions = self.non_sessions
if len(non_sessions) <= 1:
return None
limit = len(self.non_sessions) - 2
i = 0
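        # Walk consecutive pairs of non-sessions until a pair at least four
        # days apart is found, i.e. one that brackets at least three sessions.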
start, end = non_sessions[i], non_sessions[i + 1]
while (end - start) < pd.Timedelta(4, "D"):
i += 1
start, end = non_sessions[i], non_sessions[i + 1]
if i == limit:
# Unable to evaluate range from consecutive non-sessions
# that covers >= 3 sessions. Just go with max range...
start, end = non_sessions[0], non_sessions[-1]
slice_start, slice_end = self.sessions.searchsorted((start, end))
return (start, end), self.sessions[slice_start:slice_end]
@property
def non_sessions_run(self) -> pd.DatetimeIndex:
"""Longest run of non_sessions."""
ser = self.sessions.to_series()
diff = ser.shift(-1) - ser
max_diff = diff.max()
if max_diff == pd.Timedelta(1, "D"):
return pd.DatetimeIndex([])
session_before_run = diff[diff == max_diff].index[-1]
run = pd.date_range(
            start=session_before_run + pd.Timedelta(1, "D"),
            # a gap of max_diff days contains (max_diff.days - 1) non-session dates
            periods=max_diff.days - 1,
            freq="D",
        )
        return run
import multiprocessing
import time
import os
import glob
import json
import requests
import logging
import pandas as pd
import numpy as np
import configparser
from functools import partial
from elasticsearch import Elasticsearch
from daggit.core.io.io import Pandas_Dataframe, File_Txt
from daggit.core.io.io import ReadDaggitTask_Folderpath
from daggit.core.base.factory import BaseOperator
from ..operators.contentTaggingUtils import multimodal_text_enrichment
from ..operators.contentTaggingUtils import keyword_extraction_parallel
from ..operators.contentTaggingUtils import get_level_keywords
from ..operators.contentTaggingUtils import jaccard_with_phrase
from ..operators.contentTaggingUtils import save_obj, load_obj, findFiles
from ..operators.contentTaggingUtils import merge_json
from ..operators.contentTaggingUtils import strip_word, get_words
from ..operators.contentTaggingUtils import writeTokafka
from ..operators.contentTaggingUtils import dictionary_merge, get_sorted_list
from ..operators.contentTaggingUtils import custom_listPreProc
from ..operators.contentTaggingUtils import df_feature_check, identify_contentType
from ..operators.contentTaggingUtils import precision_from_dictionary
from ..operators.contentTaggingUtils import agg_precision_from_dictionary
from ..operators.contentTaggingUtils import CustomDateFormater, findDate
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from functools import partial, reduce
from kafka import KafkaProducer, KafkaConsumer, KafkaClient
class ContentmetaCreation(BaseOperator):
@property
def inputs(self):
return {"DS_DATA_HOME": ReadDaggitTask_Folderpath(
self.node.inputs[0]),
"pathTocredentials": ReadDaggitTask_Folderpath(self.node.inputs[1]),
"pathTotriggerJson": ReadDaggitTask_Folderpath(
self.node.inputs[2])
}
@property
def outputs(self):
return {"pathTocontentMeta": File_Txt(
self.node.outputs[0])}
def run(self, copy_to, file_name):
DS_DATA_HOME = self.inputs["DS_DATA_HOME"].read_loc()
pathTocredentials = self.inputs["pathTocredentials"].read_loc()
pathTotriggerJson = self.inputs["pathTotriggerJson"].read_loc()
timestr = time.strftime("%Y%m%d-%H%M%S")
contentmeta_creation_path = os.path.join(
DS_DATA_HOME, timestr, "contentmeta_creation")
with open(pathTotriggerJson) as json_file:
data = json.load(json_file)
# reading credentials config file
config = configparser.ConfigParser(allow_no_value=True)
config.read(pathTocredentials)
api_key = config["postman credentials"]["api_key"]
data["request"]['filters']['lastUpdatedOn']['min'] = findDate(data["request"]['filters']['lastUpdatedOn']['min'], DS_DATA_HOME)
data["request"]['filters']['lastUpdatedOn']['max'] = findDate(data["request"]['filters']['lastUpdatedOn']['max'], DS_DATA_HOME)
url = "https://api.ekstep.in/composite/v3/search"
headers = {
'content-type': "application/json",
'authorization': api_key,
'cache-control': "no-cache",
'postman-token': "<PASSWORD>"
}
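        # Query the composite search API for content last updated inside the
        # requested date window and flatten the result into a dataframe.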
response = requests.request("POST", url, headers=headers, json=data).json()
content_meta = pd.DataFrame(response['result']['content'])
if not os.path.exists(contentmeta_creation_path):
os.makedirs(contentmeta_creation_path)
if "derived_contentType" not in list(content_meta.columns):
content_meta["derived_contentType"] = np.nan
for row_ind, artifact_url in enumerate(content_meta["artifactUrl"]):
try:
content_meta["derived_contentType"][row_ind] = identify_contentType(artifact_url)
except BaseException:
pass
content_meta = content_meta[pd.notnull(content_meta['derived_contentType'])]
content_meta.reset_index(inplace=True, drop=True)
content_meta.to_csv(os.path.join(contentmeta_creation_path, file_name+".csv"))
if copy_to:
try:
content_meta.to_csv(os.path.join(copy_to, file_name)+".csv")
except IOError:
print("Error: Invalid copy_to location")
self.outputs["pathTocontentMeta"].write(os.path.join(contentmeta_creation_path, file_name + ".csv"))
class ContentToText(BaseOperator):
@property
def inputs(self):
return {
"pathTocredentials": ReadDaggitTask_Folderpath(self.node.inputs[0]),
"pathTocontentMeta": File_Txt(self.node.inputs[1])
}
@property
def outputs(self):
return {"timestamp_folder": File_Txt(
self.node.outputs[0])}
def run(
self,
range_start,
range_end,
num_of_processes,
content_type):
pathTocredentials = self.inputs["pathTocredentials"].read_loc()
path_to_content_meta = self.inputs["pathTocontentMeta"].read()
content_meta = pd.read_csv(path_to_content_meta)
print(self.outputs["timestamp_folder"].location_specify())
oldwd = os.getcwd()
contentMeta_mandatory_fields = [
'artifactUrl',
'derived_contentType',
'downloadUrl',
'gradeLevel',
'identifier',
'keywords',
'language',
'subject']
assert df_feature_check(content_meta, contentMeta_mandatory_fields)
path_to_timestamp_folder = os.path.split(os.path.split(path_to_content_meta)[0])[0]
timestr = os.path.split(path_to_timestamp_folder)[1]
content_to_text_path = os.path.join(
path_to_timestamp_folder, "content_to_text")
# content dump:
if not os.path.exists(content_to_text_path):
os.makedirs(content_to_text_path)
print("content_to_text: ", content_to_text_path)
logging.info("CTT_CONTENT_TO_TEXT_START")
# read content meta:
if content_meta.columns[0] == "0":
content_meta = content_meta.drop("0", axis=1)
# check for duplicates in the meta
if list(content_meta[content_meta.duplicated(
['artifactUrl'], keep=False)]["artifactUrl"]) != []:
content_meta.drop_duplicates(subset="artifactUrl", inplace=True)
content_meta.reset_index(drop=True, inplace=True)
# dropna from artifactUrl feature and reset the index:
content_meta.dropna(subset=["artifactUrl"], inplace=True)
content_meta.reset_index(drop=True, inplace=True)
# time the run
start = time.time()
logging.info(
'Contents detected in the content meta: ' + str(len(content_meta)))
logging.info(
"----Running Content_to_Text for contents from {0} to {1}:".format(
range_start, range_end))
logging.info("time started: {0}".format(start))
if range_start == "START":
range_start = 0
if range_end == "END":
range_end = len(content_meta)-1
logging.info(
"CTT_Config: content_meta from {0} to {1} created in: {2}".format(
range_start, range_end, content_to_text_path))
print("Number of processes: ", num_of_processes)
# Reading from a config file
status = False
if os.path.exists(pathTocredentials):
try:
config = configparser.ConfigParser(allow_no_value=True)
config.read(pathTocredentials)
status = True
try:
path_to_googlecred = config['google application credentials']["GOOGLE_APPLICATION_CREDENTIALS"]
with open(path_to_googlecred, "r") as cred_json:
GOOGLE_APPLICATION_CREDENTIALS = cred_json.read()
except BaseException:
logging.info("Invalid GOOGLE_APPLICATION_CREDENTIALS in config.")
logging.info("***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable")
status = False
except BaseException:
logging.info("Invalid config file")
logging.info("***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable")
if not status:
try:
GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
with open(GOOGLE_APPLICATION_CREDENTIALS, "r") as f:
GOOGLE_APPLICATION_CREDENTIALS = f.read()
except BaseException:
GOOGLE_APPLICATION_CREDENTIALS = ""
logging.info("Not a valid google credential")
result = [
multimodal_text_enrichment(
i,
timestr,
content_meta,
content_type,
content_to_text_path,
GOOGLE_APPLICATION_CREDENTIALS) for i in range(
range_start,
range_end)]
print(result)
os.chdir(oldwd)
print("Current directory c2t: ", os.getcwd())
print("timestamp_folder path:", path_to_timestamp_folder)
self.outputs["timestamp_folder"].write(path_to_timestamp_folder)
class ContentToTextRead(BaseOperator):
@property
def inputs(self):
return {
"DS_DATA_HOME": ReadDaggitTask_Folderpath(self.node.inputs[0]),
"pathTocredentials": ReadDaggitTask_Folderpath(self.node.inputs[1]),
"localpathTocontentMeta": Pandas_Dataframe(self.node.inputs[2])
}
@property
def outputs(self):
return {"timestamp_folder": File_Txt(
self.node.outputs[0])}
def run(
self,
range_start,
range_end,
num_of_processes,
subset_contentMeta_by,
content_type):
DS_DATA_HOME = self.inputs["DS_DATA_HOME"].read_loc()
pathTocredentials = self.inputs["pathTocredentials"].read_loc()
content_meta = self.inputs["localpathTocontentMeta"].read()
if "derived_contentType" not in list(content_meta.columns):
content_meta["derived_contentType"] = np.nan
for row_ind, artifact_url in enumerate(content_meta["artifactUrl"]):
try:
content_meta["derived_contentType"][row_ind] = identify_contentType(artifact_url)
except BaseException:
pass
content_meta = content_meta[pd.notnull(content_meta['derived_contentType'])]
content_meta.reset_index(inplace=True, drop=True)
print(self.outputs["timestamp_folder"].location_specify())
oldwd = os.getcwd()
contentMeta_mandatory_fields = [
'artifactUrl',
'derived_contentType',
'downloadUrl',
'gradeLevel',
'identifier',
'keywords',
'language',
'subject']
assert df_feature_check(content_meta, contentMeta_mandatory_fields)
timestr = time.strftime("%Y%m%d-%H%M%S")
path_to_timestamp_folder = os.path.join(DS_DATA_HOME, timestr)
content_to_text_path = os.path.join(
path_to_timestamp_folder, "content_to_text")
# content dump:
if not os.path.exists(content_to_text_path):
os.makedirs(content_to_text_path)
print("content_to_text: ", content_to_text_path)
logging.info("CTT_CONTENT_TO_TEXT_START")
# read content meta:
if content_meta.columns[0] == "0":
content_meta = content_meta.drop("0", axis=1)
# check for duplicates in the meta
if list(content_meta[content_meta.duplicated(
['artifactUrl'], keep=False)]["artifactUrl"]) != []:
content_meta.drop_duplicates(subset="artifactUrl", inplace=True)
content_meta.reset_index(drop=True, inplace=True)
# dropna from artifactUrl feature and reset the index:
content_meta.dropna(subset=["artifactUrl"], inplace=True)
content_meta.reset_index(drop=True, inplace=True)
# time the run
start = time.time()
logging.info(
'Contents detected in the content meta: ' + str(len(content_meta)))
logging.info(
"----Running Content_to_Text for contents from {0} to {1}:".format(
range_start, range_end))
logging.info("time started: {0}".format(start))
# subset contentMeta:
content_meta = content_meta[content_meta["derived_contentType"].isin(
subset_contentMeta_by.split(", "))]
content_meta.reset_index(drop=True, inplace=True)
if range_start == "START":
range_start = 0
if range_end == "END":
range_end = len(content_meta)-1
logging.info(
"CTT_Config: content_meta from {0} to {1} created in: {2}".format(
range_start, range_end, content_to_text_path))
print("Number of processes: ", num_of_processes)
status = False
if os.path.exists(pathTocredentials):
try:
config = configparser.ConfigParser(allow_no_value=True)
config.read(pathTocredentials)
status = True
try:
path_to_googlecred = config['google application credentials']["GOOGLE_APPLICATION_CREDENTIALS"]
with open(path_to_googlecred, "r") as cred_json:
GOOGLE_APPLICATION_CREDENTIALS = cred_json.read()
except BaseException:
logging.info("Invalid GOOGLE_APPLICATION_CREDENTIALS in config.")
logging.info("***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable")
status = False
except BaseException:
logging.info("Invalid config file")
logging.info("***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable")
if not status:
try:
GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
with open(GOOGLE_APPLICATION_CREDENTIALS, "r") as f:
GOOGLE_APPLICATION_CREDENTIALS = f.read()
except BaseException:
GOOGLE_APPLICATION_CREDENTIALS = ""
logging.info("Not a valid google credential")
result = [
multimodal_text_enrichment(
i,
timestr,
content_meta,
content_type,
content_to_text_path,
GOOGLE_APPLICATION_CREDENTIALS) for i in range(
range_start,
range_end,)]
print(result)
os.chdir(oldwd)
print("Current directory c2t: ", os.getcwd())
print("timestamp_folder path:", path_to_timestamp_folder)
self.outputs["timestamp_folder"].write(path_to_timestamp_folder)
class KeywordExtraction(BaseOperator):
@property
def inputs(self):
return {"pathTotaxonomy": Pandas_Dataframe(self.node.inputs[0]),
"categoryLookup": ReadDaggitTask_Folderpath(self.node.inputs[1]),
"timestamp_folder": File_Txt(self.node.inputs[2]),
"pathTocredentials": ReadDaggitTask_Folderpath(self.node.inputs[3])
}
@property
def outputs(self):
return {"path_to_contentKeywords": File_Txt(self.node.outputs[0])
}
def run(self, extract_keywords, filter_criteria, update_corpus, filter_score_val, num_keywords):
assert extract_keywords == "tagme" or extract_keywords == "text_token"
assert filter_criteria == "none" or filter_criteria == "taxonomy" or filter_criteria == "dbpedia"
pathTocredentials = self.inputs["pathTocredentials"].read_loc()
config = configparser.ConfigParser(allow_no_value=True)
config.read(pathTocredentials)
cache_cred=dict()
cache_cred['host']=config["redis"]["host"]
cache_cred['port']=config["redis"]["port"]
cache_cred['password']=config["redis"]["password"]
taxonomy = self.inputs["pathTotaxonomy"].read()
path_to_category_lookup = self.inputs["categoryLookup"].read_loc()
timestamp_folder = self.inputs["timestamp_folder"].read()
timestr = os.path.split(timestamp_folder)[1]
print("****timestamp folder:", timestamp_folder)
print("****categorylookup yaml:", path_to_category_lookup)
content_to_text_path = os.path.join(timestamp_folder, "content_to_text")
print("content_to_text path:", content_to_text_path)
if not os.path.exists(content_to_text_path):
logging.info("No such directory as: ", content_to_text_path)
else:
logging.info('------Transcripts to keywords extraction-----')
pool = multiprocessing.Pool(processes=4)
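            # Bind the fixed arguments up front so that pool.map only has to
            # vary the per-content folder name.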
keywordExtraction_partial = partial(
keyword_extraction_parallel,
timestr=timestr,
content_to_text_path=content_to_text_path,
taxonomy=taxonomy,
extract_keywords=extract_keywords,
filter_criteria=filter_criteria,
cache_cred=cache_cred,
path_to_category_lookup=path_to_category_lookup,
update_corpus=update_corpus,
filter_score_val=filter_score_val,
num_keywords=num_keywords)
results = pool.map(
keywordExtraction_partial, [
dir for dir in os.listdir(content_to_text_path)])
print(results)
print("path to content keywords:", max(glob.glob(
os.path.join(timestamp_folder, 'content_to_text'))))
c2t_path = os.path.join(timestamp_folder, 'content_to_text')
self.outputs["path_to_contentKeywords"].write(max(glob.glob(
c2t_path), key=os.path.getmtime))
pool.close()
pool.join()
class WriteToElasticSearch(BaseOperator):
@property
def inputs(self):
return {"timestamp_folder": File_Txt(self.node.inputs[0])
}
def run(self):
timestamp_folder = self.inputs["timestamp_folder"].read()
timestr = os.path.split(timestamp_folder)[1]
epoch_time = time.mktime(time.strptime(timestr, "%Y%m%d-%H%M%S"))
es_request = requests.get('http://localhost:9200')
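        # Each sub-folder of content_to_text corresponds to one content id;
        # its ML json outputs are merged and indexed into Elasticsearch.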
content_to_textpath = os.path.join(timestamp_folder, "content_to_text")
cid_name = [i for i in os.listdir(content_to_textpath) if i not in ['.DS_Store']]
for cid in cid_name:
merge_json_list = []
json_file = findFiles(os.path.join(content_to_textpath, cid), ["json"])
logging.info("json_files are: ", json_file)
for file in json_file:
if os.path.split(file)[1] in [
"ML_keyword_info.json", "ML_content_info.json"]:
merge_json_list.append(file)
autotagging_json = merge_json(merge_json_list)
autotagging_json.update({"ETS": epoch_time})
elastic_search = Elasticsearch(
[{'host': 'es', 'port': 9200}]) #change it to localhost
if es_request.status_code == 200:
elastic_search.index(
index="auto_tagging",
doc_type='content_id_info',
id=cid,
body=autotagging_json)
class WriteToKafkaTopic(BaseOperator):
@property
def inputs(self):
return {"path_to_contentKeywords": File_Txt(self.node.inputs[0])
}
def run(self, kafka_broker, kafkaTopic_writeTo):
path_to_contentKeywords = self.inputs["path_to_contentKeywords"].read()
timestamp_folder = os.path.split(path_to_contentKeywords)[0]
timestr = os.path.split(timestamp_folder)[1]
epoch_time = time.mktime(time.strptime(timestr, "%Y%m%d-%H%M%S"))
content_to_textpath = os.path.join(timestamp_folder, "content_to_text")
cid_name = [i for i in os.listdir(content_to_textpath) if i not in ['.DS_Store']]
for cid in cid_name:
merge_json_list = []
json_file = findFiles(os.path.join(content_to_textpath, cid), ["json"])
for file in json_file:
if os.path.split(file)[1] in [
"ML_keyword_info.json", "ML_content_info.json"]:
merge_json_list.append(file)
ignore_list = ["ets"]
dict_list = []
for file in merge_json_list:
with open(file, "r", encoding="UTF-8") as info:
new_json = json.load(info)
[new_json.pop(ignore) for ignore in ignore_list if ignore in new_json.keys()]
dict_list.append(new_json)
# merge the nested jsons:-
autotagging_json = reduce(merge_json, dict_list)
autotagging_json.update({"ets": epoch_time})
with open(os.path.join(timestamp_folder, "content_to_text", cid, "autoTagging_json.json"), "w+") as main_json:
json.dump(autotagging_json, main_json, sort_keys=True, indent=4)
# check if connection established.
server_topics = writeTokafka(kafka_broker)
if server_topics:
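            # Publish only if the target topic already exists on the broker.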
for i in server_topics:
if i == kafkaTopic_writeTo:
producer = KafkaProducer(bootstrap_servers=kafka_broker, value_serializer=lambda v: json.dumps(v, indent=4).encode('utf-8'))
                    # serialize the json message before sending
event_send = producer.send(kafkaTopic_writeTo, autotagging_json)
result = event_send.get(timeout=60)
class CorpusCreation(BaseOperator):
@property
def inputs(self):
return {"pathTotaxonomy": Pandas_Dataframe(self.node.inputs[0]),
"path_to_contentKeywords": File_Txt(self.node.inputs[1])
}
@property # how to write to a folder?
def outputs(self):
return {"root_path": File_Txt(self.node.outputs[0]),
"path_to_corpus": File_Txt(self.node.outputs[1])
}
def run(self, keyword_folder_name, update_corpus, word_preprocess):
keyword_folder_ls = ["tagme_none", "txt_token_none", "tagme_taxonomy", "tagme_dbpedia"]
if keyword_folder_name in keyword_folder_ls:
taxonomy = self.inputs["pathTotaxonomy"].read()
path_to_contentKeys = self.inputs["path_to_contentKeywords"].read()
keyword_folder = os.path.split(path_to_contentKeys)[0]
corpus_creation_folder = os.path.join(keyword_folder, "corpus_creation")
if not os.path.exists(corpus_creation_folder):
os.makedirs(corpus_creation_folder)
root_path = os.path.split(os.path.split(path_to_contentKeys)[0])[0]
corpus_loc = os.path.join(root_path, "corpus")
if not os.path.exists(corpus_loc):
os.makedirs(corpus_loc)
corpus_csv_loc = os.path.join(corpus_loc, "corpus.csv")
vocabulary_loc = os.path.join(corpus_creation_folder, "vocab")
cids = os.listdir(path_to_contentKeys)
content_keywords_list = []
for content in cids:
path_to_keywords = os.path.join(
path_to_contentKeys,
content,
"keywords",
keyword_folder_name,
"keywords.csv")
if not os.path.exists(path_to_keywords):
extracted_keys = []
else:
extracted_keyword_df = pd.read_csv(
path_to_keywords, keep_default_na=False)
extracted_keys = list(extracted_keyword_df['keyword'])
content_keywords_list.append(extracted_keys)
# print("content_keywords_list: ", content_keywords_list)
content_keywords_list = custom_listPreProc(
content_keywords_list,
word_preprocess["method"],
word_preprocess["delimitter"])
taxonomy['Keywords'] = [
get_words(i) for i in list(
taxonomy['Keywords'])]
taxonomy_keywords = [
x for x in list(
taxonomy['Keywords']) if str(x) != 'nan']
taxonomy_keywords = custom_listPreProc(
taxonomy_keywords,
word_preprocess["method"],
word_preprocess["delimitter"])
if os.path.exists(corpus_csv_loc):
corpus = list(pd.read_csv(corpus_csv_loc)['Words'])
else:
corpus = []
all_words = list(set(
[i for item1 in taxonomy_keywords for i in item1] +
[j for item2 in content_keywords_list for j in item2] +
corpus))
print("number of unique words: " + str(len(set(all_words))))
vocabulary = dict()
for i in range(len(all_words)):
vocabulary[all_words[i]] = i
save_obj(vocabulary, vocabulary_loc)
if update_corpus:
pd.DataFrame({'Words': all_words}).to_csv(corpus_csv_loc)
self.outputs["root_path"].write(
os.path.split(path_to_contentKeys)[0])
self.outputs["path_to_corpus"].write(corpus_creation_folder)
else:
logging.info(" {0} is unknown name".format("keyword_folder_name"))
class ContentTaxonomyScoring(BaseOperator):
@property
def inputs(self):
return {"pathTocontentMeta": File_Txt(self.node.inputs[0]),
"pathTotaxonomy": Pandas_Dataframe(self.node.inputs[1]),
"root_path": File_Txt(self.node.inputs[2]),
"path_to_corpus": File_Txt(self.node.inputs[3])
}
@property # how to write to a folder?
def outputs(self):
return {"path_to_timestampFolder": File_Txt(self.node.outputs[0]),
"path_to_distMeasure": File_Txt(self.node.outputs[1]),
"path_to_domain_level": File_Txt(self.node.outputs[2])
}
def run(
self,
keyword_extract_filter_by,
phrase_split,
min_words,
distanceMeasure,
embedding_method,
delimitter,
filter_by):
contentmeta_filterby_column = filter_by["contentMeta"]["column"]
contentmeta_level = filter_by["contentMeta"]["alignment_depth"]
taxonomy_filterby_column = filter_by["taxonomy"]["column"]
taxonomy_level = filter_by["taxonomy"]["alignment_depth"]
content_meta_loc = self.inputs["pathTocontentMeta"].read()
taxonomy = self.inputs["pathTotaxonomy"].read()
root_path = self.inputs["root_path"].read()
corpus_folder = self.inputs["path_to_corpus"].read()
content_meta = pd.read_csv(content_meta_loc)
# check for the presence of corpus folder:
if not os.path.exists(corpus_folder):
logging.info("No corpus folder created")
else:
vocab_loc = corpus_folder + "/vocab"
vocabulary = load_obj(vocab_loc)
mapping_folder = os.path.join(root_path, "content_taxonomy_scoring")
if not os.path.exists(mapping_folder):
os.makedirs(mapping_folder)
print("***mapping folder:", mapping_folder)
if len(os.listdir(mapping_folder)) == 0:
output = os.path.join(mapping_folder, "Run_0")
os.makedirs(output)
else:
path_to_subfolders = [
os.path.join(
mapping_folder,
f) for f in os.listdir(mapping_folder) if os.path.exists(
os.path.join(
mapping_folder,
f))]
create_output = [
os.path.join(
mapping_folder,
"Run_{0}".format(
i + 1)) for i,
_ in enumerate(path_to_subfolders)]
os.makedirs(create_output[-1])
output = create_output[-1]
print("***output:", output)
DELIMITTER = delimitter
# cleaning taxonomy KEYWORDS
taxonomy['Keywords'] = [get_words(item) for item in list(
taxonomy['Keywords'])] # get words from string of words
taxonomy_keywords = [
x for x in list(
taxonomy['Keywords']) if str(x) != 'nan']
taxonomy_keywords = custom_listPreProc(
taxonomy_keywords, 'stem_lem', DELIMITTER)
# print("****Taxonomy_df keywords****: ", taxonomy["Keywords"])
logging.info('Number of Content detected: ' + str(len(content_meta)))
print("Number of content detected:", str(len(content_meta)))
content_keywords_list = []
logging.info("******Content keyword creation for content meta*******")
path_to_corpus = root_path + "/content_to_text"
print("***path_to_corpus: ", path_to_corpus)
if not os.path.exists(path_to_corpus):
print("No such directory as path_to_corpus:", path_to_corpus)
else:
print(
"list of folders in path_to_corpus: ",
os.listdir(path_to_corpus))
for content in content_meta['identifier']:
if not os.path.exists(
os.path.join(
path_to_corpus,
content,
"keywords",
keyword_extract_filter_by,
"keywords.csv")):
extracted_keys = []
else:
extracted_keyword_df = pd.read_csv(
os.path.join(
path_to_corpus,
content,
"keywords",
keyword_extract_filter_by,
"keywords.csv"),
keep_default_na=False)
print(
"keywords {0} for id {1}:".format(
list(
extracted_keyword_df['keyword']),
content))
extracted_keys = list(extracted_keyword_df['keyword'])
content_keywords_list.append(extracted_keys)
content_keywords_list = custom_listPreProc(
content_keywords_list,
'stem_lem',
DELIMITTER)
content_meta['Content_keywords'] = content_keywords_list
content_meta = content_meta.iloc[[i for i, e in enumerate(
content_meta['Content_keywords']) if (e != []) and len(e) > min_words]]
content_meta = content_meta.reset_index(drop=True)
print(
"contentmeta domains:", set(
content_meta[contentmeta_filterby_column]))
print("taxonomy domains:", set(taxonomy[taxonomy_filterby_column]))
domains = list(set(
content_meta[contentmeta_filterby_column]) & set(
taxonomy[taxonomy_filterby_column]))
print()
print("Domains: ", domains)
# empty domain
if not domains:
logging.info("No Subjects common")
logging.info(
"Aggregated on level: {0}".format(taxonomy_level))
logging.info("------------------------------------------")
content_meta_sub = content_meta[contentmeta_filterby_column]
logging.info("***Skipping Content id: {0}".format(list(
content_meta[~content_meta_sub.isin(domains)]['identifier'])))
dist_all = dict()
domain_level_all = dict()
for i in domains:
subject = [i]
logging.info("Running for subject: {0}".format(subject))
domain_content_df = content_meta.loc[content_meta_sub.isin(
subject)] # filter arg: contentmeta column: subject
domain_content_df.index = domain_content_df['identifier']
tax_sub = taxonomy[taxonomy_filterby_column]
domain_taxonomy_df = taxonomy.loc[tax_sub.isin(
subject)] # filter arg: taxonomy column: Subject
level_domain_taxonomy_df = get_level_keywords(
domain_taxonomy_df, taxonomy_level)
if (distanceMeasure == 'jaccard' or distanceMeasure ==
'match_percentage') and embedding_method == "none":
level_domain_taxonomy_df.index = level_domain_taxonomy_df[taxonomy_level]
logging.info("Number of Content in domain: {0} ".format(
str(len(domain_content_df))))
logging.info("Number of Topic in domain: {0}".format(
str(len(level_domain_taxonomy_df))))
dist_df = pd.DataFrame(
np.zeros(
(len(domain_content_df),
len(level_domain_taxonomy_df))),
index=domain_content_df.index,
columns=level_domain_taxonomy_df.index)
if len(level_domain_taxonomy_df) > 1:
if phrase_split is True:
                            # TODO: vectorise this nested loop (optimise the pairwise scoring)
for row_ind in range(dist_df.shape[0]):
for col_ind in range(dist_df.shape[1]):
content_keywords = [strip_word(i, DELIMITTER) for i in domain_content_df['Content_keywords'][row_ind]]
taxonomy_keywords = [strip_word(i, DELIMITTER) for i in level_domain_taxonomy_df['Keywords'][col_ind]]
jaccard_index = jaccard_with_phrase(
content_keywords, taxonomy_keywords)
dist_df.iloc[row_ind,
col_ind] = jaccard_index[distanceMeasure]
mapped_df = dist_df.T.apply(
func=lambda x:
get_sorted_list(x, 0), axis=0).T
mapped_df.columns = range(
1, mapped_df.shape[1] + 1)
domain_level_all['& '.join(subject)] = mapped_df
dist_all['& '.join(subject)] = dist_df
if (distanceMeasure == 'cosine'):
if len(level_domain_taxonomy_df) > 1:
taxonomy_documents = [
" ".join(doc) for doc in list(
level_domain_taxonomy_df['Keywords'])]
content_documents = [
" ".join(doc) for doc in list(
domain_content_df['Content_keywords'])]
if embedding_method == 'tfidf':
vectorizer = TfidfVectorizer(vocabulary=vocabulary)
elif embedding_method == 'onehot':
vectorizer = CountVectorizer(vocabulary=vocabulary)
else:
print("unknown embedding_method")
print("selecting default sklearn.CountVectorizer")
vectorizer = CountVectorizer(vocabulary=vocabulary)
vectorizer.fit(list(vocabulary.keys()))
taxonomy_freq_df = vectorizer.transform(
taxonomy_documents)
taxonomy_freq_df = pd.DataFrame(
taxonomy_freq_df.todense(),
index=list(
level_domain_taxonomy_df[taxonomy_level]),
columns=vectorizer.get_feature_names())
content_freq_df = vectorizer.transform(
content_documents)
content_freq_df = pd.DataFrame(content_freq_df.todense(),
index=list(
domain_content_df.index),
columns=vectorizer.get_feature_names())
dist_df = pd.DataFrame(
cosine_similarity(
content_freq_df, taxonomy_freq_df), index=list(
domain_content_df.index), columns=list(
level_domain_taxonomy_df[taxonomy_level]))
mapped_df = dist_df.T.apply(
func=lambda x: get_sorted_list(x, 0), axis=0).T
mapped_df.columns = range(1, mapped_df.shape[1] + 1)
domain_level_all['& '.join(subject)] = mapped_df
dist_all['& '.join(subject)] = dist_df
if not os.path.exists(output):
os.makedirs(output)
save_obj(dist_all, os.path.join(output, "dist_all"))
save_obj(
domain_level_all,
os.path.join(
output,
"domain_level_all"))
cts_output_dict = {
'content_taxonomy_scoring': [
{
'distanceMeasure': distanceMeasure,
'Common domains for Taxonomy and ContentMeta': domains,
'keyword_extract_filter_by': keyword_extract_filter_by,
'embedding_method': embedding_method,
'filter_taxonomy': taxonomy_filterby_column,
'filter_meta': contentmeta_filterby_column,
'taxonomy_alignment_depth': taxonomy_level,
'content_meta_level': contentmeta_level,
'path_to_distanceMeasure': os.path.join(
output,
"dist_all.pkl"),
'path_to_domain_level': os.path.join(
output,
"domain_level_all.pkl")}]}
with open(os.path.join(output, "ScoringInfo.json"), "w") as info:
# json.dump writes directly to the file handle (it returns None)
json.dump(
cts_output_dict,
info,
sort_keys=True,
indent=4)
self.outputs["path_to_timestampFolder"].write(root_path)
self.outputs["path_to_distMeasure"].write(
os.path.join(output, "dist_all.pkl"))
self.outputs["path_to_domain_level"].write(
os.path.join(output, "domain_level_all.pkl"))
class PredictTag(BaseOperator):
@property
def inputs(self):
return {"path_to_timestampFolder": File_Txt(self.node.inputs[0])
}
@property # how to write to a folder?
def outputs(self):
return {"path_to_predictedTags": File_Txt(self.node.outputs[0])
}
def run(self, window):
timestamp_folder = self.inputs["path_to_timestampFolder"].read()
logging.info("PT_START")
output = os.path.join(timestamp_folder, "content_taxonomy_scoring")
print("output:", output)
prediction_folder = os.path.join(timestamp_folder, "prediction")
logging.info("PT_PRED_FOLDER_CREATED: {0}".format(prediction_folder))
logging.info("PT_WINDOW: {0}". format(window))
dist_dict_list = [
load_obj(
os.path.join(
output,
path_to_runFolder,
"domain_level_all")) for path_to_runFolder in os.listdir(output) if os.path.exists(
os.path.join(
output,
path_to_runFolder,
"domain_level_all.pkl"))]
dist_dict = dictionary_merge(dist_dict_list)
print("dist_dict:", dist_dict)
if not dist_dict:
logging.info("Dictionary list is empty. No tags to predict")
else:
if not os.path.exists(prediction_folder):
os.makedirs(prediction_folder)
pred_df = | pd.DataFrame() | pandas.DataFrame |
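# --------------------------------------------------------------------------
# Hedged sketch (not part of the pipeline): dictionary_merge, load_obj and
# save_obj are module helpers that are not shown in this excerpt. From the
# way run() uses them above, load_obj/save_obj appear to be thin pickle
# wrappers and dictionary_merge appears to combine a list of
# {subject: DataFrame} dicts produced by separate scoring runs. The stand-in
# below only illustrates that assumed behaviour; the real helpers may differ.
def _demo_dictionary_merge(dict_list):
    import pandas as pd
    merged = dict()
    for mapping in dict_list:
        for subject, frame in mapping.items():
            if subject in merged:
                # later runs for the same subject are appended row-wise
                merged[subject] = pd.concat([merged[subject], frame])
            else:
                merged[subject] = frame
    return merged
# --------------------------------------------------------------------------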
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertTrue(result['index'].dtype == 'M8[ns]')
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to Pacific
expected_index = pd.period_range(start=start, end=end, freq='D') - 1
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":]  # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
        tm.assert_isinstance(result, DatetimeIndex)
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
    assert inference.is_list_like(obj) == expected
import numpy as np
import pandas as pd
from collections import defaultdict
import re
import csv
from bs4 import BeautifulSoup
import sys
import os
import multiprocessing as mp
os.environ['KERAS_BACKEND']='theano'
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Embedding
from keras.layers import Dense, Input, Flatten
from keras.layers import merge
from keras.layers import Conv1D, MaxPooling1D, Dropout, LSTM, GRU, Bidirectional
from keras.models import Model
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras import initializers
import preprocessor as p
from nltk import tokenize
## Configuration used for data cleaning and word-embedding vector creation
p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.NUMBER, p.OPT.SMILEY)
MAX_SEQUENCE_LENGTH = 10000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.25
# np.random.seed(12)
server="/local/data/"
### Load sociolinguistic features data, which consists of LIWC, Empath, and other linguistic features.
data1 = pd.read_csv(server + "features/Empath_features1.csv")
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 12:26:22 2018
@author: Jesus
"""
import pandas as pd
import geopandas as gpd
import datetime
import matplotlib
import matplotlib.pyplot as plt
import os
import imageio
from natsort import natsorted
import urllib.request as urllib2
from bs4 import BeautifulSoup
def time_frame(df, time_unit):
"""
Args:
df (pandas dataframe)
time_unit (datetime object)
Returns:
A list of ordered unique dates
"""
df[time_unit] = pd.to_datetime(df[time_unit])
dates = [date for date in list(pd.unique(df[time_unit]))]
return dates
def upsample(dates):
"""
Args:
dates (List) - List of ordered unique dates
Returns:
A list of DataFrames
"""
filename = 'historical_shape.shp'
his_date = gpd.read_file(filename)
today = datetime.date.today()
his_date = his_date[his_date['ccode'] >=1]
his_date['date_s'] = pd.to_datetime(his_date['date_s'])
his_date['date_e'] = pd.to_datetime(his_date['date_e'])
his_date.loc[his_date.date_e == '2016-06-30 00:00:00', 'date_e'] = today
pd.options.mode.chained_assignment = None
df_list = []
#num = 0
for i, date in enumerate(dates):
up = his_date[(his_date['date_s']<= date) & (his_date['date_e']>= date)]
#his_date = his_date
up['date'] = date
up['num'] = i
df_list.append(up)
return df_list
def merge_all(df, df_list):
"""
Args:
df (pandas dataframe)
df_list (list) - list of dataframes
Returns:
An upsampled dataframe
"""
df_list = gpd.GeoDataFrame(pd.concat(df_list, ignore_index=False))
df = df_list.merge(df, on=['ccode', 'date'], how='right')
return df
def make_poly(df, time_unit):
"""
Args:
df (pandas dataframe)
time_unit (datetime object)
Returns:
GeoDataFrame merged
"""
dates = time_frame(df, time_unit)
df_list = upsample(dates)
df = merge_all(df, df_list)
return df
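# --- Hypothetical usage sketch (not part of the original script) ---
# The file name 'example_events.csv' and its 'ccode'/'date' columns are
# assumptions for illustration only; they show how time_frame, upsample and
# merge_all compose inside make_poly.
def _example_make_poly(csv_path='example_events.csv'):
    # Load an event table with 'ccode' and 'date' columns, then attach the
    # historical country polygon that was in force on each event date.
    events = pd.read_csv(csv_path)
    return make_poly(events, 'date')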
def collapse(df, unit):
"""
Args:
df (pandas dataframe)
unit (unit to collapse by) - can be 'month' or 'year'
Returns:
GeoDataFrame merged
"""
if unit == "month":
df['day'] = 1
df['date']= pd.to_datetime(df['year']*10000+df['month']*100+df['day'],format='%Y%m%d')
df['government'] = df['government'].astype('category')
df['regime'] = df['government'].cat.codes
df = df.sort_values(by=['ccode', 'date', 'pt_attempt'])
subs = df.drop_duplicates(subset=['ccode', 'date'], keep='last')
df = subs
return df
elif unit == "year":
df = df[df.month == 1]
#df = df.groupby(['ccode', 'year'], as_index=False).sum()
#df = df.set_index(['ccode', 'year']).groupby(level=0, as_index=False).cumsum()
#df = df.reset_index()
df['day'] = 1
#df['month'] = 1
        df['date'] = pd.to_datetime(df[['year', 'month', 'day']])
import numpy as np
import scipy.stats as sp
import os
import pandas as pd
import h5py
import bokeh.io as bkio
import bokeh.layouts as blay
import bokeh.models as bmod
import bokeh.plotting as bplt
from bokeh.palettes import Category20 as palette
from bokeh.palettes import Category20b as paletteb
import plot_results as plt_res
import frequency_analysis as fan
colrs = palette[20] + paletteb[20] + palette[20] + paletteb[20]
def save_data_to_hdf5(data_folder_path, hdf5_file_path):
d_paths = [f_file for f_file in os.listdir(data_folder_path) if f_file.endswith('axgt')]
with pd.HDFStore(hdf5_file_path) as h5file:
for d_path in d_paths:
f_path = os.path.join(data_folder_path, d_path)
d_arr = np.loadtxt(f_path, dtype={'names': ('time', 'Potential', 'Im', 'ACh'),
'formats': ('float', 'float', 'float', 'float')},
skiprows=1)
d_df = pd.DataFrame(d_arr)
d_name = d_path.split('.')[0].replace(' ', '_').replace('(', '').replace(')', '')
print(d_name)
h5file.put('{}/data'.format(d_name), d_df, format='table', data_columns=True)
print(h5file)
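# --- Hypothetical usage sketch (not part of the original script) ---
# The folder and file names are assumptions; they only illustrate the intended
# flow: convert the .axgt exports to HDF5 once, then plot directly from it.
def _example_build_and_plot(data_dir='axograph_exports', h5_path='recordings.h5'):
    save_data_to_hdf5(data_dir, h5_path)       # one-time conversion of raw traces
    potential_fig = plot_spike_data(h5_path)   # membrane potential vs time
    bkio.show(potential_fig)                   # render in the browser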
def plot_spike_data(in_h5_file, exclude_list=[]):
sp_fig = bplt.figure(title='Membrane Potential vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'potential (mV)'
print('Plotting Potential values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], 1000.0*h5_data[f_name]['Potential'],
line_width=3, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_selected_ach_data(in_h5_file, select_list):
sp_fig = bplt.figure(title='Acetylcholine vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'potential (mV)'
print('Plotting Potential values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name in select_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], h5_data[f_name]['ACh'],
line_width=1, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_selected_spike_data(in_h5_file, select_list):
sp_fig = bplt.figure(title='Membrane Potential vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'potential (mV)'
print('Plotting Potential values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name in select_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], 1000.0*h5_data[f_name]['Potential'],
line_width=1, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_spike_raster(in_h5_file, exclude_list=[]):
rast_fig = bplt.figure(title='Spike Raster vs Time')
rast_fig.xaxis.axis_label = 'time (sec)'
print('Plotting Spike Raster values from {}'.format(in_h5_file))
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 1
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_times' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
y_vals = f_i*np.ones(h5_data[f_name].shape)
if leg_name not in exclude_list:
my_circles.append(rast_fig.circle(h5_data[f_name], y_vals,
line_width=3, color=colrs[f_i-1])
)
legend_items.append((leg_name, [my_circles[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
rast_fig.add_layout(my_legend, 'right')
return rast_fig
def plot_instantaneous_spike_rate(in_h5_file, exclude_list=[], t_start=0):
isr_fig = bplt.figure(title='Instantaneous Spike Rate vs Time')
isr_fig.xaxis.axis_label = 'time (sec)'
isr_fig.yaxis.axis_label = 'spike rate (Hz)'
print('Plotting instantaneous spike rate from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
my_lines.append(isr_fig.line(h5_data[f_name]['time'], h5_data[f_name]['ISR'],
line_width=3, color=colrs[f_i])
)
my_circles.append(isr_fig.circle(h5_data[f_name]['time'], h5_data[f_name]['ISR'],
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
isr_fig.add_layout(my_legend, 'right')
isr_fig.x_range.start = t_start
return isr_fig
def plot_spike_accel(in_h5_file, exclude_list=[], normalize=False, t_start=0):
if normalize:
acc_fig = bplt.figure(title='Normalized Spike Acceleration vs Time')
else:
acc_fig = bplt.figure(title='Spike Acceleration vs Time')
acc_fig.xaxis.axis_label = 'time (sec)'
acc_fig.yaxis.axis_label = 'spike acceleration (%)'
print('Plotting spike acceleration from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
if normalize:
max_accel = np.max(h5_data[f_name]['Spike_Accel'])
my_lines.append(acc_fig.line(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel']/max_accel,
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(h5_data[f_name]['time'],
h5_data[f_name]['Spike_Accel']/max_accel,
size=6, color=colrs[f_i])
)
else:
my_lines.append(acc_fig.line(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel'],
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel'],
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
acc_fig.add_layout(my_legend, 'right')
acc_fig.x_range.start = t_start
return acc_fig
def plot_spike_accel_aligned(in_h5_file, exclude_list=[], normalize=False):
if normalize:
acc_fig = bplt.figure(title='Normalized Spike Acceleration vs Time')
else:
acc_fig = bplt.figure(title='Spike Acceleration vs Time')
acc_fig.xaxis.axis_label = 'time (sec)'
acc_fig.yaxis.axis_label = 'spike acceleration (%)'
print('Plotting spike acceleration from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_time = h5_data[name + '/ach_times'][0] + 0.5
acc_spikes = h5_data[name + '/spike_times'].loc[h5_data[name+'/spike_times'] > ach_time].to_numpy()
acc_isr = 1.0 / np.diff(acc_spikes)
acc_t = acc_spikes[:-1]
sp0 = acc_spikes[0]
freq_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
freq_val = h5_data['frequency_table']['Frequency'][freq_i].values[0]
sp_accel = (acc_isr - freq_val)/freq_val*100
if normalize:
max_accel = np.max(sp_accel)
my_lines.append(
acc_fig.line(acc_t-sp0, sp_accel / max_accel,
line_width=2, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(acc_t-sp0, sp_accel / max_accel,
size=6, color=colrs[f_i])
)
else:
my_lines.append(acc_fig.line(acc_t-sp0, sp_accel,
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(acc_t-sp0, sp_accel,
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
acc_fig.add_layout(my_legend, 'right')
return acc_fig
def plot_spike_cessation(in_h5_file, exclude_list=[], add_mean=True):
cess_names = []
cess_vals = []
with pd.HDFStore(in_h5_file) as h5_data:
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
cess_names.append(leg_name)
cess_vals.append(1.0/np.min(h5_data[f_name]['ISR']))
if add_mean:
mean_cess = np.mean(cess_vals)
cess_vals.append(mean_cess)
all_names = cess_names
mean_name = 'Mean: {0:.2f} sec'.format(mean_cess)
all_names.append(mean_name)
else:
all_names = cess_names
cess_fig = bplt.figure(x_range=all_names, title='Duration of Spike Cessation after ACh')
cess_fig.yaxis.axis_label = 'duration (sec)'
cess_fig.vbar(x=cess_names, top=cess_vals, width=0.9, color=colrs[0])
if add_mean:
cess_fig.vbar(x=[mean_name], top=[mean_cess], width=0.9, color='red')
cess_fig.xaxis.major_label_orientation = np.pi / 2
cess_fig.y_range.start = 0.0
return cess_fig
def plot_average_ifr(in_h5_file, exclude_list=[]):
with pd.HDFStore(in_h5_file) as h5_data:
h5_df = pd.DataFrame(h5_data['frequency_table'])
h5_df = h5_df.sort_values(by=['Filename'])
sel_tab = h5_data['frequency_table'][~h5_data['frequency_table']['Legend'].isin(exclude_list)]
sel_tab.sort_values('Legend', inplace=True)
x_names = sel_tab['Legend'].tolist()
x_names.append('Average')
cess_fig = bplt.figure(x_range=x_names,
title='Average Pre-ACh Frequency and ISR')
cess_fig.vbar(x=sel_tab['Legend'],
top=sel_tab['Frequency'],
width=0.9, color='blue', alpha=0.6, legend='Frequency')
cess_fig.vbar(x=sel_tab['Legend'],
top=sel_tab['ISR_Mean'],
width=0.6, color='red', alpha=0.6, legend='ISR')
mean_isr = np.mean(sel_tab['ISR_Mean'])
mean_freq = np.mean(sel_tab['Frequency'])
cess_fig.vbar(x=['Average'], top=[mean_freq], width=0.9, color='navy', alpha=0.6)
cess_fig.vbar(x=['Average'], top=[mean_isr], width=0.6, color='maroon', alpha=0.6)
cess_fig.xaxis.major_label_orientation = np.pi / 2
cess_fig.yaxis.axis_label = 'frequency (Hz)'
cess_fig.y_range.start = 0.0
cess_fig.legend.location = 'top_right'
return cess_fig
def plot_average_curve(in_h5_file, time_start=8.5, time_bin_size=0.1, exclude_list=[], spike_acceleration=False,
return_curve=False):
long_time = 0
with pd.HDFStore(in_h5_file) as h5_data:
name_sort = list(h5_data.keys())
name_sort.sort()
# get longest recorded time
for f_name in name_sort:
if 'data' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
e_time = np.max(h5_data[f_name]['time'])
if e_time > long_time:
long_time = e_time
# make array of time bins
t_bins = np.arange(time_start, long_time+time_bin_size, time_bin_size)
isr_avg = np.zeros((t_bins.size - 1,))
acc_avg = np.zeros((t_bins.size - 1,))
c_count = np.zeros((t_bins.size - 1,))
for f_name in name_sort:
if 'spike_times' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
acc_spikes = h5_data[name + '/spike_times'].loc[h5_data[name + '/spike_times'] > time_start].to_numpy()
acc_isrs = 1.0 / np.diff(acc_spikes)
acc_t = acc_spikes[:-1]
freq_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
freq_val = h5_data['frequency_table']['Frequency'][freq_i].values[0]
sp_accels = (acc_isrs - freq_val) / freq_val * 100
sp_is = np.digitize(acc_t, t_bins)
for sp_i, sp_acc, sp_isr in zip(sp_is, sp_accels, acc_isrs):
isr_avg[sp_i] += sp_isr
acc_avg[sp_i] += sp_acc
c_count[sp_i] += 1
isr_avg = np.divide(isr_avg, c_count, where=np.greater(c_count, 0))
acc_avg = np.divide(acc_avg, c_count, where=np.greater(c_count, 0))
if spike_acceleration:
avg_fig = bplt.figure(title='Average Acceleration Versus Time')
avg_fig.yaxis.axis_label = 'spike acceleration (%)'
avg_fig.line(t_bins[:-1], acc_avg, line_width=3, color=colrs[0])
avg_fig.circle(t_bins[:-1], acc_avg, size=12, color=colrs[0])
else:
avg_fig = bplt.figure(title='Average Instantaneous Spike Rate Versus Time')
avg_fig.yaxis.axis_label = 'ISR (Hz)'
avg_fig.line(t_bins[:-1], isr_avg, line_width=3, color=colrs[0])
avg_fig.circle(t_bins[:-1], isr_avg, size=12, color=colrs[0])
avg_fig.xaxis.axis_label = 'time (sec)'
if return_curve:
if spike_acceleration:
return avg_fig, t_bins[:-1], acc_avg
else:
return avg_fig, t_bins[:-1], isr_avg
else:
return avg_fig
def plot_spike_cessation_vs_isr_variance(in_h5_file, exclude_list=[]):
cess_names = []
cess_vals = []
ifr_vars = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
cess_names.append(name)
cess_vals.append(1.0 / np.min(h5_data[f_name]['ISR']))
c_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
ifr_vars.append(h5_data['frequency_table']['ISR_Var'][c_i].values[0])
cess_fig = bplt.figure(title='Spike Cessation vs ISR Variance')
cess_fig.circle(cess_vals, ifr_vars, size=12, color=colrs[0])
cess_fig.xaxis.axis_label = 'duration of spike cessation (sec)'
cess_fig.yaxis.axis_label = 'variance of ISR (Hz)'
return cess_fig
def plot_peak_acceleration_vs_spike_cessation(in_h5_file, exclude_list=[]):
fail_acc = []
fail_cess = []
fail_names = []
succ_acc = []
succ_cess = []
succ_names = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_name = name + '/ach_times'
ach_start = h5_data[ach_name][0]
cess_val = 1.0 / np.min(h5_data[f_name]['ISR'])
acc_i = np.where(h5_data[f_name]['time'] < ach_start)
max_acc_pre = np.max(h5_data[f_name].loc[h5_data[f_name]['time'] < ach_start, 'Spike_Accel'].tolist())
max_acc = np.max(h5_data[f_name]['Spike_Accel'])
if max_acc <= 1.1*max_acc_pre:
fail_acc.append(max_acc)
fail_cess.append(cess_val)
fail_names.append(leg_name)
else:
succ_acc.append(max_acc)
succ_cess.append(cess_val)
succ_names.append(leg_name)
acc_fig = bplt.figure(title='Peak Spike Acceleration vs Duration of Spike Cessation')
acc_fig.circle(fail_cess, fail_acc, size=12, color='red', legend='no acceleration')
acc_fig.circle(succ_cess, succ_acc, size=12, color='green', legend='acceleration')
acc_fig.xaxis.axis_label = 'duration of spike cessation (sec)'
acc_fig.yaxis.axis_label = 'peak acceleration (%)'
print('Failed to Demonstrate Spike Acceleration')
print(fail_names)
print('Demonstrated at least 10% increase in ISR')
print(succ_names)
return acc_fig
def plot_peak_acceleration_vs_isr_variance(in_h5_file, exclude_list=[]):
acc_vals = []
var_vals = []
names = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_name = name + '/ach_times'
ach_start = h5_data[ach_name][0]
c_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
var_vals.append(h5_data['frequency_table']['ISR_Var'][c_i].values[0])
max_acc = np.max(h5_data[f_name]['Spike_Accel'])
acc_vals.append(max_acc)
names.append(leg_name)
acc_fig = bplt.figure(title='Peak Spike Acceleration vs ISR Variance')
acc_fig.circle(var_vals, acc_vals, size=12, color=colrs[0])
acc_fig.xaxis.axis_label = 'variance of ISR (Hz)'
acc_fig.yaxis.axis_label = 'peak acceleration (%)'
return acc_fig
def print_average_table(in_h5_file):
with pd.HDFStore(in_h5_file) as h5_data:
h5_df = pd.DataFrame(h5_data['frequency_table'])
print(h5_df)
def analyze_spike_data_from_hdf5(in_h5_file):
avg_freqs = []
avg_isrs = []
var_isrs = []
cell_names = []
legend_names = []
with pd.HDFStore(in_h5_file) as h5_data:
for f_i, f_name in enumerate(h5_data.keys()):
if '/data' in f_name:
print(f_name)
name = f_name.split('/')[1]
sp_name = '{}/spike_times'.format(name)
isr_name = '{}/spike_rates'.format(name)
ach_name = '{}/ach_times'.format(name)
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
# Calculate ACh Times
ach_i = np.where(h5_data[f_name]['ACh'] > 1e-5)
ach_times = pd.Series([h5_data[f_name]['time'][ach_i[0][0]], h5_data[f_name]['time'][ach_i[0][-1]]])
h5_data.put(ach_name, ach_times)
# Get spike times
sp_times = fan.get_spike_times(h5_data[f_name]['Potential'], h5_data[f_name]['time'])
h5_data.put(sp_name, pd.Series(sp_times))
# Calculate ISR
isr_vals = np.divide(1.0, np.diff(sp_times))
isr_ts = (sp_times[:-1] + sp_times[1:]) / 2.0
# Calculate Average Frequency Before ACh
pre_spikes = sp_times[np.where(sp_times < ach_times[0])]
pre_isr = np.divide(1.0, np.diff(pre_spikes))
avg_isr = np.mean(pre_isr)
print('Mean of ISR: {}'.format(avg_isr))
var_isr = np.var(pre_isr)
print('Variance of ISR: {}'.format(var_isr))
avg_freq = pre_spikes.size / ach_times[0]
print('Average Frequency (#/time): {}'.format(avg_freq))
avg_isrs.append(avg_isr)
var_isrs.append(var_isr)
avg_freqs.append(avg_freq)
cell_names.append(name)
legend_names.append(leg_name)
sp_acc = (isr_vals - avg_isr)/avg_isr*100.0
isr_df = pd.DataFrame(np.vstack((isr_ts, isr_vals, sp_acc)).transpose(),
columns=('time', 'ISR', 'Spike_Accel'))
h5_data.put(isr_name, isr_df, format='table', data_columns=True)
freq_dict = {'Filename': cell_names, 'Legend': legend_names, 'ISR_Mean': avg_isrs, 'ISR_Var': var_isrs,
'Frequency': avg_freqs}
        h5_data.put('frequency_table', pd.DataFrame(freq_dict))
#!/usr/bin/env python
import os
import sys
import pandas as pd
# This script finds all stressors in both files and only retains the items from the base machine.
if len(sys.argv) != 4:
raise Exception("./merge.py <base machine name> <reference machine name> <destination folder>")
base_machine = sys.argv[1]
ref_machine = sys.argv[2]
dest = sys.argv[3]
df_base = pd.read_json(base_machine)
df_ref = pd.read_json(ref_machine)
column_prefix = 'ref_'
df_ref.rename(columns = lambda x : column_prefix + x, inplace=True)
df = pd.merge(df_base, df_ref, how='inner', left_on='name', right_on=column_prefix + 'name')
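# Hypothetical continuation (not part of the original script): one plausible way
# to persist the merged result into the destination folder given as argv[3].
# The output file name 'merged.json' is an assumption for illustration only.
df.to_json(os.path.join(dest, 'merged.json'), orient='records')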
import sys
from pathlib import Path
import pandas as pd
from astropy.io import fits
try:
from tqdm.auto import tqdm
except ImportError:
TQDM_NOT_FOUND = True
else:
TQDM_NOT_FOUND = False
def find_best_header(path):
hdul = fits.open(path)
best_hdu = None
while hdul:
hdu = hdul.pop(0)
best_hdu = hdu
if isinstance(hdu, fits.hdu.image.ImageHDU):
break
return best_hdu.header
def convert_header_to_dataframe(header, index=None):
headerdict = dict(header)
# there's a huge empty string at the end of headers
# if it's called "", then it's removed, otherwise no harm done.
_ = headerdict.pop("", None)
# remove COMMENT and ORIGIN
keys_to_delete = ["COMMENT", "ORIGIN"]
for key in keys_to_delete:
_ = headerdict.pop(key, None)
index = pd.Index([index], name="filename")
    return pd.DataFrame(pd.Series(headerdict))
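# --- Hypothetical usage sketch (not part of the original script) ---
# The file name 'example.fits' is an assumption; this only shows how the two
# helpers above are meant to be chained for a single observation file.
def _example_single_header(path='example.fits'):
    header = find_best_header(path)   # first image-like header in the file
    return convert_header_to_dataframe(header, index=Path(path).name)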
''' Datasets
This file contains definitions for our CIFAR, ImageFolder, and HDF5 datasets
'''
import os
import os.path
import sys
from PIL import Image
import numpy as np
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
import random
import pandas as pd
from multiprocessing import Pool
# from joblib import Parallel, delayed
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.io as io
from torchvision.datasets.utils import download_url, check_integrity
import torch.utils.data as data
from torch.utils.data import DataLoader
# from torchvision.datasets.video_utils import VideoClips
from VideoClips2 import VideoClips
from torchvision.datasets.utils import list_dir
import numbers
from glob import glob
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, class_to_idx):
images = []
dir = os.path.expanduser(dir)
for target in tqdm(sorted(os.listdir(dir))):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(data.Dataset):
"""A generic data loader where the images are arranged in this way: ::
root/dogball/xxx.png
root/dogball/xxy.png
root/dogball/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, load_in_mem=False,
index_filename='imagenet_imgs.npz', **kwargs):
classes, class_to_idx = find_classes(root)
# Load pre-computed image directory walk
if os.path.exists(index_filename):
print('Loading pre-saved Index file %s...' % index_filename)
imgs = np.load(index_filename)['imgs']
# If first time, walk the folder directory and save the
# results to a pre-computed file.
else:
print('Generating Index file %s...' % index_filename)
imgs = make_dataset(root, class_to_idx)
np.savez_compressed(index_filename, **{'imgs' : imgs})
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.load_in_mem = load_in_mem
if self.load_in_mem:
print('Loading all images into memory...')
self.data, self.labels = [], []
for index in tqdm(range(len(self.imgs))):
path, target = imgs[index][0], imgs[index][1]
self.data.append(self.transform(self.loader(path)))
self.labels.append(target)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
else:
path, target = self.imgs[index]
img = self.loader(str(path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size(), target)
return img, int(target)
def __len__(self):
return len(self.imgs)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
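# --- Hypothetical usage sketch (not part of the original file) ---
# The root path and index file name are assumptions; this only shows how the
# ImageFolder above is typically wired into a training DataLoader.
def _example_imagefolder_loader(root='data/imagenet/train', batch_size=64):
    train_transform = transforms.Compose([
        transforms.Resize(64),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
        transforms.Normalize([0.5] * 3, [0.5] * 3),
    ])
    dataset = ImageFolder(root, transform=train_transform,
                          index_filename='example_imgs.npz')
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)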
''' ILSVRC_HDF5: A dataset to support I/O from an HDF5 to avoid
having to load individual images all the time. '''
import h5py as h5
import torch
class ILSVRC_HDF5(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,
load_in_mem=False, train=True,download=False, validate_seed=0,
val_split=0, **kwargs): # last four are dummies
self.root = root
self.num_imgs = len(h5.File(root, 'r')['labels'])
# self.transform = transform
self.target_transform = target_transform
# Set the transform here
self.transform = transform
# load the entire dataset into memory?
self.load_in_mem = load_in_mem
# If loading into memory, do so now
if self.load_in_mem:
print('Loading %s into memory...' % root)
with h5.File(root,'r') as f:
self.data = f['imgs'][:]
self.labels = f['labels'][:]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# If loaded the entire dataset in RAM, get image from memory
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
# Else load it from disk
else:
with h5.File(self.root,'r') as f:
img = f['imgs'][index]
target = f['labels'][index]
# if self.transform is not None:
# img = self.transform(img)
# Apply my own transform
img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2
if self.target_transform is not None:
target = self.target_transform(target)
return img, int(target)
def __len__(self):
return self.num_imgs
# return len(self.f['imgs'])
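# --- Hypothetical usage sketch (not part of the original file) ---
# The HDF5 path is an assumption; it shows the intended pattern of reading the
# pre-packed ImageNet archive either lazily from disk or entirely from RAM.
def _example_hdf5_loader(h5_path='ILSVRC128.hdf5', batch_size=256, in_mem=False):
    dataset = ILSVRC_HDF5(h5_path, load_in_mem=in_mem)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=8)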
import pickle
class CIFAR10(dset.CIFAR10):
def __init__(self, root, train=True,
transform=None, target_transform=None,
download=True, validate_seed=0,
val_split=0, load_in_mem=True, **kwargs):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.val_split = val_split
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# now load the picked numpy arrays
self.data = []
self.labels= []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.labels += entry['labels']
else:
self.labels += entry['fine_labels']
fo.close()
self.data = np.concatenate(self.data)
# Randomly select indices for validation
if self.val_split > 0:
label_indices = [[] for _ in range(max(self.labels)+1)]
for i,l in enumerate(self.labels):
label_indices[l] += [i]
label_indices = np.asarray(label_indices)
# randomly grab 500 elements of each class
np.random.seed(validate_seed)
self.val_indices = []
for l_i in label_indices:
self.val_indices += list(l_i[np.random.choice(len(l_i), int(len(self.data) * val_split) // (max(self.labels) + 1) ,replace=False)])
if self.train=='validate':
self.data = self.data[self.val_indices]
self.labels = list(np.asarray(self.labels)[self.val_indices])
self.data = self.data.reshape((int(50e3 * self.val_split), 3, 32, 32))
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
elif self.train:
# print(np.shape(self.data))
if self.val_split > 0:
self.data = np.delete(self.data,self.val_indices,axis=0)
self.labels = list(np.delete(np.asarray(self.labels),self.val_indices,axis=0))
self.data = self.data.reshape((int(50e3 * (1.-self.val_split)), 3, 32, 32))
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
else:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.data = entry['data']
if 'labels' in entry:
self.labels = entry['labels']
else:
self.labels = entry['fine_labels']
fo.close()
self.data = self.data.reshape((10000, 3, 32, 32))
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
class videoCIFAR10(CIFAR10):
def __init__(self, root, train=True,
transform=None, target_transform=None,
download=True, validate_seed=0,
val_split=0, load_in_mem=True, **kwargs):
super().__init__(root, train,
transform, target_transform,
download, validate_seed,
val_split, load_in_mem, **kwargs)
self.time_steps = kwargs['time_steps']
# print('cifar10 classes',set(self.labels))
def __getitem__(self,index):
img, target = super().__getitem__(index)
return torch.unsqueeze(img, dim=0).repeat(self.time_steps,1,1,1), target
def __len__(self):
return super().__len__()
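# --- Hypothetical usage sketch (not part of the original file) ---
# The root path and time_steps value are assumptions; videoCIFAR10 simply
# repeats each CIFAR-10 image along a new leading time axis, so each sample
# is a (time_steps, 3, 32, 32) clip plus its class label.
def _example_video_cifar(root='data/cifar', time_steps=8):
    dataset = videoCIFAR10(root, train=True, transform=transforms.ToTensor(),
                           time_steps=time_steps)
    clip, _ = dataset[0]
    return clip.shape   # expected: torch.Size([time_steps, 3, 32, 32])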
class vid2frame_dataset(data.Dataset):
"""docstring for video_dataset"""
def __init__(self, cache_csv_path, data_root=None, save_path=None, label_csv_path=None, extensions=None, clip_length_in_frames=12, frame_rate=12, transforms = None, cache_exists=False):
super(vid2frame_dataset, self).__init__()
"""
The constructor for vid2frame_dataset class
Parameters
----------
data_root : str(or None)
The path to the directory with all the videos
save_path : str(or None)
The path to the directory where the frames should be saved
label_csv_path : str(or None)
The path to the csv file which contains class labels
cache_csv_path : str(or None)
The path to the csv file where the cache will be saved
extensions : list(or None)
            The list of video file extensions to include when scanning data_root
clip_length_in_frames : int
Number of frames to be returned to the dataloader
        frame_rate : int
Frame rate at which the jpeg frames will be written
transforms : list(or None)
The transforms that are to be applied to the clip
"""
self.data_root = data_root
# self.zarr_root = zarr_root
self.label_csv_path = label_csv_path
self.cache_csv_path = cache_csv_path
self.save_path = save_path
# self.zarr_file = zarr.open(zarr_root, 'a')
self.extensions = extensions
self.clip_length_in_frames = clip_length_in_frames
self.frame_rate = frame_rate
self.transforms = transforms
self.cache_exists = cache_exists
if self.cache_exists:
self.cache_df = pd.read_csv(self.cache_csv_path)
self.class_to_idx = {label: i for i, label in enumerate(self.cache_df['label'].unique())}
elif self.cache_exists == False:
self.label_df = pd.read_csv(self.label_csv_path)
columns = ['path', 'label']
self.cache_df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
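# Illustrative sketch (not a test; the frame and names below are ad hoc): shows how
# cartesian_product_for_groupers expands an observed groupby result onto the full
# category grid, filling never-observed combinations with `fill_value`.
def _demo_cartesian_product_for_groupers():
    c1 = Categorical(["a", "a", "b"], categories=["a", "b", "z"])
    c2 = Categorical(["x", "y", "x"], categories=["x", "y", "w"])
    df = DataFrame({"c1": c1, "c2": c2, "values": [1, 2, 3]})
    observed = df.groupby(["c1", "c2"], observed=True).sum()
    # the 3 observed rows become the full 3 x 3 = 9 row cartesian product
    return cartesian_product_for_groupers(observed, [c1, c2], ["c1", "c2"], fill_value=0)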
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
    # https://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use same data as test_groupby_sort_categorical, which category is
# corresponding to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame(
{
"key1": Categorical(list("<KEY>")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
tm.assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
df_cat: DataFrame
"""
df_cat = df.copy()[:4] # leave out some groups
df_cat["A"] = df_cat["A"].astype("category")
df_cat["B"] = df_cat["B"].astype("category")
df_cat["C"] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(["D"], axis=1)
return df_cat
@pytest.mark.parametrize(
"operation, kwargs", [("agg", {"dtype": "category"}), ("apply", {})]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs,
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
if operation == "agg":
expected = expected.fillna(0, downcast="infer")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"observed, index, data",
[
(
True,
MultiIndex.from_tuples(
[
("foo", "one", "min"),
("foo", "one", "max"),
("foo", "two", "min"),
("foo", "two", "max"),
("bar", "one", "min"),
("bar", "one", "max"),
("bar", "three", "min"),
("bar", "three", "max"),
],
names=["A", "B", None],
),
[1, 1, 3, 3, 2, 2, 4, 4],
),
(
False,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
(
None,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
lambda x: {"min": x.min(), "max": x.max()}
)
tm.assert_series_equal(result, expected)
def test_groupby_categorical_series_dataframe_consistent(df_cat):
# GH 20416
expected = df_cat.groupby(["A", "B"])["C"].mean()
result = df_cat.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
# GH 13420
df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
cat = Categorical.from_codes(code, categories=list("abc"))
result = df.groupby(cat, axis=1).mean()
expected = df.T.groupby(cat, axis=0).mean().T
tm.assert_frame_equal(result, expected)
def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
{"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
result = (
df.groupby("Name", observed=observed)
.agg(DataFrame.sum, skipna=True)
.reset_index()
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
#coding:utf-8
from PyQt5 import QtWidgets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import sys
import os
import time
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import scipy.misc
import pickle
import datetime
import tensorflow as tf
import glog as log
from glob import glob
import pandas as pd
# Image annotation widget: mark a landmark on each image and save it to Excel
class Mark(QtWidgets.QWidget):
def __init__(self, imgPath, lablePath, imageOffsetX, imageOffsetY, excelCon):
super(Mark,self).__init__()
self.imgPath=imgPath
self.lablePath=lablePath
self.imageOffsetX=imageOffsetX
self.imageOffsetY=imageOffsetY
self.excelCon=excelCon
self.mouseOnImgX=0.0
self.mouseOnImgY=0.0
self.xPos=0.0
self.yPos=0.0
self.curWidth=0.0
self.curHeight=0.0
        # Window at top-left (100, 100) with size 1000x900; adjust as needed (no layout manager is used)
        self.setGeometry(100,100,1000,900)
        self.setWindowTitle(u"坐标标注")  # window title ("coordinate annotation")
self.initUI()
def initUI(self):
        # self.labelR = QtWidgets.QLabel(u'缩放比例:', self)  # zoom-ratio label
        # self.labelR.move(200, 20)  # label position
        # self.editR = QtWidgets.QLineEdit(self)  # line edit holding the image scaling ratio
        # self.editR.move(250,20)  # line-edit position
        self.buttonSave = QtWidgets.QPushButton(u"保存坐标到EXCEL", self)  # save button ("save coordinates to Excel")
        self.buttonSave.move(400,20)  # save button position
        self.buttonSave.clicked.connect(self.saveButtonClick)  # slot connected to the save button
        self.allFiles = QtWidgets.QListWidget(self)  # list widget showing all image files
        self.allFiles.move(10,40)  # list widget position
        self.allFiles.resize(180,700)  # list widget size
        allImgs = os.listdir(self.imgPath)  # walk the image directory and add every file to the list widget
        allImgs.sort(key= lambda x:int(x[:-4]))  # sort numerically by file name
        for imgTmp in allImgs:
            self.allFiles.addItem(imgTmp)
        imgNum=self.allFiles.count()
        self.labelShowNum = QtWidgets.QLabel(u'图片数量:'+str(imgNum), self)  # label showing the image count
        self.labelShowNum.move(20, 20)  # label position
        self.allFiles.itemClicked.connect(self.itemClick)  # list widget signals; the decorator-style slot syntax did not work here
        self.allFiles.itemSelectionChanged.connect(self.itemSeleChange)
        self.labelImg = QtWidgets.QLabel("选中显示图片", self)  # label that displays the image (placeholder text: "select to show image")
        self.labelImg.move(self.imageOffsetX, self.imageOffsetY)  # image label position
# def closeEvent(self, event):
# self.file.close()
# print('file close')
    # # event.ignore()  # ignore the close event
    # # self.hide()  # hide the window
    # convert a cv2 image (BGR numpy array) to a QPixmap
def img2pixmap(self, image):
Y, X = image.shape[:2]
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order='C')
self._bgra[..., 0] = image[..., 0]
self._bgra[..., 1] = image[..., 1]
self._bgra[..., 2] = image[..., 2]
qimage = QImage(self._bgra.data, X, Y, QImage.Format_RGB32)
pixmap = QPixmap.fromImage(qimage)
return pixmap
    # get the selected image and its file name from the list widget
def selectItemGetImg(self):
imgName=self.allFiles.currentItem().text()
        imgDirName = self.imgPath + self.allFiles.currentItem().text()  # absolute path of the image
        imgOri = cv2.imread(str(imgDirName),1)  # read the image
        self.curHeight = imgOri.shape[0]  # image height
        self.curWidth = imgOri.shape[1]  # image width, used to scale the displayed image
return imgOri, imgName
    # draw the marked point and display the image
def pointorShow(self, img, x, y):
cv2.circle(img,(x, y),3,(0,0,255),2)
cv2.circle(img,(x, y),5,(0,255,0),2)
        self.labelImg.resize(self.curWidth,self.curHeight)  # resize the display label; the image is scaled to this width/height
self.labelImg.setPixmap(self.img2pixmap(img))
    # mouse press event
def mousePressEvent(self, QMouseEvent):
        pointT = QMouseEvent.pos()  # coordinates of the mouse click
self.mouseOnImgX=pointT.x()-200
self.mouseOnImgY=pointT.y()-70
imgOri, _=self.selectItemGetImg()
self.pointorShow(imgOri, self.mouseOnImgX, self.mouseOnImgY)
        # save the label
self.saveLabelBySelectItem()
    # when the list selection changes, show the image with its stored point
def itemSelectShowImg(self):
imgOri, imgName=self.selectItemGetImg()
        # read the normalized x, y coordinates from the Excel sheet
        xScal, yScal = self.excelCon.getXYPoint('imageName', imgName)
        # convert the normalized x, y back to pixel coordinates
self.mouseOnImgX=int(xScal*self.curWidth)
self.mouseOnImgY=int(yScal*self.curHeight)
self.pointorShow(imgOri, self.mouseOnImgX, self.mouseOnImgY)
    def itemClick(self):  # list item clicked
self.itemSelectShowImg()
    def itemSeleChange(self):  # list selection changed
self.itemSelectShowImg()
def saveLabelBySelectItem(self):
curItem=self.allFiles.currentItem()
if(curItem==None):
print('please select a item')
return
name=str(curItem.text())
        # normalize the coordinates to [0, 1]
self.xPos=self.mouseOnImgX/self.curWidth
self.yPos=self.mouseOnImgY/self.curHeight
        # update the record, or append it if missing
self.excelCon.updateAppendRowBycolName('imageName', name, self.xPos, self.yPos)
    def saveButtonClick(self):  # save button handler
self.saveLabelBySelectItem()
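# Hedged launch sketch (the paths, Excel file name, and sheet name are placeholders;
# excelTools.getXYPoint, used by Mark, is assumed to be defined in the part of
# excelTools not shown here). The image offsets 200 and 70 match the hard-coded
# subtraction in mousePressEvent above.
def _demo_launch_mark(img_path='./images/', label_path='./labels/'):
    app = QtWidgets.QApplication(sys.argv)
    excel_con = excelTools(label_path, 'landmarks.xlsx', sheetName='Sheet1')
    win = Mark(img_path, label_path, imageOffsetX=200, imageOffsetY=70, excelCon=excel_con)
    win.show()
    sys.exit(app.exec_())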
class imgTools():
def __init__(self):
self.name = "ray"
def png2jpg(self, path):
# path:=>'images/*.png'
pngs = glob(path)
for j in pngs:
img = cv2.imread(j)
cv2.imwrite(j[:-3] + 'jpg', img)
def txt2Excel(self, txtPathName, excelCon):
with open(txtPathName, 'r') as f:
lines = f.readlines()
imagesNum=len(lines)
imgNameList=[]
xList=[]
yList=[]
for i in range (imagesNum):
line=lines[i].strip().split()
imageName=line[0]
                # strip the directory prefix from the name
imageName=imageName[44:]
print(imageName)
imgNameList.append(imageName)
landmark = np.asarray(line[1:197], dtype=np.float32)
nosice=landmark[54*2:54*2+2]
xList.append(nosice[0])
yList.append(nosice[1])
            # append all rows in one batch
colNames=['imageName', 'x', 'y']
datas=[]
datas.append(imgNameList)
datas.append(xList)
datas.append(yList)
excelCon.appendRowsAnyway(colNames, datas)
def CenterLabelHeatMap(self, img_width, img_height, posX, posY, sigma):
X1 = np.linspace(1, img_width, img_width)
Y1 = np.linspace(1, img_height, img_height)
[X, Y] = np.meshgrid(X1, Y1)
X = X - posX
Y = Y - posY
D2 = X * X + Y * Y
E2 = 2.0 * sigma * sigma
Exponent = D2 / E2
heatmap = np.exp(-Exponent)
return heatmap
# Compute gaussian kernel
def CenterGaussianHeatMap(self, img_height, img_width, posX, posY, variance):
gaussian_map = np.zeros((img_height, img_width))
for x_p in range(img_width):
for y_p in range(img_height):
dist_sq = (x_p - posX) * (x_p - posX) + \
(y_p - posY) * (y_p - posY)
exponent = dist_sq / 2.0 / variance / variance
gaussian_map[y_p, x_p] = np.exp(-exponent)
return gaussian_map
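# Hedged usage sketch (sizes and sigma are illustrative): generate a heatmap centred
# on a landmark. Note that CenterLabelHeatMap builds a 1-based grid via
# np.linspace(1, w, w) while CenterGaussianHeatMap loops over 0-based pixel indices,
# so for the same (posX, posY) their peaks sit one pixel apart.
def _demo_center_heatmap():
    tools = imgTools()
    heatmap = tools.CenterLabelHeatMap(img_width=64, img_height=48, posX=32, posY=24, sigma=5)
    peak_y, peak_x = np.unravel_index(np.argmax(heatmap), heatmap.shape)
    print('peak at (x=%d, y=%d), max=%.3f' % (peak_x, peak_y, heatmap.max()))
    return heatmap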
class excelTools():
def __init__(self, lablePath, excelName, sheetName=None):
self.lablePath = lablePath
self.excelName=excelName
self.sheetName=sheetName
def mkEmptyExecl(self, titleFormat):
writer = pd.ExcelWriter(self.lablePath+self.excelName, engine='xlsxwriter')
df=pd.DataFrame(titleFormat)
# df=pd.DataFrame()
if(self.sheetName==None):
df.to_excel(writer, index=False)
else:
df.to_excel(writer, sheet_name=self.sheetName, index=False)
writer.save()
def updateAppendRowBycolName(self, colName, keyWord, x, y):
dirName=self.lablePath+self.excelName
if(self.sheetName==None):
df = pd.read_excel(dirName)
else:
df = pd.read_excel(dirName, sheet_name=self.sheetName)
value=df.loc[df[colName] == keyWord]
if(value.empty):
print('add row at end')
new=pd.DataFrame({'imageName':[keyWord], 'x':[x], 'y':[y]})
df=df.append(new,ignore_index=True)
df.to_excel(dirName, sheet_name=self.sheetName, index=False)
else:
print('update x y')
index=value.index.values[0]
df.at[index,'x']=x
df.at[index,'y']=y
df.to_excel(dirName, sheet_name=self.sheetName, index=False)
def appendRowsAnyway(self, colNames, datas):
dirName=self.lablePath+self.excelName
if(self.sheetName==None):
df = pd.read_excel(dirName)
else:
df = pd.read_excel(dirName, sheet_name=self.sheetName)
print('add rows at end')
dataDic={}
for i in range(len(colNames)):
dataDic[colNames[i]]=datas[i]
new=pd.DataFrame(dataDic)
df=df.append(new,ignore_index=True)
df.to_excel(dirName, sheet_name=self.sheetName, index=False)
def searchIndexByColName(self, colName, keyWord):
dirName=self.lablePath+self.excelName
if(self.sheetName==None):
df = pd.read_excel(dirName)
else:
df = | pd.read_excel(dirName, sheet_name=self.sheetName) | pandas.read_excel |
# Standard
import threading
# Numerical
import numpy as np
import pandas as pd
from scipy.stats import binom_test
from joblib import Parallel, delayed
# Graphical
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
# Local repo
from tree_explainer.utilities.preprocessing import DataProcessor
from tree_explainer.utilities.model_validation import validate_model_type, validate_model_is_trained
from tree_explainer.utilities.parallel import compute_feature_contributions_from_tree, analyze_tree_structure, compute_two_way_conditional_contributions, compute_explanation_of_prediction
from tree_explainer.utilities.numerical import divide0
from tree_explainer.utilities.visualization import adjust_spines
from tree_explainer.utilities.lists import true_list
################################################################################
class TreeExplainer(object):
def __init__(self, model, data, targets=None, n_jobs=None, verbose=False):
"""The class is instantiated by passing model to train and training data.
Optionally, feature names and target names can be passed, too.
        :param model: The input model to explain.
:param data: [numpy array or pandas DataFrame] Data on which to test
feature contributions. It must have the same number of features of
the dataset used to train the model. It should be scaled in the same
way as the training data.
:param targets: [numpy array] True target values of each instance in
`data`. For classification tasks, it contains the class labels, for
regression problems the true value.
:param n_jobs: [int or None] The number of parallel processes to use.
:param verbose: [bool or int] Whether to print progress to the console.
If it equals False, no message will be printed; if it equals 1,
            progress messages will be displayed; if it equals True or 2,
messages from joblib will also be displayed.
:return self:
"""
# Check that model type is supported
model_class, model_type, implementation, estimator_type = validate_model_type(model)
# Check that model has been fit, and get information from it
n_estimators_attr, estimators_attr = validate_model_is_trained(model, model_class)
# Store the state of verbosity and the number of cores to use for joblib
if isinstance(verbose, bool):
if verbose:
self.verbosity_level = 2
else:
self.verbosity_level = 0
elif isinstance(verbose, (int, float)):
self.verbosity_level = int(round(verbose))
else:
self.verbosity_level = 0
self.joblib_params = dict(n_jobs=n_jobs, verbose=self.verbosity_level == 2)
if self.verbosity_level > 0:
print('Analyzing model structure and computing feature contributions ...')
# Keep a reference to these attributes of the model
self._model_specifics = dict(model_class=model_class,
model_type=model_type,
implementation=implementation,
estimator_type=estimator_type,
estimators_=estimators_attr,
n_estimators=n_estimators_attr)
# Initialize basic attributes of the model
self.model = model
self.n_trees = getattr(model, self._model_specifics['n_estimators'])
self.n_features = model.n_features_
# Initialize attributes
self.data = None
self.targets = None
self.tree_path = None
self.features_split = list(np.empty((self.n_trees, ), dtype=object))
self.tree_feature_path = None
self.tree_depth = None
self.feature_depth = None
self.threshold_value = None
self.no_of_nodes = None
self.data_leaves = None
self.target_probability_at_root = None
self.contributions = None
self.conditional_contributions = None
self.conditional_contributions_sample = None
self.min_depth_frame = None
self.min_depth_frame_summary = None
self.importance_frame = None
self.two_way_contribution_table = None
self.n_two_way_contribution_table = None
# Prepare data
self._prepare_data_and_predict(data=data, targets=targets)
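    # Hedged usage sketch (the model and data names below are placeholders, not part
    # of this module): a fitted sklearn ensemble is wrapped together with held-out
    # data, then feature contributions are computed and one prediction is explained.
    #
    #     from sklearn.ensemble import RandomForestClassifier
    #     model = RandomForestClassifier(n_estimators=50).fit(X_train, y_train)
    #     explainer = TreeExplainer(model, data=X_test, targets=y_test, n_jobs=2)
    #     explainer.analyze_tree_structure()
    #     explainer.explain_feature_contributions(joint_contributions=True)
    #     explainer.explain_single_prediction(observation_idx=0)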
def analyze_tree_structure(self):
"""
:return:
"""
# Allocate variables
results = dict()
if self.tree_path is None:
store_tree_path = True
results['tree_path'] = list(np.empty((self.n_trees, ), dtype=object))
else:
store_tree_path = False
results['features_split'] = list(np.empty((self.n_trees, ), dtype=object))
results['tree_feature_path'] = list(np.empty((self.n_trees,), dtype=object))
results['tree_depth'] = np.zeros((self.n_trees, ), dtype=int)
results['feature_depth'] = {f: np.zeros((self.n_trees, ), dtype=int) - 1 for f in self.feature_names}
results['threshold_value'] = list(np.empty((self.n_trees,), dtype=object))
results['no_of_nodes'] = {f: np.zeros((self.n_trees,), dtype=int) - 1 for f in self.feature_names}
# Process trees in parallel
Parallel(**self.joblib_params, require='sharedmem')(
delayed(analyze_tree_structure)(
estimator=estimator, feature_names=self.feature_names,
store_tree_path=store_tree_path, i_tree=i_tree,
results=results, lock=threading.Lock())
for i_tree, estimator in enumerate(getattr(self.model, self._model_specifics['estimators_'])))
# Store results
if store_tree_path:
self.tree_path = results['tree_path']
self.features_split = results['features_split']
self.tree_feature_path = results['tree_feature_path']
self.tree_depth = results['tree_depth']
self.feature_depth = results['feature_depth']
self.threshold_value = results['threshold_value']
self.no_of_nodes = results['no_of_nodes']
if self.verbosity_level > 0:
print('done')
return self
def explain_feature_contributions(self, joint_contributions=False,
ignore_non_informative_nodes=False):
"""
:param joint_contributions: [bool] Whether to compute the joint
contribution between features.
:param ignore_non_informative_nodes: [bool] Whether to exclude from
averaging the nodes where contribution values did not change. In this
way, the mean is only computed over the nodes where a feature was
effectively evaluated.
"""
# Process trees in parallel
results = Parallel(**self.joblib_params, require='sharedmem')(
delayed(compute_feature_contributions_from_tree)(
estimator=estimator, data=self.data,
contributions_shape=(self.n_samples, self.n_features, self.n_target_levels),
features_split=self.features_split[i_tree],
joint_contributions=joint_contributions,
ignore_non_informative_nodes=ignore_non_informative_nodes)
for i_tree, estimator in enumerate(getattr(self.model, self._model_specifics['estimators_'])))
# Unpack `results`
if self.tree_path is None:
self.tree_path = [i['tree_path'] for i in results]
self.data_leaves = np.vstack([i['data_leaves'] for i in results]).transpose()
self.target_probability_at_root = np.vstack([i['target_probability_at_root'] for i in results])
# Divide contributions only by the number of times the feature was evaluated.
# Features that were never evaluated will return NaN
if ignore_non_informative_nodes:
denominator = np.sum(np.stack([i['contributions_n_evaluations'] for i in results], axis=3), axis=3)
else:
denominator = self.n_trees # Normalized by all trees
self.contributions = divide0(np.sum(np.stack([i['contributions'] for i in results], axis=3), axis=3),
denominator,
replace_with=np.nan)
if joint_contributions:
self.conditional_contributions = [i['conditional_contributions'] for i in results]
self.conditional_contributions_sample = [i['conditional_contributions_sample'] for i in results]
if self.features_split[0] is None: # If the first is empty, then all are
self.features_split = [i['features_split'] for i in results]
if self.verbosity_level > 0:
print('done')
return self
def explain_single_prediction(self, observation_idx, solve_duplicate_splits='mean',
threshold_contribution=None, top_n_features=None):
"""Analyze tree structure and try to explain how the model has reached a
certain prediction for a single observation. The idea is to look at how
each tree has used data features to partition the feature space, and
how that rule generalizes across trees.
:param observation_idx: [int] The index of an observation in the stored
data
:param solve_duplicate_splits: [str] Not implemented yet.
:param threshold_contribution: [None or int] The threshold on
contribution values below which features will be hidden from the final
summary because uninformative. If None, nothing happens.
:param top_n_features: [int or None] The number of most informative
features, as measured by conditional contributions. If None,
nothing happens.
:return [str]: Prints message to console regarding the contribution of
features to a single prediction.
"""
if self.data is None or self.conditional_contributions is None:
            raise ValueError('No conditional contributions are present. First run '
                             'explain_feature_contributions(joint_contributions=True)')
# Get data of observation
this_sample_original = self.data[observation_idx, :]
this_sample = list(this_sample_original)
# Convert data of this sample
for i_feature, feature in enumerate(self.feature_names):
if self.features_data_types[feature]['data_type'] == 'numerical':
this_sample[i_feature] = '%.3f' % this_sample_original[i_feature]
else:
this_sample[i_feature] = self.features_data_types[feature]['categories'][int(this_sample_original[i_feature])]
# Remove trailing zeros
this_sample = [str(i).rstrip('0') for i in this_sample]
# Gather unique values from each feature
feature_values = dict({feature: list() for feature in self.feature_names})
for i_feature, feature in enumerate(self.feature_names):
feature_values[feature] = np.unique(self.data[:, i_feature])
# Process trees in parallel
# results = dict()
# results['samples_for_decision_table'] = dict({i: list() for i in self.feature_names})
# results['contributions_for_decision_table'] = dict({i: list() for i in self.feature_names})
results = Parallel(**self.joblib_params)(
delayed(compute_explanation_of_prediction)(
leaf=self.data_leaves[observation_idx, i_tree],
paths=self.tree_path[i_tree],
threshold_value=self.threshold_value[i_tree],
features_split=self.features_split[i_tree],
conditional_contributions=self.conditional_contributions[i_tree],
prediction=self.predictions[observation_idx],
feature_values=feature_values,
solve_duplicate_splits=solve_duplicate_splits)
for i_tree in range(self.n_trees))
# Extract data
all_samples_for_decision_table = [i['samples_for_decision_table'] for i in results]
all_contributions_for_decision_table = [i['contributions_for_decision_table'] for i in results]
samples_for_decision_table = dict()
contributions_for_decision_table = dict()
for feature in self.feature_names:
samples_for_decision_table[feature] = np.hstack([i[feature] for i in all_samples_for_decision_table])
contributions_for_decision_table[feature] = np.hstack([i[feature] for i in all_contributions_for_decision_table])
# Initialize output variables
numerical_columns = ['lower quartile', 'median', 'upper quartile']
categorical_columns = ['1st choice', '2nd choice']
# Split numerical from categorical features
numerical_features = [feature for feature in self.feature_names
if self.features_data_types[feature]['data_type'] == 'numerical']
categorical_features = list(set(self.feature_names).difference(numerical_features))
# Make DataFrames
decision_table_numerical = pd.DataFrame(columns=['value'] + numerical_columns + ['contribution'],
index=numerical_features)
decision_table_categorical = pd.DataFrame(columns=['parent_feature', 'value'] + categorical_columns + ['contribution'],
index=categorical_features)
# Create function for filling the categorical DataFrame
fill_numerical_values = lambda x: '%.3f' % x
fill_categorical_values = lambda x, y: '%s (%i%%)' % (x, y * 100) if x != '' else ''
fill_contribution_values = lambda x: '%.1f%%' % x
# Compute summary statistics for each feature
for feature in self.feature_names:
samples = np.array(samples_for_decision_table[feature])
# Check data type
if feature in categorical_features:
# Convert indices to values
samples_value = self.features_data_types[feature]['categories'][samples.astype(int)]
category_frequencies = pd.value_counts(samples_value, ascending=False, normalize=True)
                # Take the top two choices
choices = list(category_frequencies.index)
first_choice = choices[0]
second_choice = choices[1] if len(choices) > 1 else ''
# Take their frequency value
category_frequencies = category_frequencies.values
if category_frequencies.shape[0] < 2:
category_frequencies = np.hstack((category_frequencies,
np.zeros((2 - category_frequencies.shape[0], ))))
# Store values in nice format
decision_table_categorical.loc[feature, '1st choice'] = fill_categorical_values(first_choice, category_frequencies[0])
decision_table_categorical.loc[feature, '2nd choice'] = fill_categorical_values(second_choice, category_frequencies[1])
# Store median contribution
decision_table_categorical.loc[feature, 'contribution'] = np.median(contributions_for_decision_table[feature])
# Store name of parent feature, if any
if 'parent_feature' in self.features_data_types[feature].keys():
parent_feature = self.features_data_types[feature]['parent_feature']
else:
parent_feature = None
decision_table_categorical.loc[feature, 'parent_feature'] = parent_feature
elif feature in numerical_features:
# Compute quartiles
q = np.quantile(samples, [.25, .50, .75], interpolation='nearest')
decision_table_numerical.loc[feature, ['lower quartile', 'median', 'upper quartile']] = q
# Store median contribution
decision_table_numerical.loc[feature, 'contribution'] = np.median(contributions_for_decision_table[feature])
# Add sample of interest to decision table
decision_table_numerical['value'] = [this_sample[self.feature_names.index(i)] for i in numerical_features]
decision_table_categorical['value'] = [this_sample[self.feature_names.index(i)] for i in categorical_features]
# Sort decision table by contribution value
decision_table_numerical.sort_values(by='contribution', ascending=False, inplace=True)
decision_table_categorical.sort_values(by='contribution', ascending=False, inplace=True)
decision_table_categorical.drop(columns=['parent_feature'], inplace=True)
        # Limit the number of features used to explain this prediction
if threshold_contribution is not None:
decision_table_numerical = decision_table_numerical.loc[decision_table_numerical['contribution'] >= threshold_contribution]
decision_table_categorical = decision_table_categorical.loc[decision_table_categorical['contribution'] >= threshold_contribution]
if top_n_features is not None:
decision_table_numerical = decision_table_numerical.iloc[:top_n_features]
decision_table_categorical = decision_table_categorical.iloc[:top_n_features]
# Convert contribution column to string
decision_table_numerical['contribution'] = decision_table_numerical['contribution'].map(fill_contribution_values)
decision_table_categorical['contribution'] = decision_table_categorical['contribution'].map(fill_contribution_values)
# Convert other numerical columns to string
for feature in numerical_columns:
decision_table_numerical[feature] = decision_table_numerical[feature].map(fill_numerical_values)
# Print to console
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
outcome = self.target_data_type[self.target_name]['categories'][int(self.predictions[observation_idx])]
is_correct = 'correct' if self.correct_predictions[observation_idx] else 'not correct'
print('\nObservation #%i: %s = \'%s\' (%s)\n' % (observation_idx, self.target_name, outcome, is_correct))
if decision_table_numerical.shape[0] > 0:
print(decision_table_numerical)
print()
if decision_table_categorical.shape[0] > 0:
print(decision_table_categorical)
print()
def _prepare_data_and_predict(self, data, targets=None):
# Process input data
DP = DataProcessor().prepare(data=data, targets=targets)
# Extract information on data
self.data = DP.data
self.n_samples = self.data.shape[0]
self.original_feature_names = DP.info['original_feature_names']
self.feature_names = DP.info['feature_names']
self.n_features = DP.info['n_features']
self.features_data_types = DP.info['features_data_types']
if targets is not None:
self.targets = DP.targets
self.n_target_levels = DP.info['n_target_levels']
self.target_name = DP.info['target_name']
self.target_levels = DP.info['target_levels']
self.target_data_type = DP.info['target_data_type']
else: # some of these attributes can be inferred from the model
if self._model_specifics['implementation'] == 'sklearn' and \
self._model_specifics['model_type'] == 'classifier':
self.n_target_levels = self.model.n_classes_
self.target_name = 'target'
self.targets = None
self.target_levels = None
# Compute and store predictions
self.prediction_probabilities = self.model.predict_proba(self.data)
if self._model_specifics['model_type'] == 'classifier':
self.predictions = np.argmax(self.prediction_probabilities, axis=1)
if self.targets is not None:
self.correct_predictions = self.predictions == self.targets
else:
self.predictions = self.model.predict(self.data)
############################################################################
# Statistics
############################################################################
def compute_min_depth_distribution(self, mean_sample='relevant_trees'):
"""Calculates distribution of minimal depth of all variables in all trees.
REFERENCE: This function is based on plot_min_depth_distribution in the
R package randomForestExplainer.
SOURCE: https://github.com/MI2DataLab/randomForestExplainer/blob/master/R/min_depth_distribution.R
:param mean_sample:
- If mean_sample = "all_trees" (filling missing value): the minimal
depth of a variable in a tree that does not use it for splitting is
equal to the mean depth of trees. Note that the depth of a tree is
        equal to the length of the longest path from root to leaf in this
tree. This equals the maximum depth of a variable in this tree plus
one, as leaves are by definition not split by any variable.
- If mean_sample = "top_trees" (restricting the sample): to
calculate the mean minimal depth only B^tilde out of B (number of
trees) observations are considered, where B^tilde is equal to the
maximum number of trees in which any variable was used for
splitting. Remaining missing values for variables that were used for
splitting less than B^tilde times are filled in as in mean_sample =
"all_trees".
- mean_sample = "relevant_trees" (ignoring missing values): mean
minimal depth is calculated using only non-missing values.
The following attributes are stored in self:
min_depth_frame: [pandas DataFrame] Contains the depth at which each
feature can be found in each tree.
min_depth_frame_summary: [pandas DataFrame] Contains the count of each
depth value for each feature.
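        Example (illustrative sketch only; assumes `explainer` is an instance of
        this class that has already analyzed a fitted forest):
            >>> explainer.compute_min_depth_distribution(mean_sample='relevant_trees')
            >>> explainer.min_depth_frame.head()          # depth of each feature in each tree
            >>> explainer.min_depth_frame_summary.head()  # count of each depth value per feature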
"""
# Check inputs
if mean_sample != 'relevant_trees':
raise NotImplementedError
# Initialize temporary variables
new_depth_value = None
# Convert data to a long format
feature = np.vstack([np.tile(key, (len(value), 1))
for key, value in self.feature_depth.items()])
depth_value = np.hstack([np.array(value) for value in
self.feature_depth.values()])
min_depth_frame = pd.DataFrame(columns=['tree', 'feature', 'minimal_depth'])
min_depth_frame['tree'] = np.tile(np.arange(self.n_trees) + 1, (1, self.n_features)).ravel()
min_depth_frame['minimal_depth'] = depth_value
# Features become a categorical data type
min_depth_frame['feature'] = pd.Categorical(feature.ravel(),
categories=self.feature_names,
ordered=True)
# Sort output as in randomForestExplainer
min_depth_frame.sort_values(by=['tree', 'feature'], ascending=True, inplace=True)
min_depth_frame.reset_index(drop=True, inplace=True)
# Drop rows where minimal_depth is negative because it means that the
# feature was not used by that tree
min_depth_frame.drop(np.where(min_depth_frame['minimal_depth'] < 0)[0], inplace=True)
min_depth_frame.reset_index(drop=True, inplace=True)
# Summarize data by reporting count of each [feature minimal_depth] combination
summary = min_depth_frame.groupby(['feature', 'minimal_depth']).size()
# Convert back to DataFrame
summary = summary.to_frame(name='count').reset_index(level=['feature', 'minimal_depth'])
# Impute depth of features for those that were not evaluated in all trees
        # (Currently unreachable: other mean_sample modes raise NotImplementedError above)
        if mean_sample != 'relevant_trees':
missing_values = summary.groupby('feature').sum()
missing_values['n_missing_trees'] = self.n_trees - missing_values['count']
missing_values = missing_values[missing_values['n_missing_trees'] > 0]
if missing_values.shape[0] > 0:
rows_to_add = list()
features_with_missing_values = list(missing_values.index)
for feature in features_with_missing_values:
if mean_sample == 'all_trees':
new_depth_value = self.tree_depth.mean()
elif mean_sample == 'top_trees':
raise NotImplementedError
# Store values
rows_to_add.append([feature, new_depth_value,
missing_values.loc[feature]['n_missing_trees']])
# Add missing values to summary data
summary = summary.append(pd.DataFrame(rows_to_add, columns=summary.columns), ignore_index=True)
summary.sort_values(by=['feature', 'minimal_depth'], ascending=True, inplace=True)
summary.reset_index(drop=True, inplace=True)
# Store outputs
self.min_depth_frame = min_depth_frame
self.min_depth_frame_summary = summary
def compute_two_way_interactions(self, n_jobs=-1, verbose=False):
"""This function computes tables of two-way conditional interactions
between features. These values are the relative change in feature
contribution at a node where a feature is in the parent node, and another
one in one of the children. This type of information could highlight
whether there are combinations of features that are used in sequence
more often than others. The last column of the table reports the
relative contribution of a feature when used at the root of the tree.
Values are averaged across trees and observations in the data provided
to the explain_feature_contributions() method.
If the model is a classifier, we can further divide feature
contributions between correct and incorrect predictions of the model,
to characterize how features interact when the model turns out to be
right and when it is wrong.
        :param n_jobs: [int or None] The number of parallel processes to use.
:param verbose: [bool] Whether to print progress messages.
The following attributes are stored in self:
two_way_contribution_table: [dict] Contains pandas DataFrames of
relative feature contributions when 'all' samples are used. If the
model is a classifier, also tables for 'correct' and 'incorrect'
predictions will be calculated. In each table, larger values indicate
that when a particular feature is used after another one, there is a
more pronounced change in the prediction.
n_two_way_contribution_table: [dict] Same as two_way_contribution_table.
However, it contains the number of times a feature was used for split.
Higher values stand for a more frequent use of a combination of
features, indicating a possible relationship between them.
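        Example (illustrative sketch only; assumes conditional contributions have
        already been computed, e.g. via explain_feature_contributions()):
            >>> explainer.compute_two_way_interactions(n_jobs=2, verbose=True)
            >>> explainer.two_way_contribution_table['all']    # relative contribution changes
            >>> explainer.n_two_way_contribution_table['all']  # how often each combination was used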
"""
# Compute conditional contributions if not previously done
if self.conditional_contributions is None:
raise ValueError('First compute joint contributions')
# Initialize temporary variables
key_name = None
samples = None
# If the model is a classifier, separate contributions for correct and
# incorrect predictions of the model
if self._model_specifics['model_type'] == 'classifier':
n_iterations = 3
else:
n_iterations = 1
# Initialize output variables
self.two_way_contribution_table = dict()
self.n_two_way_contribution_table = dict()
for i_iter in range(n_iterations):
if i_iter == 0: # Select all samples
samples = np.arange(self.n_samples)
key_name = 'all'
else:
if self._model_specifics['model_type'] == 'classifier':
                    if i_iter == 1:  # Select only correctly predicted samples
samples = np.where(self.targets == self.predictions)[0]
key_name = 'correct'
elif i_iter == 2: # Select only wrongly predicted samples
samples = np.where(self.targets != self.predictions)[0]
key_name = 'incorrect'
if verbose:
print('Computing two-way feature interactions of %s predictions ...' % key_name)
# Initialize heatmap-tables
results = dict()
results['contribution_values'] = np.zeros((self.n_features,
self.n_features + 1,
self.n_target_levels),
dtype=np.float32)
results['contribution_values_n'] = np.zeros((self.n_features,
self.n_features + 1),
dtype=int)
# Process trees in parallel
Parallel(n_jobs=n_jobs, verbose=verbose, require='sharedmem')(
delayed(compute_two_way_conditional_contributions)(samples,
self.conditional_contributions[i_tree],
self.conditional_contributions_sample[i_tree],
self.features_split[i_tree],
results, threading.Lock())
for i_tree, estimator in enumerate(getattr(self.model, self._model_specifics['estimators_'])))
# Store values
# Average interactions across all samples and trees. Combinations of
# features that have never been selected will get a NaN
self.two_way_contribution_table[key_name] = divide0(results['contribution_values'],
np.atleast_3d(results['contribution_values_n']),
replace_with=np.nan)
self.n_two_way_contribution_table[key_name] = results['contribution_values_n']
if verbose:
print('done')
################################################################################
# Summary
################################################################################
def summarize_importance(self, permutation_iterations=0, display=True):
"""Calculate different measures of importance for variables presented
        in the model. Different measures are available for classification
        and regression models.
REFERENCE: This function is based on the function measure_importance in
the R package randomForestExplainer.
SOURCE: https://github.com/MI2DataLab/randomForestExplainer/blob/master/R/measure_importance.R
:param permutation_iterations: [int > 0] The number of permutations to
compute the 'significance' of the importance value of each feature.
If 0, the permutation test is skipped.
:param display: [bool] Whether to display the results in the console.
:return importance_frame: [pandas DataFrame] Contains importance metrics
for the model.
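        Example (illustrative sketch only; the permutation test is not implemented,
        so leave permutation_iterations at its default of 0):
            >>> importance = explainer.summarize_importance(display=False)
            >>> importance[['variable', 'mean_min_depth', 'times_a_root']].head()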
"""
# Initialize temporary variables
node_purity = None
# Compute the minimal depth distribution, if not done already
if self.min_depth_frame is None:
self.compute_min_depth_distribution()
# Initialize importance_frame
if self._model_specifics['model_type'] == 'classifier':
accuracy_column_name = 'accuracy_decrease'
node_purity_column_name = 'gini_decrease'
else:
accuracy_column_name = 'mse_increase'
node_purity_column_name = 'node_purity_increase'
importance_frame = pd.DataFrame(columns=['variable', 'mean_min_depth',
'no_of_nodes', accuracy_column_name,
node_purity_column_name,
'no_of_trees', 'times_a_root',
'p_value'])
for i_feature, feature in enumerate(self.feature_names):
# Gather info on this feature from other tables
mean_min_depth = self.min_depth_frame[self.min_depth_frame['feature'] == feature]['minimal_depth'].mean()
no_of_nodes = self.no_of_nodes[feature].sum()
min_depth_summary_this_feature = self.min_depth_frame_summary[self.min_depth_frame_summary['feature'] == feature]
no_of_trees = min_depth_summary_this_feature['count'].sum()
if (min_depth_summary_this_feature['minimal_depth'] == 0).any():
times_a_root = min_depth_summary_this_feature[min_depth_summary_this_feature['minimal_depth'] == 0]['count'].values[0]
else:
times_a_root = 0
# Compute performance information based on the model type
if permutation_iterations > 0:
raise NotImplementedError
# if accuracy_column_name == 'accuracy_decrease':
# accuracy = 1
# elif accuracy_column_name == 'mse_increase':
# accuracy = 1
else:
accuracy = np.nan
if node_purity_column_name == 'gini_decrease':
node_purity = np.nan
elif node_purity_column_name == 'node_purity_increase':
node_purity = np.nan
# Compute p-value
p_value = binom_test(no_of_nodes, no_of_nodes, 1 / self.n_features,
alternative='greater')
# Store data
importance_frame.at[i_feature, 'variable'] = feature
importance_frame.at[i_feature, 'mean_min_depth'] = mean_min_depth
importance_frame.at[i_feature, 'no_of_nodes'] = no_of_nodes
importance_frame.at[i_feature, accuracy_column_name] = accuracy
importance_frame.at[i_feature, node_purity_column_name] = node_purity
importance_frame.at[i_feature, 'no_of_trees'] = no_of_trees
importance_frame.at[i_feature, 'times_a_root'] = times_a_root
importance_frame.at[i_feature, 'p_value'] = p_value
# Remove the accuracy column, if that metric has not been computed
if permutation_iterations == 0:
importance_frame.drop(columns=accuracy_column_name, inplace=True)
# Sort values
sort_by = accuracy_column_name if permutation_iterations > 0 else node_purity_column_name
importance_frame.sort_values(by=sort_by, ascending=False, inplace=True)
importance_frame.reset_index(drop=True, inplace=True)
# Store results
self.importance_frame = importance_frame
# Display results
        if display:
            with pd.option_context('display.max_rows', None, 'display.max_columns', None):
                print(importance_frame)
        return importance_frame
############################################################################
# Plots
############################################################################
def plot_min_depth_distribution(self, top_n_features=None,
min_trees_fraction=0.0, mark_average=True,
average_n_digits=2, sort_by_weighted_mean=False,
title='Distribution of minimal depth',
colormap='tab20', return_figure_handle=False):
"""Plots distribution of minimal depth of variables in all trees along
with mean depths for each variable. In general, the shallower (less deep)
variables are the more influential.
REFERENCE: This function has been inspired by plot_min_depth_distribution
in the R package randomForestExplainer.
SOURCE: https://github.com/MI2DataLab/randomForestExplainer/blob/master/R/min_depth_distribution.R
:param top_n_features: [int or None] The maximal number of variables with
lowest mean minimal depth to plot. If None, all features are shown.
:param min_trees_fraction: [float in range [0, 1], extrema included] The
fraction of trees in which a feature has to be used for splitting
to have the feature included in the plot.
:param mark_average: [bool] Whether to mark the average depth on the plot.
:param average_n_digits: [int] Number of digits for displaying mean
minimal depth.
:param sort_by_weighted_mean: [bool] Whether to sort features by their
        proportion in each depth bin. In this way, features that appeared more
        often at a shallower depth will rank higher, despite their actual mean.
:param title: [str] The plot title.
:param colormap: [str] Name of matplotlib colormap. Default is 'tab20'.
:param return_figure_handle: [bool] Whether to return the figure handle,
which can be used for printing, for example.
:return fig: [matplotlib Figure] The displayed figure.
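        Example (illustrative sketch only; compute_min_depth_distribution() must
        have been called first):
            >>> fig = explainer.plot_min_depth_distribution(top_n_features=10,
            ...                                             return_figure_handle=True)
            >>> fig.savefig('min_depth_distribution.png')  # hypothetical output file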
"""
# Cannot continue with minimal depth distributions
if self.min_depth_frame is None:
raise ValueError('Needs to compute minimal depth distributions first.\nUse method compute_min_depth_distribution() to do so.')
# Get number of trees in which a feature was used for splitting
tree_count = self.min_depth_frame_summary.groupby('feature')['count'].sum().to_frame()
tree_count['fraction'] = tree_count['count'] / self.n_trees
tree_count = tree_count[tree_count['fraction'] >= float(min_trees_fraction)]
# Get list of features to analyze
features = tree_count.index.to_list()
# Get the max depth among all trees
max_depth = max([value.max() for key, value in self.feature_depth.items()
if key in features])
# Make colormap
cmap = plt.get_cmap(colormap, max_depth)
# Compute average minimum depth and the position of each label
avg_min_depth = pd.DataFrame(columns=['feature', 'avg_depth', 'x', 'weight'])
for i_feature, feature in enumerate(features):
data = self.min_depth_frame_summary.loc[self.min_depth_frame_summary['feature'] == feature,
['minimal_depth', 'count']]
            # Calculate the average only if the user requested it
if mark_average:
this_feature_depth_values = self.min_depth_frame[
self.min_depth_frame['feature'] == feature]['minimal_depth']
avg_depth = this_feature_depth_values.mean()
sorted_depth_values = np.hstack([np.linspace(data.iloc[i]['minimal_depth'],
data.iloc[i]['minimal_depth'] + 1,
data.iloc[i]['count'])
for i in range(data.shape[0])])
mean_depth_pos = np.abs(sorted_depth_values - avg_depth).argmin()
mean_depth_pos = np.clip(mean_depth_pos, a_min=0, a_max=self.n_trees).astype(int)
else:
avg_depth = np.nan
mean_depth_pos = np.nan
# Store values
avg_min_depth.at[i_feature, 'feature'] = feature
avg_min_depth.at[i_feature, 'avg_depth'] = avg_depth
avg_min_depth.at[i_feature, 'x'] = mean_depth_pos
avg_min_depth.at[i_feature, 'weight'] = (data['count'] * data['minimal_depth'] ** 2).sum()
# Sort values
if sort_by_weighted_mean:
# Features used closer to the root more often will rank higher
sort_by = 'weight'
else:
sort_by = 'avg_depth'
# Apply sorting
avg_min_depth.sort_values(sort_by, ascending=True, inplace=True)
avg_min_depth.reset_index(drop=True, inplace=True)
# Re-extract (sorted) list of features
features = avg_min_depth['feature'].tolist()
# Keep only top features
if top_n_features is not None:
features = features[:top_n_features]
# Generate a color for each depth level
depth_values = np.arange(max_depth + 1)
# Get location and width of each bar
n_features = len(features)
feature_y_width = 1 / n_features * .9
feature_y_pos = np.linspace(0, 1, n_features)
feature_y_gap = feature_y_pos[1] - feature_y_width
# Open figure
fig = plt.figure(figsize=(7, 8))
fig.clf()
ax = fig.add_subplot(1, 1, 1)
# Mark the maximum number of trees used
max_n_trees = 0
# Plot horizontally stacked bars
for i_feature, feature in enumerate(features):
# Get data and prepare x- and y-ranges
data = self.min_depth_frame_summary.loc[self.min_depth_frame_summary['feature'] == feature,
['minimal_depth', 'count']]
# Add missing depths
missing_depths = np.setdiff1d(depth_values, data['minimal_depth'])
data = pd.concat((data, pd.DataFrame(np.vstack((missing_depths, np.zeros(missing_depths.shape[0]))).T.astype(int),
columns=data.columns)), ignore_index=True,
sort=True)
data.sort_values(by='minimal_depth', ascending=True, inplace=True)
# Get count
count = data.sort_values(by='minimal_depth')['count'].values
count = np.vstack((np.cumsum(count) - count, count)).T
max_n_trees = max(max_n_trees, count.max())
# Plot horizontal bars
yrange = (feature_y_pos[i_feature], feature_y_width)
ax.broken_barh(xranges=count.tolist(), yrange=yrange, facecolors=cmap.colors,
alpha=.8)
# Mark average depth
            if mark_average:
# Add vertical bar for the mean
ax.plot([avg_min_depth.loc[i_feature, 'x']] * 2, [yrange[0], yrange[0] + yrange[1]],
color='k', lw=5, solid_capstyle='butt')
# Add text box showing the value of the mean
ax.text(avg_min_depth.loc[i_feature, 'x'], yrange[0] + yrange[1] / 2,
'%%.%if' % average_n_digits % avg_min_depth.loc[i_feature, 'avg_depth'],
ha='center', va='center', bbox=dict(boxstyle='round', facecolor='w'))
# Adjust axes appearance
ax.set_yticks(feature_y_pos + feature_y_width / 2)
ax.set_yticklabels(features)
ax.set_ylim(1 + feature_y_width + feature_y_gap, 0)
ax.set_xlim(0, max_n_trees)
adjust_spines(ax, spines=['bottom', 'left'], offset=0, smart_bounds=True)
ax.spines['left'].set_color('None')
ax.tick_params(axis='y', length=0, pad=0)
ax.set_xlabel('Number of trees (out of %i)' % self.n_trees)
if top_n_features is not None:
title += ' (top %i features)' % top_n_features
ax.set_title(title)
# Adjust layout
fig.tight_layout()
# Add lines at beginning and end of plotting area
ax.axvline(0, color='k', lw=.5)
ax.axvline(self.n_trees, color='k', lw=.5)
# Add colorbar
cmap_cbar = LinearSegmentedColormap.from_list('cmap', cmap.colors, cmap.N + 1)
ax_bg = fig.add_axes(ax.get_position())
im_cbar = ax_bg.imshow(np.tile(depth_values, (2, 1)),
cmap=cmap_cbar, aspect='auto', interpolation=None,
vmin=depth_values.min(), vmax=depth_values.max() + 1)
cbar = fig.colorbar(im_cbar)
# The axes ax_bg has now been squeezed to the left by the colorbar. Copy
# that position back to ax, and hide the axes ax_bg
ax.set_position(ax_bg.get_position())
ax_bg.set_visible(False)
# Set labels of colorbar
cbar.ax.tick_params(axis='both', length=0)
cbar.set_ticks(depth_values + .5)
cbar.set_ticklabels(depth_values)
# Invert colorbar direction so 'deeper' is below 'shallower'
cbar.ax.set_ylim(cbar.ax.get_ylim()[::-1])
# Make colorbar shorter so a title can be written
bbox = cbar.ax.get_position()
cbar.ax.set_position([bbox.x0 + .025, bbox.y0, bbox.x1 - bbox.x0, .6])
cbar.ax.set_title('Minimal\ndepth')
if return_figure_handle:
return fig
def plot_two_way_interactions(self, sort_features_on_target=True,
targets_to_plot=None, sort_on_contributions=True,
top_n_features=None, return_fig_handle=False):
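        """Plot heatmaps of the two-way feature interaction tables produced by
        compute_two_way_interactions(). One figure is created per table (e.g.
        'all', 'correct' and 'incorrect' predictions for classifiers), with one
        heatmap per selected target plus a final heatmap of combination frequencies.
        :param sort_features_on_target: [bool or str] If a string, the name of the
            target level used to sort features; if True, the first plotted target
            is used; if False, features keep their original order.
        :param targets_to_plot: [str, list or None] Target levels to plot. If None,
            they are inferred from the model.
        :param sort_on_contributions: [bool] Whether to sort features on contribution
            values (True) or on combination counts (False).
        :param top_n_features: [int or None] If not None, plot at most this many features.
        :param return_fig_handle: [bool] Whether to return the figure handles.
        """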
# Cannot continue without 2way tables
if self.two_way_contribution_table is None:
raise ValueError('Needs to compute 2-way interactions between features first.\nUse method compute_two_way_interactions() to do so.')
        # Set number of subplot columns in figure
        if targets_to_plot is not None:
            targets_to_plot = true_list(targets_to_plot)
        else:
            if isinstance(sort_features_on_target, str):
                targets_to_plot = list([sort_features_on_target])
            elif self._model_specifics['model_type'] == 'classifier' and self.n_target_levels == 2:
                # If this is a binary classification task, the contribution to each
                # target will be identical, so we'll plot only one
                targets_to_plot = list([self.target_levels[-1]])  # Assume that more interesting label is the highest
            else:
                targets_to_plot = list(self.target_levels)
        # If sort_features_on_target is True, and which target is unspecified,
        # use the first target that will be plotted (resolved after targets_to_plot
        # so the default arguments do not crash on targets_to_plot[0])
        if isinstance(sort_features_on_target, bool):
            if sort_features_on_target:
                sort_features_on_target = targets_to_plot[0]
# Get indices of targets to plot
targets_to_plot_idx = [self.target_levels.index(i) for i in targets_to_plot]
n_columns = len(targets_to_plot_idx)
# Add another plot for the heatmap with percentage of values
n_columns += 1
# A column for each heatmap
tables = list(self.two_way_contribution_table.keys())
n_figures = len(tables)
figures = list()
for i_fig in range(n_figures):
# If sort_features_on_target is a string, it is the name of the target
# to use. In that case, get a sorting order for the columns
if isinstance(sort_features_on_target, str):
# Select corresponding data
index_of_target = self.target_levels.index(sort_features_on_target)
if sort_on_contributions:
data = self.two_way_contribution_table[tables[i_fig]][:, :, index_of_target]
else:
data = self.n_two_way_contribution_table[tables[i_fig]]
# Convert to DataFrame
df = pd.DataFrame(data, index=self.feature_names,
columns=self.feature_names + ['root'])
# Sort by contribution when feature is used at the root
df['root'].replace(to_replace=np.nan, value=0, inplace=True)
df.sort_values(by='root', ascending=False, inplace=True,
na_position='last')
# Get order of features
features_order = list(df.index)
else:
features_order = list(self.feature_names)
# Keep only at most n features
if top_n_features is not None:
features_order = features_order[:top_n_features]
# Open figure and plot heatmaps
fig, ax = plt.subplots(nrows=1, ncols=n_columns, figsize=(13, 5))
figures.append(fig)
for i_col in range(n_columns):
# Assess whether to add an annotation to the heatmap
heatmap_annotations = len(features_order) < 15
# Select data
if i_col < n_columns - 1: # Contribution values go in the first n-1 plots
data = self.two_way_contribution_table[tables[i_fig]][:, :, targets_to_plot_idx[i_col]]
subplot_title = 'Predictions for \'%s\'' % self.target_levels[targets_to_plot_idx[i_col]]
else: # Percentages go in the last plot
data = self.n_two_way_contribution_table[tables[i_fig]]
# Normalize across number of trees and samples to
# obtain a percentage
data = data / self.n_trees / self.n_samples * 100
subplot_title = 'Combination frequencies'
# Remove 0s
data[data == 0] = np.nan
# Convert data to DataFrame, so it can be passed to seaborn
df = pd.DataFrame(data, index=self.feature_names,
columns=self.feature_names + ['root'])
# Move root to the front
df = pd.concat((df['root'], df[self.feature_names]), axis=1)
# Sort features
if features_order is not None:
# Select and sort columns
df = | pd.concat((df['root'], df[features_order]), axis=1) | pandas.concat |
#-*- coding:utf-8 -*-
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics import silhouette_samples
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import *
from sklearn.cluster import *
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from operator import itemgetter
from operator import attrgetter
from pyjarowinkler import distance
from collections import Counter
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import nltk
import math
import time
import csv
import sys
import re
import io
import os
def interest(co_author_path, reviewer_information_path, co_author_network_path, professionalism_result, extractive_keyword_result, reviewee_index,matrix_multifly_count):
path1,path2=co_author_path,reviewer_information_path
network_path=co_author_network_path
temp,reviewee = professionalism_result,extractive_keyword_result
index,multifly=reviewee_index,matrix_multifly_count
co_author_csv = pd.read_csv(path1, encoding='latin1')
co_author_df = co_author_csv.merge(temp, on=['reviewer_orcid'])
co_author_df2 = co_author_df.iloc[:]['reviewer_name'].tolist()
try :
network_csv = pd.read_csv(network_path, encoding='latin1',index_col=0)
except FileNotFoundError :
df1=pd.read_csv('./network/network0704.csv')
df2=pd.read_csv('./network/network0704_2.csv')
tmp=[1]
tmp.extend(i*2 for i in range(1,11))
for k in range(1,len(df1.columns)):
a=df1.columns[k]
a1=df2.loc[df2['reviewer_coauthor_title']==a]
a_list=[]
for i in range(len(tmp)):
a_list.append(a1.iloc[0][tmp[i]])
for i in range(len(df1)):
                if df1.iloc[i, 0] in a_list:
                    df1.iloc[i, k] = 1
                else:
                    df1.iloc[i, k] = 0
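        # df1 now holds a binary incidence matrix (rows: network nodes, one column
        # per co-author title). mat.dot(mat.T) below gives a co-occurrence matrix,
        # its diagonal is zeroed to drop self-links, and fof = mat3.dot(mat3)
        # counts two-step ("friend of a friend") connections.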
mat=(df1.values)[:,1:]
mat2=mat.T
mat3=np.dot(mat,mat2)
for i in range(len(mat3)):
mat3[i,i]=0
fof=np.dot(mat3,mat3)
df1_index=(df1.iloc[:,0]).tolist()
df1_col=(df1.columns)[1:]
network_csv= | pd.DataFrame(data=mat3,index=df1_index,columns=df1_index) | pandas.DataFrame |
import pandas as pd
import os
import re
from pathlib import Path
from utilities.generic.file_finder import FileFinder
def list_diff(li1, li2):
"""Returns the subset of lists that are present in li1 but absent in li2.
Args:
li1: The first list (a superset of li2).
li2: The second list (a subset of li1)
Returns:
list: a list of the elements present in li1, but not li2."""
    # Symmetric difference; equal to the elements of li1 absent from li2 when li2 is a subset of li1
    return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))
### NOTE THIS IS SPECIFIC TO EML STUDY CURRENTLY.
class DataTableGenerator(object):
def __init__(self, running_fpath, file_finder, root_dir="", export_fpath=""):
self._this_fpath = running_fpath
self._file_finder = file_finder
self.export_fpath = export_fpath
        if not root_dir:
            self.ROOT_DIR = Path(f"{self._this_fpath}/data/unzipped/")
        else:
            self.ROOT_DIR = Path(root_dir)
if not self.export_fpath:
self.export_fpath = Path(f"{self.ROOT_DIR}/data_summaries/eml_summary_nirs.csv")
if not os.path.exists(Path(f"{self.ROOT_DIR}/data_summaries")):
os.makedirs(Path(f"{self.ROOT_DIR}/data_summaries"))
def gen_data_table(self):
# Get all the file paths needed in order to gather information about all of the sessions.
self.nirs_fnames = self._file_finder.find_files_of_type(self.ROOT_DIR, ".nirs")
self.trigger_fnames = self._file_finder.find_files_of_type(self.ROOT_DIR, ".tri")
self.trial_sheet_fnames = self._file_finder.find_files_of_type(self.ROOT_DIR, "Trials.txt")
self.nirs_dir_fpaths = self._file_finder.find_files_of_type(self.ROOT_DIR, ".nirs", return_parent=True)
# Start getting info about the data.
self.valid_triggers_dict = self.validate_triggers(self.trigger_fnames)
self.localizer_order_dict = self.get_localizer_order(self.trial_sheet_fnames)
self.reading_order_dict = self.get_reading_order(self.trial_sheet_fnames)
# Generate dataframe to collate all of that information and write it to file.
df = pd.DataFrame([self.nirs_fnames, self.trigger_fnames, self.nirs_dir_fpaths,
self.trial_sheet_fnames, self.valid_triggers_dict,
self.localizer_order_dict, self.reading_order_dict]).transpose()
df.index.name = 'participant'
df.columns = ["NIRS fPath", "Trigger fPath", "nirs_dir",
"Trial Sheet fPath", "Trigger Notes",
"Localizer Order", "Reading Order"]
df.to_csv(self.export_fpath)
return self.export_fpath
def validate_triggers(self, trigger_fnames):
"""Reads the .lsl trigger file from each participant and makes a few judgements
about the state of the triggers. These judgements are then written to the data state table later on.
Args:
trigger_fnames (dict): key is ID, val is filepath to lsl.tri file for that id.
Returns:
triggers_valid (dict): key is ID, val is string describing the state of the triggers.
"""
localizer_triggers = [25, 27, 24, 22, 23, 26]
triggers_valid = {}
for k, val in trigger_fnames.items():
df = pd.DataFrame()
for v in val:
df = df.append( | pd.read_csv(v, sep=";", names=["t", "sample", "val"]) | pandas.read_csv |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo = algos.SelectAll(include_no_data=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('bt.ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].ix[dts[1]] = 105
data['c1'].ix[dts[2]] = 95
data['c1'].ix[dts[3]] = 105
data['c1'].ix[dts[4]] = 95
# low vol c2
data['c2'].ix[dts[1]] = 100.1
data['c2'].ix[dts[2]] = 99.9
data['c2'].ix[dts[3]] = 100.1
data['c2'].ix[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('bt.ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import os
from time import sleep
import pathlib
import dask
import dask.dataframe as dd
from dask.utils import tmpfile, tmpdir, dependency_depth
from dask.dataframe.utils import assert_eq
def test_to_hdf():
pytest.importorskip("tables")
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]
)
a = dd.from_pandas(df, 2)
with tmpfile("h5") as fn:
a.to_hdf(fn, "/data")
out = pd.read_hdf(fn, "/data")
tm.assert_frame_equal(df, out[:])
with tmpfile("h5") as fn:
a.x.to_hdf(fn, "/data")
out = pd.read_hdf(fn, "/data")
tm.assert_series_equal(df.x, out[:])
a = dd.from_pandas(df, 1)
with tmpfile("h5") as fn:
a.to_hdf(fn, "/data")
out = pd.read_hdf(fn, "/data")
| tm.assert_frame_equal(df, out[:]) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
import sys
import pandas as pd
import numpy as np
import json
from datetime import datetime
from hashlib import md5
import os.path as path
import argparse
import pysolr
from uuid import uuid1
DEBUG = True
filename = 'output/PATH_005'
filename = 'output/PATH_147'
filename = 'output/PATH_016'
filename = 'output/PATH_024'
filename = 'output/PATH_008'
filename = 'output/PATH_090'
filename = 'output/AA_132'
filename = 'output/PATH_004'
filename = 'output/AA_003'
filename = 'output/HD_001'
filename = 'output/TR_002'
filename = 'output/PATH_004'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process trains services \
file to json')
parser.add_argument('inputfile', type=str, help='name of working \
timetable file to parse')
args = parser.parse_args()
filename = args.inputfile
DEBUG = False
filestub = path.basename(filename)
if DEBUG:
print(filename)
pd.set_option('display.max_columns', None)
ISO8601_DATE = datetime(1900, 1, 1)
DAY = pd.offsets.Day()
MONDAY = pd.offsets.Week(weekday=0)
def header_date(this_column):
return pd.to_datetime(this_column, format='%d%m%y').dt.strftime('%Y-%m-%d')
def wtt_date(this_column):
return pd.to_datetime(this_column, format='%y%m%d').dt.strftime('%Y-%m-%d')
def wtt_datetime(this_column):
return this_column.dt.strftime('%Y-%m-%dT%H:%M:%SZ')
def wtt_time(this_column, format='%H%M%S'):
this_column = this_column.str.replace('H', '30').str.replace(' ', '00')
return pd.to_datetime(this_column, format=format)
def blank_columns(this_frame):
return [n for n in this_frame.select_dtypes(include=['object']).columns if this_frame[n].str.isspace().all() or (this_frame[n] == '').all()]
def strip_columns(this_frame):
return [n for n in this_frame.select_dtypes(include=['object']).columns if this_frame[n].str.isspace().any()]
def days_str(this_series):
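    # Encode the days of operation as a 7-character bit mask, Monday first:
    # e.g. a date falling on a Wednesday becomes '0010000'.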
return pd.to_datetime(this_series).apply(lambda v: '{:b}'.format(64 >> v.weekday()).zfill(7))
def get_dates(this_df):
no_idx = this_df['Date To'].str.isspace()
this_df.loc[no_idx, 'Days'] = days_str(this_df.loc[no_idx, 'Date From'])
this_df.loc[no_idx, 'Date To'] = this_df.loc[no_idx, 'Date From']
this_df['Date From'] = pd.to_datetime(this_df['Date From'], format='%y%m%d')
this_df['Date To'] = pd.to_datetime(this_df['Date To'], format='%y%m%d')
this_df['Dates'] = wtt_date(this_df['Date From'] - MONDAY) + '.' + wtt_date(this_df['Date To'] + MONDAY) + '.' + this_df['Days']
this_df['Date From'] = wtt_datetime(this_df['Date From'])
this_df['Date To'] = wtt_datetime(this_df['Date To'])
return this_df[['Date From', 'Date To', 'Dates', 'Days']]
def header_record(records):
"""process CIF file header record from 80-character line string"""
this_array = [[line[0:2], line[2:22], line[22:28], line[28:32], line[32:39], line[39:46], line[46:47], line[47:48], line[48:54], line[54:60]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'File Mainframe Identity', 'Date of Extract', 'Time of Extract', 'Current File Ref', 'Last File Ref', 'Bleed off Update Ind', 'Version', 'User Extract Start Date', 'User Extract End Date'])
this_frame['Extract Datetime'] = | pd.to_datetime(this_frame['Time of Extract'] + this_frame['Date of Extract'], format='%H%M%d%m%y') | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*
import os
import csv
import pandas as pd
import numpy as np
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
from sklearn.preprocessing import scale
from scipy.stats import ks_2samp
from .utils import CSV_READ_FORMAT, CSV_WRITE_FORMAT
from .utils import Accessor, Stats
from .logger import log
def compute_aggregates(row):
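    # Intended for row-wise use (e.g. frame.apply(compute_aggregates, axis=1));
    # each row is expected to expose `entityA` and `entityB` attributes.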
metadataA = Accessor.get_entity_aggregate(row.entityA)
metadataB = Accessor.get_entity_aggregate(row.entityB)
mean = abs(metadataA['mean'] - metadataB['mean'])
std = abs(metadataA['std'] - metadataB['std'])
var = abs(metadataA['var'] - metadataB['var'])
frequency = abs(metadataA['frequency'] - metadataB['frequency'])
result = pd.Series({'mean': mean, 'std': std, 'var': var, 'frequency': frequency})
return result
def compute_hellinger_distance(row):
hd = Accessor.hellinger_distance_2entity(row.entityA, row.entityB)
result = pd.Series({'hellinger_distance': hd})
return result
def compute_ks_test(row):
ks, pvalue = Accessor.ks_test_2entity(row.entityA, row.entityB)
result = | pd.Series({'ks_test': ks, 'pvalue': pvalue}) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from IPython.display import HTML
from matplotlib import animation
from sklearn.preprocessing import MinMaxScaler
sns.set()
# In[2]:
class Model:
def __init__(self, learning_rate, num_layers, size, size_layer, output_size, forget_bias=0.1):
def lstm_cell(size_layer):
return tf.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple=False)
rnn_cells = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell(size_layer) for _ in range(num_layers)], state_is_tuple=False
)
self.X = tf.placeholder(tf.float32, (None, None, size))
self.Y = tf.placeholder(tf.float32, (None, output_size))
drop = tf.contrib.rnn.DropoutWrapper(rnn_cells, output_keep_prob=forget_bias)
self.hidden_layer = tf.placeholder(tf.float32, (None, num_layers * 2 * size_layer))
self.outputs, self.last_state = tf.nn.dynamic_rnn(
drop, self.X, initial_state=self.hidden_layer, dtype=tf.float32
)
rnn_W = tf.Variable(tf.random_normal((size_layer, output_size)))
rnn_B = tf.Variable(tf.random_normal([output_size]))
self.logits = tf.matmul(self.outputs[-1], rnn_W) + rnn_B
self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
# In[3]:
df = pd.read_csv("GOOG-year.csv")
date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
df.head()
# In[4]:
minmax = MinMaxScaler().fit(df.iloc[:, 4:5].astype("float32"))
df_log = minmax.transform(df.iloc[:, 4:5].astype("float32"))
df_log = | pd.DataFrame(df_log) | pandas.DataFrame |
"""
Randen: Random DataFrame Generator
A small utility to generate random dataframes for testing, benchmarking, and similar purposes
Supported dataframe types:
- string
- datetime
- char
- int
- float
- bool
- mix(provide column types)
TODO:
- Add Null ratio support
- Add Documentation build(readdocs? )
"""
__author__ = "<NAME>"
__date__ = "05-10-2020"
__appname__ = "randen"
import time
import random
import secrets
import logging
from typing import List
from functools import partial
from datetime import datetime
from random import random, choice
from collections import Counter
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits
import numpy as np
import pandas as pd
logging_format = "%(asctime)s [%(levelname)s] %(name)s : %(message)s in %(pathname)s:%(lineno)d"
logging.basicConfig(level=logging.DEBUG, format=logging_format)
logger = logging.getLogger(__appname__)
class DataFrameGenerator:
"""Dataframe Generator class
Usage:
-----
dfg = DataFrameGenerator()
df = get_dataframe(nrows=10, ctypes=[str, int, float])
...
This will return the dataframe with following traits
> dfg.shape == (10, 3)
> dfg.columns == ["Str0", "Int0", "Float0"]
> dfg.dtypes == Str object
Int int32
Float float64
dtype: object
Supported APIs:
---------------
dfg.get_dataframe(...)
dfg.get_integer_dataframe(...)
dfg.get_float_dataframe(...)
dfg.get_string_dataframe(...)
dfg.get_char_dataframe(...)
dfg.get_dates_dataframe(...)
dfg.get_bool_dataframe(...)
"""
def __init__(self):
self._numrows = None
def _generate_ints(self, _min: int = -100000, _max: int = 100000) -> np.ndarray:
return np.random.randint(low=_min, high=_max, size=self._numrows)
def _generate_floats(self, _min: float = -1.0, _max: float = 1.0) -> np.ndarray:
return ((_max - _min) * np.random.random(size=self._numrows)) + _min
def _generate_strings(self, _min: int = 10, _max: int = 20) -> List[str]:
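        # Build unique random alphanumeric strings of random length between _min and _max;
        # looping until the set reaches self._numrows guarantees there are no duplicates.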
keys = set()
pickchar = partial(secrets.choice, ascii_letters + digits)
while len(keys) < self._numrows:
keys |= {''.join([pickchar()
for _ in range(np.random.randint(low=_min, high=_max))])
for _ in range(self._numrows - len(keys))}
return list(keys)
def _generate_dates(self, start: datetime = None, end: datetime = None) -> pd.DatetimeIndex:
if not start:
start = datetime.fromtimestamp(0)
if not end:
end = datetime.now()
return pd.date_range(start=start, end=end, periods=self._numrows)
def _generate_bools(self) -> List[bool]:
return [not random() >= 0.5 for _ in range(self._numrows)]
def _generate_chars(self, lowercase: bool = True) -> List[str]:
if lowercase:
return [choice(ascii_lowercase) for _ in range(self._numrows)]
else:
return [choice(ascii_uppercase) for _ in range(self._numrows)]
def _get_column_names(self, ctypes: List[type], columns: List[str] = None) -> List[str]:
"""
TODO: Change column names to Str0, Str1, Integer0, Integer1, ... format
TODO: Optimize the column name generation?
Args:
ctypes (List[type]): Column types of the dataframe
columns (List[str], optional): Column names of the dataframe. Defaults to None.
Returns:
List[str]: columns; Names of dataframe columns
"""
if columns is not None and len(ctypes) == len(columns):
return columns
columns = []
_ctypes_count = Counter(ctypes)
for i, _ctype in enumerate(ctypes):
_column_name = f"{_ctype.__name__.capitalize()}{i}"
columns.append(_column_name)
return columns
def get_dataframe(self, nrows: int, ctypes: List[type], columns: List[str] = None) -> pd.DataFrame:
"""Generate random data frame of shape 'nrows x len(ctypes)'
Args:
nrows (int): Number of rows
ctypes (List[type]): Column types of the dataframe
columns (List[str], optional): Column names of the dataframe. Defaults to None.
Raises:
ValueError: If requested Column datatype is unsupported
Returns:
pd.DataFrame:
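        Example (illustrative sketch):
            >>> dfg = DataFrameGenerator()
            >>> df = dfg.get_dataframe(nrows=5, ctypes=[int, str, bool])
            >>> df.shape
            (5, 3)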
"""
assert ctypes is not None, "provide columns' data types"
if columns is not None:
assert len(columns) == len(ctypes), "provide all or No Columns names"
columns = self._get_column_names(ctypes, columns)
self._numrows = nrows
out_df = pd.DataFrame()
for i, _ctype in enumerate(ctypes):
_col_name = columns[i]
if _ctype == int:
_data_list = self._generate_ints()
elif _ctype == float:
_data_list = self._generate_floats()
elif _ctype == bool:
_data_list = self._generate_bools()
elif _ctype == str:
_data_list = self._generate_strings()
elif _ctype == bytes:
_data_list = self._generate_chars()
elif _ctype == datetime:
_data_list = self._generate_dates()
else:
logger.error(f"Unsuported datatype {_ctype} requested")
raise ValueError(f"Unsupported datatype {_ctype} requested")
out_df[_col_name] = _data_list
return out_df
def get_integer_dataframe(self, nrows: int, ncols: int, nullratio=0, columns: List[str] = None,
minval: int = -100000, maxval: int = 100000) -> pd.DataFrame:
"""Generate a dataframe of ONLY integer datatype values
Args:
nrows (int): Number of rows
ncols (int): Number of columns of type int
nullratio (int, optional): Fraction of values to leave null (currently not applied). Defaults to 0.
columns (List[str], optional): Column names of the dataframe. Defaults to None.
minval (int, optional): Minimum value of the integers. Defaults to -100000.
maxval (int, optional): Maximum value of the integers. Defaults to 100000.
Returns:
pd.DataFrame:
"""
if columns is not None:
assert len(columns) == ncols, "provide all or No Columns names"
logger.info(f"Generating {nrows}x{ncols} all integer dataframe")
self._numrows = nrows
out_df = pd.DataFrame()
for i in range(ncols):
_col_name = columns[i] if columns is not None else f"Integer{i}"
out_df[_col_name] = self._generate_ints(_min=minval, _max=maxval)
return out_df
def get_float_dataframe(self, nrows: int, ncols: int, nullratio: float = 0, columns: List[str] = None,
minval: float = -1.0, maxval: float = 1.0) -> pd.DataFrame:
"""Generate a dataframe of ONLY floating point datatype values
Args:
nrows (int): Number of rows
ncols (int): Number of columns of type float
nullratio (float, optional): Fraction of values to leave null (currently not applied). Defaults to 0.
columns (List[str], optional): Column names of the dataframe. Defaults to None.
minval (float, optional): Minimum value of the floats. Defaults to -1.0.
maxval (float, optional): Maximum value of the floats. Defaults to 1.0.
Returns:
pd.DataFrame:
"""
if columns is not None:
assert len(columns) == ncols, "Provide all or No Columns names"
logger.info(f"Generating {nrows}x{ncols} all float dataframe")
self._numrows = nrows
out_df = pd.DataFrame()
for i in range(ncols):
_col_name = columns[i] if columns is not None else f"Float{i}"
out_df[_col_name] = self._generate_floats(_min=minval, _max=maxval)
return out_df
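# Usage sketch (added example, not part of the original module): exercises the
# generator defined above. Column names follow the current f"{Type}{index}"
# naming (e.g. Int0, Float1, Str2); the exact integer width is platform dependent.
_demo_generator = DataFrameGenerator()
_demo_frame = _demo_generator.get_dataframe(nrows=5, ctypes=[int, float, str])
logger.debug("demo frame shape=%s dtypes:\n%s", _demo_frame.shape, _demo_frame.dtypes)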
import pandas as pd
from bs4 import BeautifulSoup as s
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
import time
import random
r = requests.get('https://www.goodreads.com/search?utf8=%E2%9C%93&q=harry+potter&search_type=books&search%5Bfield%5D=title')
soup = s(r.content, 'html.parser')
links = []
title = []
for i in soup.findAll('a',{'class':'bookTitle'})[:8]:
links.append('https://goodreads.com'+i.get('href'))
title.append(i.get_text().strip())
def scrapper(w,d,ls):
fi = soup.findAll(w,d)
if fi:
for i in fi:
ls.append(i.get_text())
else:
ls.append('')
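# Note (added comment): scrapper() reads the module-level ``soup`` that is reassigned
# inside the paging loop below; ``w`` and ``d`` are the tag name and attribute filter
# passed to findAll, and a single empty string is appended as a placeholder when
# nothing matches on a page.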
r = []
rdesc = []
r2 = []
like_count = []
title = []
b = webdriver.Chrome(ChromeDriverManager().install())
for l in links:
rs2 = requests.get(l)
b.get(l)
seconds = 5 + (random.random() * 5)
b.implicitly_wait(30)
for i in range(3):
h = b.page_source
soup = s(h, 'html.parser')
for t in range(30):
title.append(soup.find('h1',{'id':'bookTitle'}).get_text())
scrapper('div',{'class':'reviewHeader'},r)
scrapper('div',{'class':'review'},r2)
scrapper('div',{'class':'reviewText'},rdesc)
# use a distinct loop variable so the review-header list ``r`` is not clobbered
for lk in soup.select('div.review span.likesCount'):
like_count.append(lk.get_text())
e = b.find_element(By.CLASS_NAME,'next_page')
b.execute_script('arguments[0].click()',e)
time.sleep(seconds)
rde2 = rdesc.copy()
rde2 = [i.strip().replace('\xa0','').replace('...more','') for i in rde2]
rc = r.copy()
rc = [i.strip().replace(' ','').split('\n') for i in rc]
rdate = []
rname = []
rrating = []
recc = []
shelves = []
rev = []
likes = []
comm = []
for i in rc:
rname.append(i[2])
rdate.append(i[0])
if i[5]=='ratedit':
rrating.append(i[6])
else:
rrating.append('')
title = [i.strip() for i in title]
dt = pd.DataFrame({'book':title,'name':rname,'date':rdate,'rating':rrating,'likes':like_count,'description':rde2})
def stars(t):
d = {'itwasamazing':5,'reallylikedit':4,'likedit':3,'itwasok':2,'didnotlikeit':1, '':''}
return d[t]
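# Example (added comment): stars('reallylikedit') -> 4 and stars('itwasok') -> 2,
# while the empty string maps to '' so rows without a rating stay blank.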
dt2 = dt.rating
dt['stars_given'] = dt2.apply(lambda x: stars(x))
dt.to_csv('lightning.csv')
pd.read_csv('lightning.csv')
"""
This module calculates statistical values and performs multivariate
statistics. The values from the **results.csv** file are used for this.
A PCA and an LDA are performed. Corresponding plots are created for this.
:info: In the calculate function, parameters of the measurements can be deleted
from the evaluation. (For example, if the same substance is used for
all measurements. This property could be removed from the calculation)
:copyright: (c) 2022 by <NAME>, Hochschule-Bonn-Rhein-Sieg
:license: see LICENSE for more details.
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import numpy as np
import plotly.express as px
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from random import randint
from sklearn.model_selection import train_test_split
from sklearn.model_selection import LeaveOneOut
from sklearn import metrics
from roc import get_roc
import matplotlib
def create_droplist(keywords, cols):
"""
This function creates a list with all feauters in which the given keywords occur.
Args:
keywords (list): list with keywords to drop
cols (list): list with feauters
"""
drops = []
for key in keywords:
for col in cols:
if key in col:
drops.append(col)
return drops
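# Example (added comment, hypothetical names): create_droplist(['substance'],
# ['substance', 'substance_conc', 'temperature']) returns
# ['substance', 'substance_conc'] because both feature names contain the keyword.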
def save_df(df, path, name):
"""
This function saves a DataFrame to csv in the results folder.
Param:
df (pandas.DataFrame): DataFrame to save
path (string): path to root directory of data
name (string): Name under which the file is to be saved
"""
Path(path).mkdir(parents=True, exist_ok=True)
path = path + '\\' + name + '.csv'
df.to_csv(path, sep=';', decimal='.', index=True)
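# Usage sketch (added comment, placeholder path): save_df(df_mean, path, 'mean_values')
# creates <path> if needed and writes <path>\mean_values.csv with ';' as separator
# and '.' as decimal mark.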
def save_html(html_object, path, name):
"""
This Function saves a plottly figure as html to plots//statistics.
Param:
html_object (object): plottly html object to save
path (string): path to root directory of data
name (string): Name to save figure
"""
path = path + '\\plots\\statistics'
Path(path).mkdir(parents=True, exist_ok=True)
path = path + '\\' + name + '.html'
print(path)
html_object.write_html(path)
def save_jpeg(jpeg_object, path, name):
"""
This Function saves a figure as jpeg to plots//statistics.
Param:
html_object (object): plottly figure to save
path (string): path to root directory of data
name (string): Name to save figure
"""
path = path + '\\plots\\statistics'
Path(path).mkdir(parents=True, exist_ok=True)
path = path + '\\' + name + '.jpeg'
jpeg_object.savefig(path)
def get_statistics(df, path):
"""
This function calculates statistical characteristics of the data.
Param:
df (pandas.DataFrame): DataFrame with data form result.csv
path (string): root path to data
"""
print('processing statistics...')
samples = df.index.unique().tolist()
statistics_list = {}
df_mean = pd.DataFrame()
"""
Module contains tools for collecting data from various remote sources
"""
import warnings
import tempfile
import datetime as dt
import time
from collections import defaultdict
import numpy as np
from pandas.compat import (
StringIO, bytes_to_str, range, lmap, zip
)
import pandas.compat as compat
from pandas import Panel, DataFrame, Series, read_csv, concat, to_datetime, DatetimeIndex, DateOffset
from pandas.core.common import is_list_like, PandasError
from pandas.io.common import urlopen, ZipFile, urlencode
from pandas.tseries.offsets import MonthEnd
from pandas.util.testing import _network_error_classes
from pandas.io.html import read_html
warnings.warn("\n"
"The pandas.io.data module is moved to a separate package "
"(pandas-datareader) and will be removed from pandas in a "
"future version.\nAfter installing the pandas-datareader package "
"(https://github.com/pydata/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.",
FutureWarning)
class SymbolWarning(UserWarning):
pass
class RemoteDataError(PandasError, IOError):
pass
def DataReader(name, data_source=None, start=None, end=None,
retry_count=3, pause=0.001):
"""
Imports data from a number of online sources.
Currently supports Yahoo! Finance, Google Finance, St. Louis FED (FRED)
and Kenneth French's data library.
Parameters
----------
name : str or list of strs
the name of the dataset. Some data sources (yahoo, google, fred) will
accept a list of names.
data_source: str, default: None
the data source ("yahoo", "google", "fred", or "ff")
start : datetime, default: None
left boundary for range (defaults to 1/1/2010)
end : datetime, default: None
right boundary for range (defaults to today)
retry_count : int, default 3
Number of times to retry query request.
pause : numeric, default 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
Examples
----------
# Data from Yahoo! Finance
gs = DataReader("GS", "yahoo")
# Data from Google Finance
aapl = DataReader("AAPL", "google")
# Data from FRED
vix = DataReader("VIXCLS", "fred")
# Data from Fama/French
ff = DataReader("F-F_Research_Data_Factors", "famafrench")
ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench")
ff = DataReader("6_Portfolios_2x3", "famafrench")
ff = DataReader("F-F_ST_Reversal_Factor", "famafrench")
"""
start, end = _sanitize_dates(start, end)
if data_source == "yahoo":
return get_data_yahoo(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "google":
return get_data_google(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "fred":
return get_data_fred(name, start, end)
elif data_source == "famafrench":
return get_data_famafrench(name)
def _sanitize_dates(start, end):
from pandas.core.datetools import to_datetime
start = to_datetime(start)
end = to_datetime(end)
if start is None:
start = dt.datetime(2010, 1, 1)
if end is None:
end = dt.datetime.today()
return start, end
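# Example (added comment): with start=None and end=None the fallbacks are
# datetime(2010, 1, 1) and datetime.today(), matching the defaults documented
# in DataReader above.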
def _in_chunks(seq, size):
"""
Return sequence in 'chunks' of size defined by size
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
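# Example (added comment): list(_in_chunks(['AAPL', 'MSFT', 'GOOG'], 2)) yields
# [['AAPL', 'MSFT'], ['GOOG']]; the downloaders use these chunks to pause between
# groups of symbol requests.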
_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
'time': 't1', 'short_ratio': 's7'}
_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
Returns a DataFrame
"""
if isinstance(symbols, compat.string_types):
sym_list = symbols
else:
sym_list = '+'.join(symbols)
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
request = ''.join(compat.itervalues(_yahoo_codes)) # code request string
header = list(_yahoo_codes.keys())
data = defaultdict(list)
url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
for i, field in enumerate(fields):
if field[-2:] == '%"':
v = float(field.strip('"%'))
elif field[0] == '"':
v = field.strip('"')
else:
try:
v = float(field)
except ValueError:
v = field
data[header[i]].append(v)
idx = data.pop('symbol')
return DataFrame(data, index=idx)
def get_quote_google(symbols):
raise NotImplementedError("Google Finance doesn't have this functionality")
def _retry_read_url(url, retry_count, pause, name):
for _ in range(retry_count):
time.sleep(pause)
# kludge to close the socket ASAP
try:
with urlopen(url) as resp:
lines = resp.read()
except _network_error_classes:
pass
else:
rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
parse_dates=True, na_values='-')[::-1]
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
#Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode('unicode_escape').encode('ascii', 'ignore')
except AttributeError:
#Python 3 string has no decode method.
rs.index.name = rs.index.name.encode('ascii', 'ignore').decode()
return rs
raise IOError("after %d tries, %s did not "
"return a 200 for url %r" % (retry_count, name, url))
_HISTORICAL_YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?'
def _get_hist_yahoo(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from yahoo.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
'&c=%s' % start.year +
'&d=%s' % (end.month - 1) +
'&e=%s' % end.day +
'&f=%s' % end.year +
'&g=%s' % interval +
'&ignore=.csv')
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
_HISTORICAL_GOOGLE_URL = 'http://www.google.com/finance/historical?'
def _get_hist_google(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from google.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
url = "%s%s" % (_HISTORICAL_GOOGLE_URL,
urlencode({"q": sym,
"startdate": start.strftime('%b %d, ' '%Y'),
"enddate": end.strftime('%b %d, %Y'),
"output": "csv"}))
return _retry_read_url(url, retry_count, pause, 'Google')
def _adjust_prices(hist_data, price_list=None):
"""
Return modifed DataFrame or Panel with adjusted prices based on
'Adj Close' price. Adds 'Adj_Ratio' column.
"""
if price_list is None:
price_list = 'Open', 'High', 'Low', 'Close'
adj_ratio = hist_data['Adj Close'] / hist_data['Close']
data = hist_data.copy()
for item in price_list:
data[item] = hist_data[item] * adj_ratio
data['Adj_Ratio'] = adj_ratio
del data['Adj Close']
return data
def _calc_return_index(price_df):
"""
Return a returns index from a input price df or series. Initial value
(typically NaN) is set to 1.
"""
df = price_df.pct_change().add(1).cumprod()
mask = df.ix[1].notnull() & df.ix[0].isnull()
df.ix[0][mask] = 1
# Check for first stock listings after starting date of index in ret_index
# If True, find first_valid_index and set previous entry to 1.
if (~mask).any():
for sym in mask.index[~mask]:
tstamp = df[sym].first_valid_index()
t_idx = df.index.get_loc(tstamp) - 1
df[sym].ix[t_idx] = 1
return df
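# Worked example (added comment): for prices [100.0, 110.0, 121.0] the chain
# pct_change().add(1).cumprod() gives [NaN, 1.10, 1.21]; the leading NaN is then
# set to 1, so the return index starts at 1.0.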
_YAHOO_COMPONENTS_URL = 'http://download.finance.yahoo.com/d/quotes.csv?'
def get_components_yahoo(idx_sym):
"""
Returns DataFrame containing list of component information for
index represented in idx_sym from yahoo. Includes component symbol
(ticker), exchange, and name.
Parameters
----------
idx_sym : str
Stock index symbol
Examples:
'^DJI' (Dow Jones Industrial Average)
'^NYA' (NYSE Composite)
'^IXIC' (NASDAQ Composite)
See: http://finance.yahoo.com/indices for other index symbols
Returns
-------
idx_df : DataFrame
"""
stats = 'snx'
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
url = _YAHOO_COMPONENTS_URL + 's={0}&f={1}&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
url_str = url.format(idx_mod, stats, 1)
idx_df = DataFrame()
mask = [True]
comp_idx = 1
# LOOP across component index structure,
# break when no new components are found
while True in mask:
url_str = url.format(idx_mod, stats, comp_idx)
with urlopen(url_str) as resp:
raw = resp.read()
lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"')
lines = [line.strip().split('","') for line in lines]
temp_df = DataFrame(lines, columns=['ticker', 'name', 'exchange'])
temp_df = temp_df.drop_duplicates()
temp_df = temp_df.set_index('ticker')
mask = ~temp_df.index.isin(idx_df.index)
comp_idx = comp_idx + 50
idx_df = idx_df.append(temp_df[mask])
return idx_df
def _dl_mult_symbols(symbols, start, end, interval, chunksize, retry_count, pause,
method):
stocks = {}
failed = []
passed = []
for sym_group in _in_chunks(symbols, chunksize):
for sym in sym_group:
try:
stocks[sym] = method(sym, start, end, interval, retry_count, pause)
passed.append(sym)
except IOError:
warnings.warn('Failed to read symbol: {0!r}, replacing with '
'NaN.'.format(sym), SymbolWarning)
failed.append(sym)
if len(passed) == 0:
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
try:
if len(stocks) > 0 and len(failed) > 0 and len(passed) > 0:
df_na = stocks[passed[0]].copy()
df_na[:] = np.nan
for sym in failed:
stocks[sym] = df_na
return Panel(stocks).swapaxes('items', 'minor')
except AttributeError:
# cannot construct a panel with just 1D nans indicating no data
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
_source_functions = {'google': _get_hist_google, 'yahoo': _get_hist_yahoo}
def _get_data_from(symbols, start, end, interval, retry_count, pause, adjust_price,
ret_index, chunksize, source):
src_fn = _source_functions[source]
# If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (compat.string_types, int)):
hist_data = src_fn(symbols, start, end, interval, retry_count, pause)
# Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
hist_data = _dl_mult_symbols(symbols.index, start, end, interval, chunksize,
retry_count, pause, src_fn)
else:
hist_data = _dl_mult_symbols(symbols, start, end, interval, chunksize,
retry_count, pause, src_fn)
if source.lower() == 'yahoo':
if ret_index:
hist_data['Ret_Index'] = _calc_return_index(hist_data['Adj Close'])
if adjust_price:
hist_data = _adjust_prices(hist_data)
return hist_data
def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25, interval='d'):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Yahoo! Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame, default: None
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols
start : string, (defaults to '1/1/2010')
Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default: 3
Number of times to retry query request.
pause : numeric, default: 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
adjust_price : bool, default: False
If True, adjusts all prices in hist_data ('Open', 'High', 'Low',
'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops
'Adj Close'.
ret_index : bool, default: False
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default: 25
Number of symbols to download consecutively before initiating pause.
interval : string, default: 'd'
Time interval code, valid values are 'd' for daily, 'w' for weekly,
'm' for monthly and 'v' for dividend.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
if interval not in ['d', 'w', 'm', 'v']:
raise ValueError("Invalid interval: valid values are 'd', 'w', 'm' and 'v'")
return _get_data_from(symbols, start, end, interval, retry_count, pause,
adjust_price, ret_index, chunksize, 'yahoo')
def get_data_google(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Google Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default: 3
Number of times to retry query request.
pause : numeric, default: 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
chunksize : int, default: 25
Number of symbols to download consecutively before initiating pause.
ret_index : bool, default: False
If True, includes a simple return index 'Ret_Index' in hist_data.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
return _get_data_from(symbols, start, end, None, retry_count, pause,
adjust_price, ret_index, chunksize, 'google')
_FRED_URL = "http://research.stlouisfed.org/fred2/series/"
def get_data_fred(name, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
"""
Get data for the given name from the St. Louis FED (FRED).
Date format is datetime
Returns a DataFrame.
If multiple names are passed for "series" then the index of the
DataFrame is the outer join of the indices of each series.
"""
start, end = _sanitize_dates(start, end)
if not is_list_like(name):
names = [name]
else:
names = name
urls = [_FRED_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
n in names]
def fetch_data(url, name):
with urlopen(url) as resp:
data = read_csv(resp, index_col=0, parse_dates=True,
header=None, skiprows=1, names=["DATE", name],
na_values='.')
try:
return data.truncate(start, end)
except KeyError:
if data.ix[3].name[7:12] == 'Error':
raise IOError("Failed to get the data. Check that {0!r} is "
"a valid FRED series.".format(name))
raise
df = concat([fetch_data(url, n) for url, n in zip(urls, names)], axis=1, join='outer')
return df
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and reverse_transform functions + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
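# Added note (assumption about the _valid_separator step referenced above): '#'
# is accepted because it does not appear in the constrained column values, and
# the joint column name is simply the column names joined by it ('b#c').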
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_column`` is ``'a#'``
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_column`` is ``'a#b'``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
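# Worked arithmetic (added comment): every row has b - a == 3, so the diff column
# stores np.log(3 + 1) == np.log(4); the +1 keeps a zero distance well defined
# under the logarithm.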
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_transform_high_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, high_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
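# Worked arithmetic (added comment): with the scalar high of 5 the distances
# 5 - [1, 2, 3] are [4, 3, 2], so the diff column holds
# [np.log(5), np.log(4), np.log(3)] after the +1 and logarithm.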
def test_transform_low_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, low_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
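# Worked arithmetic (added comment): reverse_transform computes
# np.exp(np.log(4)) - 1 == 3 and adds it to column 'a', so 'b' is rebuilt as
# a + 3 and cast back to the stored integer dtype.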
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
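# Worked arithmetic (added comment): np.exp(np.log(1_000_000_001)) - 1 is
# 1_000_000_000 nanoseconds, i.e. a one-second timedelta, which is added to 'a'
# to rebuild the datetime column 'b'.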
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_low_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, low_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_high_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, high_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
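# The reverse_transform tests above all exercise the same arithmetic. As a minimal
# sketch of the steps listed in their docstrings (an illustration, not the constraint's
# actual implementation), reconstructing the missing or invalid column from the stored
# diff column 'a#b' amounts to:
#     recovered = np.exp(diff) - 1                  # e.g. np.exp(np.log(4)) - 1 == 3.0
#     high = (low + recovered).astype(self._dtype)  # low=[1, 2, 3]  ->  high=[4, 5, 6]
# For datetimes the recovered distance is treated as nanoseconds, e.g.
#     pd.to_timedelta(np.exp(np.log(1_000_000_001)) - 1, unit='ns')  # roughly one second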
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_low_is_scalar`` variable to ``True`` and the
``_high_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- high = 'a'
- drop = None
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._high_is_scalar = False
- instance._low_is_scalar = True
- instance._drop = None
"""
# Run
instance = Positive(high='a', strict=True, drop=None)
# Asserts
assert instance._low == 0
assert instance._high == 'a'
assert instance._strict is True
assert instance._high_is_scalar is False
assert instance._low_is_scalar is True
assert instance._drop is None
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_high_is_scalar`` variable to ``True`` and the
``_low_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- low = 'a'
- drop = None
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._high_is_scalar = True
- instance._low_is_scalar = False
- instance._drop = None
"""
# Run
instance = Negative(low='a', strict=True, drop=None)
# Asserts
assert instance._low == 'a'
assert instance._high == 0
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop is None
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where only some values have the desired decimal places (pandas.DataFrame)
Output:
- Series of ``True`` values for rows within tolerance and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data where only some values have the desired decimal places (pandas.DataFrame)
Output:
- Series of ``True`` values for rows within tolerance and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data where only some values have the desired decimal places (pandas.DataFrame)
Output:
- Series of ``True`` values for rows within tolerance and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
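# A small worked example of the tolerance check exercised by the Rounding.is_valid tests
# above (illustrative only; the constraint's actual implementation may differ): with
# digits=2 and tolerance=1e-3, a value passes when it is within tolerance of its rounded
# form, e.g.
#     abs(4.12 - round(4.12, 2))  == 0.0     -> valid
#     abs(5.315 - round(5.315, 2)) ~= 0.005  -> invalid (exceeds 1e-3)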
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
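# The helper above squeezes ``data`` into (0.025, 0.975) before taking the logit, which
# keeps the transformed values finite at the interval boundaries. The reverse_transform
# tests below rely on its inverse; a minimal sketch of that inverse (an illustration,
# not the Between constraint's actual implementation):
def inverse_transform_sketch(data, low, high):
    """Undo ``transform`` above: sigmoid, then rescale from (0.025, 0.975) back to [low, high]."""
    sigmoid = 1.0 / (1.0 + np.exp(-data))
    return (sigmoid - 0.025) / 0.95 * (high - low) + low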
class TestBetween():
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_scalar_column(self):
"""Test the ``Between.transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_scalar(self):
"""Test the ``Between.transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_column(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_scalar_column(self):
"""Test the ``Between.is_valid`` method with ``low`` as scalar and ``high`` as a column.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_scalar(self):
"""Test the ``Between.is_valid`` method with ``low`` as a column and ``high`` as scalar.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the second value is smaller than ``low`` and
last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the first row, False
for the last two. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = | pd.Series([True, False, False]) | pandas.Series |
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 4, 5],
}
)
self.df2_sessionized_1 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"time_max": [
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation_list": [["A", "B"], ["A"], ["C"], ["B"], ["C"], ["A"]],
"duration": [
pd.to_timedelta(1, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
],
"number_events": [2, 1, 1, 1, 1, 1],
}
)
self.df2_with_ses_col_2 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 3, 4],
}
)
self.df2_sessionized_2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"time_max": [
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation_list": [["A", "B"], ["A"], ["C"], ["B", "C"], ["A"]],
"duration": [
pd.to_timedelta(1, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
| pd.to_timedelta(4, "min") | pandas.to_timedelta |
from __future__ import print_function
import boto3
import os, sys
import json
from neptune_python_utils.gremlin_utils import GremlinUtils
from neptune_python_utils.endpoints import Endpoints
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.traversal import Column
from io import BytesIO, StringIO
from datetime import datetime as dt
import numpy as np
import pandas as pd
import time
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
MAX_FEATURE_NODE = int(os.environ['MAX_FEATURE_NODE'])
CLUSTER_ENDPOINT = os.environ['CLUSTER_ENDPOINT']
CLUSTER_PORT = os.environ['CLUSTER_PORT']
CLUSTER_REGION = os.environ['CLUSTER_REGION']
ENDPOINT_NAME = os.environ['ENDPOINT_NAME']
MODEL_BTW = float(os.environ['MODEL_BTW'])
QUEUE_URL = os.environ['QUEUE_URL']
transactions_id_cols = os.environ['TRANSACTION_ID_COLS']
transactions_cat_cols = os.environ['TRANSACTION_CAT_COLS']
dummied_col = os.environ['DUMMIED_COL']
sqs = boto3.client('sqs')
runtime = boto3.client('runtime.sagemaker')
endpoints = Endpoints(neptune_endpoint = CLUSTER_ENDPOINT, neptune_port = CLUSTER_PORT, region_name = CLUSTER_REGION)
def load_data_from_event(input_event, transactions_id_cols, transactions_cat_cols, dummied_col):
"""Load and transform event data into correct format for next step subgraph loading and model inference input.
Input event keys should come from the related dataset.
Example:
>>> load_data_from_event(event = {"transaction_data":[{"TransactionID":"3163166", "V1":1, ...]}, 'card1,card2,,...', 'M2_T,M3_F,M3_T,...')
"""
TRANSACTION_ID = 'TransactionID'
transactions_id_cols = transactions_id_cols.split(',')
transactions_cat_cols = transactions_cat_cols.split(',')
transactions_no_value_cols = [TRANSACTION_ID, 'TransactionDT'] + transactions_id_cols + transactions_cat_cols
dummied_col = dummied_col.split(',')
if input_event['identity_data'] != []:
identities_cols = list(input_event['identity_data'][0].keys())
identities_cols.remove(TRANSACTION_ID)
else:
identities_cols = []
neighbor_cols = [x for x in list(input_event['transaction_data'][0].keys()) if x not in transactions_no_value_cols]
if input_event['identity_data'] != []:
input_event = {**input_event['transaction_data'][0], **input_event['identity_data'][0]}
else:
input_event = input_event['transaction_data'][0]
input_event[TRANSACTION_ID] = f't-{input_event[TRANSACTION_ID]}'
input_event['TransactionAmt'] = np.log10(input_event['TransactionAmt'])
input_event = | pd.DataFrame.from_dict(input_event, orient='index') | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas_should # noqa
class TestEqualAccessorMixin(object):
def test_equal_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert s1.should.equal(s2)
def test_equal_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert not s1.should.equal(s2)
@pytest.mark.parametrize('alias_name', [
'be_equal_to', 'be_equals_to', 'be_eq_to', 'eq',
])
def test_equal_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_not_equal_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert s1.should.not_equal(s2)
def test_not_equal_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert not s1.should.not_equal(s2)
@pytest.mark.parametrize('alias_name', [
'be_not_equal_to', 'be_not_equals_to', 'be_neq_to', 'neq',
])
def test_not_equal_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_have_same_length_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert s1.should.have_same_length(s2)
def test_have_same_length_false(self):
s1 = | pd.Series([1, 2, 3]) | pandas.Series |
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from finmarketpy.economics.techindicator import TechParams, TechIndicator
tech_params = TechParams(fillna=True, atr_period=14, sma_period=3,
green_n=4, green_count=9, red_n=2, red_count=13)
tech_ind = TechIndicator()
dates = pd.date_range(start='1/1/2018', end='1/08/2018')
def get_cols_name(n):
return ['Asset%d.close' % x for x in range(1, n + 1)]
def test_sma():
indicator_name = 'SMA'
# Test Case 1: constant prices
cols = get_cols_name(1)
data_df = pd.DataFrame(index=dates, columns=cols, data=1)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=1)
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 2: Normal case with one single security
data_df = pd.DataFrame(index=dates, columns=cols, data=list(range(1, 9)))
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=data_df.shift().values)
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 3: Normal case with multiple securities
cols = get_cols_name(10)
col_prices = np.array(range(1, 9))
data_df = pd.DataFrame(index=dates, columns=cols, data=np.tile(col_prices, (len(cols), 1)).T)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=data_df.shift().values)
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 4: Decreasing price with multiple securities
cols = get_cols_name(10)
col_prices = np.array(range(8, 0, -1))
data_df = pd.DataFrame(index=dates, columns=cols, data=np.tile(col_prices, (len(cols), 1)).T)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=data_df.shift().values)
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 5: With SOME missing data
cols = get_cols_name(1)
data_df = pd.DataFrame(index=dates, columns=cols, data=list(range(1, 9)))
data_df.iloc[3] = np.nan
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=[np.nan, np.nan, 2, 2.67, 3.67,
4.67, 6, 7])
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
assert_frame_equal(df.apply(lambda x: round(x, 2)), expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 6: With not enough data
data_df = pd.DataFrame(index=dates, columns=cols, data=list(range(1, 9)))
tech_params.sma_period = 20
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=np.nan)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=np.nan)
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
def test_roc():
indicator_name = 'ROC'
# Test Case 1: constant prices
cols = get_cols_name(1)
tech_params.roc_period = 3
data_df = pd.DataFrame(index=dates, columns=cols, data=1)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=0)
expected_signal_df.iloc[:tech_params.roc_period] = np.nan
expected_df.iloc[:tech_params.roc_period] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 2: Increasing prices, fixed rate
cols = get_cols_name(1)
tech_params.roc_period = 1
data_df = pd.DataFrame(index=dates, columns=cols, data=[1, 2, 4, 8, 16, 32, 64, 128])
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=1)
expected_signal_df.iloc[:tech_params.roc_period] = np.nan
expected_df.iloc[:tech_params.roc_period] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
tech_params.roc_period = 2
data_df = pd.DataFrame(index=dates, columns=cols, data=[1, 2, 4, 8, 16, 32, 64, 128])
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=3)
expected_signal_df.iloc[:tech_params.roc_period] = np.nan
expected_df.iloc[:tech_params.roc_period] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
tech_params.roc_period = 3
data_df = pd.DataFrame(index=dates, columns=cols, data=[1, 2, 4, 8, 16, 32, 64, 128])
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=7)
expected_signal_df.iloc[:tech_params.roc_period] = np.nan
expected_df.iloc[:tech_params.roc_period] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 3: Missing values
cols = get_cols_name(1)
tech_params.roc_period = 1
data_df = pd.DataFrame(index=dates, columns=cols, data=[1, 2, 4, 8, 16, 32, 64, 128])
data_df.iloc[3] = np.nan
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=[1, 1, 1, -1, 1, 1, 1, 1])
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=[1, 1, 1, 0, 3, 1, 1, 1])
expected_signal_df.iloc[:tech_params.roc_period] = np.nan
expected_df.iloc[:tech_params.roc_period] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
def test_sma2():
indicator_name = 'SMA2'
tech_params.sma_period = 2
tech_params.sma2_period = 3
# Test Case 1: Increasing prices
cols = get_cols_name(1)
signals = ['SMA', 'SMA2']
data_df = pd.DataFrame(index=dates, columns=cols, data=list(range(1, 9)))
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, sig])
for col in cols for sig in signals],
data=[
[np.nan, np.nan],
[1.5, np.nan],
[2.5, 2.0],
[3.5, 3.0],
[4.5, 4.0],
[5.5, 5.0],
[6.5, 6.0],
[7.5, 7.0],
])
expected_signal_df.iloc[:tech_params.sma2_period] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 2: Decreasing prices
cols = get_cols_name(1)
signals = ['SMA', 'SMA2']
data_df = pd.DataFrame(index=dates, columns=cols, data=list(range(8, 0, -1)))
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, sig])
for col in cols for sig in signals],
data=[
[np.nan, np.nan],
[7.5, np.nan],
[6.5, 7.0],
[5.5, 6.0],
[4.5, 5.0],
[3.5, 4.0],
[2.5, 3.0],
[1.5, 2.0],
])
expected_signal_df.iloc[:tech_params.sma2_period] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
assert_frame_equal(df, expected_df)
assert_frame_equal(signal_df, expected_signal_df)
# Test Case 3: Constant prices
cols = get_cols_name(1)
signals = ['SMA', 'SMA2']
data_df = pd.DataFrame(index=dates, columns=cols, data=1)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, sig])
for col in cols for sig in signals],
data=np.tile(np.ones(len(dates)), (2, 1)).T)
expected_signal_df.iloc[:tech_params.sma2_period] = np.nan
expected_df.iloc[:tech_params.sma2_period - 1] = np.nan
expected_df.loc['2018-01-02', 'Asset1.close SMA'] = 1.0  # DataFrame.set_value was removed in pandas 1.0
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
| assert_frame_equal(df, expected_df) | pandas.testing.assert_frame_equal |
# reading stat files
import pandas as pd
df = pd.read_stata("files/disarea.dta")
print(df.describe())
print(df.head())
print(df.iloc[0:, 2:10])
import matplotlib.pyplot as plt
pd.DataFrame.boxplot(df[['disa1', 'disa2']])
plt.show()
| pd.DataFrame.hist(df[['disa10']]) | pandas.DataFrame.hist |
import numpy as np
import pytest
from pandas._libs import groupby as libgroupby
from pandas._libs.groupby import (
group_cumprod_float64,
group_cumsum,
group_mean,
group_var,
)
from pandas.core.dtypes.common import ensure_platform_int
from pandas import isna
import pandas._testing as tm
class GroupVarTestMixin:
def test_group_var_generic_1d(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3,)).astype("intp")
expected_out = (
np.squeeze(values).reshape((5, 3), order="F").std(axis=1, ddof=1) ** 2
)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype="int64")
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype="intp")
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2,)).astype("intp")
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2,)).astype("intp")
expected_out = np.vstack(
[
values[:, 0].reshape(5, 2, order="F").std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5),
]
).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = np.random.RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float32
rtol = 1e-2
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(len(out), dtype=np.int64)
labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = libgroupby.group_ohlc
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
tm.assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = np.nan
func(out, counts, obj[:, None], labels)
expected[0] = np.nan
tm.assert_almost_equal(out, expected)
_check("float32")
_check("float64")
def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
pd_op : callable
The pandas cumulative function.
np_op : callable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
answer = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.intp)
ngroups = 1
pd_op(answer, data, labels, ngroups, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
def test_cython_group_transform_cumsum(any_real_dtype):
# see gh-4095
dtype = np.dtype(any_real_dtype).type
pd_op, np_op = group_cumsum, np.cumsum
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
pd_op, np_op = group_cumprod_float64, np.cumproduct
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
ngroups = 1
data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
actual = np.zeros_like(data)
actual.fill(np.nan)
group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
| group_cumsum(actual, data, labels, ngroups, is_datetimelike) | pandas._libs.groupby.group_cumsum |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 16:19:07 2020
@author: kezeng
"""
from sklearn.base import TransformerMixin,BaseEstimator
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import SET_USE_BIC_LLF
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from statsmodels.discrete.discrete_model import BinaryResultsWrapper
from statsmodels.genmod.generalized_linear_model import GLMResultsWrapper
from sklearn.linear_model._logistic import LogisticRegression
from pandas.api.types import is_numeric_dtype,is_string_dtype
from BDMLtools.fun import raw_to_bin_sc,Specials
from joblib import Parallel,delayed,effective_n_jobs
from BDMLtools.base import Base
class stepLogit(Base,BaseEstimator,TransformerMixin):
'''
Stepwise logistic regression. Note that column names must be parseable by patsy.
Stepwise procedure:
+ first, try to add a feature:
+ try every remaining candidate and add the one that most improves the criterion (aic or bic)
+ then, try to remove features:
+ if a feature's p-value in the model is too high (greater than p_value_enter), it is removed and never re-added
Repeat the steps above until
+ no candidate feature improves the criterion (aic or bic), or
+ no candidate features remain
Parameters:
--
custom_column=None: list, custom column names used when adjusting the regression model; the default None means all features are screened
no_stepwise=False: if True, fit the regression directly without stepwise selection (normalize=True is not supported)
p_value_enter=.05: p-value threshold above which a feature is removed during stepwise selection, default 0.05
criterion='aic': criterion used for stepwise variable selection, default 'aic', 'bic' is also supported
normalize=False: whether to standardize the data, default False; if True, the data is standardized first and no intercept is fitted
show_step=False: whether to print the stepwise regression process
max_iter=200: maximum number of stepwise iterations
sample_weight=None: sample weights
show_high_vif_only=False: if True, only features with vif greater than 10 are reported; if False, vif is reported for all features
Attribute:
--
logit_model: statsmodels results object of the stepwise regression; call the fit method first
model_info: regression summary report; call the fit method first
vif_info: pd.DataFrame, variance inflation factors of the selected features; call the fit method first
'''
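# A minimal usage sketch (illustrative only; assumes a WOE-encoded feature frame
# ``X_woe`` and a binary target ``y`` with a matching index):
#     clf = stepLogit(criterion='aic', p_value_enter=0.05, show_step=True).fit(X_woe, y)
#     proba = clf.predict_proba(X_woe)  # predicted probabilities from the selected model
#     X_sel = clf.transform(X_woe)      # X_woe restricted to the selected features
#     clf.model_info; clf.vif_info      # summary report and variance inflation factors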
def __init__(self,custom_column=None,no_stepwise=False,p_value_enter=.05,criterion='aic',
normalize=False,show_step=False,max_iter=200,sample_weight=None,
show_high_vif_only=False):
self.custom_column=custom_column
self.no_stepwise=no_stepwise
self.p_value_enter=p_value_enter
self.criterion=criterion
self.normalize=normalize
self.show_step=show_step
self.max_iter=max_iter
self.sample_weight=sample_weight
self.show_high_vif_only=show_high_vif_only
self._is_fitted=False
def predict_proba(self,X,y=None):
'''
Predict with the fitted stepwise regression model and return predicted probabilities.
Parameters:
--
X: WOE-encoded data, a pd.DataFrame with the same features as the WOE-encoded training data
'''
self._check_is_fitted()
self._check_X(X)
pred=self.logit_model.predict(X)
return pred
def transform(self,X,y=None):
'''
Apply the stepwise feature selection and return the training data restricted to the selected features.
Parameters:
--
X: WOE-encoded data, a pd.DataFrame with the same features as the WOE-encoded training data
'''
self._check_is_fitted()
self._check_X(X)
return X[self.logit_model.params.index.tolist()[1:]]
def fit(self,X,y):
'''
Fit the stepwise regression.
Parameters:
--
X: WOE-encoded training data, a pd.DataFrame
y: target variable, a pd.Series
'''
self._check_data(X, y)
if self.custom_column:
if self.no_stepwise:
formula = "{} ~ {} + 1".format(y.name,' + '.join(self.custom_column))
self.logit_model=smf.glm(formula, data=X[self.custom_column].join(y),
family=sm.families.Binomial(),
freq_weights=self.sample_weight).fit(disp=0)
else:
self.logit_model=self._stepwise(X[self.custom_column].join(y),y.name,criterion=self.criterion,p_value_enter=self.p_value_enter,normalize=self.normalize,show_step=self.show_step,max_iter=self.max_iter)
else:
if self.no_stepwise:
formula = "{} ~ {} + 1".format(y.name,' + '.join(X.columns.tolist()))
self.logit_model=smf.glm(formula, data=X.join(y),
family=sm.families.Binomial(),
freq_weights=self.sample_weight).fit(disp=0)
else:
self.logit_model=self._stepwise(X.join(y),y.name,criterion=self.criterion,p_value_enter=self.p_value_enter,normalize=self.normalize,show_step=self.show_step,max_iter=self.max_iter)
self.model_info=self.logit_model.summary()
self.vif_info=self._vif(self.logit_model,X,show_high_vif_only=self.show_high_vif_only)
self._is_fitted=True
return self
def _stepwise(self,df,response,intercept=True, normalize=False, criterion='aic',
p_value_enter=.05, show_step=True,max_iter=200):
'''
Stepwise regression.
Parameters:
--
df : dataframe
    Data frame used for the analysis; it must contain the response column.
response : str
    Name of the response variable of the regression.
intercept : bool, default True
    Whether the model includes an intercept term.
criterion : str, default 'aic', 'bic' is also supported
    Optimization criterion for the stepwise selection.
p_value_enter : float, default .05
    p-value threshold above which a variable is removed.
show_step : bool, default True
    Whether to print the stepwise regression process.
max_iter : int, default 200
    Maximum number of stepwise iterations.
'''
SET_USE_BIC_LLF(True)
criterion_list = ['bic', 'aic']
if criterion not in criterion_list:
raise ValueError('criterion must be one of', '\n', criterion_list)
# default p_enter thresholds per criterion
p_enter = {'bic':0.0, 'aic':0.0}
if normalize: # standardize the data if requested
intercept = False # the intercept is forced to 0
df_std = StandardScaler().fit_transform(df)
df = pd.DataFrame(df_std, columns=df.columns, index=df.index)
remaining = list(df.columns) # set of candidate predictors
remaining.remove(response)
selected = [] # initialize the list of variables selected into the model
# initialize the current score and the best new score
if intercept: # with intercept
formula = "{} ~ {} + 1".format(response, remaining[0])
else:
formula = "{} ~ {} - 1".format(response, remaining[0])
result = smf.glm(formula, data=df,family=sm.families.Binomial(),freq_weights=self.sample_weight).fit(disp=0) # logistic (binomial GLM) fit
current_score = getattr(result, criterion)
best_new_score = getattr(result, criterion)
if show_step:
print('\nstepwise starting:\n')
# loop while candidate variables remain and the current score keeps being updated
iter_times = 0
while remaining and (current_score == best_new_score) and (iter_times<max_iter):
scores_with_candidates = [] # initialize the list of candidates and their scores
for candidate in remaining: # try adding each remaining candidate to the model in turn
if intercept: # with intercept
formula = "{} ~ {} + 1".format(response, ' + '.join(selected + [candidate]))
else:
formula = "{} ~ {} - 1".format(response, ' + '.join(selected + [candidate]))
result = smf.glm(formula, data=df,family=sm.families.Binomial(),freq_weights=self.sample_weight).fit(disp=0) # logistic (binomial GLM) fit
llf = result.llf
score = getattr(result, criterion)
scores_with_candidates.append((score, candidate, llf)) # record the candidate and its score for this pass
if criterion in ['bic', 'aic']: # these criteria are minimized
scores_with_candidates.sort(reverse=True) # sort scores in descending order
best_new_score, best_candidate, best_new_llf = scores_with_candidates.pop() # pop the smallest score and its variable
if (current_score - best_new_score) > p_enter[criterion]: # the new score improves on the current score by more than the threshold
remaining.remove(best_candidate) # remove the best candidate from the remaining variables
selected.append(best_candidate) # add the best candidate to the selected variables
current_score = best_new_score # update the current score
if show_step: # optionally print the stepwise process
print('Adding %s, %s = %.3f' % (best_candidate, criterion, best_new_score))
elif (current_score - best_new_score) >= 0 and iter_times == 0: # score difference is non-negative on the first iteration
remaining.remove(best_candidate)
selected.append(best_candidate)
current_score = best_new_score
if show_step: # optionally print the stepwise process
print('Adding %s, %s = %.3f' % (best_candidate, criterion, best_new_score))
elif iter_times == 0: # score difference below the threshold on the first iteration: force the first candidate in
first_candidate = remaining.pop(0) # keep a reference before removal so the log prints the variable that was actually added
selected.append(first_candidate)
if show_step: # optionally print the stepwise process
print('Adding %s, %s = %.3f' % (first_candidate, criterion, best_new_score))
if intercept: # with intercept
formula = "{} ~ {} + 1".format(response, ' + '.join(selected))
else:
formula = "{} ~ {} - 1".format(response, ' + '.join(selected))
result = smf.glm(formula, data=df,family=sm.families.Binomial(),freq_weights=self.sample_weight).fit(disp=0) # fit the current best model
if iter_times >= 1: # from the second iteration on, check whether the variables' p-values are acceptable
if result.pvalues.max() > p_value_enter:
var_removed = result.pvalues[result.pvalues == result.pvalues.max()].index[0]
p_value_removed = result.pvalues[result.pvalues == result.pvalues.max()].values[0]
selected.remove(var_removed)
if show_step: # optionally print the stepwise process
print('Removing %s, Pvalue = %.3f' % (var_removed, p_value_removed))
iter_times += 1
if intercept: # with intercept
formula = "{} ~ {} + 1".format(response, ' + '.join(selected))
else:
formula = "{} ~ {} - 1".format(response, ' + '.join(selected))
stepwise_model = smf.glm(formula,data=df,family=sm.families.Binomial(),freq_weights=self.sample_weight).fit(disp=0) # fit the final model
if show_step: # optionally print the stepwise process
print('\nLogistic regression model:', '\n ', stepwise_model.model.formula)
print('\n', stepwise_model.summary())
return stepwise_model
def _vif(self,logit_model,X,show_high_vif_only=False):
'''
Report variance inflation factors; values above 10 indicate collinearity
Parameters:
--
logit_model: the logit_model object produced by the stepwise procedure
X: training data, a pd.DataFrame
show_high_vif_only=False: if True only features with VIF greater than 10 are reported, otherwise VIFs of all features are reported
'''
vif = pd.DataFrame()
variables_stepwise=logit_model.params.index.tolist()[1:]
vif["VIF Factor"] = [variance_inflation_factor(X[variables_stepwise].values, i) for i in range(X[variables_stepwise].shape[1])]
vif["features"] = variables_stepwise
if show_high_vif_only:
return(vif[vif['VIF Factor']>=10])
else:
return(vif)
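# Hedged usage sketch (added for illustration, not part of the original source).
# The names `X_woe` and `y` are assumptions: a WOE-encoded pd.DataFrame of features
# and a binary pd.Series target aligned to it.
#
#   selector = stepLogit(criterion='aic', p_value_enter=0.05, show_step=False)
#   selector.fit(X_woe, y)                  # run the stepwise selection
#   X_kept = selector.transform(X_woe)      # keep only the selected columns
#   proba = selector.predict_proba(X_kept)  # predicted probabilities
#   print(selector.model_info)              # statsmodels summary of the final model
#   print(selector.vif_info)                # variance inflation factors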
class cardScorer(Base,Specials,TransformerMixin):
'''
Score transformation (scorecard generation)
Parameters:
--
logit_model: a fitted logistic regression object from statsmodels or sklearn
+ statsmodels.discrete.discrete_model.BinaryResultsWrapper or a statsmodels generalized_linear_model results object
+ sklearn.linear_model._logistic.LogisticRegression
varbin: BDMLtools.varReport(...).fit(...).var_report_dict, a dict; the WOE encoding is produced with reference to it
odds0=1/100: odds (bad/good) corresponding to the base score
pdo=50: int, points needed to double the odds
points0=600, int, base score
digit=0, number of decimal places kept in the scorecard points
check_na, bool, if True the program raises an error when missing values appear in the scored data
such errors usually mean some bin contains a single sample, or that values of the corresponding column in test/oot data fall outside the range seen in train; the column is then very likely a character column
special_values, values to be treated as special; set when some values in the data, or some values of specific columns, need special handling (values that are not np.nan)
note that special_values must be identical to the special_values used in binSelector, otherwise the special rows of the score will be wrong
+ None, keep the data as-is
+ list=[value1,value2,...], these values are replaced in every column of the data; strings are replaced with 'missing' and numbers with np.nan
+ dict={col_name1:[value1,value2,...],...}, replacement restricted to the specified columns; the listed values are replaced, strings with 'missing' and numbers with np.nan
dtype, 'float32' or 'float64'; the final score data is cast to np.float32/np.float64 and breaks will bin the data in that dtype as well
+ the module bins the data with the breaks stored in varbin, which are np.float64, so numeric columns passed to fit must also be float64, otherwise precision issues arise from the dtype mismatch
+ if the numeric columns passed to fit are float32, set dtype='float32' to avoid precision issues caused by the dtype mismatch
+ do not mix numeric precisions in the raw data (e.g. float32 with float64, int32 with int64, ...); use bm.dtypeAllocator to unify the dtypes of the modeling data
n_jobs=1, number of parallel jobs
verbose=0, verbosity level of the parallel backend
Attribute:
--
scorecard: dict, the generated scorecard; call fit first
'''
def __init__(self,logit_model,varbin,odds0=1/100,pdo=50,points0=600,digit=0,special_values=None,
check_na=True,dtype='float64',n_jobs=1,verbose=0):
self.logit_model=logit_model
self.varbin=varbin
self.odds0=odds0
self.pdo=pdo
self.points0=points0
self.digit=digit
self.special_values=special_values
self.dtype=dtype
self.check_na=check_na
self.n_jobs=n_jobs
self.verbose=verbose
self._is_fitted=False
def fit(self,X,y=None):
self._check_X(X)
if isinstance(self.logit_model,(BinaryResultsWrapper,GLMResultsWrapper)):
logit_model_coef=self.logit_model.params[1:].to_dict()
logit_model_intercept=self.logit_model.params[0]
self.columns=list(logit_model_coef.keys())
elif isinstance(self.logit_model,LogisticRegression):
logit_model_coef=dict(zip(self.logit_model.feature_names_in_.tolist(),self.logit_model.coef_.tolist()[0]))
logit_model_intercept=self.logit_model.intercept_[0]
self.columns=self.logit_model.feature_names_in_.tolist()
else:
raise ValueError('logit_model must be a statsmodels BinaryResultsWrapper/GLMResultsWrapper or a sklearn.linear_model._logistic.LogisticRegression instance')
self.scorecard=self._getPoints(self.varbin,logit_model_coef,logit_model_intercept,self.digit)
self._is_fitted=True
return self
def transform(self,X,y=None):
self._check_param_dtype(self.dtype)
self._check_is_fitted()
self._check_X(X)
n_jobs=effective_n_jobs(self.n_jobs)
p=Parallel(n_jobs=n_jobs,verbose=self.verbose)
res=p(delayed(self._points_map)(X[key],self.scorecard[key],self.check_na,self.special_values,self.dtype)
for key in self.columns)
score=pd.concat({col:col_points for col,col_points in res},axis=1)
score['score']=score.sum(axis=1).add(self.scorecard['intercept']['points'][0])
return score
def _getPoints(self,varbin,logit_model_coef,logit_model_intercept,digit):
A,B=self._getAB(base=self.points0, ratio=self.odds0, PDO=self.pdo)
bin_keep={col:varbin[col] for col in logit_model_coef.keys()}
points_intercept=round(A-B*(logit_model_intercept),digit)
points_all={}
points_all['intercept']=pd.DataFrame({'variable':'intercept',
'points':np.array(points_intercept)},index=['intercept'])
for col in bin_keep:
bin_points=bin_keep[col].join(
bin_keep[col]['woe'].mul(logit_model_coef[col]).mul(B).mul(-1).round(digit).rename('points')
)[['variable','points','woe','breaks']]
points_all[col]=bin_points
return points_all
def _getAB(self,base=600, ratio=1/100, PDO=50):
b = PDO/np.log(2)
a = base + b*np.log(ratio)
return a,b
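# Added note (not in the original source): with score = A - B*ln(odds),
# B = PDO/ln(2) and A = points0 + B*ln(odds0), so odds0 maps exactly to points0 and
# each doubling of the bad/good odds shifts the score by PDO points.
# For the defaults points0=600, odds0=1/100, PDO=50:
#   B = 50/ln(2) ≈ 72.13,  A = 600 + 72.13*ln(1/100) ≈ 267.8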
def _points_map(self,col,bin_df,check_na=True,special_values=None,dtype='float64'):
col=self._sp_replace_single(col,self._check_spvalues(col.name,special_values),fill_num=np.finfo(np.float32).max,fill_str='special')
if is_numeric_dtype(col):
bin_df_drop= bin_df[~bin_df['breaks'].isin([-np.inf,'missing','special',np.inf])]
breaks=bin_df_drop['breaks'].astype('float64').tolist()
points=bin_df[~bin_df['breaks'].isin(['missing','special'])]['points'].tolist()
points_nan= bin_df[bin_df['breaks'].eq("missing")]['points'][0]
points_sp= bin_df[bin_df['breaks'].eq("special")]['points'][0]
if special_values:
breaks_cut=breaks+[np.finfo(np.float32).max] if dtype=='float64' else np.float32(breaks+[np.finfo(np.float32).max]).tolist()
col_points=pd.cut(col,[-np.inf]+breaks_cut+[np.inf],labels=points+[points_sp],right=False,ordered=False).astype(dtype)
import tiledb, numpy as np
import json
import sys
import os
import io
from collections import OrderedDict
import warnings
from tiledb import TileDBError
if sys.version_info >= (3,3):
unicode_type = str
else:
unicode_type = unicode
unicode_dtype = np.dtype(unicode_type)
# TODO
# - handle missing values
# - handle extended datatypes
# - implement distributed CSV import
# - implement support for read CSV via TileDB VFS from any supported FS
TILEDB_KWARG_DEFAULTS = {
'ctx': None,
'sparse': True,
'index_dims': None,
'allows_duplicates': True,
'mode': 'ingest',
'attrs_filters': None,
'coords_filters': None,
'full_domain': False,
'tile': None,
'row_start_idx': None,
'fillna': None,
'column_types': None,
'capacity': None,
'date_spec': None,
'cell_order': 'row-major',
'tile_order': 'row-major',
'debug': None,
}
def parse_tiledb_kwargs(kwargs):
args = dict(TILEDB_KWARG_DEFAULTS)
for key in TILEDB_KWARG_DEFAULTS.keys():
if key in kwargs:
args[key] = kwargs.pop(key)
return args
class ColumnInfo:
def __init__(self, dtype, repr=None):
self.dtype = dtype
self.repr = repr
def dtype_from_column(col):
import pandas as pd
col_dtype = col.dtype
# TODO add more basic types here
if col_dtype in (np.int32, np.int64, np.uint32, np.uint64, np.float64, np.double,
np.uint8):
return ColumnInfo(col_dtype)
# TODO this seems kind of brittle
if col_dtype.base == np.dtype('M8[ns]'):
if col_dtype == np.dtype('datetime64[ns]'):
return ColumnInfo(col_dtype)
elif hasattr(col_dtype, 'tz'):
raise ValueError("datetime with tz not yet supported")
else:
raise ValueError("unsupported datetime subtype ({})".format(type(col_dtype)))
# Pandas 1.0 has StringDtype extension type
if col_dtype.name == 'string':
return ColumnInfo(unicode_dtype)
if col_dtype == 'bool':
return ColumnInfo(np.uint8, repr=np.dtype('bool'))
if col_dtype == np.dtype("O"):
# Note: this does a full scan of the column... not sure what else to do here
# because Pandas allows mixed string column types (and actually has
# problems w/ allowing non-string types in object columns)
inferred_dtype = pd.api.types.infer_dtype(col)
if inferred_dtype == 'bytes':
return ColumnInfo(np.bytes_)
elif inferred_dtype == 'string':
# TODO we need to make sure this is actually convertible
return ColumnInfo(unicode_dtype)
elif inferred_dtype == 'mixed':
raise ValueError(
"Column '{}' has mixed value dtype and cannot yet be stored as a TileDB attribute".format(col.name)
)
raise ValueError(
"Unhandled column type: '{}'".format(
col_dtype
)
)
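# Illustrative expectations (added note; a sketch, not exhaustive), assuming
# `import pandas as pd`:
#
#   dtype_from_column(pd.Series([1, 2, 3])).dtype      # -> dtype('int64')
#   dtype_from_column(pd.Series([True, False])).repr   # -> dtype('bool'), stored as uint8
#   dtype_from_column(pd.Series(["a", "b"])).dtype     # -> numpy unicode dtype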
# TODO make this a staticmethod on Attr?
def attrs_from_df(df,
index_dims=None, filters=None,
column_types=None, ctx=None):
attr_reprs = dict()
if ctx is None:
ctx = tiledb.default_ctx()
if column_types is None:
column_types = dict()
attrs = list()
for name, col in df.items():
# ignore any column used as a dim/index
if index_dims and name in index_dims:
continue
if name in column_types:
spec_type = column_types[name]
# Handle ExtensionDtype
if hasattr(spec_type, 'type'):
spec_type = spec_type.type
attr_info = ColumnInfo(spec_type)
else:
attr_info = dtype_from_column(col)
attrs.append(tiledb.Attr(name=name, dtype=attr_info.dtype, filters=filters))
if attr_info.repr is not None:
attr_reprs[name] = attr_info.repr
return attrs, attr_reprs
def dim_info_for_column(ctx, df, col, tile=None, full_domain=False, index_dtype=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if len(col_values) < 1:
raise ValueError("Empty column '{}' cannot be used for dimension!".format(col_name))
if index_dtype is not None:
dim_info = ColumnInfo(index_dtype)
elif col_values.dtype is np.dtype('O'):
col_val0_type = type(col_values[0])
if col_val0_type in (bytes, unicode_type):
# TODO... core only supports TILEDB_ASCII right now
dim_info = ColumnInfo(np.bytes_)
else:
raise TypeError("Unknown column type not yet supported ('{}')".format(col_val0_type))
else:
dim_info = dtype_from_column(col_values)
return dim_info
def dim_for_column(ctx, name, dim_info, col, tile=None, full_domain=False, ndim=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if tile is None:
if ndim is None:
raise TileDBError("Unexpected Nonetype ndim")
if ndim == 1:
tile = 10000
elif ndim == 2:
tile = 1000
elif ndim == 3:
tile = 100
else:
tile = 10
dtype = dim_info.dtype
if full_domain:
if dim_info.dtype not in (np.bytes_, np.unicode_):
# Use the full type domain, deferring to the constructor
(dtype_min, dtype_max) = tiledb.libtiledb.dtype_range(dim_info.dtype)
dim_max = dtype_max
if dtype.kind == 'M':
date_unit = np.datetime_data(dtype)[0]
dim_min = np.datetime64(dtype_min + 1, date_unit)
tile_max = np.iinfo(np.uint64).max - tile
if np.abs(np.uint64(dtype_max) - np.uint64(dtype_min)) > tile_max:
dim_max = np.datetime64(dtype_max - tile, date_unit)
elif dtype == np.int64:
dim_min = dtype_min + 1
else:
dim_min = dtype_min
if dtype.kind != 'M' and np.issubdtype(dtype, np.integer):
tile_max = np.iinfo(np.uint64).max - tile
if np.abs(np.uint64(dtype_max) - np.uint64(dtype_min)) > tile_max:
dim_max = dtype_max - tile
else:
dim_min, dim_max = (None, None)
else:
dim_min = np.min(col_values)
dim_max = np.max(col_values)
if dim_info.dtype not in (np.bytes_, np.unicode_):
if np.issubdtype(dtype, np.integer):
dim_range = np.uint64(np.abs(np.uint64(dim_max) - np.uint64(dim_min)))
if dim_range < tile:
tile = dim_range
elif np.issubdtype(dtype, np.float64):
dim_range = dim_max - dim_min
if dim_range < tile:
tile = np.ceil(dim_range)
dim = tiledb.Dim(
name = name,
domain = (dim_min, dim_max),
dtype = dim_info.dtype,
tile = tile
)
return dim
def get_index_metadata(dataframe):
md = dict()
for index in dataframe.index.names:
# Note: this may be expensive.
md[index] = dtype_from_column(dataframe.index.get_level_values(index)).dtype
return md
def create_dims(ctx, dataframe, index_dims,
tile=None, full_domain=False, sparse=None):
import pandas as pd
index = dataframe.index
index_dict = OrderedDict()
index_dtype = None
per_dim_tile = False
if tile is not None:
if isinstance(tile, dict):
per_dim_tile = True
# input check, can't do until after per_dim_tile
if (per_dim_tile and not all(map(lambda x: isinstance(x,(int,float)), tile.values()))) or \
(per_dim_tile is False and not isinstance(tile, (int,float))):
raise ValueError("Invalid tile kwarg: expected int or tuple of ints "
"got '{}'".format(tile))
if isinstance(index, pd.MultiIndex):
for name in index.names:
index_dict[name] = dataframe.index.get_level_values(name)
elif isinstance(index, (pd.Index, pd.RangeIndex, pd.Int64Index)):
if hasattr(index, 'name') and index.name is not None:
name = index.name
else:
index_dtype = np.dtype('uint64')
name = 'rows'
index_dict[name] = index.values
else:
raise ValueError("Unhandled index type {}".format(type(index)))
# create list of dim types
# we need to know all the types in order to validate before creating Dims
dim_types = list()
for idx,(name, values) in enumerate(index_dict.items()):
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dim_types.append(dim_info_for_column(ctx, dataframe, values,
tile=dim_tile, full_domain=full_domain,
index_dtype=index_dtype))
if any([d.dtype in (np.bytes_, np.unicode_) for d in dim_types]):
if sparse is False:
raise TileDBError("Cannot create dense array with string-typed dimensions")
elif sparse is None:
sparse = True
d0 = dim_types[0]
if not all(d0.dtype == d.dtype for d in dim_types[1:]):
if sparse is False:
raise TileDBError("Cannot create dense array with heterogeneous dimension data types")
elif sparse is None:
sparse = True
ndim = len(dim_types)
dims = list()
for idx, (name, values) in enumerate(index_dict.items()):
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dims.append(dim_for_column(ctx, name, dim_types[idx], values,
tile=dim_tile, full_domain=full_domain, ndim=ndim))
if index_dims:
for name in index_dims:
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
col = dataframe[name]
dims.append(
dim_for_column(ctx, name, dim_info_for_column(ctx, dataframe, col, tile=dim_tile, full_domain=full_domain), col.values, tile=dim_tile, full_domain=full_domain, ndim=ndim) # arguments reordered to match dim_for_column's signature
)
return dims, sparse
def write_array_metadata(array, attr_metadata = None, index_metadata = None):
"""
:param array: open, writable TileDB array
:param metadata: dict
:return:
"""
if attr_metadata:
attr_md_dict = {n: str(t) for n,t in attr_metadata.items()}
array.meta['__pandas_attribute_repr'] = json.dumps(attr_md_dict)
if index_metadata:
index_md_dict = {n: str(t) for n,t in index_metadata.items()}
array.meta['__pandas_index_dims'] = json.dumps(index_md_dict)
def from_dataframe(uri, dataframe, **kwargs):
# deprecated in 0.6.3
warnings.warn("tiledb.from_dataframe is deprecated; please use .from_pandas",
DeprecationWarning)
from_pandas(uri, dataframe, **kwargs)
def from_pandas(uri, dataframe, **kwargs):
"""Create TileDB array at given URI from pandas dataframe
:param uri: URI for new TileDB array
:param dataframe: pandas DataFrame
:param mode: Creation mode, one of 'ingest' (default), 'schema_only', 'append'
:Keyword Arguments: optional keyword arguments for TileDB, see ``tiledb.from_csv``.
:raises: :py:exc:`tiledb.TileDBError`
:return: None
"""
import pandas as pd
args = parse_tiledb_kwargs(kwargs)
ctx = args.get('ctx', None)
tile_order = args['tile_order']
cell_order = args['cell_order']
allows_duplicates = args.get('allows_duplicates', True)
sparse = args['sparse']
index_dims = args.get('index_dims', None)
mode = args.get('mode', 'ingest')
attrs_filters = args.get('attrs_filters', None)
coords_filters = args.get('coords_filters', None)
full_domain = args.get('full_domain', False)
capacity = args.get('capacity', False)
tile = args.get('tile', None)
nrows = args.get('nrows', None)
row_start_idx = args.get('row_start_idx', None)
fillna = args.pop('fillna', None)
date_spec = args.pop('date_spec', None)
column_types = args.pop('column_types', None)
write = True
create_array = True
if mode is not None:
if mode == 'schema_only':
write = False
elif mode == 'append':
create_array = False
elif mode != 'ingest':
raise TileDBError("Invalid mode specified ('{}')".format(mode))
if capacity is None:
capacity = 0 # this will use the libtiledb internal default
if ctx is None:
ctx = tiledb.default_ctx()
if create_array:
if attrs_filters is None:
attrs_filters = tiledb.FilterList(
[tiledb.ZstdFilter(1, ctx=ctx)])
if coords_filters is None:
coords_filters = tiledb.FilterList(
[tiledb.ZstdFilter(1, ctx=ctx)])
if nrows:
if full_domain is None:
full_domain = False
# create the domain and attributes
# if sparse==None then this function may return a default based on types
dims, sparse = create_dims(ctx, dataframe, index_dims, sparse=sparse,
tile=tile, full_domain=full_domain)
domain = tiledb.Domain(
*dims,
ctx = ctx
)
attrs, attr_metadata = attrs_from_df(dataframe,
index_dims=index_dims,
filters=attrs_filters,
column_types=column_types)
# now create the ArraySchema
schema = tiledb.ArraySchema(
domain=domain,
attrs=attrs,
cell_order=cell_order,
tile_order=tile_order,
coords_filters=coords_filters,
allows_duplicates=allows_duplicates,
capacity=capacity,
sparse=sparse
)
tiledb.Array.create(uri, schema, ctx=ctx)
# apply fill replacements for NA values if specified
if fillna is not None:
dataframe.fillna(fillna, inplace=True)
# apply custom datetime parsing to given {'column_name': format_spec} pairs
# format_spec should be provied using Python format codes:
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
if date_spec is not None:
if type(date_spec) is not dict:
raise TypeError("Expected 'date_spec' to be a dict, got {}".format(type(date_spec)))
for name, spec in date_spec.items():
dataframe[name] = pd.to_datetime(dataframe[name], format=spec)
if write:
write_dict = {k: v.values for k,v in dataframe.to_dict(orient='series').items()}
index_metadata = get_index_metadata(dataframe)
try:
A = tiledb.open(uri, 'w', ctx=ctx)
if A.schema.sparse:
coords = []
for k in range(A.schema.ndim):
coords.append(dataframe.index.get_level_values(k))
# TODO ensure correct col/dim ordering
A[tuple(coords)] = write_dict
else:
if row_start_idx is None:
row_start_idx = 0
row_end_idx = row_start_idx + len(dataframe)
A[row_start_idx:row_end_idx] = write_dict
if create_array:
write_array_metadata(A, attr_metadata, index_metadata)
finally:
A.close()
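# Hedged round-trip sketch (added for illustration; the URI and column names are
# made up, assuming a local TileDB installation and `import pandas as pd`):
#
#   df = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
#   from_pandas("my_array", df, sparse=True)   # ingest the dataframe
#   df2 = open_dataframe("my_array")           # read it back with restored dtypes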
def _tiledb_result_as_dataframe(readable_array, result_dict):
import pandas as pd
# TODO missing key in the rep map should only be a warning, return best-effort?
# TODO this should be generalized for round-tripping overloadable types
# for any array (e.g. np.uint8 <> bool)
repr_meta = None
index_dims = None
if '__pandas_attribute_repr' in readable_array.meta:
# backwards compatibility
repr_meta = json.loads(readable_array.meta['__pandas_attribute_repr'])
if '__pandas_index_dims' in readable_array.meta:
index_dims = json.loads(readable_array.meta['__pandas_index_dims'])
indexes = list()
for col_name, col_val in result_dict.items():
if repr_meta and col_name in repr_meta:
new_col = pd.Series(col_val, dtype=repr_meta[col_name])
result_dict[col_name] = new_col
elif index_dims and col_name in index_dims:
new_col = pd.Series(col_val, dtype=index_dims[col_name])
result_dict[col_name] = new_col
indexes.append(col_name)
df = pd.DataFrame.from_dict(result_dict)
if len(indexes) > 0:
df.set_index(indexes, inplace=True)
return df
def open_dataframe(uri, ctx=None):
"""Open TileDB array at given URI as a Pandas dataframe
If the array was saved using tiledb.from_dataframe, then columns
will be interpreted as non-primitive pandas or numpy types when
available.
:param uri:
:return: dataframe constructed from given TileDB array URI
**Example:**
>>> import tiledb
>>> df = tiledb.open_dataframe("iris.tldb")
>>> tiledb.object_type("iris.tldb")
'array'
"""
if ctx is None:
ctx = tiledb.default_ctx()
# TODO support `distributed=True` option?
with tiledb.open(uri, ctx=ctx) as A:
data = A[:]
new_df = _tiledb_result_as_dataframe(A, data)
return new_df
def from_csv(uri, csv_file, **kwargs):
"""
Create TileDB array at given URI from a CSV file or list of files
:param uri: URI for new TileDB array
:param csv_file: input CSV file or list of CSV files.
Note: multi-file ingestion requires a `chunksize` argument. Files will
be read in batches of at least `chunksize` rows before writing to the
TileDB array.
:Keyword Arguments:
- Any ``pandas.read_csv`` supported keyword argument.
- TileDB-specific arguments:
* ``allows_duplicates``: Generated schema should allow duplicates
* ``cell_order``: Schema cell order
* ``tile_order``: Schema tile order
* ``mode``: (default ``ingest``), Ingestion mode: ``ingest``, ``schema_only``,
``append``
* ``full_domain``: Dimensions should be created with full range of the dtype
* ``attrs_filters``: FilterList to apply to all Attributes
* ``coords_filters``: FilterList to apply to all coordinates (Dimensions)
* ``sparse``: (default True) Create sparse schema
* ``tile``: Dimension tiling: accepts either Int or a list of Tuple[Int] with per-dimension
'tile' arguments to apply to the generated ArraySchema.
* ``capacity``: Schema capacity
* ``date_spec``: Dictionary of {``column_name``: format_spec} to apply to date/time
columns which are not correctly inferred by pandas 'parse_dates'.
Format must be specified using the Python format codes:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
:return: None
**Example:**
>>> import tiledb
>>> tiledb.from_csv("iris.tldb", "iris.csv")
>>> tiledb.object_type("iris.tldb")
'array'
"""
try:
import pandas
except ImportError as exc:
print("tiledb.from_csv requires pandas")
raise
tiledb_args = parse_tiledb_kwargs(kwargs)
multi_file = False
debug = tiledb_args.get('debug', False)
if isinstance(csv_file, str) and not os.path.isfile(csv_file):
# for non-local files, use TileDB VFS i/o
ctx = tiledb_args.get('ctx', tiledb.default_ctx())
vfs = tiledb.VFS(ctx=ctx)
csv_file = tiledb.FileIO(vfs, csv_file, mode='rb')
elif isinstance(csv_file, (list, tuple)):
# TODO may be useful to support a callback here
multi_file = True
mode = kwargs.pop('mode', None)
if mode is not None:
tiledb_args['mode'] = mode
# For schema_only mode we need to pass a max read count into
# pandas.read_csv
# Note that 'nrows' is a pandas arg!
if mode == 'schema_only' and not 'nrows' in kwargs:
kwargs['nrows'] = 500
elif mode not in ['ingest', 'append']:
raise TileDBError("Invalid mode specified ('{}')".format(mode))
chunksize = kwargs.get('chunksize', None)
if multi_file and not chunksize:
raise TileDBError("Multiple input CSV files requires a 'chunksize' argument")
if chunksize is not None or multi_file:
if not 'nrows' in kwargs:
full_domain = True
array_created = False
if mode == 'schema_only':
raise TileDBError("schema_only ingestion not supported for chunked read")
elif mode == 'append':
array_created = True
csv_kwargs = kwargs.copy()
kwargs.update(tiledb_args)
if multi_file:
input_csv_list = csv_file
csv_kwargs.pop("chunksize")
else:
input_csv = csv_file
keep_reading = True
rows_written = 0
csv_idx = 0
df_iter = None
while keep_reading:
# if we have multiple files, read them until we hit row threshold
if multi_file:
rows_read = 0
input_dfs = list()
while rows_read < chunksize and keep_reading:
input_csv = input_csv_list[csv_idx]
df = pandas.read_csv(input_csv, **csv_kwargs)
input_dfs.append(df)
rows_read += len(df)
csv_idx += 1
keep_reading = csv_idx < len(input_csv_list)
df = pandas.concat(input_dfs)
else:
if not df_iter:
df_iter = pandas.read_csv(input_csv, **csv_kwargs)
df = next(df_iter, None)
if df is None:
break
kwargs['row_start_idx'] = rows_written
kwargs['full_domain'] = True
if array_created:
kwargs['mode'] = 'append'
# after the first chunk, switch to append mode
array_created = True
if debug:
print("`tiledb.read_csv` flushing '{}' rows ('{}' files)".format(
len(df), csv_idx))
# now flush
from_pandas(uri, df, **kwargs)
rows_written += len(df)
if mode == 'schema_only':
break
else:
df = pandas.read_csv(csv_file, **kwargs)
"""Deep Averaging Networks for text classification
- pytorch, spacy, deep averaging networks
- GloVe word embeddings, frozen and fine-tuned weights
- S&P Key Developments
<NAME>
License: MIT
"""
# jupyter-notebook --NotebookApp.iopub_data_rate_limit=1.0e12
import numpy as np
import os
import time
import re
import csv, gzip, json
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import Counter
from nltk.tokenize import RegexpTokenizer
import torch
import torch.nn as nn
import random
from finds.database import MongoDB
from finds.unstructured import Unstructured
from finds.structured import PSTAT
from finds.learning import TextualData
from settings import settings
from settings import pickle_dump, pickle_load
mongodb = MongoDB(**settings['mongodb'])
keydev = Unstructured(mongodb, 'KeyDev')
imgdir = os.path.join(settings['images'], 'classify')
memdir = settings['memmap']
event_ = PSTAT.event_
role_ = PSTAT.role_
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
## Retrieve headline+situation text
events = [28, 16, 83, 41, 81, 23, 87, 45, 80, 97, 231, 46, 31, 77, 29,
232, 101, 42, 47, 86, 93, 3, 22, 102, 82]
if False:
lines = []
event_all = []
tokenizer = RegexpTokenizer(r"\b[^\d\W][^\d\W][^\d\W]+\b")
for event in events:
docs = keydev['events']\
.find({'keydeveventtypeid': {'$eq': event}}, {'_id': 0})
doc = [tokenizer.tokenize((d['headline'] + " " + d['situation']).lower())
for d in docs]
lines.extend(doc)
event_all.extend([event] * len(doc))
with gzip.open(os.path.join(imgdir, 'lines.json.gz'), 'wt') as f:
json.dump(lines, f)
with gzip.open(os.path.join(imgdir,'event_all.json.gz'), 'wt') as f:
json.dump(event_all, f)
print(lines[1000000])
if False:
with gzip.open(os.path.join(imgdir, 'lines.json.gz'), 'rt') as f:
lines = json.load(f)
with gzip.open(os.path.join(imgdir,'event_all.json.gz'), 'rt') as f:
event_all = json.load(f)
## Encode class labels
from sklearn.preprocessing import LabelEncoder
event_encoder = LabelEncoder().fit(event_all) # .inverse_transform()
num_classes = len(np.unique(event_all))
y_all = event_encoder.transform(event_all)
Series(event_all).value_counts().rename(index=event_).rename('count').to_frame()
## Split into stratified train and test indices
from sklearn.model_selection import train_test_split
train_idx, test_idx = train_test_split(np.arange(len(y_all)), random_state=42,
stratify=y_all, test_size=0.2)
## Load spacy vocab
import spacy
lang = 'en_core_web_lg'
nlp = spacy.load(lang, disable=['parser', 'tagger', 'ner', 'lemmatizer'])
for w in ['yen', 'jpy', 'eur', 'dkk', 'cny', 'sfr']:
nlp.vocab[w].is_stop = True # Mark customized stop words
n_vocab, vocab_dim = nlp.vocab.vectors.shape
print('Language:', lang, ' vocab:', n_vocab, ' dim:', vocab_dim)
## Precompute word embeddings input
def form_input(line, nlp):
"""Return spacy average vector from valid words"""
tokens = [tok.vector for tok in nlp(" ".join(line))
if not(tok.is_stop or tok.is_punct or tok.is_oov or tok.is_space)]
return (np.array(tokens).mean(axis=0) if len(tokens) else
np.zeros(nlp.vocab.vectors.shape[1]))
args = {'dtype': 'float32'}
memdir = '/home/terence/Downloads/stocks2020/memmap/'
if False:
args.update({'shape': (len(lines), vocab_dim), 'mode': 'r+'})
X = np.memmap(os.path.join(memdir, "X.{}_{}".format(*args['shape'])),**args)
for i, line in tqdm(enumerate(lines)):
X[i] = form_input(line, nlp).astype(args['dtype'])
args.update({'shape': (1224251, vocab_dim), 'mode': 'r'})
X = np.memmap(os.path.join(memdir, "X.{}_{}".format(*args['shape'])), **args)
## Pytorch Feed Forward Network
class FFNN(nn.Module):
"""Deep Averaging Network for classification"""
def __init__(self, vocab_dim, num_classes, hidden, dropout=0.3):
super().__init__()
V = nn.Linear(vocab_dim, hidden[0])
nn.init.xavier_uniform_(V.weight)
L = [V, nn.Dropout(dropout)]
for g, h in zip(hidden, hidden[1:] + [num_classes]):
W = nn.Linear(g, h)
nn.init.xavier_uniform_(W.weight)
L.extend([nn.ReLU(), W])
self.network = nn.Sequential(*L)
self.classifier = nn.LogSoftmax(dim=-1) # output is (N, C) logits
def forward(self, x):
"""Return tensor of log probabilities"""
return self.classifier(self.network(x))
def predict(self, x):
"""Return predicted int class of input tensor vector"""
return torch.argmax(self(x), dim=1).int().tolist()
def save(self, filename):
"""save model state to filename"""
return torch.save(self.state_dict(), filename)
def load(self, filename):
"""load model name from filename"""
self.load_state_dict(torch.load(filename, map_location='cpu'))
return self
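# Hedged shape-check sketch (added, not in the original script): a tiny forward pass
# on random inputs, assuming vocab_dim=300 and the 25 event classes used above.
#
#   toy = FFNN(vocab_dim=300, num_classes=25, hidden=[300]).to(device)
#   x_toy = torch.randn(8, 300).to(device)   # batch of 8 averaged word embeddings
#   log_probs = toy(x_toy)                   # tensor of shape (8, 25) with log-probabilities
#   preds = toy.predict(x_toy)               # list of 8 predicted class ids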
## Training Loops
accuracy = {} # to store computed metrics
max_layers, hidden = 1, 300 #3, 300
batch_sz, lr, num_lr, step_sz, eval_skip = 64, 0.01, 4, 10, 5 #3, 3, 3 #
num_epochs = step_sz * num_lr + 1
for layers in [max_layers]:
# Instantiate model, optimizer, scheduler, loss_function
model = FFNN(vocab_dim=vocab_dim, num_classes=num_classes,
hidden=[hidden]*layers).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, gamma=0.1,
step_size=step_sz)
loss_function = nn.NLLLoss()
accuracy[layers] = {}
# Loop over epochs and batches
for epoch in range(0, num_epochs):
tic = time.time()
idxs = [i for i in train_idx]
random.shuffle(idxs)
batches = [idxs[i:(i+batch_sz)] for i in range(0, len(idxs), batch_sz)]
total_loss = 0.0
model.train()
for i, batch in enumerate(batches): # loop over minibatches
x = torch.FloatTensor(X[batch]).to(device)
y = torch.tensor([y_all[idx] for idx in batch]).to(device)
model.zero_grad() # reset model gradient
log_probs = model(x) # run model
loss = loss_function(log_probs, y) # compute loss
total_loss += float(loss)
loss.backward() # loss step
optimizer.step() # optimizer step
print(i, len(batches), i/len(batches), total_loss, end='\r')
scheduler.step() # scheduler step
model.eval()
print(f"Loss on epoch {epoch}: {total_loss:.1f}")
#model.save(os.path.join(imgdir, f"dan{layers}.pt"))
with torch.no_grad():
if epoch % eval_skip == 0:
gold = np.asarray([int(y) for y in y_all])
batches = [test_idx[i:(i+128)]
for i in range(0, len(test_idx), 128)]
test_gold, test_pred = [], []
for batch in tqdm(batches):
test_pred.extend(model.predict(
torch.FloatTensor(X[batch]).to(device)))
test_gold.extend(gold[batch])
test_correct = (np.asarray(test_pred) ==
np.asarray(test_gold)).sum()
batches = [train_idx[i:(i+128)]
for i in range(0, len(train_idx), 128)]
train_gold, train_pred = [], []
for batch in tqdm(batches):
train_pred.extend(model.predict(
torch.FloatTensor(X[batch]).to(device)))
train_gold.extend(gold[batch])
train_correct = (np.asarray(train_pred) ==
np.asarray(train_gold)).sum()
accuracy[layers][epoch] = {
'loss': total_loss,
'train': train_correct/len(train_idx),
'test': test_correct/len(test_idx)}
print(layers, epoch, int(time.time()-tic),
optimizer.param_groups[0]['lr'],
train_correct/len(train_idx), test_correct/len(test_idx))
from sklearn import metrics
print(model) # show accuracy metrics for this layer
pd.concat([
Series({'Accuracy': metrics.accuracy_score(test_gold, test_pred),
'Precision': metrics.precision_score(test_gold, test_pred,
average='weighted'),
'Recall': metrics.recall_score(test_gold, test_pred,
average='weighted')},
name='Test Set').to_frame().T,
Series({'Accuracy': metrics.accuracy_score(train_gold, train_pred),
'Precision': metrics.precision_score(train_gold, train_pred,
average='weighted'),
'Recall': metrics.recall_score(train_gold, train_pred,
average='weighted')},
name='Train Set').to_frame().T], axis=0)
fig, ax = plt.subplots(num=1, clear=True, figsize=(10,6))
DataFrame.from_dict({err: {k: v[err] for k,v in accuracy[max_layers].items()}
for err in ['train', 'test']}).plot(ax=ax)
ax.set_title(f'Accuracy of DAN with frozen embedding weights')
ax.set_xlabel('Steps')
ax.set_ylabel('Accuracy')
ax.legend(['Train Set', 'Test Set'], loc='upper left')
plt.tight_layout()
plt.savefig(os.path.join(imgdir, f"frozen_accuracy.jpg"))
plt.show()
## Confusion Matrix
from sklearn.metrics import confusion_matrix
labels = [event_[e] for e in event_encoder.classes_]
cf_train = DataFrame(confusion_matrix(train_gold, train_pred),
index=pd.MultiIndex.from_product([['Actual'], labels])
import pandas as pd
import os
import random
from tqdm import tqdm
import pickle
import numpy as np
from collections import defaultdict, Counter
from prettytable import PrettyTable
import itertools
import scipy.sparse as sp
"""
-------------------------------------------------------------------
Functions of the preliminary section
-------------------------------------------------------------------
"""
def write_pickle(file_name, content):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'wb') as handle:
pickle.dump(content, handle)
def read_pickle(file_name):
with open(file_name, 'rb') as handle:
return pickle.load(handle)
"""
-------------------------------------------------------------------
REQUEST 1
-------------------------------------------------------------------
"""
def get_graph_dictionary(data):
"""
Create a graph structure
"""
concat = [data['Source'], data['Target']] # merge in a list the data columns Source and Target
df_concat = pd.concat(concat) # concatenate them
data_2 = data.set_index('Source') # set a index the Source column
graph = defaultdict(list) # initialize the nested dictionary
for row in tqdm(df_concat.unique()):
try:
graph[row] = data_2.loc[row, 'Target'].tolist() # append in the values the Target vertices
except AttributeError:
graph[row] = data_2.loc[row, 'Target'].flatten().tolist() # append in the values the Target vertex
except KeyError:
graph[row] # append empty list
return(graph)
class Graph(object):
"""
Create a graph class
"""
def __init__(self, graph_d=None):
if graph_d == None:
graph_d = {}
self.graph_d = graph_d # initialize the graph
# get the vertices of the graph
def vertices(self):
return list(self.graph_d.keys()) # build the vertices
# get the edges of the graph
def edges(self):
edges_lst = []
for node in self.graph_d:
try:
for neigh in self.graph_d[node]:
edges_lst.append((node, neigh))
except TypeError:
edges_lst.append((node, self.graph_d[node])) # append each edge in a tuple
return edges_lst
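# Hedged usage sketch (added for illustration): the Graph class on a tiny hand-made
# adjacency dictionary.
#
#   toy = Graph({0: [1, 2], 1: 2, 2: [0]})
#   toy.vertices()   # -> [0, 1, 2]
#   toy.edges()      # -> [(0, 1), (0, 2), (1, 2), (2, 0)]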
def average_number_pages1(g):
"""
Return the number of links on a randomly chosen page (sampled to estimate the average number of links per page)
"""
idx = (random.choice(g.vertices()))
if isinstance(g.graph_d[idx], list):
return (len(g.graph_d[idx]))
else:
return 1
def density_graph(g):
"""
Function to compute the density of a directed graph
"""
V = len(g.vertices())
E = len(g.edges())
return E / (V *(V - 1))
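# Worked example (added note): for the toy graph sketched above, V = 3 and E = 4,
# so density_graph returns 4 / (3 * 2) ≈ 0.67, i.e. two thirds of all possible
# directed edges are present.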
def IsSymmetric(mat, g):
"""
Fill the given lil sparse matrix with the graph's edges and
return the row and column indices of its non-zero entries,
which are then used to check whether the matrix is symmetric
"""
# looping on each vertex to assign the edges == 1
for vertex in g.graph_d:
if isinstance(g.graph_d[vertex], int):
mat[vertex, g.graph_d[vertex]] = 1
else:
for target in g.graph_d[vertex]:
mat[vertex, target] = 1
rows, cols = mat.nonzero() # get only the non zero elements from the sparse matrix
return rows, cols
"""
-------------------------------------------------------------------
REQUEST 2
-------------------------------------------------------------------
"""
def pages_reached(page, click, dic):
total_pages = [] # this list stores every page reached
page_list = [] # this list starts with the input page and is replaced by its neighbours after each click
page_list.append(str(page))
for no_of_click in range(click): # iterate once per click
new_lst = []
for i in page_list: # loop each page in the list
for j in dic[i]: # loop each neighbor in the page
new_lst.append(str(j)) #append the neighbors in new list
total_pages.append(str(j)) # append the neighbors in new list
page_list = new_lst #update the list to loop in
return total_pages
"""
-------------------------------------------------------------------
REQUEST 3
-------------------------------------------------------------------
"""
def in_degree_centrality(data):
"""
function that look for the in-degree values of each node
"""
concat = [data['Source'], data['Target']] # concat all the nodes
all_nodes = list(pd.concat(concat).unique()) # get the list of the unique values
in_degree = dict.fromkeys(all_nodes, 0) # dict with keys all the nodes and values the 0s
only_target_node = list(data.Target) # list of the target nodes which have at least a in-degree value
for node in only_target_node:
in_degree[node] +=1 # for each node in the target, update the dict adding 1
return in_degree
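# Hedged example (added for illustration): for a dataframe with columns
# Source=[0, 1, 2] and Target=[2, 2, 0], in_degree_centrality returns
# {0: 1, 1: 0, 2: 2}: node 2 is pointed to twice, node 1 never.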
def most_central_article(category, in_degree_dict):
"""
function that return the vertex with the highest in-degree centrality
"""
max_in_degree_value = 0 # set the max in degree value to 0
max_in_degree_vertex = '' # set the max in degree vertex to empty string
not_degree_article = [] # set the not degree list as empty
for vertex in category: # loop in each vertex of the chosen category
if vertex in in_degree_dict: # check if the vertex is in the graph
vertex_degree = in_degree_dict[vertex]
if vertex_degree > max_in_degree_value: # if vertex in degree is higher than 0 or the old max value, update it
max_in_degree_value = vertex_degree # update the max in degree value
max_in_degree_vertex = vertex # update the max in degree vertex
else:
not_degree_article.append(vertex) # append the articles that are not in the graph, that do not have a in degree value
continue
return max_in_degree_vertex, max_in_degree_value, not_degree_article
def minimum_number_clicks(graph, categories_red, data):
"""
function that return the vertex with the highest in-degree centrality
"""
print('Write the category')
while True:
category_input = str(input()) # obtain the category as input
if category_input not in categories_red: # check if category exist
print(category_input, ' not exist as category, change category')
else:
break
print()
print("Write the set of pages in the category chosen separated by a ',':")
pages_input = input() # get the numeber of pages to look for
pages_input = pages_input.split(',')
pages_input = [int(i) for i in pages_input]
print()
pages_not = []
for pages in pages_input: # loop over the pages in input
if pages not in categories_red[category_input]: # check if pages are in the category chosen
print(pages, ' not in ', category_input)
pages_not.append(pages) # append the pages that are not in the category chosen
pages_input = [i for i in pages_input if i not in pages_not]
graph = graph # the graph
central_vertex = most_central_article(categories_red[category_input], in_degree_centrality(data))[0] # set the max vertex
v = central_vertex
visited = [False] * (max(graph) + 1) # set as False the visited vertex
queue = [] # set the queue list
queue.append(v) # append the starting vertex to the list
visited[v] = True # set the starting vertex as visited
reached = 0 # initialize the number of reached vertex
reached_vertex = [] # initialize the list of reached vertex
number_of_click = 0
while queue: # while queue is active, so there is at least one element in it
if reached < (len(pages_input)): # keep searching until all the requested pages have been reached
v = queue.pop(0) # pop the first value in queue and get that value
try:
number_of_click += 1 # sum the numeber of click
for i in graph[v]: # loop over the neighbors of the page
if visited[i] == False: # check if the page is already visited or not
visited[i] = True # mark as true
queue.append(i) # append in the queue the vertex
if i in pages_input: # if the page is one of the input
reached += 1 # add the reached value
reached_vertex.append(i) # append the reached vertex
except TypeError:
number_of_click += 1
j = graph[v]
if visited[j] == False:
visited[j] = True
queue.append(j)
if j in pages_input:
reached += 1
reached_vertex.append(j)
else:
break
print('Reached vertex are: {}'.format(reached_vertex))
print('Minimum number of clicks, from most central article {} to reach the set of pages, is {}.'.format(central_vertex, number_of_click))
not_reached_vertex = [i for i in pages_input if i not in reached_vertex]
print('Not possible to reach {}'.format(not_reached_vertex))
"""
-------------------------------------------------------------------
REQUEST 4
-------------------------------------------------------------------
"""
def cat_subgraph(category_1, category_2, categories_dict, dataframe):
"""
function that create the subgraph of the category chosen
"""
#first get the two lists of pages of the categories
a = categories_dict[category_1]
b = categories_dict[category_2]
#given those lists, find the sources for both
source_a = dataframe[dataframe['Source'].isin(a)]
source_b = dataframe[dataframe['Source'].isin(b)]
#now find the edges, that have as targets the other list
edges_ab = source_a[source_a['Target'].isin(b)]
edges_ba = source_b[source_b['Target'].isin(a)]
#edges within the categories
edges_aa = source_a[source_a['Target'].isin(a)]
edges_bb = source_b[source_b['Target'].isin(b)]
#put them together
sub_df = pd.concat([edges_ab, edges_ba, edges_aa, edges_bb])
import numpy as np
import glob
import pandas as pd
import os
from netCDF4 import Dataset
import socket
atlas_name = "meanstate" # or "eape"
hostname = socket.gethostname()
if (hostname[:8] == "datarmor") or (hostname[::2][:3] == "rin"):
# login node is datarmor3
# computational nodes are rXiYnZ
gdac = "/home/ref-argo/gdac/dac"
pargopy = "/home1/datawork/groullet/argopy"
elif hostname in ["altair", "libra"]:
gdac = "/net/alpha/exports/sciences/roullet/Argo/dac"
pargopy = "/net/alpha/exports/sciences/roullet/Argo"
else:
raise ValueError("Configure tools.py before using Argopy")
daclist = ["aoml", "bodc", "coriolis", "csio",
"csiro", "incois", "jma", "kma",
"kordi", "meds", "nmdis"]
zref = np.array([0., 10., 20., 30., 40., 50., 60., 70., 80., 90.,
100., 110., 120., 130., 140., 150., 160., 170.,
180., 190., 200., 220., 240., 260., 280, 300.,
320., 340., 360., 380., 400., 450., 500., 550.,
600., 650., 700., 750., 800., 850., 900., 950.,
1000., 1050., 1100., 1150., 1200., 1250., 1300.,
1350., 1400., 1450., 1500., 1550., 1600., 1650.,
1700., 1750., 1800., 1850., 1900., 1950.,
2000.])
argodb_keys = ["DAC", "WMO", "IPROF", "N_LEVELS", "DATA_MODE", "LONGITUDE", "LATITUDE", "JULD", "STATUS"]
global_dir = "%s/global" % pargopy
argodb_dir = "%s/argo" % global_dir
argodb_file = "%s/argo_global.pkl" % argodb_dir
argo_file = gdac+"/%s/%i/%i_prof.nc"
def create_folders():
for d in [global_dir, argodb_dir]:
if os.path.exists(d):
pass
else:
os.makedirs(d)
def unmask(data):
""" transform masked array into regular numpy array """
data_out = {}
for k in data.keys():
if type(data[k]) is np.ma.core.MaskedArray:
data_out[k] = data[k].data
else:
data_out[k] = data[k]
return data_out
def bytes2str(data):
""" byte strings into strings"""
data_out = {}
for k in data.keys():
data_out[k] = data[k]
if type(data[k]) is np.ndarray:
firstelem = data_out[k].ravel()[0]
#print(k, type(firstelem))
if type(firstelem) is np.bytes_:
data_out[k] = np.asarray(data[k].data, dtype=str)
return data_out
def get_all_wmos():
print("retrieve all wmos in the DAC ", end="")
wmos = []
dacs = []
for dac in daclist:
prfiles = glob.glob("{}/{}/*/*_prof.nc".format(gdac, dac))
wmos += [int(f.split("/")[-2]) for f in prfiles]
dacs += [dac for f in prfiles]
nwmos = len(wmos)
print("/ found: %i" % nwmos)
return (dacs, wmos)
def write_argodb(argo):
f = argodb_file
print("write %s " % f)
pd.to_pickle(argo, f)
def read_argodb():
d = argodb_dir
f = argodb_file
if os.path.exists(f):
print("read %s " % f)
argo = pd.read_pickle(f)
else:
if os.path.exists(d):
pass
else:
os.makedirs(d)
print("Creation of the empty argo database: %s" % f)
argo = pd.DataFrame(columns=argodb_keys)
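# Hedged usage sketch (added, not part of the original module), assuming the
# gdac/pargopy paths configured above exist on this machine:
#
#   create_folders()              # create the global/argo directories
#   dacs, wmos = get_all_wmos()   # scan the GDAC tree for all float WMOs
#   argo = read_argodb()          # load argo_global.pkl, or start from an empty frame
#   write_argodb(argo)            # persist the database back to disk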