Dataset fields (per record):
repo_name: string, 6-77 characters
path: string, 8-215 characters
license: string, 15 distinct values
content: string, 335 characters to 154k characters
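Each record in this dump carries the four fields above. As a minimal sketch of how such records could be iterated in Python, assuming they are stored as a JSON Lines file (the file name esdoc_notebooks.jsonl is a hypothetical placeholder, not something given in the source):

import json

# Hypothetical file name: the source does not say how the records are stored.
RECORDS_PATH = "esdoc_notebooks.jsonl"

with open(RECORDS_PATH, encoding="utf-8") as handle:
    for line in handle:
        record = json.loads(line)
        # The four schema fields listed above.
        repo_name = record["repo_name"]    # e.g. "ES-DOC/esdoc-jupyterhub"
        path = record["path"]              # notebook path within the repository
        license_id = record["license"]     # one of 15 license classes, e.g. "gpl-3.0"
        content = record["content"]        # flattened notebook text, 335 to ~154k characters
        print(repo_name, path, license_id, len(content))

The two records shown below follow exactly this schema.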
repo_name: ES-DOC/esdoc-jupyterhub
path: notebooks/noaa-gfdl/cmip6/models/sandbox-1/toplevel.ipynb
license: gpl-3.0
content:
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'sandbox-1', 'toplevel') """ Explanation: ES-DOC CMIP6 Model Properties - Toplevel MIP Era: CMIP6 Institute: NOAA-GFDL Source ID: SANDBOX-1 Sub-Topics: Radiative Forcings. Properties: 85 (42 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-20 15:02:35 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --> Flux Correction 3. Key Properties --> Genealogy 4. Key Properties --> Software Properties 5. Key Properties --> Coupling 6. Key Properties --> Tuning Applied 7. Key Properties --> Conservation --> Heat 8. Key Properties --> Conservation --> Fresh Water 9. Key Properties --> Conservation --> Salt 10. Key Properties --> Conservation --> Momentum 11. Radiative Forcings 12. Radiative Forcings --> Greenhouse Gases --> CO2 13. Radiative Forcings --> Greenhouse Gases --> CH4 14. Radiative Forcings --> Greenhouse Gases --> N2O 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3 17. Radiative Forcings --> Greenhouse Gases --> CFC 18. Radiative Forcings --> Aerosols --> SO4 19. Radiative Forcings --> Aerosols --> Black Carbon 20. Radiative Forcings --> Aerosols --> Organic Carbon 21. Radiative Forcings --> Aerosols --> Nitrate 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect 24. Radiative Forcings --> Aerosols --> Dust 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic 27. Radiative Forcings --> Aerosols --> Sea Salt 28. Radiative Forcings --> Other --> Land Use 29. Radiative Forcings --> Other --> Solar 1. Key Properties Key properties of the model 1.1. Model Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Top level overview of coupled model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE    Type: STRING    Cardinality: 1.1 Name of coupled model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --> Flux Correction Flux correction properties of the model 2.1. 
Details Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe if/how flux corrections are applied in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --> Genealogy Genealogy and history of the model 3.1. Year Released Is Required: TRUE    Type: STRING    Cardinality: 1.1 Year the model was released End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. CMIP3 Parent Is Required: FALSE    Type: STRING    Cardinality: 0.1 CMIP3 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. CMIP5 Parent Is Required: FALSE    Type: STRING    Cardinality: 0.1 CMIP5 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Previous Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Previously known as End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --> Software Properties Software properties of model 4.1. Repository Is Required: FALSE    Type: STRING    Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE    Type: STRING    Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE    Type: STRING    Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.4. Components Structure Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.5. Coupler Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Overarching coupling framework for model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --> Coupling ** 5.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview of coupling in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.2. Atmosphere Double Flux Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.3. Atmosphere Fluxes Calculation Grid Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Where are the air-sea fluxes calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Atmosphere Relative Winds Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --> Tuning Applied Tuning methodology for model 6.1. Description Is Required: TRUE    Type: STRING    Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE    Type: STRING    Cardinality: 0.N List set of metrics/diagnostics of the global mean state used in tuning model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE    Type: STRING    Cardinality: 0.N List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE    Type: STRING    Cardinality: 0.N List observed trend metrics/diagnostics used in tuning model/component (such as 20th century) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.5. Energy Balance Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. Fresh Water Balance Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --> Conservation --> Heat Global heat convervation properties of the model 7.1. Global Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe if/how heat is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Atmos Ocean Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Atmos Land Interface Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe if/how heat is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Atmos Sea-ice Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.5. Ocean Seaice Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how heat is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.6. Land Ocean Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how heat is conserved at the land/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --> Conservation --> Fresh Water Global fresh water convervation properties of the model 8.1. Global Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe if/how fresh_water is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Atmos Ocean Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Atmos Land Interface Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe if/how fresh water is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Atmos Sea-ice Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. Ocean Seaice Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Runoff Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe how runoff is distributed and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Iceberg Calving Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how iceberg calving is modeled and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. 
Endoreic Basins Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how endoreic basins (no ocean access) are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Snow Accumulation Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe how snow accumulation over land and over sea-ice is treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Key Properties --> Conservation --> Salt Global salt convervation properties of the model 9.1. Ocean Seaice Interface Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how salt is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Key Properties --> Conservation --> Momentum Global momentum convervation properties of the model 10.1. Details Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe if/how momentum is conserved in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Radiative Forcings Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5) 11.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview of radiative forcings (GHG and aerosols) implementation in model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Radiative Forcings --> Greenhouse Gases --> CO2 Carbon dioxide forcing 12.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Radiative Forcings --> Greenhouse Gases --> CH4 Methane forcing 13.1. 
Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiative Forcings --> Greenhouse Gases --> N2O Nitrous oxide forcing 14.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3 Troposheric ozone forcing 15.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3 Stratospheric ozone forcing 16.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. 
via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiative Forcings --> Greenhouse Gases --> CFC Ozone-depleting and non-ozone-depleting fluorinated gases forcing 17.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Equivalence Concentration Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Details of any equivalence concentrations used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiative Forcings --> Aerosols --> SO4 SO4 aerosol forcing 18.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiative Forcings --> Aerosols --> Black Carbon Black carbon aerosol forcing 19.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiative Forcings --> Aerosols --> Organic Carbon Organic carbon aerosol forcing 20.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiative Forcings --> Aerosols --> Nitrate Nitrate forcing 21.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect Cloud albedo effect forcing (RFaci) 22.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.2. Aerosol Effect On Ice Clouds Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.3. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect Cloud lifetime effect forcing (ERFaci) 23.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.2. Aerosol Effect On Ice Clouds Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.3. RFaci From Sulfate Only Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Radiative forcing from aerosol cloud interactions from sulfate aerosol only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. 
citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiative Forcings --> Aerosols --> Dust Dust forcing 24.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic Tropospheric volcanic forcing 25.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE    Type: ENUM    Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE    Type: ENUM    Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25.4. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic Stratospheric volcanic forcing 26.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE    Type: ENUM    Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE    Type: ENUM    Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiative Forcings --> Aerosols --> Sea Salt Sea salt forcing 27.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiative Forcings --> Other --> Land Use Land use forcing 28.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28.2. Crop Change Only Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Land use change represented via crop change only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 29. Radiative Forcings --> Other --> Solar Solar forcing 29.1. Provision Is Required: TRUE    Type: ENUM    Cardinality: 1.N How solar forcing is provided End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Additional Information Is Required: FALSE    Type: STRING    Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """
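Every documentation cell above follows the same fill-in pattern: select a property with DOC.set_id(...), then record its value(s) with DOC.set_value(...). STRING and ENUM values are passed as quoted strings, BOOLEAN values as unquoted True/False, and properties marked "PROPERTY VALUE(S)" (cardinality 0.N or 1.N) are presumably filled by repeating the set_value call once per entry. A minimal sketch of that workflow follows; the author, contributor, overview text, and ENUM choices are hypothetical illustrations, not values taken from the source.

# DO NOT EDIT ! (same setup cell as the notebook above)
from pyesdoc.ipython.model_topic import NotebookOutput
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'sandbox-1', 'toplevel')

# Document metadata: names and emails below are hypothetical placeholders.
DOC.set_author("Jane Scientist", "jane.scientist@example.org")
DOC.set_contributor("John Modeller", "john.modeller@example.org")
DOC.set_publication_status(0)  # 0 = do not publish, 1 = publish

# STRING property, cardinality 1.1: one free-text value.
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
DOC.set_value("Hypothetical one-paragraph overview of the coupled model.")

# BOOLEAN property, cardinality 1.1: pass True or False unquoted.
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
DOC.set_value(False)

# ENUM property, cardinality 1.N: assumed to accept one set_value call per selected choice,
# using codes from the valid-choices list quoted in the cell above ("N/A", "M", "Y", "E", "ES", "C", ...).
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
DOC.set_value("C")
DOC.set_value("E")

The MOHC atmosphere notebook in the next record follows the same pattern with its own property identifiers and valid-choice lists.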
repo_name: ES-DOC/esdoc-jupyterhub
path: notebooks/mohc/cmip6/models/sandbox-1/atmos.ipynb
license: gpl-3.0
content:
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-1', 'atmos') """ Explanation: ES-DOC CMIP6 Model Properties - Atmos MIP Era: CMIP6 Institute: MOHC Source ID: SANDBOX-1 Topic: Atmos Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. Properties: 156 (127 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:15 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --> Overview 2. Key Properties --> Resolution 3. Key Properties --> Timestepping 4. Key Properties --> Orography 5. Grid --> Discretisation 6. Grid --> Discretisation --> Horizontal 7. Grid --> Discretisation --> Vertical 8. Dynamical Core 9. Dynamical Core --> Top Boundary 10. Dynamical Core --> Lateral Boundary 11. Dynamical Core --> Diffusion Horizontal 12. Dynamical Core --> Advection Tracers 13. Dynamical Core --> Advection Momentum 14. Radiation 15. Radiation --> Shortwave Radiation 16. Radiation --> Shortwave GHG 17. Radiation --> Shortwave Cloud Ice 18. Radiation --> Shortwave Cloud Liquid 19. Radiation --> Shortwave Cloud Inhomogeneity 20. Radiation --> Shortwave Aerosols 21. Radiation --> Shortwave Gases 22. Radiation --> Longwave Radiation 23. Radiation --> Longwave GHG 24. Radiation --> Longwave Cloud Ice 25. Radiation --> Longwave Cloud Liquid 26. Radiation --> Longwave Cloud Inhomogeneity 27. Radiation --> Longwave Aerosols 28. Radiation --> Longwave Gases 29. Turbulence Convection 30. Turbulence Convection --> Boundary Layer Turbulence 31. Turbulence Convection --> Deep Convection 32. Turbulence Convection --> Shallow Convection 33. Microphysics Precipitation 34. Microphysics Precipitation --> Large Scale Precipitation 35. Microphysics Precipitation --> Large Scale Cloud Microphysics 36. Cloud Scheme 37. Cloud Scheme --> Optical Cloud Properties 38. Cloud Scheme --> Sub Grid Scale Water Distribution 39. Cloud Scheme --> Sub Grid Scale Ice Distribution 40. Observation Simulation 41. Observation Simulation --> Isscp Attributes 42. Observation Simulation --> Cosp Attributes 43. Observation Simulation --> Radar Inputs 44. Observation Simulation --> Lidar Inputs 45. Gravity Waves 46. Gravity Waves --> Orographic Gravity Waves 47. Gravity Waves --> Non Orographic Gravity Waves 48. Solar 49. Solar --> Solar Pathways 50. Solar --> Solar Constant 51. Solar --> Orbital Parameters 52. Solar --> Insolation Ozone 53. Volcanos 54. Volcanos --> Volcanoes Treatment 1. Key Properties --> Overview Top level key properties 1.1. 
Model Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview of atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE    Type: STRING    Cardinality: 1.1 Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "AGCM" # "ARCM" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Family Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Type of atmospheric model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "primitive equations" # "non-hydrostatic" # "anelastic" # "Boussinesq" # "hydrostatic" # "quasi-hydrostatic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE    Type: ENUM    Cardinality: 1.N Basic approximations made in the atmosphere. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --> Resolution Characteristics of the model resolution 2.1. Horizontal Resolution Name Is Required: TRUE    Type: STRING    Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Canonical Horizontal Resolution Is Required: TRUE    Type: STRING    Cardinality: 1.1 Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Range Horizontal Resolution Is Required: TRUE    Type: STRING    Cardinality: 1.1 Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.4. Number Of Vertical Levels Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Number of vertical levels resolved on the computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.high_top') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 2.5. High Top Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --> Timestepping Characteristics of the atmosphere model time stepping 3.1. Timestep Dynamics Is Required: TRUE    Type: STRING    Cardinality: 1.1 Timestep for the dynamics, e.g. 30 min. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. Timestep Shortwave Radiative Transfer Is Required: FALSE    Type: STRING    Cardinality: 0.1 Timestep for the shortwave radiative transfer, e.g. 1.5 hours. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Timestep Longwave Radiative Transfer Is Required: FALSE    Type: STRING    Cardinality: 0.1 Timestep for the longwave radiative transfer, e.g. 3 hours. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.orography.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "present day" # "modified" # TODO - please enter value(s) """ Explanation: 4. Key Properties --> Orography Characteristics of the model orography 4.1. Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Time adaptation of the orography. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.orography.changes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "related to ice sheets" # "related to tectonics" # "modified mean" # "modified variance if taken into account in model (cf gravity waves)" # TODO - please enter value(s) """ Explanation: 4.2. Changes Is Required: TRUE    Type: ENUM    Cardinality: 1.N If the orography type is modified describe the time adaptation changes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Grid --> Discretisation Atmosphere grid discretisation 5.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of grid discretisation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "spectral" # "fixed grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Grid --> Discretisation --> Horizontal Atmosphere discretisation in the horizontal 6.1. Scheme Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Horizontal discretisation type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "finite elements" # "finite volumes" # "finite difference" # "centered finite difference" # TODO - please enter value(s) """ Explanation: 6.2. 
Scheme Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Horizontal discretisation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "second" # "third" # "fourth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.3. Scheme Order Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Horizontal discretisation function order End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "filter" # "pole rotation" # "artificial island" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.4. Horizontal Pole Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Horizontal discretisation pole singularity treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Gaussian" # "Latitude-Longitude" # "Cubed-Sphere" # "Icosahedral" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.5. Grid Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Horizontal grid type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "isobaric" # "sigma" # "hybrid sigma-pressure" # "hybrid pressure" # "vertically lagrangian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7. Grid --> Discretisation --> Vertical Atmosphere discretisation in the vertical 7.1. Coordinate Type Is Required: TRUE    Type: ENUM    Cardinality: 1.N Type of vertical coordinate system End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Dynamical Core Characteristics of the dynamical core 8.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of atmosphere dynamical core End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name for the dynamical core of the model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Adams-Bashforth" # "explicit" # "implicit" # "semi-implicit" # "leap frog" # "multi-step" # "Runge Kutta fifth order" # "Runge Kutta second order" # "Runge Kutta third order" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Timestepping Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Timestepping framework type End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "surface pressure" # "wind components" # "divergence/curl" # "temperature" # "potential temperature" # "total water" # "water vapour" # "water liquid" # "water ice" # "total water moments" # "clouds" # "radiation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.4. Prognostic Variables Is Required: TRUE    Type: ENUM    Cardinality: 1.N List of the model prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Dynamical Core --> Top Boundary Type of boundary layer at the top of the model 9.1. Top Boundary Condition Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Top boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Top Heat Is Required: TRUE    Type: STRING    Cardinality: 1.1 Top boundary heat treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Top Wind Is Required: TRUE    Type: STRING    Cardinality: 1.1 Top boundary wind treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Dynamical Core --> Lateral Boundary Type of lateral boundary condition (if the model is a regional model) 10.1. Condition Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Type of lateral boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Dynamical Core --> Diffusion Horizontal Horizontal diffusion scheme 11.1. Scheme Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Horizontal diffusion scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "iterated Laplacian" # "bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Scheme Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Horizontal diffusion scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heun" # "Roe and VanLeer" # "Roe and Superbee" # "Prather" # "UTOPIA" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Dynamical Core --> Advection Tracers Tracer advection scheme 12.1. 
Scheme Name Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Tracer advection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Eulerian" # "modified Euler" # "Lagrangian" # "semi-Lagrangian" # "cubic semi-Lagrangian" # "quintic semi-Lagrangian" # "mass-conserving" # "finite volume" # "flux-corrected" # "linear" # "quadratic" # "quartic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. Scheme Characteristics Is Required: TRUE    Type: ENUM    Cardinality: 1.N Tracer advection scheme characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "dry mass" # "tracer mass" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.3. Conserved Quantities Is Required: TRUE    Type: ENUM    Cardinality: 1.N Tracer advection scheme conserved quantities End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Priestley algorithm" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.4. Conservation Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Tracer advection scheme conservation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "VanLeer" # "Janjic" # "SUPG (Streamline Upwind Petrov-Galerkin)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamical Core --> Advection Momentum Momentum advection scheme 13.1. Scheme Name Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Momentum advection schemes name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "2nd order" # "4th order" # "cell-centred" # "staggered grid" # "semi-staggered grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Scheme Characteristics Is Required: TRUE    Type: ENUM    Cardinality: 1.N Momentum advection scheme characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa D-grid" # "Arakawa E-grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Scheme Staggering Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Momentum advection scheme staggering type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Angular momentum" # "Horizontal momentum" # "Enstrophy" # "Mass" # "Total energy" # "Vorticity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. 
Conserved Quantities Is Required: TRUE    Type: ENUM    Cardinality: 1.N Momentum advection scheme conserved quantities End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Conservation Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Momentum advection scheme conservation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.aerosols') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "sulphate" # "nitrate" # "sea salt" # "dust" # "ice" # "organic" # "BC (black carbon / soot)" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "polar stratospheric ice" # "NAT (nitric acid trihydrate)" # "NAD (nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particle)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiation Characteristics of the atmosphere radiation process 14.1. Aerosols Is Required: TRUE    Type: ENUM    Cardinality: 1.N Aerosols whose radiative effect is taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Radiation --> Shortwave Radiation Properties of the shortwave radiation scheme 15.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of shortwave radiation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name for the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Spectral Integration Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Shortwave radiation scheme spectral integration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.4. Transport Calculation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Shortwave radiation transport calculation methods End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.5. Spectral Intervals Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Shortwave radiation scheme number of spectral intervals End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiation --> Shortwave GHG Representation of greenhouse gases in the shortwave radiation scheme 16.1. Greenhouse Gas Complexity Is Required: TRUE    Type: ENUM    Cardinality: 1.N Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.2. ODS Is Required: FALSE    Type: ENUM    Cardinality: 0.N Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Other Flourinated Gases Is Required: FALSE    Type: ENUM    Cardinality: 0.N Other flourinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiation --> Shortwave Cloud Ice Shortwave radiative properties of ice crystals in clouds 17.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General shortwave radiative interactions with cloud ice crystals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Physical Representation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical representation of cloud ice crystals in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.3. Optical Methods Is Required: TRUE    Type: ENUM    Cardinality: 1.N Optical methods applicable to cloud ice crystals in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiation --> Shortwave Cloud Liquid Shortwave radiative properties of liquid droplets in clouds 18.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General shortwave radiative interactions with cloud liquid droplets End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.2. Physical Representation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical representation of cloud liquid droplets in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.3. Optical Methods Is Required: TRUE    Type: ENUM    Cardinality: 1.N Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity Cloud inhomogeneity in the shortwave radiation scheme 19.1. Cloud Inhomogeneity Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Method for taking into account horizontal cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiation --> Shortwave Aerosols Shortwave radiative properties of aerosols 20.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General shortwave radiative interactions with aerosols End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20.2. Physical Representation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical representation of aerosols in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20.3. Optical Methods Is Required: TRUE    Type: ENUM    Cardinality: 1.N Optical methods applicable to aerosols in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiation --> Shortwave Gases Shortwave radiative properties of gases 21.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General shortwave radiative interactions with gases End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22. Radiation --> Longwave Radiation Properties of the longwave radiation scheme 22.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of longwave radiation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name for the longwave radiation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.3. Spectral Integration Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Longwave radiation scheme spectral integration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.4. Transport Calculation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Longwave radiation transport calculation methods End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
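# Hypothetical illustration for the INTEGER property recorded below (22.5,
# longwave spectral intervals). Numeric properties take an unquoted value, so a
# completed cell might read (placeholder number only, not the SANDBOX-1 value):
#     DOC.set_value(16)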
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 22.5. Spectral Intervals Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Longwave radiation scheme number of spectral intervals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiation --> Longwave GHG Representation of greenhouse gases in the longwave radiation scheme 23.1. Greenhouse Gas Complexity Is Required: TRUE    Type: ENUM    Cardinality: 1.N Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. ODS Is Required: FALSE    Type: ENUM    Cardinality: 0.N Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.3. Other Flourinated Gases Is Required: FALSE    Type: ENUM    Cardinality: 0.N Other flourinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiation --> Longwave Cloud Ice Longwave radiative properties of ice crystals in clouds 24.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General longwave radiative interactions with cloud ice crystals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24.2. 
Physical Reprenstation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical representation of cloud ice crystals in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24.3. Optical Methods Is Required: TRUE    Type: ENUM    Cardinality: 1.N Optical methods applicable to cloud ice crystals in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiation --> Longwave Cloud Liquid Longwave radiative properties of liquid droplets in clouds 25.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General longwave radiative interactions with cloud liquid droplets End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Physical Representation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical representation of cloud liquid droplets in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Optical Methods Is Required: TRUE    Type: ENUM    Cardinality: 1.N Optical methods applicable to cloud liquid droplets in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity Cloud inhomogeneity in the longwave radiation scheme 26.1. Cloud Inhomogeneity Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Method for taking into account horizontal cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiation --> Longwave Aerosols Longwave radiative properties of aerosols 27.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General longwave radiative interactions with aerosols End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
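# Note: the longwave blocks (24-28) mirror the shortwave blocks (17-21)
# property-for-property, so entries can usefully be kept consistent between the
# two schemes. A hypothetical single entry for the 1.N ENUM below (placeholder
# only, not the SANDBOX-1 value) could be:
#     DOC.set_value("size distribution")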
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27.2. Physical Representation Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical representation of aerosols in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27.3. Optical Methods Is Required: TRUE    Type: ENUM    Cardinality: 1.N Optical methods applicable to aerosols in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiation --> Longwave Gases Longwave radiative properties of gases 28.1. General Interactions Is Required: TRUE    Type: ENUM    Cardinality: 1.N General longwave radiative interactions with gases End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29. Turbulence Convection Atmosphere Convective Turbulence and Clouds 29.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of atmosphere convection and turbulence End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Mellor-Yamada" # "Holtslag-Boville" # "EDMF" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence Properties of the boundary layer turbulence scheme 30.1. Scheme Name Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Boundary layer turbulence scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "TKE prognostic" # "TKE diagnostic" # "TKE coupled with water" # "vertical profile of Kz" # "non-local diffusion" # "Monin-Obukhov similarity" # "Coastal Buddy Scheme" # "Coupled with convection" # "Coupled with gravity waves" # "Depth capped at cloud base" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30.2. Scheme Type Is Required: TRUE    Type: ENUM    Cardinality: 1.N Boundary layer turbulence scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. 
Closure Order Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Boundary layer turbulence scheme closure order End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 30.4. Counter Gradient Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Uses boundary layer turbulence scheme counter gradient End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 31. Turbulence Convection --> Deep Convection Properties of the deep convection scheme 31.1. Scheme Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Deep convection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "adjustment" # "plume ensemble" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.2. Scheme Type Is Required: TRUE    Type: ENUM    Cardinality: 1.N Deep convection scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CAPE" # "bulk" # "ensemble" # "CAPE/WFN based" # "TKE/CIN based" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.3. Scheme Method Is Required: TRUE    Type: ENUM    Cardinality: 1.N Deep convection scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vertical momentum transport" # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "updrafts" # "downdrafts" # "radiative effect of anvils" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.4. Processes Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical processes taken into account in the parameterisation of deep convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.5. Microphysics Is Required: FALSE    Type: ENUM    Cardinality: 0.N Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32. Turbulence Convection --> Shallow Convection Properties of the shallow convection scheme 32.1. 
Scheme Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Shallow convection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "cumulus-capped boundary layer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.2. Scheme Type Is Required: TRUE    Type: ENUM    Cardinality: 1.N shallow convection scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "same as deep (unified)" # "included in boundary layer turbulence" # "separate diagnosis" # TODO - please enter value(s) """ Explanation: 32.3. Scheme Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 shallow convection scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.4. Processes Is Required: TRUE    Type: ENUM    Cardinality: 1.N Physical processes taken into account in the parameterisation of shallow convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.5. Microphysics Is Required: FALSE    Type: ENUM    Cardinality: 0.N Microphysics scheme for shallow convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33. Microphysics Precipitation Large Scale Cloud Microphysics and Precipitation 33.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of large scale cloud microphysics and precipitation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation Properties of the large scale precipitation scheme 34.1. Scheme Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name of the large scale precipitation parameterisation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "liquid rain" # "snow" # "hail" # "graupel" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 34.2. Hydrometeors Is Required: TRUE    Type: ENUM    Cardinality: 1.N Precipitating hydrometeors taken into account in the large scale precipitation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
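# Hypothetical illustration for optional free-text properties such as the scheme
# name below (35.1, Is Required: FALSE, Cardinality 0.1): these cells may be left
# as a TODO, or filled with a short descriptive string, e.g. (placeholder wording,
# not the actual SANDBOX-1 scheme):
#     DOC.set_value("single-moment bulk cloud microphysics")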
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics Properties of the large scale cloud microphysics scheme 35.1. Scheme Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name of the microphysics parameterisation scheme used for large scale clouds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mixed phase" # "cloud droplets" # "cloud ice" # "ice nucleation" # "water vapour deposition" # "effect of raindrops" # "effect of snow" # "effect of graupel" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 35.2. Processes Is Required: TRUE    Type: ENUM    Cardinality: 1.N Large scale cloud microphysics processes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36. Cloud Scheme Characteristics of the cloud scheme 36.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of the atmosphere cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36.2. Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name for the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "atmosphere_radiation" # "atmosphere_microphysics_precipitation" # "atmosphere_turbulence_convection" # "atmosphere_gravity_waves" # "atmosphere_solar" # "atmosphere_volcano" # "atmosphere_cloud_simulator" # TODO - please enter value(s) """ Explanation: 36.3. Atmos Coupling Is Required: FALSE    Type: ENUM    Cardinality: 0.N Atmosphere components that are linked to the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.4. Uses Separate Treatment Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "entrainment" # "detrainment" # "bulk cloud" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.5. Processes Is Required: TRUE    Type: ENUM    Cardinality: 1.N Processes included in the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.6. Prognostic Scheme Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is the cloud scheme a prognostic scheme? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.7. Diagnostic Scheme Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is the cloud scheme a diagnostic scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud amount" # "liquid" # "ice" # "rain" # "snow" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.8. Prognostic Variables Is Required: FALSE    Type: ENUM    Cardinality: 0.N List the prognostic variables used by the cloud scheme, if applicable. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "random" # "maximum" # "maximum-random" # "exponential" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 37. Cloud Scheme --> Optical Cloud Properties Optical cloud properties 37.1. Cloud Overlap Method Is Required: FALSE    Type: ENUM    Cardinality: 0.1 Method for taking into account overlapping of cloud layers End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.2. Cloud Inhomogeneity Is Required: FALSE    Type: STRING    Cardinality: 0.1 Method for taking into account cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) """ Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution Sub-grid scale water distribution 38.1. Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Sub-grid scale water distribution type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 38.2. Function Name Is Required: TRUE    Type: STRING    Cardinality: 1.1 Sub-grid scale water distribution function name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 38.3. Function Order Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Sub-grid scale water distribution function type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) """ Explanation: 38.4. Convection Coupling Is Required: TRUE    Type: ENUM    Cardinality: 1.N Sub-grid scale water distribution coupling with convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
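# Hypothetical illustration for single-valued (Cardinality 1.1) ENUM properties
# such as the sub-grid scale ice distribution type below (39.1): exactly one of
# the listed choices is entered, e.g. (placeholder choice, not the SANDBOX-1
# value):
#     DOC.set_value("diagnostic")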
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) """ Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution Sub-grid scale ice distribution 39.1. Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Sub-grid scale ice distribution type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 39.2. Function Name Is Required: TRUE    Type: STRING    Cardinality: 1.1 Sub-grid scale ice distribution function name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 39.3. Function Order Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Sub-grid scale ice distribution function type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) """ Explanation: 39.4. Convection Coupling Is Required: TRUE    Type: ENUM    Cardinality: 1.N Sub-grid scale ice distribution coupling with convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 40. Observation Simulation Characteristics of observation simulation 40.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of observation simulator characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "no adjustment" # "IR brightness" # "visible optical depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41. Observation Simulation --> Isscp Attributes ISSCP Characteristics 41.1. Top Height Estimation Method Is Required: TRUE    Type: ENUM    Cardinality: 1.N Cloud simulator ISSCP top height estimation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "lowest altitude level" # "highest altitude level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41.2. Top Height Direction Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Cloud simulator ISSCP top height direction End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Inline" # "Offline" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 42. Observation Simulation --> Cosp Attributes CFMIP Observational Simulator Package attributes
42.1. Run Configuration Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Cloud simulator COSP run configuration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.2. Number Of Grid Points Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Cloud simulator COSP number of grid points End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.3. Number Of Sub Columns Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Cloud simulator COSP number of sub-columns used to simulate sub-grid variability End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.4. Number Of Levels Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Cloud simulator COSP number of levels End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 43. Observation Simulation --> Radar Inputs Characteristics of the cloud radar simulator 43.1. Frequency Is Required: TRUE    Type: FLOAT    Cardinality: 1.1 Cloud simulator radar frequency (Hz) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "surface" # "space borne" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 43.2. Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Cloud simulator radar type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 43.3. Gas Absorption Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Cloud simulator radar uses gas absorption End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 43.4. Effective Radius Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Cloud simulator radar uses effective radius End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "ice spheres" # "ice non-spherical" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 44. Observation Simulation --> Lidar Inputs Characteristics of the cloud lidar simulator 44.1. Ice Types Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Cloud simulator lidar ice type End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
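# Recap for the BOOLEAN properties in the radar-inputs block above (43.3, 43.4):
# these take an unquoted Python boolean rather than a string, e.g. (placeholder
# value only, not the SANDBOX-1 setting):
#     DOC.set_value(True)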
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "max" # "random" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 44.2. Overlap Is Required: TRUE    Type: ENUM    Cardinality: 1.N Cloud simulator lidar overlap End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 45. Gravity Waves Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources. 45.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of gravity wave parameterisation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Rayleigh friction" # "Diffusive sponge layer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.2. Sponge Layer Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Sponge layer in the upper levels in order to avoid gravity wave reflection at the top. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "continuous spectrum" # "discrete spectrum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.3. Background Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Background wave distribution End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "effect on drag" # "effect on lifting" # "enhanced topography" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.4. Subgrid Scale Orography Is Required: TRUE    Type: ENUM    Cardinality: 1.N Subgrid scale orography effects taken into account. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 46. Gravity Waves --> Orographic Gravity Waves Gravity waves generated due to the presence of orography 46.1. Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name for the orographic gravity wave scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "linear mountain waves" # "hydraulic jump" # "envelope orography" # "low level flow blocking" # "statistical sub-grid scale variance" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.2. Source Mechanisms Is Required: TRUE    Type: ENUM    Cardinality: 1.N Orographic gravity wave source mechanisms End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "non-linear calculation" # "more than two cardinal directions" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.3. 
Calculation Method Is Required: TRUE    Type: ENUM    Cardinality: 1.N Orographic gravity wave calculation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "includes boundary layer ducting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.4. Propagation Scheme Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Orographic gravity wave propagation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.5. Dissipation Scheme Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Orographic gravity wave dissipation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves Gravity waves generated by non-orographic processes. 47.1. Name Is Required: FALSE    Type: STRING    Cardinality: 0.1 Commonly used name for the non-orographic gravity wave scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convection" # "precipitation" # "background spectrum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.2. Source Mechanisms Is Required: TRUE    Type: ENUM    Cardinality: 1.N Non-orographic gravity wave source mechanisms End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "spatially dependent" # "temporally dependent" # TODO - please enter value(s) """ Explanation: 47.3. Calculation Method Is Required: TRUE    Type: ENUM    Cardinality: 1.N Non-orographic gravity wave calculation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.4. Propagation Scheme Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Non-orographic gravity wave propagation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.5. Dissipation Scheme Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Non-orographic gravity wave dissipation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
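# The Solar section below mixes STRING, ENUM and FLOAT properties. As a
# hypothetical illustration for the FLOAT case (50.2, fixed solar constant in
# W m-2), a completed cell might read (placeholder number, not the SANDBOX-1
# value):
#     DOC.set_value(1361.0)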
DOC.set_id('cmip6.atmos.solar.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 48. Solar Top of atmosphere solar insolation characteristics 48.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of solar insolation of the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "SW radiation" # "precipitating energetic particles" # "cosmic rays" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 49. Solar --> Solar Pathways Pathways for solar forcing of the atmosphere 49.1. Pathways Is Required: TRUE    Type: ENUM    Cardinality: 1.N Pathways for the solar forcing of the atmosphere model domain End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) """ Explanation: 50. Solar --> Solar Constant Solar constant and top of atmosphere insolation characteristics 50.1. Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Time adaptation of the solar constant. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 50.2. Fixed Value Is Required: FALSE    Type: FLOAT    Cardinality: 0.1 If the solar constant is fixed, enter the value of the solar constant (W m-2). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 50.3. Transient Characteristics Is Required: TRUE    Type: STRING    Cardinality: 1.1 solar constant transient characteristics (W m-2) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) """ Explanation: 51. Solar --> Orbital Parameters Orbital parameters and top of atmosphere insolation characteristics 51.1. Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Time adaptation of orbital parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 51.2. Fixed Reference Date Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Reference date for fixed orbital parameters (yyyy) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 51.3. Transient Method Is Required: TRUE    Type: STRING    Cardinality: 1.1 Description of transient orbital parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Berger 1978" # "Laskar 2004" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 51.4. 
Computation Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Method used for computing orbital parameters. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 52. Solar --> Insolation Ozone Impact of solar insolation on stratospheric ozone 52.1. Solar Ozone Impact Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Does top of atmosphere insolation impact on stratospheric ozone? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 53. Volcanos Characteristics of the implementation of volcanoes 53.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview description of the implementation of volcanic effects in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "high frequency solar constant anomaly" # "stratospheric aerosols optical thickness" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 54. Volcanos --> Volcanoes Treatment Treatment of volcanoes in the atmosphere 54.1. Volcanoes Implementation Is Required: TRUE    Type: ENUM    Cardinality: 1.1 How volcanic effects are modeled in the atmosphere. End of explanation """
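For reference, a completed version of a cell like the last one above (54.1, an ENUM with cardinality 1.1) simply replaces the placeholder with one of the listed valid choices. The value shown here is purely illustrative and is not a statement about this particular model.

# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')

# PROPERTY VALUE:
# Illustrative choice only -- pick whichever of the listed valid choices applies.
DOC.set_value("stratospheric aerosols optical thickness")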
HumanCompatibleAI/imitation
examples/3_train_gail.ipynb
mit
from stable_baselines3 import PPO from stable_baselines3.ppo import MlpPolicy import gym import seals env = gym.make("seals/CartPole-v0") expert = PPO( policy=MlpPolicy, env=env, seed=0, batch_size=64, ent_coef=0.0, learning_rate=0.0003, n_epochs=10, n_steps=64, ) expert.learn(1000) # Note: set to 100000 to train a proficient expert """ Explanation: Train an Agent using Generative Adversarial Imitation Learning The idea of generative adversarial imitation learning is to train a discriminator network to distinguish between expert trajectories and learner trajectories. The learner is trained using a traditional reinforcement learning algorithm such as PPO and is rewarded for trajectories that make the discriminator think that it was an expert trajectory. As usual, we first need an expert. Note that we now use a variant of the CartPole environment from the seals package, which has fixed episode durations. Read more about why we do this here. End of explanation """ from imitation.data import rollout from imitation.data.wrappers import RolloutInfoWrapper from stable_baselines3.common.vec_env import DummyVecEnv rollouts = rollout.rollout( expert, DummyVecEnv([lambda: RolloutInfoWrapper(gym.make("seals/CartPole-v0"))] * 5), rollout.make_sample_until(min_timesteps=None, min_episodes=60), ) """ Explanation: We generate some expert trajectories, that the discriminator needs to distinguish from the learner's trajectories. End of explanation """ from imitation.algorithms.adversarial.gail import GAIL from imitation.rewards.reward_nets import BasicRewardNet from imitation.util.networks import RunningNorm from stable_baselines3 import PPO from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv import gym import seals venv = DummyVecEnv([lambda: gym.make("seals/CartPole-v0")] * 8) learner = PPO( env=venv, policy=MlpPolicy, batch_size=64, ent_coef=0.0, learning_rate=0.0003, n_epochs=10, ) reward_net = BasicRewardNet( venv.observation_space, venv.action_space, normalize_input_layer=RunningNorm ) gail_trainer = GAIL( demonstrations=rollouts, demo_batch_size=1024, gen_replay_buffer_capacity=2048, n_disc_updates_per_round=4, venv=venv, gen_algo=learner, reward_net=reward_net, ) learner_rewards_before_training, _ = evaluate_policy( learner, venv, 100, return_episode_rewards=True ) gail_trainer.train(20000) # Note: set to 300000 for better results learner_rewards_after_training, _ = evaluate_policy( learner, venv, 100, return_episode_rewards=True ) """ Explanation: Now we are ready to set up our GAIL trainer. Note, that the reward_net is actually the network of the discriminator. We evaluate the learner before and after training so we can see if it made any progress. End of explanation """ import matplotlib.pyplot as plt import numpy as np print(np.mean(learner_rewards_after_training)) print(np.mean(learner_rewards_before_training)) plt.hist( [learner_rewards_before_training, learner_rewards_after_training], label=["untrained", "trained"], ) plt.legend() plt.show() """ Explanation: When we look at the histograms of rewards before and after learning, we can see that the learner is not perfect yet, but it made some progress at least. If not, just re-run the above cell. End of explanation """
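A small follow-up that is not part of the original example: once training looks reasonable, the generator policy and the discriminator's reward network can be persisted and reloaded with the usual stable-baselines3 / PyTorch calls. The file names below are arbitrary placeholders.

import numpy as np
import torch as th

# Save the PPO generator and the learned reward (discriminator) network.
learner.save("gail_cartpole_learner")                            # SB3 .zip archive
th.save(reward_net.state_dict(), "gail_cartpole_reward_net.pt")  # plain PyTorch state dict

# Reload the generator later and confirm it still performs.
reloaded_learner = PPO.load("gail_cartpole_learner", env=venv)
rewards, _ = evaluate_policy(reloaded_learner, venv, 10, return_episode_rewards=True)
print(np.mean(rewards))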
tipsybear/actors-simulation
notebooks/distributions.ipynb
mit
%matplotlib inline from gvas.viz import * from gvas.dynamo import Uniform, Normal from gvas.dynamo import Stream """ Explanation: Distribution Analysis This notebook visualizes the distribution dynamos that subclass gvas.dynamo.Distribution. This is partly to have a debug of the distribution, but also to provide an entry point to vizualizations and simulation analysis later on. End of explanation """ Uniform(0, 100).plot(n=100000, context='paper') """ Explanation: Uniform Distribution End of explanation """ Normal(0, 12).plot(n=100000, context='paper') """ Explanation: Normal Distribution End of explanation """ Stream(100, 24, 10, 0.015, 15).plot(n=200, context='paper') """ Explanation: Streaming Data Simulates the flow of streaming data. End of explanation """
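A possible extension, not in the original notebook: re-plotting the same dynamos with different parameters is a quick way to see how each knob reshapes the samples, using only the plot helper already demonstrated. The meaning of the positional Stream arguments is assumed from the call above rather than from gvas documentation, so treat the varied values as a guess.

# Tighter and wider normal dynamos for comparison with the one plotted above.
Normal(0, 4).plot(n=100000, context='paper')
Normal(0, 24).plot(n=100000, context='paper')

# A second stream parameterisation; only the third positional argument is varied here.
Stream(100, 24, 20, 0.015, 15).plot(n=200, context='paper')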
axm108/CPWResonator
notebooks/cpw_resonator_plots.ipynb
mit
mCPW = CPWResonator(length = [7153E-6], conductorWidth = [30E-3], gapWidth = [19E-3], conductorThickness = [1E-3], resonatorType = 'quarter', conductorMaterial = 'Copper', substrateMaterial = 'ArlonAD1000', temperature = [3], couplingCapacitance = [10E-15], loadBoundaryCondition = 'Short', mode = [3]) mCPW.characteristicImpedance() """ Explanation: Copper PCB CPW End of explanation """ default = {'length': [7153E-6], 'conductorWidth': [20E-6], 'gapWidth': [10E-6], 'conductorThickness': [100E-9], 'resonatorType': 'quarter', 'conductorMaterial': 'Niobium Nitride', 'substrateMaterial': 'Silicon', 'temperature': [3], 'couplingCapacitance': [10E-15], 'loadBoundaryCondition': 'Short', 'mode': [1]} """ Explanation: Default values End of explanation """ minLen = 5E-3 maxLen = 20E-3 length = np.linspace(minLen, maxLen, 101) mCPW = CPWResonator(length = length, conductorWidth = default['conductorWidth'], gapWidth = default['gapWidth'], conductorThickness = default['conductorThickness'], resonatorType = default['resonatorType'], conductorMaterial = default['conductorMaterial'], substrateMaterial = default['substrateMaterial'], temperature = default['temperature'], couplingCapacitance = default['couplingCapacitance'], loadBoundaryCondition = default['loadBoundaryCondition'], mode = default['mode']) plt.figure(figsize=(7,5)) plt.gca().tick_params(direction='in') plt.plot(length*10**3, mCPW.uncoupledResonantFrequency() / 10**9, '-', color='tab:blue', lw=2) plt.xlabel('Length (mm)') plt.ylabel('Uncoupled resonant frequency, $f_0$ (GHz)') #plt.savefig('length-frequency.pdf') """ Explanation: Multi-value parameters Uncoupled fundamental resonant frequency as a function of resonator length End of explanation """ minTemp = 0 maxTemp = 15 temperature = np.linspace(minTemp, maxTemp,101) mCPW = CPWResonator(length = default['length'], conductorWidth = default['conductorWidth'], gapWidth = default['gapWidth'], conductorThickness = default['conductorThickness'], resonatorType = default['resonatorType'], conductorMaterial = default['conductorMaterial'], substrateMaterial = default['substrateMaterial'], temperature = temperature, couplingCapacitance = default['couplingCapacitance'], loadBoundaryCondition = default['loadBoundaryCondition'], mode = default['mode']) plt.figure(figsize=(5,3)) plt.gca().tick_params(direction='in') norm_freq = (mCPW.uncoupledResonantFrequency() / np.max(mCPW.uncoupledResonantFrequency())) abs_freq = mCPW.uncoupledResonantFrequency() / 10**9 plt.plot(temperature, norm_freq, '-', color='tab:blue', lw=2) plt.xlabel('Temperature, $T$ (K)') plt.ylabel('Normalised resonant frequency') #plt.savefig('temperature-frequency.pdf') """ Explanation: Uncoupled fundamental resonant frequency as a function of temperature End of explanation """ minCap = 0.1E-15 maxCap = 30E-15 couplingCapacitance = np.linspace(minCap, maxCap, 1001) mCPW = CPWResonator(length = default['length'], conductorWidth = default['conductorWidth'], gapWidth = default['gapWidth'], conductorThickness = default['conductorThickness'], resonatorType = default['resonatorType'], conductorMaterial = default['conductorMaterial'], substrateMaterial = default['substrateMaterial'], temperature = default['temperature'], couplingCapacitance = couplingCapacitance, loadBoundaryCondition = default['loadBoundaryCondition'], mode = default['mode']) plt.figure(figsize=(4,4)) plt.gca().tick_params(direction='in') plt.plot(couplingCapacitance* 10**15, mCPW.coupledResonantFrequency() / 10**9, color='tab:blue', lw=2) plt.xlabel('Coupling capacitance, 
$C_{\kappa}$ (fF)') plt.ylabel('Coupled resonant frequency, $f_n$ (GHz)') #plt.savefig('couplingCapacitance-frequency.pdf') """ Explanation: Coupled resonant frequency as a function of coupling capacitance End of explanation """ minCap = 0.1E-15 maxCap = 30E-15 couplingCapacitance = np.linspace(minCap, maxCap, 1001) mCPW = CPWResonator(length = default['length'], conductorWidth = default['conductorWidth'], gapWidth = default['gapWidth'], conductorThickness = default['conductorThickness'], resonatorType = default['resonatorType'], conductorMaterial = default['conductorMaterial'], substrateMaterial = default['substrateMaterial'], temperature = default['temperature'], couplingCapacitance = couplingCapacitance, loadBoundaryCondition = default['loadBoundaryCondition'], mode = default['mode']) fig, ax1 = plt.subplots(figsize=(5,3)) fig.gca().tick_params(direction='in') n = np.size(couplingCapacitance) internalQualityFactor = [mCPW.internalQualityFactor()]*n externalQualityFactor = mCPW.externalQualityFactor() ax2 = ax1.twinx() ax2.plot(couplingCapacitance* 10**15, internalQualityFactor, '--', label="$Q_{int}$", alpha=0.6, color='tab:green') ax2.plot(couplingCapacitance* 10**15, mCPW.externalQualityFactor(), '--', label="$Q_{ext}$", alpha=0.6, color='tab:green') ax1.plot(couplingCapacitance* 10**15, np.zeros_like(internalQualityFactor), '--', label="", alpha=0.6, color='tab:blue') #plt.legend() ax1.plot(couplingCapacitance* 10**15, mCPW.insertionLoss(), '-', label="Insertion loss", color='tab:blue') ax2.plot(couplingCapacitance* 10**15, mCPW.loadedQualityFactor(), '-', label="$Q_{L}$", color='tab:green') plt.yscale('log') plt.gca().tick_params(direction='in') plt.xscale('log') plt.gca().tick_params(direction='in') ax1.set_xlabel('Coupling capacitance, $C_{\kappa}$ (fF)') ax1.set_ylabel('Insertion loss (dB)', color='tab:blue') ax2.set_ylabel('Loaded quality factor, $Q_{L}$', color='tab:green') plt.xlim([0.1,30]) plt.ylim([0,10**6]) #plt.savefig('couplingCapacitance-insertionLoss-qualityFactor.pdf') """ Explanation: Loaded quality factor and insertion loss as a function of coupling capacitance End of explanation """ mCPW = CPWResonator(length = default['length'], conductorWidth = default['conductorWidth'], gapWidth = default['gapWidth'], conductorThickness = default['conductorThickness'], resonatorType = default['resonatorType'], conductorMaterial = default['conductorMaterial'], substrateMaterial = default['substrateMaterial'], temperature = default['temperature'], couplingCapacitance = default['couplingCapacitance'], loadBoundaryCondition = default['loadBoundaryCondition'], mode = [1,2,3,4,5]) plt.subplots(figsize=(6,3)) plt.gca().tick_params(direction='in') plt.plot(mCPW.uncoupledResonantFrequency()/10**9, mCPW.externalQualityFactor(method=0), 'bo', label="Main") #plt.plot(mCPW.uncoupledResonantFrequency()/10**9, mCPW.externalQualityFactor(method=1), 'yo', label="Approx") #plt.plot(mCPW.uncoupledResonantFrequency()/10**9, mCPW.externalQualityFactor(method=2), 'go', label="QW Ref") plt.xlabel('Coupled resonant frequency, $f_n$ (GHz)') plt.ylabel('External quality factor, $Q_{ext}$') #plt.legend() #plt.savefig('frequency-qualityFactor.pdf') """ Explanation: External quality factor as a function of harmonic mode End of explanation """
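As a rough cross-check on the numbers CPWResonator returns (this back-of-the-envelope estimate is an addition to the notebook, not part of it): a quarter-wave resonator has mode frequencies f_n = n c / (4 l sqrt(eps_eff)) with n = 1, 3, 5, ... Here eps_eff is approximated as (1 + eps_r)/2 with an assumed eps_r of 11.9 for silicon, which ignores the geometry, thickness and kinetic-inductance corrections the class applies, so only rough agreement should be expected.

import numpy as np

c = 299792458.0            # speed of light in vacuum, m/s
length = 7153e-6           # resonator length used above, m
eps_r = 11.9               # assumed relative permittivity of silicon
eps_eff = (1 + eps_r) / 2  # crude CPW effective permittivity

n = np.array([1, 3, 5])    # quarter-wave resonators support the odd modes
f_n = n * c / (4 * length * np.sqrt(eps_eff))
print(f_n / 1e9)           # roughly 4.1, 12.4, 20.6 GHz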
dirmeier/dataframe
examples/examples_for_data_frame.ipynb
gpl-3.0
from dataframe import DataFrame from dataframe import GroupedDataFrame """ Explanation: DataFrame tutorial This is a short tutorial with examples for the dataframe library. Creating a DataFrame object If we want to use dataframe, we first import the two central classes: End of explanation """ from sklearn import datasets import re iris_data = datasets.load_iris() """ Explanation: For demonstration purposes we also include some datasets (and regex for parsing): End of explanation """ features = [re.sub("\s|cm|\(|\)", "", x) for x in iris_data.feature_names] print(features) data = {features[i]: iris_data.data[:,i] for i in range(len(iris_data.data[1,:]))} """ Explanation: This will load all the data from sklearn. In particular we use the iris dataset, which goes back to Ronald Fisher I think. From the iris dataset, we take the feature names and covariables for each feature and put it into a dictionary. End of explanation """ data["target"] = iris_data.target """ Explanation: We also add the species of each sample: End of explanation """ frame = DataFrame(**data) """ Explanation: Now we can take the dictionary to create a DataFrame object by using: End of explanation """ frame_expl = DataFrame(sepallength=iris_data.data[:,0], sepalwidth=iris_data.data[:,1], petallength=iris_data.data[:,2], petalwidth=iris_data.data[:,3], target=iris_data.target) """ Explanation: Notice that we use the **kwargs syntax to give keyword arguments to the constructor. Alternatively you can just call the constructor like this: End of explanation """ print("Frame kwargs:") print(frame) print("Frame verbose:") print(frame_expl) """ Explanation: The results are the same, only that the second approach is more verbose and we have to enter the arguments manually. End of explanation """ sub_frame = frame.subset("target") print(sub_frame) """ Explanation: Note that upon instantiation the column names are sorted alphabetically. Using the DataFrame class Basically DataFrame has four nice features. We will use them one after another. Subsetting DataFrame columns subset lets you select some columns from the original DataFrame and returns a new DataFrame object: End of explanation """ from dataframe import Callable import numpy class Mean(Callable): def __call__(self, *args): vals = args[0].values return numpy.mean(vals) """ Explanation: Aggregating DataFrame columns aggregate takes one or multiple columns and computes an aggregation function. With the aggregated values a new DataFrame object is returned. Beware that your aggregation function returns a scalar, e.g. a float. First we need to write a class that extends Callable and that overwrites __call__. Some basic functions are already implemented. For the sake of illustration let's write a class that calculates the mean of a list: End of explanation """ print(frame) agg_frame = frame.aggregate(Mean, "mean", "petallength") print(agg_frame) """ Explanation: Now you can aggregate the frame like this: End of explanation """ print(len(frame["target"].values)) """ Explanation: Note that all other columns are discarded here, because the DataFrame cannot know what you want to do with them. Modifying DataFrame columns Similar to aggregate, we can modify several columns, too. To do that, we again have to write a class extending Callable. Beware that unlike in aggregation, modification requires to give a list of the same size as your original column length, i.e. your class has to return a list and not a scalar. 
For example: End of explanation """ import scipy.stats as sps class Zscore(Callable): def __call__(self, *args): vals = args[0].values return sps.zscore(vals).tolist() mod_frame = frame.modify(Zscore, "zscore", "petallength") print(mod_frame) """ Explanation: So if we call modify on a column in our frame the result has to be of length 150. As an example let's standardize the column pentallength. End of explanation """ grouped_frame = frame.group("target") print(grouped_frame) """ Explanation: I noticed that scipy calculates other values than when I standardize using R. Maybe you have the same issue. Grouping the DataFrame Using group creates a new object from your DataFrame that puts single rows into groups, creating a GroupedDataFrame object. End of explanation """ sub_grouped_frame = grouped_frame.subset("petallength", "target") print(sub_grouped_frame) """ Explanation: In the table to the top, we created several groups. Visually you can distinguish a DataFrame from a GroupedDataFrame by the dashes when printing. We'll discuss using the GroupedDataFrame class in the next section. Using the GroupedDataFrame class Basically GroupedDataFrame has the same features as DataFrame since both inherit from the same superclass ADataFrame. So the routines do the same things, only on every group and not on the whole DataFrame object. We start out with a plain DataFrame and work through all the important methods. Since it is the same methods as in DataFrame I just show some examples. Subsetting GroupedDataFrame columns End of explanation """ agg_grouped_frame = grouped_frame.aggregate(Mean, "mean", "petalwidth") print(agg_grouped_frame) """ Explanation: Aggregating GroupedDataFrame columns End of explanation """ mod_grouped_frame = grouped_frame.modify(Zscore, "zscore", "petallength") print(mod_grouped_frame) """ Explanation: Modifying GroupedDataFrame columns End of explanation """ twice_grouped_frame = grouped_frame.group("petallength") print(twice_grouped_frame) """ Explanation: Grouping GroupedDataFrame columns End of explanation """ print(frame) """ Explanation: Piping One of the many great features of the unix-commandline is method piping. For example bash grep -i "^daemon" /etc/passwd | sed 's/:/ /g' | cut -f1 -d' ' | tr -s 'dae' 'si' (This is rather inefficient, but for the sake of demostration it works). In order for python to support this, we overloaded the >> operator such that instead of calling python frame.method(*args) you can alternatively call a method like this now python method(frame, *args) This sofar only works for the four main methods for dataframes (subset, ...). In the following are a few examples. Using the pipe operator We start with the frame we initialized earlier: End of explanation """ from dataframe import group, modify, subset, aggregate obj = frame >> subset("target") print(obj) """ Explanation: >> is implemented for the four dataframe methods group, subset, aggregate and modify. Let's first just subset the frame. End of explanation """ obj = subset(frame, "target") print(obj) """ Explanation: Or you can directly put it into the method. End of explanation """ obj = frame >> \ group("target") >> \ aggregate(Mean, "m", "sepallength") print(obj) """ Explanation: Of course we can chain multiple times, too. 
Here we first group the data by the target column and the aggregate the groups using the mean: End of explanation """ obj = frame >> \ group("target") >> \ modify(Zscore, "zs", "petalwidth") print(obj) """ Explanation: Group the data again and then modify it by taking Z-scores: End of explanation """ obj = frame >> \ subset("target", "petalwidth") >> \ group("target") >> \ modify(Zscore, "zs", "petalwidth") >> \ aggregate(Mean, "m", "zs") print(obj) """ Explanation: Finally a last example using all the methods: End of explanation """
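As a further illustration of the same pattern (this snippet is an addition, not part of the original tutorial): any statistic can be plugged into a pipe by subclassing Callable exactly as Mean and Zscore were above, for example a median aggregator.

import numpy
from dataframe import Callable

class Median(Callable):
    def __call__(self, *args):
        vals = args[0].values
        return numpy.median(vals)

obj = frame >> \
      group("target") >> \
      aggregate(Median, "med", "sepalwidth")
print(obj)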
Adamage/python-training
Lesson_02_classes_object_oriented.ipynb
apache-2.0
class Example: a = 1 print type(Example) """ Explanation: Python Training - Lession 2 - classes in Object Oriented Programming In Python, pretty much every variable is an object, and therefore an instance of some class. But what is a class? A first, basic understanding of a class should be: A data structure with named variables and procedures. At this stage of programming, the simpler we keep things, the better. Let's see how we can define a class. Simple class definition End of explanation """ object_from_class = Example() print object_from_class """ Explanation: Creating objects - instances of a class End of explanation """ object_from_class.a """ Explanation: Accessing objects 'fields' End of explanation """ class ClassI: # Define instance variables in a special method, called a "constructor", that defines what happens when an object is created. def __init__(self): self.a = 1 self.b = 2 class ClassC: # Define class variables normally. They are here, whether you create an object or not. a = 3 b = 44 instance_of_ClassC = ClassC() print instance_of_ClassC.a, instance_of_ClassC.b print ClassC.a instance_of_ClassI = ClassI() print instance_of_ClassI.a, instance_of_ClassI.b # This will cause an error, because to access instance variables, you need an instance of class! print ClassI.a """ Explanation: What are class variables and instance variables? Class variables are variables attached to the definition of a class. Simply, they are just regular variable definitions inside a class Instance variables are variables created for each instance of a class. We denote them by adding "self." in front of them. Examples: End of explanation """ # Let's define some functions. def multiply(a,b): return a*b def count_letter_in_word(word, letter): track_letters = {} for character in word: if character in track_letters: track_letters[character] += 1 else: track_letters[character] = 1 if letter in track_letters: return track_letters[letter] else: return 0 # Let's define a class to store a model of data. # This time, we put more parameters for the constructor: name and age. This allows us to fill the object during the creation. class Person: def __init__(self, name, age): self.name = name self.age = age # Now let's use our code. adam = Person("Adam", 18) print count_letter_in_word(adam.name, "a") print multiply(adam.age, 10) """ Explanation: So what's up with that Object Oriented Programming? Loose definition It is a kind of methodology and a set of rules for programming. Loosely speaking, it means that we should split our data and functionalities into classes with methods (functions), to follow a specific set of principles. Some definitions. Class - a distinct set of variables and procedures, centered around one thing, to store data, and do operations on that data, to communicate, and other stuff. Field = attribute = a variable defined in a class Method = procedure - a set of instructions defined in a class Static method - a function defined in a class, but that does not actually require to create an object of that class! Self - when a method or field uses 'self', it means it targets the object with which they are associated - "this", "the object I am inside right now", "the object on which I was invoked" Type - to which class does an object correspond, of which class it is an instance of Inheritance, composition, relationships. You will often use words like "parent" or "child", when talking about classes. THe main reason they are used in this context, is to indicate the hierarchy of inheritance. 
But what is inheritance? Imagine now, you create a class, which fields are actually objects of other classes. This is composition. It means your objects HAVE other objects. We call this "has-a" relationship. Now imagine, you want to write classes representing various jobs in some company. So you write classes "Driver", "Recruiter", "Boss". Now you start to think what they would do, and quickly realise there are many things they share, for example, they can get a salary, can leave work, have a break, etc. The most simple thing would be to write procedures for those actions, separately in each class. But thanks to inheritance, you would need to write it only once, in a BASE CLASS named "Employee". THen, all the others would INHERIT from this base class, getting all those methods for free. You could say, that "Driver' is an "Employee", and so is "Recruiter". We call this "is-a" relationship. You can mix those relationships together, to reuse code whenever possible. A rule of thumb is to use inheritance only when it really is the best thing to do, and not overdo it. Excessive inheritance actually looses all advantages of inheritance, and causes lot's of troubles in big projects (it is hard to modify the hierarchy). Another rule of thumb is, that usually inheritance is really good for very similar things, for storing data, and sharing data and procedures when we have a big amount of classes. Polymorphism. In simple words, this means that you do not care from which class in hierarchy some method comes from. Even simpler, that you create your code, you do not worry if some object is an instance of the base class, it's children, or grandchildren, you should be able to use the same methods on each of them. Principles of OOP - SOLID The five basic principles describe how to best write classes. Take your time to learn them, and do not rush into advanced programming before understanding these principles. OOP is a paradigm. There are others, like "functional programming", with their own design patterns and principles. This tutorial's scope is "beginner friendly", so we will skip this for now, but come back to them as soon as you feel you can understand them. https://en.wikipedia.org/wiki/SOLID_(object-oriented_design) What does this mean in practice? To write programs, you will need to write code that is readable, powerful and easily modified - using modularity, reusability, algorithms. Python is a language that allows to use all kinds of programming, not only OOP, to suit best your goals. In practice, we will creates all kinds of Python files: - libraries of functions - file with a class definition - only to model data - file with a class definition - as a "library" with data AND tools that operate on them - file with the main program - our entry point into running what we wanted to do - file with test cases - to check if our program works correctly - ... From my perspective, design patterns and efficient, clear code is more important than sticking to one paradigm for no reason. For example, you do not need a class just for one method. You also do not need a class if all your methods are static, which means they do not need any "state", like an instance of an object that has a certain state during it's lifetime. Look at this code for example: End of explanation """
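To make the Employee example from the text concrete, here is one possible way to write it down. This code is an added illustration, not part of the original lesson, and it keeps the lesson's Python 2 print syntax. Driver and Recruiter inherit the shared behaviour from the Employee base class (the "is-a" relationship), and the loop at the end shows polymorphism: we call the shared methods without caring which child class each object belongs to.

class Employee(object):
    def __init__(self, name, salary):
        self.name = name
        self.salary = salary

    def get_salary(self):
        return self.salary

    def take_break(self, minutes):
        print self.name + " takes a " + str(minutes) + " minute break"

class Driver(Employee):
    def drive(self, destination):
        print self.name + " drives to " + destination

class Recruiter(Employee):
    def recruit(self, candidate):
        print self.name + " interviews candidate " + candidate

adam = Driver("Adam", 3000)
eve = Recruiter("Eve", 3500)
adam.drive("the warehouse")
eve.recruit("Bob")

# Polymorphism: both objects are Employees, so the shared methods just work.
for employee in [adam, eve]:
    print employee.get_salary()
    employee.take_break(15)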
gkvoelkl/ipython-turtle-widget
ipython-turtle-widget.ipynb
mit
from ipyturtle import Turtle t = Turtle() t """ Explanation: ipython-turtle-widget Creating Turtle Graphics in IPython/Jupyter Draw on the page or use an extra window. under construction If you like it, use it. If you have some suggestions, tell me ([email protected]). Install To install use pip: To install JupyterLab extension: For a development installation (requires npm), Examples The most examples are inspired by Harold Abelson, Andrea diSessa: Turtle Geometry, MIT Press 1986 Start End of explanation """ t = Turtle(fixed=False, width=100, height=100) t """ Explanation: The turtle is drawn on its own part of the screen. <img src="pic/screen.png" width="280" align="left"><br><br><br><br><br><br><br> Or integrate the turtle graphic into the page End of explanation """ t.right(90) t.heading() t.forward(150) t.left(45) t.back(100) t.left(45) t.penup() t.forward(100) """ Explanation: <img src="pic/start.png" align="left"><br><br><br><br><br> With width and height you can change the extension of the drawing canvas. First Steps End of explanation """ t.reset() #clear canvas and start again t.back(40) t.forward(100) t.position() def square(size): for i in range(4): t.forward(size) t.right(90) square(20) """ Explanation: Square End of explanation """ t.reset() def triangle(size): for i in range(3): t.forward(size) t.right(120) triangle(100) """ Explanation: Triangel End of explanation """ t.reset() def house(size): square(size) t.forward(size) t.right(30) triangle(size) t.back(100) house(100) """ Explanation: House End of explanation """ t = Turtle(fixed=False, width=120, height=120) def circle(): for i in range(360): t.forward(1) t.right(1) t """ Explanation: Circle End of explanation """ circle() """ Explanation: <img src="pic/circle.png" align="left"><br><br><br><br><br> End of explanation """ t.reset() def poly(side, angle): turn = 0 while turn == 0 or turn % 360 != 0: t.forward(side) t.right(angle) turn += angle poly(44,135) t.reset() """ Explanation: Poly End of explanation """ t.pencolor() """ Explanation: Color Return the current pen color as RGB tuple or web color name End of explanation """ t.pencolor('Green') """ Explanation: Set pen color as web color name End of explanation """ t.pencolor(255,0,0) t.forward(40) t.right(120) t.pencolor('Blue') t.forward(40) """ Explanation: Set pen color with RGB value End of explanation """ def lbranch(length, angle, level): t.pencolor('Green') t.forward(2*length) node(length, angle, level) t.back(2*length) def rbranch(length, angle, level): t.pencolor('Brown') t.forward(length) node(length, angle, level) t.back(length) def node(length, angle, level): if level==0: return t.left(angle) lbranch(length, angle, level-1) t.right(2*angle) rbranch(length, angle, level-1) t.left(angle) t.reset() node(8,24,7) """ Explanation: Branch End of explanation """ def nested_triangle(size): if size < 10: return for i in range(3): nested_triangle(size/2) t.forward(size) t.right(120) t.reset() nested_triangle(100) """ Explanation: Nested Triangle End of explanation """ def snowflake(size, level): for i in range(3): side(size, level) t.right(120) def side(size, level): if level == 0: t.forward(size) return side(size/3, level - 1) t.left(60) side(size/3, level - 1) t.right(120) side(size/3, level - 1) t.left(60) side(size/3, level - 1) t.reset() snowflake(100,4) """ Explanation: Snowflake End of explanation """ t.reset() sideLength = 40 for square in range(5): for side in range(4): t.forward(sideLength) t.left(90) sideLength += 10 """ Explanation: Nested squares End of 
explanation """
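One more figure in the same spirit, added here as an extra example (it is not in the original notebook) and using only the calls already shown above: a five-pointed star, which closes because five 144 degree turns add up to 720 degrees.

t.reset()
for i in range(5):
    t.forward(120)
    t.right(144)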
Olsthoorn/TransientGroundwaterFlow
Syllabus_in_notebooks/Sec6_3_13_Alexandria_Egypte_desert_infiltration.ipynb
gpl-3.0
import numpy as np from scipy.special import exp1 # Theis well function import matplotlib.pyplot as plt """ Explanation: Section 6.3.13 The effect of irrigating the desert (south of Alexandria) IHE, Delft, transient groundwater @T.N.Olsthoorn, 2019-01-04 Context Effect of a rising lake on adjacent groundwater heads South-west of Alexandria part of the Sahara desert has been reclaimed by irrigating with river Nile water since the 1970s. This area is about 30x30 km and the recharge is about 1.5 m/year, half of which is evaporated by the crops and half is recharged, i.e. largely lost for crop production (it may become too saline in the subsurface). The Sahara aquifer is about 200 m thick and unconfined. The initial water table was at 30 below ground surface. We can simulate this situation by means of Theis wells placed in a regular grid to simulate the area-wide irrigation and then superimpose the result to see what happens at arbitrary locations over time. Of course, we may also make influence maps by computing the results for a grid of points and then contouring the results. This would just more computation time, but is not necesarily more complicated. Let the center of the irrigated area coincide with our coordinate system. Loading modules End of explanation """ def newfig(title='?', xlabel='?', ylabel='?', xlim=None, ylim=None, xscale='linear', yscale='linear', size_inches=(14, 8)): '''Setup a new axis for plotting''' fig, ax = plt.subplots() fig.set_size_inches(size_inches) ax.set_title(title) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xscale(xscale) ax.set_yscale(yscale) if xlim is not None: ax.set_xlim(xlim) if ylim is not None: ax.set_ylim(ylim) ax.grid(True) return ax """ Explanation: Convenience funtion for setting up a graph End of explanation """ # aquifer properties, because it make more sense here, we'll work in m and years instead of m and days kD = 200 * 25 * (365) # m2/y (not m2/d) S = 0.25 # [-], storage coefficient, does not change if we work in years or days # place the wells, which are on a regular 1000x1000 grid to simulate the irrigation. # The width and length of the irrigated area are both 30 km. a = 1000. # m cell size L = 30e3 # km xw = np.linspace(-(L - a)/2, (L - a)/2, L/a) # m, coordinatres of the cell centers = well yw = np.linspace(-(L - a)/2, (L - a)/2, L/a) # m, coordinaters of the cell centers = well # Genrate full 2D arrays of the x and y coordinates of the wells. Becaus these are 2D arrays, # I chose to use capital letters X and Y with a small w to show these are the irrigation well-coordinates Xw, Yw = np.meshgrid(xw, yw) # # Show the wells ax = newfig('Location of the wells and the grid', 'x [m]', 'y [m]', xlim=(-(L + a) / 2 , (L + a) / 2), ylim=(-(L + a) / 2, (L + a) / 2)) ax.grid(False) ax.plot(Xw, Yw, 'r.') plt.show() # See what happens if you omit plt.show() # Recharge is irrigation - vaportranspiration is the loss of irrigation wat to the deep groundwater # in the aquifer q = 0.25 # m/y, the yearly recharge Q = a**2 * q # irrigation per well in m3/y (with so many wells this is a lot of water) # Observation points, where we want to see what happens to the groundwater # We jus take a few points on the y-axis, some inside the irrigated area some outside of it Y0 = np.array([0, 10, 20, 40, 60]) * 1e3 # note the 1e3 factor which converts km to m X0 = np.zeros_like(Y0) # only one valuenp.zeros_like(Y0) # same # effect on observation point t = np.linspace(0, 50, 51)[1:] # time in years, omit t[0] which is 0, to prevend division by zero. 
F = Q / (4 * np.pi * kD) # need to compute this only once result = dict() # Use a dict to store the results (for convenience of plotting later) for i, (x0, y0) in enumerate(zip(X0, Y0)): R = np.sqrt((x0 - Xw)**2 + (y0 - Yw)**2).ravel()[:, np.newaxis] # shape is (900, 1) T = R**2 * S / (4 * kD) # shape is (900, 1) # Compute the head change and add it to the dict together with its context (x0, y0 and index) # Shape of t[np.newaxis,:] will be (1, 51) so the T/t has shape (900, 51) (about 50000 points) # But because we sum over all R, (axis=0) we end up with 51 values. The summing over R is # the superposition over all 900 wells that contribute. result[i] = {'x': x0, 'y': y0, 's': np.sum(F * exp1(T / t[np.newaxis,:]), axis=0)} # Show results ax = newfig('Rise in water table due to irrigation losses in Sahara', 't [years]', '$\Delta h$ [m]', xscale='linear') for k in result: ax.plot(t, result[k]['s'], label='y = {:.0f} km'.format(result[k]['y']/1000)) ax.legend() plt.show() # The increase of head after 50 years for k in result: data = result[k] data['rise'] = (data['s'][-1] - data['s'][-2]) / (t[-1] - t[-2]) print(f"Point {k} at y={data['y']:5.0f} m rises {data['rise']:.3f} m/y at t={t[-1]:.1f} y") """ Explanation: Implementation End of explanation """ # Grid coordinates x0 = np.linspace(-50e3, 50e3, 10) # Grid line coordinates y0 = np.linspace(-50e3, 50e3, 10) # Grid line coordinates X0, Y0 = np.meshgrid(x0, y0) # Full grid of coordinates allow contouring # Contouring the situation at t = 50 years ax = newfig('Rise in water table due to irrigation losses in Sahara at {t:.0f} year', 'x [m]', 'y [m]') t = 50 # years s = np.zeros_like(X0) for xwi in Xw.ravel(): for ywi in Yw.ravel(): R = np.sqrt((X0 - xwi)**2 + (Y0 - ywi)**2) s += Q/(4 * np.pi * kD) * exp1(R**2 * S/ (4 * kD * t)) ax.contour(X0, Y0, s, levels=30) """ Explanation: Contour the head built up over 50 years irrigation It is possible to plot contour lines. However this requires a lot of computation power, because we have 900 wells that are superposed in each observation point and to cover an area of say 50 x 50 km with a grid of 250 m x 250 m, we have 40000 observation points. So about 1000 wells times 40000 observation points combines arrays of 60 Mb each, and 60 million times the exp1 function has to be computed which internally is also a summation. So it can be done, but it may take many hours to finish. To speed things up, one may choose far less wells and far less observation points Instead of all thus summation over all these wells, one could look foan analytical solution for a constant recharge on a rectangular or circular area. These do exist, see Bruggeman(1999), who gives one for a circular irrigation area. However, that one requires the integration over the product of two Bessel functions. This is not really difficult, but needlessly complex, more so, because a numercal model with a radial network would do the job most easily in this case. The contouring is done below, but the number of wells and observation points was drastically reduced. Still the computation takes quite some time, so you may simply want to skip it. End of explanation """
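A note on speed (this sketch is an addition to the notebook): the double Python loop in the contouring cell can be replaced by a single broadcast over all well/grid-point pairs, which makes the full superposition cheap for grids of this size. Memory grows with n_points times n_wells, so very fine grids may still need chunking.

# Vectorised superposition over all wells at once (same variables as above).
XYg = np.column_stack((X0.ravel(), Y0.ravel()))   # grid points, shape (n_points, 2)
XYw = np.column_stack((Xw.ravel(), Yw.ravel()))   # wells, shape (n_wells, 2)

# Squared distances between every grid point and every well, shape (n_points, n_wells)
R2 = ((XYg[:, np.newaxis, :] - XYw[np.newaxis, :, :]) ** 2).sum(axis=2)

t = 50.  # years, as in the contour cell above
s = Q / (4 * np.pi * kD) * exp1(R2 * S / (4 * kD * t)).sum(axis=1)
s = s.reshape(X0.shape)

ax = newfig('Rise of water table after {:.0f} years (vectorised)'.format(t), 'x [m]', 'y [m]')
ax.contour(X0, Y0, s, levels=30)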
UCBerkeleySETI/breakthrough
GBT/pulsar_searches/Pulsar_Search/Pulsar_DedisperseV3.ipynb
gpl-3.0
!pip install blimpy # Pulsar data !wget http://blpd13.ssl.berkeley.edu/borisov/AGBT19B_999_124/spliced_blc40414243444546o7o0515253545556o7o061626364656667_guppi_58837_86186_PSR_B0355+54_0013.gpuspec.8.0001.fil # For more info on pulsar searches check out this deck # http://ipta.phys.wvu.edu/files/student-week-2017/IPTA2017_KuoLiu_pulsartiming.pdf """ Explanation: Pulsar Folding and Searching By Peter Ma Pulsar searching is a very compute-intensive task. Searching for repeating signals within noisy data is difficult because pulses tend to have a low signal to noise ratio. Our goal is to process, clean, identify potential periods, and fold the pulses to increase the SNR. This notebook demonstrates the algorithms used for searching regular pulses within radio spectrograms. Keep in mind, there are faster algorithms used in state-of-the-art pulsar search pipelines [ex. Tree dedispersion]. This notebook implements the simplest pulsar searching technique. First, we start with downloading the data and BLIMPY which is an I/O tool developed to interface with the radio data. Note: Run this notebook in COLAB as some operations are resource-intensive. For example, downloading and loading +5GB files into RAM. This notebook is not optimized. End of explanation """ from blimpy import Waterfall import pylab as plt import numpy as np import math from scipy import stats, interpolate from copy import deepcopy %matplotlib inline obs = Waterfall('/content/spliced_blc40414243444546o7o0515253545556o7o061626364656667_guppi_58837_86186_PSR_B0355+54_0013.gpuspec.8.0001.fil', t_start=0,t_stop= 80000,max_load=10) obs.info() # Loads data into numpy array data = obs.data data.shape coarse_channel_width = np.int(np.round(187.5/64/abs(obs.header['foff']))) # Here we plot the integrated signal over time. obs.plot_spectrum() fig = plt.figure(figsize=(10,8)) plt.title('Spectrogram With Bandpass') plt.xlabel("Fchans") plt.ylabel("Time") plt.imshow(data[:3000,0,1500:3000], aspect='auto') plt.colorbar() """ Explanation: Loading Data First, we load the data. NOTE, targets with the starting name of PSR are radio scans of known pulsars PSR_B0355+54_0013. But, files with HIP65960 cataloged targets that shouldn't have pulsar characteristics. If you wish to learn more about the data check out https://ui.adsabs.harvard.edu/abs/2019PASP..131l4505L/abstract The header information gives vital information about the observational setup of the telescope. For example, the coarse channel width or the observation time and duration, etc. 
End of explanation """ average_power = np.zeros((data.shape[2])) shifted_power = np.zeros((int(data.shape[2]/8))) x=[] spl_order = 2 print("Fitting Spline") data_adjust = np.zeros(data.shape) average_power = data.mean(axis=0) # Note the value 8 is the COARSE CHANNEL WIDTH # We adjust each coarse channel to correct the bandpass artifacts for i in range(0, data.shape[2], 8): average_channel = average_power[0,i:i+8] x = np.arange(0,coarse_channel_width,1) knots = np.arange(0, coarse_channel_width, coarse_channel_width//spl_order+1) tck = interpolate.splrep(x, average_channel, s=knots[1:]) xnew = np.arange(0, coarse_channel_width,1) ynew = interpolate.splev(xnew, tck, der=0) data_adjust[:,0,i:i+8] = data[:,0,i:i+8] - ynew plt.figure() plt.plot( data_adjust.mean(axis=0)[0,:]) plt.title('Spline Fit - adjusted') plt.xlabel("Fchans") plt.ylabel("Power") fig = plt.figure(figsize=(10,8)) plt.title('After bandpass correction') plt.imshow(data_adjust[:3000,0,:], aspect='auto') plt.colorbar() """ Explanation: Band Pass Removal The goal of this process is to clean the data of its artifacts created by combining multiple bands. Our data is created by taking sliding windows of the raw voltage data and computing an FFT of that sliding window. With these FFTs (each containing frequency information about a timestamp) for each coarse channel, we use a bandpass filter to cut off frequencies that don’t belong to that coarse channel’s frequency range. But we can’t achieve a perfect cut, and that’s why there's a falling off at the edges. They’re called band-pass because they only allow signals in a particular frequency range, called a band, to pass-through. When we assemble the products we see these dips in the spectrogram. In other words - they aren't real signals. To remove the bandpass features, we use spline lines to fit each channel to get a model of the bandpass of that channel. By using splines, we can fit the bandpass without fitting the more significant signals. If you want more details on this check out https://github.com/FX196/SETI-Energy-Detection for a detailed explanation. 
End of explanation """ def delay_from_DM(DM, freq_emitted): if (type(freq_emitted) == type(0.0)): if (freq_emitted > 0.0): return DM / (0.000241 * freq_emitted * freq_emitted) else: return 0.0 else: return Num.where(freq_emitted > 0.0, DM / (0.000241 * freq_emitted * freq_emitted), 0.0) def de_disperse(data,DM,fchan,width,tsamp): clean = deepcopy(data) for i in range(clean.shape[1]): end = clean.shape[0] freq_emitted = i*width+ fchan time = int((delay_from_DM(DM, freq_emitted))/tsamp) if time!=0 and time<clean.shape[0]: # zero_block = np.zeros((time)) zero_block = clean[:time,i] shift_block = clean[:end-time,i] clean[time:end,i]= shift_block clean[:time,i]= zero_block elif time!=0: clean[:,i]= np.zeros(clean[:,i].shape) return clean def DM_can(data, data_base, sens, DM_base, candidates, fchan,width,tsamp ): snrs = np.zeros((candidates,2)) for i in range(candidates): DM = DM_base+sens*i data = de_disperse(data, DM, fchan,width,tsamp) time_series = data.sum(axis=1) snrs[i,1] = SNR(time_series) snrs[i,0] =DM if int((delay_from_DM(DM, fchan))/tsamp)+1 > data.shape[0]: break if i %1==0: print("Candidate "+str(i)+"\t SNR: "+str(round(snrs[i,1],4)) + "\t Largest Time Delay: "+str(round(delay_from_DM(DM, fchan), 6))+' seconds'+"\t DM val:"+ str(DM)+"pc/cm^3") data = data_base return snrs # Functions to determine SNR and TOP candidates def SNR(arr): index = np.argmax(arr) average_noise = abs(arr.mean(axis=0)) return math.log(arr[index]/average_noise) def top(arr, top = 10): candidate = [] # Delete the first and second element fourier transform arr[0]=0 arr[1]=0 for i in range(top): # We add 1 as the 0th index = period of 1 not 0 index = np.argmax(arr) candidate.append(index+1) arr[index]=0 return candidate """ Explanation: Dedispersion When pulses reach Earth they reach the observer at different times due to dispersion. This dispersion is the result of the interstellar medium causing time delays. This creates a "swooping curve" on the radio spectrogram instead of plane waves. If we are going to fold the pulses to increase the SNR then we're making the assumption that the pulses arrive at the same time. Thus we need to correct the dispersion by shifting each channel down a certain time delay relative to its frequency channel. We index a frequency column in the spectrogram. Then we split it between a time delay and original data and swap the positions. However, the problem is, we don't know the dispersion measure DM of the signal. The DM is the path integral of the signal through the interstellar medium with an electron density measure of. $$DM =\int_0^d n_e dl$$ What we do is we brute force the DM by executing multiple trials DMs and we take the highest SNR created by the dedispersion with the given trial DM. 
End of explanation """ small_data = data_adjust[:,0,:] data_base = data_adjust[:,0,:] sens =0.05 DM_base = 6.4 candidates = 50 fchan = obs.header['fch1'] width = obs.header['foff'] tsamp = obs.header['tsamp'] fchan = fchan+ width*small_data.shape[1] snrs = DM_can(small_data, data_base, sens, DM_base, candidates, fchan, abs(width),tsamp) plt.plot(snrs[:,0], snrs[:,1]) plt.title('DM values vs SNR') plt.xlabel("DM values") plt.ylabel("SNR of Dedispersion") DM = snrs[np.argmax(snrs[:,1]),0] print(DM) fchan = fchan+ width*small_data.shape[1] data_adjust[:,0,:] = de_disperse(data_adjust[:,0,:], DM, fchan,abs(width),tsamp) fig = plt.figure(figsize=(10, 8)) plt.imshow(data_adjust[:,0,:], aspect='auto') plt.title('De-dispersed Data') plt.xlabel("Fchans") plt.ylabel("Time") plt.colorbar() plt.show() """ Explanation: Dedispersion Trials The computer now checks multiple DM values and adjust each frequency channel where it records its SNR. We increment the trial DM by a tunable parameter sens. After the trials, we take the largest SNR created by adjusting the time delays. We use that data to perform the FFT's and record the folded profiles. End of explanation """ # Preforming the fourier transform. %matplotlib inline import scipy.fftpack from scipy.fft import fft N = 1000 T = 1.0 / 800.0 x = np.linspace(0.0, N*T, N) y = abs(data_adjust[:,0,:].mean(axis=1)) yf = fft(y) xf = np.linspace(0.0, 1.0/(2.0*T), N//2) # Magintude of the fourier transform # Between 0.00035 and 3.5 seconds mag = np.abs(yf[:60000]) candidates = top(mag, top=15) plt.plot(2.0/N * mag[1:]) plt.grid() plt.title('Fourier Transform of Signal') plt.xlabel("Periods") plt.ylabel("Magnitude of Fourier Transform") plt.show() print("Signal To Noise Ratio for the Fourier Transform is: "+str(SNR(mag))) print("Most likely Candidates are: "+str(candidates)) """ Explanation: Detecting Pulses - Fourier Transforms and Folding Next, we apply the discrete Fourier transform on the data to detect periodic pulses. To do so, we look for the greatest magnitude of the Fourier transform. This indicates potential periods within the data. Then we check for consistency by folding the data by the period which the Fourier transform indicates. The folding algorithm is simple. You take each period and you fold the signals on top of itself. If the period you guessed matches the true period then by the law of superposition it will increase the SNR. This spike in signal to noise ratio appears in the following graph. This algorithm is the following equation. End of explanation """ # Lets take an example of such a period! # The 0th candidate is the top ranked candidate by the FFT period = 895 fold = np.zeros((period, data.shape[2])) multiples = int(data.data.shape[0]/period) results = np.zeros((period)) for i in range(multiples-1): fold[:,:]=data_adjust[i*period:(i+1)*period,0,:]+ fold results = fold.mean(axis=1) results = results - results.min() results = results / results.max() print(SNR(results)) plt.plot(results) plt.title('Folded Signal Profile With Period: '+str(round(period*0.000349,5))) plt.xlabel("Time (Multiples of 0.00035s)") plt.ylabel("Normalized Integrated Signal") # Lets take an example of such a period! 
# The 0th candidate is the top ranked candidate by the FFT can_snr =[] for k in range(len(candidates)): period = candidates[k] fold = np.zeros((period, data.shape[2])) multiples = int(data.data.shape[0]/period) results = np.zeros((period)) for i in range(multiples-1): fold[:,:]=data[i*period:(i+1)*period,0,:]+ fold results = fold.mean(axis=1) results = results - results.min() results = results / results.max() can_snr.append(SNR(results)) # print(SNR(results)) print("Max SNR of Fold Candidates: "+ str(max(can_snr))) # Generates multiple images saved to create a GIF from scipy import stats data = data period = candidates[0] fold = np.zeros((period, data.shape[2])) multiples = int(data.data.shape[0]/period) results = np.zeros((period)) for i in range(multiples-1): fold[:,:]=data[i*period:(i+1)*period,0,:]+ fold results = fold.mean(axis=1) results = results - results.min() results = results / results.max() # Generates multiple frames of the graph as it folds! plt.plot(results) plt.title('Folded Signal Period '+str(period*0.000349)+" seconds| Fold Iteration: "+str(i)) plt.xlabel("Time (Multiples of 0.00035s)") plt.ylabel("Normalized Integrated Signal") plt.savefig('/content/drive/My Drive/Deeplearning/Pulsars/output/candidates/CAN_3/multi_chan_'+str(period)+'_'+str(i)+'.png') plt.close() results = fold.mean(axis=1) results = results - results.min() results = results / results.max() print("The Signal To Noise of the Fold is: "+str(SNR(results))) plt.plot(results) """ Explanation: Folding Algorithm The idea of the folding algorithm is to see if the signal forms a consistent profile as you fold/integrate the values together. If the profile appears consistent/stable then you're looking at an accurate reading of the pulsar's period. This confirms the implications drawn from the Fourier transform. This is profiling the pulsar. When folding the pulses it forms a "fingerprint" of the pulsar. These folds are unique to the pulsar detected. $$s_j = \sum^{N/P-1}{K=0} D{j+kP} $$ We are suming over the regular intervals of period P. This is implemented below. 
End of explanation """ !wget http://blpd13.ssl.berkeley.edu/dl/GBT_58402_66282_HIP65960_time.h5 from blimpy import Waterfall import pylab as plt import numpy as np import math from scipy import stats, interpolate %matplotlib inline obs = Waterfall('/content/GBT_58402_66282_HIP65960_time.h5', f_start=0,f_stop= 361408,max_load=5) obs.info() # Loads data into numpy array data = obs.data coarse_channel_width = np.int(np.round(187.5/64/abs(obs.header['foff']))) obs.plot_spectrum() average_power = np.zeros((data.shape[2])) shifted_power = np.zeros((int(data.shape[2]/8))) x=[] spl_order = 2 print("Fitting Spline") data_adjust = np.zeros(data.shape) average_power = data.mean(axis=0) # Note the value 8 is the COARSE CHANNEL WIDTH # We adjust each coarse channel to correct the bandpass artifacts for i in range(0, data.shape[2], coarse_channel_width): average_channel = average_power[0,i:i+coarse_channel_width] x = np.arange(0,coarse_channel_width,1) knots = np.arange(0, coarse_channel_width, coarse_channel_width//spl_order+1) tck = interpolate.splrep(x, average_channel, s=knots[1:]) xnew = np.arange(0, coarse_channel_width,1) ynew = interpolate.splev(xnew, tck, der=0) data_adjust[:,0,i:i+coarse_channel_width] = data[:,0,i:i+coarse_channel_width] - ynew from copy import deepcopy small_data = data[:,0,:] data_base = data[:,0,:] sens =0.05 DM_base = 6.4 candidates = 50 fchan = obs.header['fch1'] width = obs.header['foff'] tsamp = obs.header['tsamp'] # fchan = fchan+ width*small_data.shape[1] fchan = 7501.28173828125 snrs = DM_can(small_data, data_base, sens, DM_base, candidates, fchan, abs(width),tsamp) plt.plot(snrs[:,0], snrs[:,1]) plt.title('DM values vs SNR') plt.xlabel("DM values") plt.ylabel("SNR of Dedispersion") DM = snrs[np.argmax(snrs[:,1]),0] print(DM) fchan = fchan+ width*small_data.shape[1] data_adjust[:,0,:] = de_disperse(data_adjust[:,0,:], DM, fchan,abs(width),tsamp) # Preforming the fourier transform. %matplotlib inline import scipy.fftpack from scipy.fft import fft N = 60000 T = 1.0 / 800.0 x = np.linspace(0.0, N*T, N) y = data[:,0,:].mean(axis=1) yf = fft(y) xf = np.linspace(0.0, 1.0/(2.0*T), N//2) # Magintude of the fourier transform # Between 0.00035 and 3.5 seconds # We set this to a limit of 200 because # The total tchan is only 279 mag = np.abs(yf[:200]) candidates = top(mag, top=15) plt.plot(2.0/N * mag[1:]) plt.grid() plt.title('Fourier Transform of Signal') plt.xlabel("Periods") plt.ylabel("Magnitude of Fourier Transform") plt.show() print("Signal To Noise Ratio for the Fourier Transform is: "+str(SNR(mag))) print("Most likely Candidates are: "+str(candidates)) """ Explanation: What Happens If The Data Doesn't Contain Pulses? Below we will show you that this algorithm detects pulses and excludes targets that do not include this feature. We will do so by loading a target that isn't known to be a pulsar. HIP65960 is a target that doesn't contain repeating signals. Below we will repeat and apply the same algorithm but on a target that isn't a pulsar. We won't reiterate the explanations again. End of explanation """ # Lets take an example of such a period! 
# The 0th candidate is the top ranked candidate by the FFT
can_snr =[]
for k in range(len(candidates)):
    period = candidates[k]
    fold = np.zeros((period, data.shape[2]))
    multiples = int(data.data.shape[0]/period)
    results = np.zeros((period))
    for i in range(multiples-1):
        fold[:,:] = data[i*period:(i+1)*period,0,:] + fold
    results = fold.mean(axis=1)
    results = results - results.min()
    results = results / results.max()
    can_snr.append(SNR(results))
print("Max SNR of Fold Candidates: "+ str(max(can_snr)))
"""
Explanation: NOTICE
Notice how the signal-to-noise ratio is much smaller here: it is roughly two orders of magnitude (100x) lower than for the original pulsar fold. A fold with an SNR near 1 is typically not considered a signal of interest, since it is most likely just noise.
End of explanation
"""
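# A small follow-up sketch (not part of the original pipeline): turning the best fold SNR
# into a crude pulsar / non-pulsar decision. The cutoff of 5 is an assumed, purely
# illustrative value; real searches tune this threshold against their false-alarm rate.
SNR_THRESHOLD = 5.0  # assumed illustrative cutoff
best_snr = max(can_snr)
if best_snr >= SNR_THRESHOLD:
    print("Best fold SNR {:.2f} >= {:.1f}: candidate worth inspecting".format(best_snr, SNR_THRESHOLD))
else:
    print("Best fold SNR {:.2f} < {:.1f}: consistent with noise".format(best_snr, SNR_THRESHOLD))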
gdhungana/desispec
doc/nb/QA_Prod.ipynb
bsd-3-clause
%matplotlib notebook # imports from desispec.qa import qa_prod as dqap """ Explanation: QA_Prod (v1.1) End of explanation """ specprod_dir = '/Users/xavier/DESI/DESI_SCRATCH/redux/madrone/' reload(dqap) qa_prod = dqap.QA_Prod(specprod_dir) """ Explanation: Init setenv DESI_SPECTRO_DATA /Users/xavier/DESI/DESI_SCRATCH/sim/madrone/ End of explanation """ qa_prod.remake_frame_qa(remake_plots=True) """ Explanation: Remake In Python End of explanation """ qa_prod.slurp(remove=False) """ Explanation: Script desi_qa_prod --specprod_dir /Users/xavier/DESI/DESI_SCRATCH/redux/madrone --remake_frame 3 Slurp In Python End of explanation """ from desispec.qa import qa_prod as dqap specprod_dir = '/Users/xavier/DESI/DESI_SCRATCH/redux/madrone/' reload(dqap) qa_prod = dqap.QA_Prod(specprod_dir) """ Explanation: Script desi_qa_prod --specprod_dir /Users/xavier/DESI/DESI_SCRATCH/redux/madrone/ --slurp --make_frameqa=1 Simple Figs End of explanation """ qa_prod.load_data() qa_prod.data['20160607'][6]['b0']#['SKYSUB']['QA'] """ Explanation: Load QA End of explanation """ sky_resid_b, ne_dict = qa_prod.get_qa_array('SKYSUB', 'MED_RESID', channels=['b']) sky_resid_b ne_dict """ Explanation: Grab an array of QA values End of explanation """ plt.clf() ax = plt.gca() ax.hist(sky_resid_b) ax.set_xlim(-1,1) ax.set_xlabel('MED_SKY_RESID') """ Explanation: Histogram End of explanation """ from desispec.qa import qa_plots as dqqp reload(dqqp) dqqp.prod_channel_hist(qa_prod, 'SKYSUB', 'MED_RESID', xlim=(-1,1)) """ Explanation: Camera Histograms End of explanation """
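# A small follow-on sketch (not from the original notebook): summarizing the grabbed QA
# array numerically instead of plotting it. Only numpy is assumed; sky_resid_b is the
# array returned by qa_prod.get_qa_array above, and the 0.1 cut is an illustrative value.
import numpy as np
resid = np.asarray(sky_resid_b)
print('Number of frames:           {:d}'.format(resid.size))
print('Median MED_RESID:           {:.4f}'.format(np.median(resid)))
print('Std dev of MED_RESID:       {:.4f}'.format(np.std(resid)))
print('Fraction |MED_RESID| > 0.1: {:.2f}'.format(np.mean(np.abs(resid) > 0.1)))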
mne-tools/mne-tools.github.io
stable/_downloads/b6ccbb801939862ed915d2c7295ac245/sensor_permutation_test.ipynb
bsd-3-clause
# Authors: Alexandre Gramfort <[email protected]> # # License: BSD-3-Clause import numpy as np import mne from mne import io from mne.stats import permutation_t_test from mne.datasets import sample print(__doc__) """ Explanation: Permutation T-test on sensor data One tests if the signal significantly deviates from 0 during a fixed time window of interest. Here computation is performed on MNE sample dataset between 40 and 60 ms. End of explanation """ data_path = sample.data_path() meg_path = data_path / 'MEG' / 'sample' raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' event_id = 1 tmin = -0.2 tmax = 0.5 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) # pick MEG Gradiometers picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, exclude='bads') epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6)) data = epochs.get_data() times = epochs.times temporal_mask = np.logical_and(0.04 <= times, times <= 0.06) data = np.mean(data[:, :, temporal_mask], axis=2) n_permutations = 50000 T0, p_values, H0 = permutation_t_test(data, n_permutations, n_jobs=1) significant_sensors = picks[p_values <= 0.05] significant_sensors_names = [raw.ch_names[k] for k in significant_sensors] print("Number of significant sensors : %d" % len(significant_sensors)) print("Sensors names : %s" % significant_sensors_names) """ Explanation: Set parameters End of explanation """ evoked = mne.EvokedArray(-np.log10(p_values)[:, np.newaxis], epochs.info, tmin=0.) # Extract mask and indices of active sensors in the layout stats_picks = mne.pick_channels(evoked.ch_names, significant_sensors_names) mask = p_values[:, np.newaxis] <= 0.05 evoked.plot_topomap(ch_type='grad', times=[0], scalings=1, time_format=None, cmap='Reds', vmin=0., vmax=np.max, units='-log10(p)', cbar_fmt='-%0.1f', mask=mask, size=3, show_names=lambda x: x[4:] + ' ' * 20, time_unit='s') """ Explanation: View location of significantly active sensors End of explanation """
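# A self-contained sketch (not part of the original example) showing the same test on
# synthetic data, to make the inputs and outputs of permutation_t_test concrete. X has
# shape (n_observations, n_tests); an offset is injected into the first 5 "sensors" so
# they should come out significant. All numbers here are arbitrary illustrative choices.
import numpy as np
from mne.stats import permutation_t_test
rng = np.random.RandomState(0)
X = rng.randn(30, 20)   # 30 epochs x 20 sensors of pure noise
X[:, :5] += 0.8         # inject an effect into the first 5 sensors
T_obs, p_vals, H0_max = permutation_t_test(X, n_permutations=1000, n_jobs=1)
print("Significant sensors (p <= 0.05):", np.where(p_vals <= 0.05)[0])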
gururajl/deep-learning
intro-to-tflearn/TFLearn_Digit_Recognition.ipynb
mit
# Import Numpy, TensorFlow, TFLearn, and MNIST data import numpy as np import tensorflow as tf import tflearn import tflearn.datasets.mnist as mnist """ Explanation: Handwritten Number Recognition with TFLearn and MNIST In this notebook, we'll be building a neural network that recognizes handwritten numbers 0-9. This kind of neural network is used in a variety of real-world applications including: recognizing phone numbers and sorting postal mail by address. To build the network, we'll be using the MNIST data set, which consists of images of handwritten numbers and their correct labels 0-9. We'll be using TFLearn, a high-level library built on top of TensorFlow to build the neural network. We'll start off by importing all the modules we'll need, then load the data, and finally build the network. End of explanation """ # Retrieve the training and test data trainX, trainY, testX, testY = mnist.load_data(one_hot=True) """ Explanation: Retrieving training and test data The MNIST data set already contains both training and test data. There are 55,000 data points of training data, and 10,000 points of test data. Each MNIST data point has: 1. an image of a handwritten digit and 2. a corresponding label (a number 0-9 that identifies the image) We'll call the images, which will be the input to our neural network, X and their corresponding labels Y. We're going to want our labels as one-hot vectors, which are vectors that holds mostly 0's and one 1. It's easiest to see this in a example. As a one-hot vector, the number 0 is represented as [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], and 4 is represented as [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]. Flattened data For this example, we'll be using flattened data or a representation of MNIST images in one dimension rather than two. So, each handwritten number image, which is 28x28 pixels, will be represented as a one dimensional array of 784 pixel values. Flattening the data throws away information about the 2D structure of the image, but it simplifies our data so that all of the training data can be contained in one array whose shape is [55000, 784]; the first dimension is the number of training images and the second dimension is the number of pixels in each image. This is the kind of data that is easy to analyze using a simple neural network. End of explanation """ # Visualizing the data import matplotlib.pyplot as plt %matplotlib inline # Function for displaying a training image by it's index in the MNIST set def show_digit(index): label = trainY[index].argmax(axis=0) # Reshape 784 array into 28x28 image image = trainX[index].reshape([28,28]) plt.title('Training data, index: %d, Label: %d' % (index, label)) plt.imshow(image, cmap='gray_r') plt.show() # Display the first (index 0) training image show_digit(0) """ Explanation: Visualize the training data Provided below is a function that will help you visualize the MNIST data. By passing in the index of a training example, the function show_digit will display that training image along with it's corresponding label in the title. 
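For example, show_digit(5) displays the sixth training image. Because trainY is one-hot encoded, trainY[5] is a length-10 vector with a single 1 marking the label, and np.argmax recovers the integer digit. A quick check (assuming the cells above have been run):
show_digit(5)
print(trainY[5], '->', np.argmax(trainY[5]))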
End of explanation """ # Define the neural network def build_model(): # This resets all parameters and variables, leave this here tf.reset_default_graph() #### Your code #### # Include the input layer, hidden layer(s), and set how you want to train the model # This model assumes that your network is named "net" net = tflearn.input_data([None, 784]) net = tflearn.fully_connected(net, 128, activation='ReLU') net = tflearn.fully_connected(net, 32, activation='ReLU') # Output layer and training model net = tflearn.fully_connected(net, 10, activation='softmax') net = tflearn.regression(net, optimizer='sgd', learning_rate=0.01, loss='categorical_crossentropy') model = tflearn.DNN(net) return model # Build the model model = build_model() """ Explanation: Building the network TFLearn lets you build the network by defining the layers in that network. For this example, you'll define: The input layer, which tells the network the number of inputs it should expect for each piece of MNIST data. Hidden layers, which recognize patterns in data and connect the input to the output layer, and The output layer, which defines how the network learns and outputs a label for a given image. Let's start with the input layer; to define the input layer, you'll define the type of data that the network expects. For example, net = tflearn.input_data([None, 100]) would create a network with 100 inputs. The number of inputs to your network needs to match the size of your data. For this example, we're using 784 element long vectors to encode our input data, so we need 784 input units. Adding layers To add new hidden layers, you use net = tflearn.fully_connected(net, n_units, activation='ReLU') This adds a fully connected layer where every unit (or node) in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call, it designates the input to the hidden layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeated calling tflearn.fully_connected(net, n_units). Then, to set how you train the network, use: net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy') Again, this is passing in the network you've been building. The keywords: optimizer sets the training method, here stochastic gradient descent learning_rate is the learning rate loss determines how the network error is calculated. In this example, with categorical cross-entropy. Finally, you put all this together to create the model with tflearn.DNN(net). Exercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc. Hint: The final output layer must have 10 output nodes (one for each digit 0-9). It's also recommended to use a softmax activation layer as your final output layer. End of explanation """ # Training model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=100, n_epoch=30) """ Explanation: Training the network Now that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. 
You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively. Too few epochs don't effectively train your network, and too many take a long time to execute. Choose wisely!
End of explanation
"""

# Compare the labels that our model predicts with the actual labels

# Find the indices of the most confident prediction for each item. That tells us the predicted digit for that sample.
predictions = np.array(model.predict(testX)).argmax(axis=1)

# Calculate the accuracy, which is the percentage of times the predicted labels matched the actual labels
actual = testY.argmax(axis=1)
test_accuracy = np.mean(predictions == actual, axis=0)

# Print out the result
print("Test accuracy: ", test_accuracy)
"""
Explanation: Testing
After you're satisfied with the training output and accuracy, you can then run the network on the test data set to measure its performance! Remember, only do this after you've done the training and are satisfied with the results.
A good result will be higher than 95% accuracy. Some simple models have been known to get up to 99.7% accuracy!
End of explanation
"""
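# An optional follow-up sketch (not in the original notebook): predicting a single test
# image and comparing it with its true label. It assumes the trained `model` and the test
# arrays from the cells above are still in memory.
single = np.array(model.predict(testX[:1]))   # probabilities over the 10 digits
predicted_digit = single.argmax(axis=1)[0]
true_digit = testY[0].argmax()
print("Predicted digit: {}, actual digit: {}".format(predicted_digit, true_digit))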
google-research/google-research
gfsa/notebooks/guide_for_new_tasks.ipynb
apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); End of explanation """ !git clone https://github.com/google-research/google-research.git --depth=1 import os os.chdir("google-research") !pip install flax import gast import numpy as np from gfsa import automaton_builder from gfsa import generic_ast_graphs from gfsa import graph_types from gfsa import py_ast_graphs from gfsa import schema_util from gfsa.datasets.mazes import maze_schema from gfsa.datasets.mazes import maze_task from gfsa.visualization.pprint import pprint from gfsa.visualization.pytrees import summarize_tree """ Explanation: How to use the GFSA layer for new tasks This notebook describes the high-level process for using the GFSA layer in a new task, specifically focusing on how to represent a new type of graph as an MDP so that you can use the GFSA layer. Setup and imports End of explanation """ # AST specification: py_ast_graphs.PY_AST_SPECS # Derived schema (using `generic_ast_graphs.build_ast_graph_schema`) py_ast_graphs.SCHEMA """ Explanation: Defining your graph domain The first step in using the GFSA layer for a new task is to specify how to interpret the graphs in your domain as MDPs. Specifically, you must define a set of node types, and then for each node type, define the set of possible actions ("out edges") the agent can take at that node, and the set of observations ("in edges") the agent can receive when it arrives at the node. In the codebase, this is referred to as a "graph schema". For the dataset of simple Python functions, the graph schema is derived from a simpler "AST specification": End of explanation """ maze_task.SCHEMA """ Explanation: It's possible to infer the AST specification from a dataset of ASTs using ast_spec_inference.py. In the paper, we use two different AST specifications, one for the synthetic Python examples (shown above), and one for the Python examples written by humans (since these use many additional types of AST nodes). For the maze dataset, the node types are determined by the shape of the grid cell, and the graph schema determines which actions are valid: End of explanation """ road_network_schema = { # Houses are only connected to roads, so the only movement action # available is to go to the road; likewise the only observation we receive # after moving is the observation that we arrived from a road. "house": graph_types.NodeSchema( in_edges=["from_road"], out_edges=["to_road"]), # Roads are more complex. We can always move to a random previous or next # road in the road network. We can also try to move to a house, but if there # is no house, we will have to stay on the road. We denote this with a # special observation (in `in_edges`). 
"road": graph_types.NodeSchema( in_edges=["from_next", "from_prev", "from_house", "no_house_here"], out_edges=["to_next", "to_prev", "to_house"]), } road_network_schema """ Explanation: As a toy example of how you might encode a new graph domain, suppose we have a network of houses connected by directed roads. Each house is adjacent to exactly one road, and each road has at least one entry and exit point but may have more. We can encode this structure using the following schema: End of explanation """ the_ast = gast.parse(""" def test_function(foo): if foo: return pass """) generic_ast = py_ast_graphs.py_ast_to_generic(the_ast) mdp_graph, id_conversion_map = generic_ast_graphs.ast_to_graph(generic_ast, ast_spec=py_ast_graphs.PY_AST_SPECS) schema_util.assert_conforms_to_schema(mdp_graph, py_ast_graphs.SCHEMA) mdp_graph """ Explanation: Building MDP graphs Before running the GFSA layer on a specific input graph, you need to specify the result of taking each of the actions defined in the schema. For AST graphs, these transitions can be automatically computed based on the AST and its specification: End of explanation """ the_maze_raw = [ "███████ ████", "████ █ █ █", "████ ███████", ] the_maze = np.array([[c != " " for c in r] for r in the_maze_raw]) mdp_graph, coordinates = maze_schema.encode_maze(the_maze) schema_util.assert_conforms_to_schema(mdp_graph, maze_task.SCHEMA) mdp_graph """ Explanation: In the maze dataset, we precompute the destination of taking each possible action at each possible node: End of explanation """ GraphNode = graph_types.GraphNode InputTaggedNode = graph_types.InputTaggedNode mdp_graph = { 'R0': GraphNode(node_type='road', out_edges={ 'to_next': [InputTaggedNode(node_id='R1', in_edge='from_prev')], 'to_prev': [InputTaggedNode(node_id='R1', in_edge='from_next')], 'to_house': [InputTaggedNode(node_id='H0', in_edge='from_road')] }), 'R1': GraphNode(node_type='road', out_edges={ 'to_next': [InputTaggedNode(node_id='R0', in_edge='from_prev')], 'to_prev': [InputTaggedNode(node_id='R0', in_edge='from_next'), InputTaggedNode(node_id='R3', in_edge='from_next')], 'to_house': [InputTaggedNode(node_id='H1', in_edge='from_road')] }), 'R2': GraphNode(node_type='road', out_edges={ 'to_next': [InputTaggedNode(node_id='R3', in_edge='from_prev'), InputTaggedNode(node_id='R4', in_edge='from_prev')], 'to_prev': [InputTaggedNode(node_id='R4', in_edge='from_next')], 'to_house': [InputTaggedNode(node_id='R2', in_edge='no_house_here')] }), 'R3': GraphNode(node_type='road', out_edges={ 'to_next': [InputTaggedNode(node_id='R1', in_edge='from_prev')], 'to_prev': [InputTaggedNode(node_id='R2', in_edge='from_next')], 'to_house': [InputTaggedNode(node_id='H2', in_edge='from_road')] }), 'R4': GraphNode(node_type='road', out_edges={ 'to_next': [InputTaggedNode(node_id='R2', in_edge='from_prev')], 'to_prev': [InputTaggedNode(node_id='R2', in_edge='from_next')], 'to_house': [InputTaggedNode(node_id='R4', in_edge='no_house_here')] }), 'H0': GraphNode(node_type='house', out_edges={ 'to_road': [InputTaggedNode(node_id='R0', in_edge='from_house')] }), 'H1': GraphNode(node_type='house', out_edges={ 'to_road': [InputTaggedNode(node_id='R1', in_edge='from_house')] }), 'H2': GraphNode(node_type='house', out_edges={ 'to_road': [InputTaggedNode(node_id='R3', in_edge='from_house')] }), } schema_util.assert_conforms_to_schema(mdp_graph, road_network_schema) """ Explanation: For the toy houses-and-roads example above, we might have a graph that looks something like this: End of explanation """ road_builder = 
automaton_builder.AutomatonBuilder(road_network_schema) pprint(summarize_tree(road_builder.encode_graph(mdp_graph, as_jax=False))) """ Explanation: Note that every action must have at least one destination and associated observation! This is the case even for roads with no house, for which the "to_house" action results in staying in place and getting a special sentinel observation. Encoding MDP graphs into GFSA-compatible NDArrays Once you have an MDP graph that conforms to a schema, you can use an automaton builder object to encode that graph into a set of NDArrays: End of explanation """
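# A minimal follow-up sketch (assuming the schema, imports, and road_builder defined above
# are in scope): the smallest road network that satisfies road_network_schema, a single
# road looping onto itself with one house, can be validated and encoded in the same way.
tiny_graph = {
    'R0': GraphNode(node_type='road', out_edges={
        'to_next': [InputTaggedNode(node_id='R0', in_edge='from_prev')],
        'to_prev': [InputTaggedNode(node_id='R0', in_edge='from_next')],
        'to_house': [InputTaggedNode(node_id='H0', in_edge='from_road')]
    }),
    'H0': GraphNode(node_type='house', out_edges={
        'to_road': [InputTaggedNode(node_id='R0', in_edge='from_house')]
    }),
}
schema_util.assert_conforms_to_schema(tiny_graph, road_network_schema)
pprint(summarize_tree(road_builder.encode_graph(tiny_graph, as_jax=False)))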
adrianstaniec/deep-learning
14_language-translation/dlnd_language_translation.ipynb
mit
import helper import problem_unittests as tests source_path = 'data/small_vocab_en' target_path = 'data/small_vocab_fr' source_text = helper.load_data(source_path) target_text = helper.load_data(target_path) """ Explanation: Language Translation In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French. Get the Data Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus. End of explanation """ view_sentence_range = (0, 10) import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()}))) sentences = source_text.split('\n') word_counts = [len(sentence.split()) for sentence in sentences] print('Number of sentences: {}'.format(len(sentences))) print('Average number of words in a sentence: {}'.format(np.average(word_counts))) print() print('English sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) print() print('French sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) """ Explanation: Explore the Data Play around with view_sentence_range to view different parts of the data. End of explanation """ def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): """ Convert source and target text to proper vectors of word ids :param source_text: String that contains all the source text. :param target_text: String that contains all the target text. :param source_vocab_to_int: Dictionary to go from the source words to an id :param target_vocab_to_int: Dictionary to go from the target words to an id :return: A tuple of lists of lists (source_id_text, target_id_text) """ sentences = source_text.split('\n') source_vectors = [] for sent in sentences: source_vectors.append([ source_vocab_to_int[word] for word in sent.split(' ') if word != '' ]) sentences = target_text.split('\n') target_vectors = [] for sent in sentences: target_vectors.append([ target_vocab_to_int[word] for word in sent.split(' ') if word != '' ] + [target_vocab_to_int['<EOS>']]) return source_vectors, target_vectors tests.test_text_to_ids(text_to_ids) """ Explanation: Implement Preprocessing Function Text to Word Ids As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the &lt;EOS&gt; word id at the end of target_text. This will help the neural network predict when the sentence should end. You can get the &lt;EOS&gt; word id by doing: python target_vocab_to_int['&lt;EOS&gt;'] You can get other word ids using source_vocab_to_int and target_vocab_to_int. End of explanation """ helper.preprocess_and_save_data(source_path, target_path, text_to_ids) """ Explanation: Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. 
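Under the hood this applies the text_to_ids function from above. As a toy illustration with hypothetical two-word vocabularies (not the real ones built by the helper):
src_v2i = {'new': 0, 'jersey': 1}
tgt_v2i = {'<EOS>': 0, 'new': 1, 'jersey': 2}
text_to_ids('new jersey', 'new jersey', src_v2i, tgt_v2i)
# returns ([[0, 1]], [[1, 2, 0]]); the <EOS> id is appended to every target sentence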
End of explanation """ import numpy as np import helper import problem_unittests as tests (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ from distutils.version import LooseVersion import warnings import tensorflow as tf from tensorflow.python.layers.core import Dense # Check TensorFlow Version assert LooseVersion(tf.__version__) == LooseVersion('1.1.0'), 'Please use TensorFlow version 1.1' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) """ Explanation: Check the Version of TensorFlow and Access to GPU This will check to make sure you have the correct version of TensorFlow and access to a GPU End of explanation """ def model_inputs(): """ Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences. :return: Tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) """ inputs = tf.placeholder(tf.int32, [None, None], 'input') targets = tf.placeholder(tf.int32, [None, None], 'target') lr = tf.placeholder(tf.float32, name='lr') kp = tf.placeholder(tf.float32, name='keep_prob') target_seq_len = tf.placeholder(tf.int32, [None], name='target_sequence_length') max_target_seq_len = tf.reduce_max(target_seq_len, name='max_target_len') source_seq_len = tf.placeholder(tf.int32, [None], name='source_sequence_length') return inputs, targets, lr, kp, target_seq_len, max_target_seq_len, source_seq_len tests.test_model_inputs(model_inputs) """ Explanation: Build the Neural Network You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below: - model_inputs - process_decoder_input - encoding_layer - decoding_layer_train - decoding_layer_infer - decoding_layer - seq2seq_model Input Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: Input text placeholder named "input" using the TF Placeholder name parameter with rank 2. Targets placeholder with rank 2. Learning rate placeholder with rank 0. Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0. Target sequence length placeholder named "target_sequence_length" with rank 1 Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0. 
Source sequence length placeholder named "source_sequence_length" with rank 1 End of explanation """ def process_decoder_input(target_data, target_vocab_to_int, batch_size): """ Preprocess target data for encoding :param target_data: Target Placehoder :param target_vocab_to_int: Dictionary to go from the target words to an id :param batch_size: Batch Size :return: Preprocessed target data """ # difficult way # ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1]) # easy way ending = target_data[:, :-1] dec_input = tf.concat( values=[tf.fill(dims=[batch_size, 1], value=target_vocab_to_int['<GO>']), ending], axis=1) return dec_input tests.test_process_encoding_input(process_decoder_input) """ Explanation: Process Decoder Input Implement process_decoder_input by removing the last word id from each batch in target_data and concat the GO ID to the begining of each batch. End of explanation """ from imp import reload reload(tests) def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size): """ Create encoding layer :param rnn_inputs: Inputs for the RNN :param rnn_size: RNN Size :param num_layers: Number of layers :param keep_prob: Dropout keep probability :param source_sequence_length: a list of the lengths of each sequence in the batch :param source_vocab_size: vocabulary size of source data :param encoding_embedding_size: embedding size of source data :return: tuple (RNN output, RNN state) """ emb = tf.contrib.layers.embed_sequence( ids=rnn_inputs, vocab_size=source_vocab_size, embed_dim=encoding_embedding_size) def wrapped_cell(rnn_size, keep_prob): initer = tf.random_uniform_initializer(-0.1, 0.1, seed=2) cell = tf.contrib.rnn.LSTMCell(num_units=rnn_size, initializer=initer) return tf.contrib.rnn.DropoutWrapper( cell=cell, input_keep_prob=keep_prob) stacked = tf.contrib.rnn.MultiRNNCell( [wrapped_cell(rnn_size, keep_prob) for _ in range(num_layers)]) rnn_output, rnn_state = tf.nn.dynamic_rnn( cell=stacked, inputs=emb, sequence_length=source_sequence_length, dtype=tf.float32) return rnn_output, rnn_state tests.test_encoding_layer(encoding_layer) """ Explanation: Encoding Implement encoding_layer() to create a Encoder RNN layer: * Embed the encoder input using tf.contrib.layers.embed_sequence * Construct a stacked tf.contrib.rnn.LSTMCell wrapped in a tf.contrib.rnn.DropoutWrapper * Pass cell and embedded input to tf.nn.dynamic_rnn() End of explanation """ def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob): """ Create a decoding layer for training :param encoder_state: Encoder State :param dec_cell: Decoder RNN Cell :param dec_embed_input: Decoder embedded input :param target_sequence_length: The lengths of each sequence in the target batch :param max_summary_length: The length of the longest sequence in the batch :param output_layer: Function to apply the output layer :param keep_prob: Dropout keep probability :return: BasicDecoder output containing training logits and sample_id """ train_help = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input, sequence_length=target_sequence_length) train_decoder = tf.contrib.seq2seq.BasicDecoder(cell=dec_cell, helper=train_help, initial_state=encoder_state, output_layer=output_layer) f_outputs, f_state = tf.contrib.seq2seq.dynamic_decode(decoder=train_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length) # TODO: keep_prob - unused argument? 
return f_outputs tests.test_decoding_layer_train(decoding_layer_train) """ Explanation: Decoding - Training Create a training decoding layer: * Create a tf.contrib.seq2seq.TrainingHelper * Create a tf.contrib.seq2seq.BasicDecoder * Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode End of explanation """ def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob): """ Create a decoding layer for inference :param encoder_state: Encoder state :param dec_cell: Decoder RNN Cell :param dec_embeddings: Decoder embeddings :param start_of_sequence_id: GO ID :param end_of_sequence_id: EOS Id :param max_target_sequence_length: Maximum length of target sequences :param vocab_size: Size of decoder/target vocabulary :param output_layer: Function to apply the output layer :param batch_size: Batch size :param keep_prob: Dropout keep probability :return: BasicDecoder output containing inference logits and sample_id """ start_tokens = tf.tile(input=tf.constant([start_of_sequence_id], dtype=tf.int32), multiples=[batch_size], name='start_tokens') infer_help = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding=dec_embeddings, start_tokens=start_tokens, end_token=end_of_sequence_id) infer_decoder = tf.contrib.seq2seq.BasicDecoder(cell=dec_cell, helper=infer_help, initial_state=encoder_state, output_layer=output_layer) f_outputs, f_state = tf.contrib.seq2seq.dynamic_decode(decoder=infer_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length) # TODO: keep_prob - unused argument return f_outputs tests.test_decoding_layer_infer(decoding_layer_infer) """ Explanation: Decoding - Inference Create inference decoder: * Create a tf.contrib.seq2seq.GreedyEmbeddingHelper * Create a tf.contrib.seq2seq.BasicDecoder * Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode End of explanation """ def decoding_layer(dec_input, encoder_state, target_sequence_length, max_target_sequence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, decoding_embedding_size): """ Create decoding layer :param dec_input: Decoder input :param encoder_state: Encoder state :param target_sequence_length: The lengths of each sequence in the target batch :param max_target_sequence_length: Maximum length of target sequences :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :param target_vocab_size: Size of target vocabulary :param batch_size: The size of the batch :param keep_prob: Dropout keep probability :param decoding_embedding_size: Decoding embedding size :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ # Embed the target sequences dec_embeddings = tf.Variable( tf.random_uniform([target_vocab_size, decoding_embedding_size])) emb = tf.nn.embedding_lookup(dec_embeddings, dec_input) # Construct the decoder LSTM cell #(just like you constructed the encoder cell above) def make_cell(rnn_size): dec_cell = tf.contrib.rnn.LSTMCell( rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2)) return dec_cell dec_cell = tf.contrib.rnn.MultiRNNCell( [make_cell(rnn_size) for _ in range(num_layers)]) # Create an output layer to map the outputs of the decoder # to the elements of our vocabulary output_layer = Dense( target_vocab_size, kernel_initializer=tf.truncated_normal_initializer( mean=0.0, stddev=0.1)) with 
tf.variable_scope("decode"): train_logits = decoding_layer_train( encoder_state, dec_cell, emb, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) with tf.variable_scope("decode", reuse=True): infer_logits = decoding_layer_infer( encoder_state, dec_cell, dec_embeddings, target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'], max_target_sequence_length, target_vocab_size, output_layer, batch_size, keep_prob) return train_logits, infer_logits tests.test_decoding_layer(decoding_layer) """ Explanation: Build the Decoding Layer Implement decoding_layer() to create a Decoder RNN layer. Embed the target sequences Construct the decoder LSTM cell (just like you constructed the encoder cell above) Create an output layer to map the outputs of the decoder to the elements of our vocabulary Use the your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) function to get the training logits. Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) function to get the inference logits. Note: You'll need to use tf.variable_scope to share variables between training and inference. End of explanation """ def seq2seq_model(input_data, target_data, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sentence_length, source_vocab_size, target_vocab_size, enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int): """ Build the Sequence-to-Sequence part of the neural network :param input_data: Input placeholder :param target_data: Target placeholder :param keep_prob: Dropout keep probability placeholder :param batch_size: Batch Size :param source_sequence_length: Sequence Lengths of source sequences in the batch :param target_sequence_length: Sequence Lengths of target sequences in the batch :param source_vocab_size: Source vocabulary size :param target_vocab_size: Target vocabulary size :param enc_embedding_size: Decoder embedding size :param dec_embedding_size: Encoder embedding size :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ _, enc_state = encoding_layer( input_data, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, enc_embedding_size) dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size) train_dec_out, infer_dec_out = decoding_layer( dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) return train_dec_out, infer_dec_out tests.test_seq2seq_model(seq2seq_model) """ Explanation: Build the Neural Network Apply the functions you implemented above to: Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size). Process target data using your process_decoder_input(target_data, target_vocab_to_int, batch_size) function. Decode the encoded input using your decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) function. 
End of explanation """ # Number of Epochs epochs = 20 # Batch Size batch_size = 512 # RNN Size rnn_size = 512 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 64 decoding_embedding_size = 64 # Learning Rate learning_rate = 0.001 # Dropout Keep Probability keep_probability = 0.5 display_step = 50 """ Explanation: Neural Network Training Hyperparameters Tune the following parameters: Set epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set num_layers to the number of layers. Set encoding_embedding_size to the size of the embedding for the encoder. Set decoding_embedding_size to the size of the embedding for the decoder. Set learning_rate to the learning rate. Set keep_probability to the Dropout keep probability Set display_step to state how many steps between each debug output statement End of explanation """ save_path = 'checkpoints/dev' ((source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _) = helper.load_preprocess() max_target_sentence_length = max([len(sentence) for sentence in source_int_text]) train_graph = tf.Graph() with train_graph.as_default(): (input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length) = model_inputs() #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length') input_shape = tf.shape(input_data) train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sequence_length, len(source_vocab_to_int), len(target_vocab_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int) training_logits = tf.identity(train_logits.rnn_output, name='logits') inference_logits = tf.identity(inference_logits.sample_id, name='predictions') masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) """ Explanation: Build the Graph Build the graph using the neural network you implemented. 
End of explanation """ def pad_sentence_batch(sentence_batch, pad_int): """Pad sentences with <PAD> so that each sentence of a batch has the same length""" max_sentence = max([len(sentence) for sentence in sentence_batch]) return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch] def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int): """Batch targets, sources, and the lengths of their sentences together""" for batch_i in range(0, len(sources)//batch_size): start_i = batch_i * batch_size # Slice the right amount for the batch sources_batch = sources[start_i:start_i + batch_size] targets_batch = targets[start_i:start_i + batch_size] # Pad pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int)) pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int)) # Need the lengths for the _lengths parameters pad_targets_lengths = [] for target in pad_targets_batch: pad_targets_lengths.append(len(target)) pad_source_lengths = [] for source in pad_sources_batch: pad_source_lengths.append(len(source)) yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths """ Explanation: Batch and pad the source and target sequences End of explanation """ def get_accuracy(target, logits): """ Calculate accuracy """ max_seq = max(target.shape[1], logits.shape[1]) if max_seq - target.shape[1]: target = np.pad(target, [(0, 0), (0, max_seq - target.shape[1])], 'constant') if max_seq - logits.shape[1]: logits = np.pad(logits, [(0, 0), (0, max_seq - logits.shape[1])], 'constant') return np.mean(np.equal(target, logits)) # Split data to training and validation sets train_source = source_int_text[batch_size:] train_target = target_int_text[batch_size:] valid_source = source_int_text[:batch_size] valid_target = target_int_text[:batch_size] (valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths) = next( get_batches(valid_source, valid_target, batch_size, source_vocab_to_int[ '<PAD>'], target_vocab_to_int['<PAD>'])) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(epochs): for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate( get_batches(train_source, train_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])): _, loss = sess.run([train_op, cost], { input_data: source_batch, targets: target_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths, keep_prob: keep_probability }) if batch_i % display_step == 0 and batch_i > 0: batch_train_logits = sess.run(inference_logits, { input_data: source_batch, source_sequence_length: sources_lengths, target_sequence_length: targets_lengths, keep_prob: 1.0 }) batch_valid_logits = sess.run(inference_logits, { input_data: valid_sources_batch, source_sequence_length: valid_sources_lengths, target_sequence_length: valid_targets_lengths, keep_prob: 1.0 }) train_acc = get_accuracy(target_batch, batch_train_logits) valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits) print( 'Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}' .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_path) print('Model Trained and Saved') print('# Number of Epochs ', epochs) print('# Batch Size ', batch_size) print('# RNN Size ', 
rnn_size) print('# Number of Layers ', num_layers) print('# Embedding Size ', encoding_embedding_size) print('# Learning Rate ', learning_rate) print('# Dropout Keep Probability ', keep_probability) """ Explanation: Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. End of explanation """ # Save parameters for checkpoint helper.save_params(save_path) """ Explanation: My conclusions from hyperparameter adjustmensts are that for comparable results: - if you double the batch size, also doouble the number of epochs - if you half the dropout keep probability, also double the network size Save Parameters Save the batch_size and save_path parameters for inference. End of explanation """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess() load_path = helper.load_params() """ Explanation: Checkpoint End of explanation """ def sentence_to_seq(sentence, vocab_to_int): """ Convert a sentence to a sequence of ids :param sentence: String :param vocab_to_int: Dictionary to go from the words to an id :return: List of word ids """ return [ vocab_to_int[w] if w in vocab_to_int.keys() else vocab_to_int['<UNK>'] for w in sentence.lower().split(' ') ] tests.test_sentence_to_seq(sentence_to_seq) """ Explanation: Sentence to Sequence To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences. Convert the sentence to lowercase Convert words into ids using vocab_to_int Convert words not in the vocabulary, to the &lt;UNK&gt; word id. End of explanation """ translate_sentence = 'he saw a old yellow truck .' translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_path + '.meta') loader.restore(sess, load_path) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size, target_sequence_length: [len(translate_sentence)*2]*batch_size, source_sequence_length: [len(translate_sentence)]*batch_size, keep_prob: 1.0})[0] print('Input') print(' Word Ids: {}'.format([i for i in translate_sentence])) print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence])) print('\nPrediction') print(' Word Ids: {}'.format([i for i in translate_logits])) print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits]))) """ Explanation: Translate This will translate translate_sentence from English to French. 
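The sentence is first converted with sentence_to_seq; any word that is not in the training vocabulary is mapped to the <UNK> id, so out-of-vocabulary words will surface as <UNK> tokens in the translation. A quick check (assuming the preprocessed vocabularies are loaded):
print(sentence_to_seq('he saw a old yellow truck .', source_vocab_to_int))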
End of explanation """ import sys sys.modules.keys() import types lines = !conda list versions = dict() for line in lines[2:]: li = line.split() versions[li[0]] = li[1] def imports(): print('Modules used in the notebook:\n') for val in globals().values(): if isinstance(val, types.ModuleType): name = val.__name__ ver = '' if name in versions: ver = versions[name] # special case for tensorflow-gpu if name + '-gpu' in versions: ver = versions[name + '-gpu'] name = name + '-gpu' print('{:25}{:>10}'.format(name, ver)) imports() """ Explanation: Imperfect Translation You might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words of the thousands that you use, you're only going to see good results using these words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data. You can train on the WMT10 French-English corpus. This dataset has more vocabulary and richer in topics discussed. However, this will take you days to train, so make sure you've a GPU and the neural network is performing well on dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project. Submitting This Project When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_language_translation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission. Appendix End of explanation """
kitu2007/dl_class
tv-script-generation/dlnd_tv_script_generation.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper data_dir = './data/simpsons/moes_tavern_lines.txt' text = helper.load_data(data_dir) # Ignore notice, since we don't use it for analysing the data text = text[81:] """ Explanation: TV Script Generation In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern. Get the Data The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc.. End of explanation """ view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) scenes = text.split('\n\n') print('Number of scenes: {}'.format(len(scenes))) sentence_count_scene = [scene.count('\n') for scene in scenes] print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene))) sentences = [sentence for scene in scenes for sentence in scene.split('\n')] print('Number of lines: {}'.format(len(sentences))) word_count_sentence = [len(sentence.split()) for sentence in sentences] print('Average number of words in each line: {}'.format(np.average(word_count_sentence))) print() print('The sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) """ Explanation: Explore the Data Play around with view_sentence_range to view different parts of the data. End of explanation """ import numpy as np import problem_unittests as tests from collections import Counter def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ count = Counter(text) vocab = sorted(count, key=count.get, reverse=True) vocab_to_int = {word: ii for ii,word in enumerate(vocab,0)} int_to_vocab = {val:key for key,val in vocab_to_int.items()} return (vocab_to_int, int_to_vocab) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) import re dd = re.split(' |\n',scenes[0]) # split both based on space and \n and could be others """ Explanation: Implement Preprocessing Functions The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below: - Lookup Table - Tokenize Punctuation Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call vocab_to_int - Dictionary to go from the id to word, we'll call int_to_vocab Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab) End of explanation """ from string import punctuation # strips all punctuations.. this is cool! print(punctuation) text1 = 'I am though! but alas _ not enough.' words1 = ''.join([c for c in text1 if c not in punctuation]) #words1 = [word for word in text1.split(" ") if word not in punctuation] words1 def token_lookup(): """ Generate a dict to turn punctuation into a token. 
:return: Tokenize dictionary where the key is the punctuation and the value is the token """ dict1={'.':'||Period||', ',':'||Comma||', '"':'||Quotation-mark||', ';':'||Semicolon||', '!':"||Exclamation-mark||", '?':"||Question-mark||", '(':"||Left-Parentheses||", ')':"||Right-Parentheses||", '--':"||Dash||", '\n':"Return"} return dict1 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) """ Explanation: Tokenize Punctuation We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!". Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token: - Period ( . ) - Comma ( , ) - Quotation Mark ( " ) - Semicolon ( ; ) - Exclamation mark ( ! ) - Question mark ( ? ) - Left Parentheses ( ( ) - Right Parentheses ( ) ) - Dash ( -- ) - Return ( \n ) This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||". End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) """ Explanation: Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import numpy as np import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) len(vocab_to_int) """ Explanation: Build the Neural Network You'll build the components necessary to build a RNN by implementing the following functions below: - get_inputs - get_init_cell - get_embed - build_rnn - build_nn - get_batches Check the Version of TensorFlow and Access to GPU End of explanation """ def get_inputs(): """ Create TF Placeholders for input, targets, and learning rate. 
:return: Tuple (input, targets, learning rate) """ input = tf.placeholder(tf.int32,shape=(None,None),name='input') targets = tf.placeholder(tf.int32,shape=(None,None),name='targets') learning_rate = tf.placeholder(tf.float32,name='learning_rate') # TODO: Implement Function return (input, targets, learning_rate) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_inputs(get_inputs) """ Explanation: Input Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: - Input text placeholder named "input" using the TF Placeholder name parameter. - Targets placeholder - Learning Rate placeholder Return the placeholders in the following tuple (Input, Targets, LearningRate) End of explanation """ keep_prob = tf.placeholder(tf.float32,name='keep_prob') def get_init_cell(batch_size, rnn_size): """ Create an RNN Cell and initialize it. :param batch_size: Size of batches :param rnn_size: Size of RNNs :return: Tuple (cell, initialize state) """ cell = tf.contrib.rnn.BasicLSTMCell(rnn_size) drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob) cells = [cell, cell] cell = tf.contrib.rnn.MultiRNNCell(cells) # don't fully understand this.. what if I made this a list initialize_state = cell.zero_state(batch_size=batch_size, dtype=tf.float32) initialize_state = tf.identity(initialize_state, name='initial_state') #initialize_state = tf.contrib.rnn.MultiRNNCell.zero_state(batch_size=batch_size,dtype=tf.float32) # TODO: Implement Function return cell, initialize_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_init_cell(get_init_cell) """ Explanation: Build RNN Cell and Initialize Stack one or more BasicLSTMCells in a MultiRNNCell. - The Rnn size should be set using rnn_size - Initalize Cell State using the MultiRNNCell's zero_state() function - Apply the name "initial_state" to the initial state using tf.identity() Return the cell and initial state in the following tuple (Cell, InitialState) End of explanation """ def get_embed(input_data, vocab_size, embed_dim): """ Create embedding for <input_data>. :param input_data: TF placeholder for text input. :param vocab_size: Number of words in vocabulary. :param embed_dim: Number of embedding dimensions :return: Embedded input. """ # TODO: Implement Function word_embedding = tf.Variable(initial_value=tf.random_uniform((vocab_size, embed_dim),-1,1),name='word_embedding') embedded_input = tf.nn.embedding_lookup(word_embedding, input_data) return embedded_input """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_embed(get_embed) """ Explanation: Word Embedding Apply embedding to input_data using TensorFlow. Return the embedded sequence. End of explanation """ def build_rnn(cell, inputs): """ Create a RNN using a RNN Cell :param cell: RNN Cell :param inputs: Input text data :return: Tuple (Outputs, Final State) """ outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name='final_state') # TODO: Implement Function return outputs, final_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_rnn(build_rnn) """ Explanation: Build RNN You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN. 
- Build the RNN using the tf.nn.dynamic_rnn() - Apply the name "final_state" to the final state using tf.identity() Return the outputs and final_state state in the following tuple (Outputs, FinalState) End of explanation """ def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim): """ Build part of the neural network :param cell: RNN cell :param rnn_size: Size of rnns :param input_data: Input data :param vocab_size: Vocabulary size :param embed_dim: Number of embedding dimensions :return: Tuple (Logits, FinalState) """ embed_dim = 200 embedded_input = get_embed(input_data, vocab_size, embed_dim) outputs, final_state = build_rnn(cell, embedded_input) #output_weight = tf.Variable(tf.truncated_normal((vocab_size,rnn_size)),name='output_weights') logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None) #ipdb.set_trace() # initial state to the RNN is optional return logits, final_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_nn(build_nn) def get_batches(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: Batches as a Numpy array """ effective_len = len(int_text) - 2*seq_length - 2 num_batches = int( effective_len /(batch_size*seq_length)) ind = 0 input1 = np.zeros((num_batches,2,batch_size,seq_length),dtype=np.int32) for j in range(batch_size): for i in range(num_batches): input1[i][0][j] = int_text[ind:ind+seq_length] input1[i][1][j] = int_text[ind+1:ind+seq_length+1] ind += seq_length return input1 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_batches(get_batches) """ Explanation: Build the Neural Network Apply the functions you implemented above to: - Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function. - Build RNN using cell and your build_rnn(cell, inputs) function. - Apply a fully connected layer with a linear activation and vocab_size as the number of outputs. Return the logits and final state in the following tuple (Logits, FinalState) End of explanation """ # Number of Epochs num_epochs = 10 # Batch Size batch_size = 64 # RNN Size rnn_size = 128 # Embedding Dimension Size embed_dim = None # Sequence Length seq_length = 11 # Learning Rate learning_rate = 0.01 # Show stats for every n number of batches show_every_n_batches = 10 keep_prob = 0.5 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ save_dir = './save' """ Explanation: Neural Network Training Hyperparameters Tune the following parameters: Set num_epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set embed_dim to the size of the embedding. Set seq_length to the length of sequence. Set learning_rate to the learning rate. Set show_every_n_batches to the number of batches the neural network should print progress. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from tensorflow.contrib import seq2seq train_graph = tf.Graph() with train_graph.as_default(): vocab_size = len(int_to_vocab) input_text, targets, lr = get_inputs() input_data_shape = tf.shape(input_text) cell, initial_state = get_init_cell(input_data_shape[0], rnn_size) logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function cost = seq2seq.sequence_loss( logits, targets, tf.ones([input_data_shape[0], input_data_shape[1]])) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) """ Explanation: Build the Graph Build the graph using the neural network you implemented. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ batches = get_batches(int_text, batch_size, seq_length) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(num_epochs): state = sess.run(initial_state, {input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { input_text: x, targets: y, initial_state: state, lr: learning_rate} train_loss, state, _ = sess.run([cost, final_state, train_op], feed) # Show every <show_every_n_batches> batches if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0: print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format( epoch_i, batch_i, len(batches), train_loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_dir) print('Model Trained and Saved') """ Explanation: Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params((seq_length, save_dir)) seq_length """ Explanation: Save Parameters Save seq_length and save_dir for generating a new TV script. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() seq_length, load_dir = helper.load_params() """ Explanation: Checkpoint End of explanation """ def get_tensors(loaded_graph): """ Get input, initial state, final state, and probabilities tensor from <loaded_graph> :param loaded_graph: TensorFlow graph loaded from file :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) """ input_tensor = loaded_graph.get_tensor_by_name('input:0') initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0') final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0') prob_tensor = loaded_graph.get_tensor_by_name('probs:0') # TODO: Implement Function return input_tensor, initial_state_tensor, final_state_tensor, prob_tensor """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_tensors(get_tensors) """ Explanation: Implement Generate Functions Get Tensors Get tensors from loaded_graph using the function get_tensor_by_name(). 
Get the tensors using the following names: - "input:0" - "initial_state:0" - "final_state:0" - "probs:0" Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) End of explanation """ def pick_word(probabilities, int_to_vocab): """ Pick the next word in the generated text :param probabilities: Probabilites of the next word :param int_to_vocab: Dictionary of word ids as the keys and words as the values :return: String of the predicted word """ ind = np.argmax(probabilities) return int_to_vocab[ind] """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_pick_word(pick_word) """ Explanation: Choose Word Implement the pick_word() function to select the next word using probabilities. End of explanation """ gen_length = 200 # homer_simpson, moe_szyslak, or Barney_Gumble prime_word = 'moe_szyslak' """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_dir + '.meta') loader.restore(sess, load_dir) # Get Tensors from loaded model input_text, initial_state, final_state, probs = get_tensors(loaded_graph) # Sentences generation setup gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {input_text: np.array([[1]])}) # Generate sentences for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [probs, final_state], {input_text: dyn_input, initial_state: prev_state}) pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\n ', '\n') tv_script = tv_script.replace('( ', '(') print(tv_script) """ Explanation: Generate TV Script This will generate the TV script for you. Set gen_length to the length of TV script you want to generate. End of explanation """
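A side note on the pick_word() helper defined above (this is not part of the original project, just an optional variation): always taking np.argmax makes generation deterministic and tends to get stuck repeating the same phrases. Below is a minimal sketch of a sampling-based alternative; it assumes int_to_vocab maps consecutive integer ids (0 .. vocab_size-1) to words, as produced by the preprocessing step, and the name pick_word_sampled is ours.

import numpy as np

def pick_word_sampled(probabilities, int_to_vocab):
    """
    Sample the next word id from the softmax distribution instead of always
    taking the argmax, so repeated generation runs produce varied scripts.
    :param probabilities: 1-D array of word probabilities (sums to ~1)
    :param int_to_vocab: Dictionary of word ids as keys and words as values
    :return: String of the sampled word
    """
    # Assumes the ids are the consecutive integers 0 .. len(int_to_vocab) - 1
    word_ids = np.arange(len(int_to_vocab))
    chosen_id = np.random.choice(word_ids, p=probabilities)
    return int_to_vocab[chosen_id]

Swapping this in for pick_word in the generation loop above would give a different script on every run; whether the provided unit test accepts a stochastic implementation has not been checked here.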
Sebbenbear/notebooks
InterviewCake Questions.ipynb
apache-2.0
from functools import reduce def get_products_of_all_ints_except_at_index(arr): results = [] if len(arr) < 2: raise Exception("Arrays too short, can't do it") for index, value in enumerate(arr): new_array = arr[0:index] + arr[index+1:] product = reduce((lambda x, y: x * y), new_array) results.append(product) return results arr = [1, 7, 3, 4] print(get_products_of_all_ints_except_at_index(arr)) # arr = [1] # print(get_products_of_all_ints_except_at_index(arr)) arr = [1, 2] print(get_products_of_all_ints_except_at_index(arr)) """ Explanation: You have a list of integers, and for each index you want to find the product of every integer except the integer at that index. Write a function get_products_of_all_ints_except_at_index() that takes a list of integers and returns a list of the products. For example, given: [1, 7, 3, 4] your function would return: [84, 12, 28, 21] by calculating: [7 * 3 * 4, 1 * 3 * 4, 1 * 7 * 4, 1 * 7 * 3] Do not use division in your solution. End of explanation """ from IPython.core.display import Image, display from IPython.display import Image, Markdown import random def get_max_profit(stock_prices): '''returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday. have to buy before selling O(n^2) Solution ''' max_profit = 0 for inner_index in range(len(stock_prices)): for outer_index in range(len(stock_prices)): earlier_time = min(inner_index, outer_index) later_time = max(inner_index, outer_index) # Get the earlier/later prices for correct ordering earlier_price = stock_prices[earlier_time] later_price = stock_prices[later_time] potential_profit = later_price - earlier_price max_profit = max(max_profit, potential_profit) return max_profit stock_prices_yesterday = [] print(get_max_profit(stock_prices_yesterday) == 0) stock_prices_yesterday = [6] print(get_max_profit(stock_prices_yesterday) == 0) stock_prices_yesterday = [10, 7, 5, 8, 11, 9] print(get_max_profit(stock_prices_yesterday) == 6) def get_max_profit(stock_prices): '''returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday. have to buy before selling O(n^2) Solution ''' max_profit = 0 for inner_index in range(len(stock_prices)): for outer_index in range(len(stock_prices[inner_index:])): earlier_time = min(inner_index, outer_index) later_time = max(inner_index, outer_index) # Get the earlier/later prices for correct ordering earlier_price = stock_prices[earlier_time] later_price = stock_prices[later_time] potential_profit = later_price - earlier_price max_profit = max(max_profit, potential_profit) return max_profit stock_prices_yesterday = [] print(get_max_profit(stock_prices_yesterday) == 0) stock_prices_yesterday = [6] print(get_max_profit(stock_prices_yesterday) == 0) stock_prices_yesterday = [10, 7, 5, 8, 11, 9] print(get_max_profit(stock_prices_yesterday) == 6) #incorrect """ Explanation: Apple Stocks Write an efficient function that takes stock_prices_yesterday and returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday. End of explanation """ def get_max_profit(stock_prices): '''returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday. have to buy before selling. Came up with this on my own! 
Runtime is O(n) ''' if len(stock_prices) < 2: return 0 min_buy = stock_prices[0] max_sell = stock_prices[1] print(min_buy, max_sell) for time in range(len(stock_prices)-1): if time == 0: continue buy = stock_prices[time] sell = stock_prices[time+1] min_buy = min(min_buy, buy) max_sell = max(max_sell, sell) print(min_buy, max_sell) return max_sell - min_buy # stock_prices_yesterday = [] # print(get_max_profit(stock_prices_yesterday) == 0) # stock_prices_yesterday = [6] # print(get_max_profit(stock_prices_yesterday) == 0) stock_prices_yesterday = [10, 7, 5, 8, 11, 9] print(get_max_profit(stock_prices_yesterday)) # stock_prices_yesterday = [4, 3, 2, 1] # print(get_max_profit(stock_prices_yesterday)) # stock_prices_yesterday = [4, 4, 4, 4] # print(get_max_profit(stock_prices_yesterday)) def get_max_profit(stock_prices_yesterday): # make sure we have at least 2 prices if len(stock_prices_yesterday) < 2: raise IndexError('Getting a profit requires at least 2 prices') # we'll greedily update min_price and max_profit, so we initialize # them to the first price and the first possible profit min_price = stock_prices_yesterday[0] max_profit = stock_prices_yesterday[1] - stock_prices_yesterday[0] for index, current_price in enumerate(stock_prices_yesterday): # skip the first (0th) time # we can't sell at the first time, since we must buy first, # and we can't buy and sell at the same time! # if we took this out, we'd try to buy *and* sell at time 0. # this would give a profit of 0, which is a problem if our # max_profit is supposed to be *negative*--we'd return 0. if index == 0: continue # see what our profit would be if we bought at the # min price and sold at the current price potential_profit = current_price - min_price # update max_profit if we can do better max_profit = max(max_profit, potential_profit) # update min_price so it's always # the lowest price we've seen so far min_price = min(min_price, current_price) return max_profit stock_prices_yesterday = [10, 7, 5, 8, 11, 9] print(get_max_profit(stock_prices_yesterday)) stock_prices_yesterday = [4, 3, 2, 1] print(get_max_profit(stock_prices_yesterday)) stock_prices_yesterday = [4, 4, 4, 4] print(get_max_profit(stock_prices_yesterday)) """ Explanation: Need to do better than O(n^2), so it'll probably be either O(n log n) or O(n). Let's try a greedy approach. End of explanation """
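Looking back at the products-of-all-ints-except-at-index exercise earlier in this notebook: the accepted solution rebuilds a sub-list and re-multiplies it for every index, which is O(n^2). Below is a minimal sketch of the standard O(n) prefix/suffix-product approach, still without division; the function name (with the _linear suffix) is ours, added only for comparison.

def get_products_of_all_ints_except_at_index_linear(arr):
    """O(n) alternative: for each index, multiply the product of everything
    before it by the product of everything after it, never using division."""
    if len(arr) < 2:
        raise Exception("Arrays too short, can't do it")

    n = len(arr)
    results = [1] * n

    # Pass 1: results[i] holds the product of all elements before index i.
    running = 1
    for i in range(n):
        results[i] = running
        running *= arr[i]

    # Pass 2: fold in the product of all elements after index i.
    running = 1
    for i in range(n - 1, -1, -1):
        results[i] *= running
        running *= arr[i]

    return results

print(get_products_of_all_ints_except_at_index_linear([1, 7, 3, 4]))  # expected: [84, 12, 28, 21]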
lguarneros/fimda
dinamica-2puentes.ipynb
gpl-3.0
%matplotlib inline import numpy as np import pylab as pl import matplotlib.patches as mpatches import matplotlib.ticker as ticker import os import shutil from IPython.display import Image from matplotlib.ticker import FormatStrFormatter """ Explanation: FIMDA Script que realiza el análisis de dinámica para una trayectoria con 2 puentes di sulfuro. El presente script genera los archivos resultantes del análisis de una trayectoria haciendo uso: Gromacs 5 y Gromacs 4 Xmgrace VMD Chimera catdcd trjconv. Se deberá contar con la trayectoria que contenga el rmsd más bajo para realizar el análisis sobre ella. Librerías a utilizar End of explanation """ ruta=os.getcwd() c=input('Nombre de la trayectoria para realizar el análisis... Ejemplo: run001....') if os.path.isdir(c): indir = '/'+c print (indir) ruta_old_traj=ruta+indir print (ruta) print (ruta_old_traj) else: print ('La carpetac'+c+' no existe...') # ruta_scripts=ruta+'/scripts_fimda' print (ruta_scripts) if os.path.exists(ruta_scripts): print ('Ruta identificada para búsqueda de scripst adicionales ===>',ruta_scripts) else: print ('La carpeta de scripst adicionales no existe, copiar en '+ruta_scripts+' ..!!!') """ Explanation: Ruta de la trayectoria Escribir después de la diagonal, la ruta de la trayectoria seleccionada con el rmsd más bajo. End of explanation """ #Verificando que exista la nueva carpeta para la conversión de trayectorias #nuevaruta = ruta+'/'+indir+'_XTC' nuevaruta = ruta+indir+'_Dinamica' print ( nuevaruta ) if not os.path.exists(nuevaruta): os.makedirs(nuevaruta) print ('Se ha creado la ruta ===>',nuevaruta) else: print ("La ruta "+nuevaruta+" existe..!!!") """ Explanation: Convirtiendo la trayectoria DCD -> XTC Los siguientes comandos convierten la trayectoria DCD contenida en la carpeta seleccionada a formato de XTC Crear la nueva ruta para enviar las trayectorias convertidas End of explanation """ print ('Obtenemos los archivos a convertir') #Buscamos el archivo DCD, PDB y PSF para realizar las operaciones for filename in os.listdir(ruta_old_traj): if filename.endswith('.dcd'): dcd_file=filename if filename.endswith('.psf'): psf_file=filename if filename.endswith('.pdb'): pdb_file=filename print ('pdb file =>', pdb_file) print ('psf file =>', psf_file) print ('dcd file =>', dcd_file) print ( 'Nos vemos a ....', ruta_old_traj ) os.chdir( ruta_old_traj ) print ('\nEjecutando CATDCD para convertir la trayectoria....') output_catdcd=!catdcd -otype trr -o output.trr $dcd_file print (output_catdcd.n) print ('\nEjecutando TRJCONV para convertir la trayectoria....') output_trjconv=!trjconv -f output.trr -o output.xtc -timestep 20 #print (output_trjconv.n) print ('\nBorrando archivos temporales de conversión...') output_rm=!rm output.trr print ('\nMoviendo los archivos de salida al directorio '+nuevaruta) source_file=ruta_old_traj+'/output.xtc' dest_file=nuevaruta+'/output.xtc' shutil.move(source_file,dest_file) print ('\Copiando el archivo ionized.pdb a '+nuevaruta) source_file=ruta_old_traj+'/ionized.pdb' dest_file=nuevaruta+'/ionized.pdb' shutil.copy(source_file,dest_file) print ('\nCopiando el archivo ionized.psf a '+nuevaruta) source_file=ruta_old_traj+'/ionized.psf' dest_file=nuevaruta+'/ionized.psf' shutil.copy(source_file,dest_file) print('\nTrayectoria convertida, regresando a '+ruta) os.chdir( ruta ) """ Explanation: Realizando la conversión de la trayectoria End of explanation """ print ('Visualizando la nueva trayectoria') file_psf=nuevaruta+'/'+psf_file traj = nuevaruta+'/output.xtc' !vmd $file_psf $traj """ 
Explanation: Cargando la nueva trayectoria en VMD para su revisión End of explanation """ ### Creando el directorio para el análisis del RMSD #Verificando que exista la nueva carpeta para la conversión de trayectorias #nuevaruta = ruta+'/'+indir+'_XTC' ruta_rmsd = nuevaruta+'/rmsd' print ( ruta_rmsd ) if not os.path.exists(ruta_rmsd): os.makedirs(ruta_rmsd) print ('Se ha creado la ruta ===>',ruta_rmsd) else: print ("La ruta "+ruta_rmsd+" existe..!!!") print ( 'Nos vamos a ....', ruta_rmsd ) os.chdir( ruta_rmsd ) """ Explanation: Calculando el RMSD con Gromacs 5 El siguiente script obtiene el RMSD de la trayectoria haciendo uso de Gromacs 5 Creando la carpeta de RMSD End of explanation """ print ('Ejecutando el análisis de rmsd...') !echo 3 3 | g_rms -f ../output.xtc -s ../ionized.pdb -a avgrp.xvg """ Explanation: Calculando el RMSD con la opción 3 'C-Alpha' Select group for least squares fit Group 3 ( C-alpha) Select a group: 3 Selected 3: 'C-alpha' Select group for RMSD calculation Group 3 ( C-alpha) Select a group: 3 Selected 3: 'C-alpha' End of explanation """ #Inicializando vector rmsd=[] try: archivo = open( 'rmsd.xvg' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in archivo.readlines(): fila = linea.strip() sl = fila.split() cadena=sl[0] if (not '#' in cadena) and (not '@' in cadena): num=float(sl[0]) #num2=float(sl[1]) num=num/1000 rmsd.append(repr(num)+'\t'+sl[1]+'\n') i=i+1 #Escribiendo el archivo RMSD f = open('rmsd.dat', 'w') #f.write('@ title "RMSD" \n') f.write('@ xaxis label " Time (ns)" \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 3.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label " RMSD (nm)" \n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 3.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') f.write('@ s0 line linewidth 1.5\n') f.write('@TYPE xy \n') #f.write('@ subtitle "C-alpha after lsq fit to C-alpha" \n') f.write("".join(rmsd)) f.close() #Cargando el archivo para visualizar en xmgrace !xmgrace rmsd.dat #Cargando la imagen generada en xmgrace Image(filename='rmsd.png') """ Explanation: Creando archivo rmsd.dat para su visualización en XMGRACE Se genera el archivo de salida rmsd.dat, éste se deberá visualizar con Xmgrace para guardarlo en formato PNG. 
End of explanation """ #Inicializando vector rmsd_residue=[] try: archivo_rmsd = open( 'aver.xvg' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=1 for linea in archivo_rmsd.readlines(): fila = linea.strip() sl = fila.split() cadena=sl[0] if (not '#' in cadena) and (not '@' in cadena): num=int(sl[0]) print ('Residuo =>',num+1) rmsd_residue.append(repr(num+1)+'\t'+sl[1]+'\n') i=i+1 #Escribiendo el archivo RMSD_RESIDUE f = open('rmsd_residue.dat', 'w') #f.write('@ title "C-alpha" \n') f.write('@ xaxis label "Residue" \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 3.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label " RMSD (nm)" \n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 3.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') f.write('@ s0 line linewidth 2.5\n') f.write('@ s0 symbol 1\n') f.write('@ s0 symbol size 1.000000\n') f.write('@ s0 symbol color 1\n') f.write('@ s0 symbol pattern 1\n') f.write('@ s0 symbol fill color 2\n') f.write('@ s0 symbol fill pattern 1\n') f.write('@ s0 symbol linewidth 1.0\n') f.write('@TYPE xy \n') f.write("".join(rmsd_residue)) f.close() !xmgrace rmsd_residue.dat #Cargando la imagen generada en xmgrace Image(filename='rmsd_residue.png') """ Explanation: Creando el archivo rmsd_residue.dat para visualizar con xmgrace Se crea el archivo rmsd_residue.dat formateado para su visualización en Xmgrace, en donde se deberá guardar como imagen PNG. End of explanation """ data_rmsd=np.loadtxt('rmsd.xvg',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) pl.plot(data_rmsd[:,0]/1000, data_rmsd[:,1], linewidth = 2, markeredgewidth=3, color='black') pl.xlabel("Time (ns)", fontsize = 40) pl.ylabel('RMSD (nm)', fontsize = 40) #pl.suptitle('RMSD', fontsize=50) #pl.title('C-alpha after lsq fit to C-alpha', fontsize=30) pl.xticks(fontsize=30) pl.yticks(fontsize=30) """ Explanation: Creando archivo rmsd.dat para su visualización en Matplotlib Se genera el gráfico de salida para matplotlib End of explanation """ data_rmsd_res=np.loadtxt('aver.xvg',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) pl.plot(data_rmsd_res[:,0]+1, data_rmsd_res[:,1], '-o', color='black', markersize=25, markerfacecolor='red',markeredgecolor='black',markeredgewidth=3, linewidth = 4, ) pl.xlabel("Residue", fontsize = 40) pl.ylabel('RMSD (nm)', fontsize = 40) #pl.title('C-alpha', fontsize=40) pl.xticks(fontsize=30) pl.yticks(fontsize=30) pl.xlim(0, len(data_rmsd_res[:,1])) """ Explanation: Creando archivo rmsd_residue.dat para su visualización en Matplotlib Se genera el gráfico de salida para matplotlib End of explanation """ ### Creando el directorio para el análisis del RMSF #Verificando que exista la nueva carpeta para la conversión de trayectorias ruta_rmsf = nuevaruta+'/rmsf' print ( ruta_rmsf ) if not os.path.exists(ruta_rmsf): os.makedirs(ruta_rmsf) print ('Se ha creado la ruta ===>',ruta_rmsf) else: print ("La ruta "+ruta_rmsf+" existe..!!!") print ( 'Nos vamos a ....', ruta_rmsf ) os.chdir( 
ruta_rmsf ) """ Explanation: RMSF Se crea una carpeta RMSF para guardar los archivos generados. End of explanation """ print ('Ejecutando el análisis de rmsf...') !echo 3 | g_rmsf -f ../output.xtc -s ../ionized.pdb -oq bfac.pdb -o rmsf.xvg -res """ Explanation: Calculando el RMSF con la opción 3 'C-Alpha' End of explanation """ #Inicializando vector rmsf=[] rmsf_x=[] rmsf_y=[] try: file_rmsf = open( 'rmsf.xvg' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in file_rmsf.readlines(): fila = linea.strip() sl = fila.split() cadena=sl[0] if (not '#' in cadena) and (not '@' in cadena): print ('Residue =>',cadena) rmsf.append(sl[0]+'\t'+sl[1]+'\n') rmsf_x.append(int(sl[0])) rmsf_y.append(float(sl[1])) i=i+1 file_rmsf.close() #Escribiendo el archivo RMSD f = open('rmsf.dat', 'w') #f.write('@ title "RMSF fluctuation" \n') f.write('@ xaxis label " Residue" \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 3.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label "RMSF (nm)" \n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 3.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') f.write('@ s0 line linewidth 2.5\n') f.write('@ s0 symbol 1\n') f.write('@ s0 symbol size 1.000000\n') f.write('@ s0 symbol color 1\n') f.write('@ s0 symbol pattern 1\n') f.write('@ s0 symbol fill color 2\n') f.write('@ s0 symbol fill pattern 1\n') f.write('@ s0 symbol linewidth 1.0\n') f.write('@TYPE xy \n') f.write("".join(rmsf)) f.close() !xmgrace rmsf.dat #Cargando la imagen generada en xmgrace Image(filename='rmsf.png') """ Explanation: Creando archivo rmsf.dat para su visualización en XMGRACE Se genera el archivo de salida rmsf.dat, éste se deberá visualizar con Xmgrace para guardarlo en formato PNG. 
End of explanation """ data_rmsf=np.loadtxt('rmsf.xvg',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) pl.plot(data_rmsf[:,0], data_rmsf[:,1], '-o', color='black', markersize=25, markerfacecolor='red',markeredgecolor='black',markeredgewidth=3, linewidth = 4, ) pl.xlabel("Residue", fontsize = 40) pl.ylabel('RMSF (nm)', fontsize = 40) #pl.title('RMSF Fluctuation', fontsize=40) pl.xticks(fontsize=30) pl.yticks(fontsize=30) pl.xlim(0, len(data_rmsf[:,1])) """ Explanation: Creando archivo rmsf.dat para su visualización en Matplotlib Se genera el gráfico de salida para matplotlib End of explanation """ #Inicializando vector bfactors=[] try: file_bfactor = open( 'bfac.pdb' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in file_bfactor.readlines(): fila = linea.strip() sl = fila.split() if (sl[0]=='ATOM'): #print (sl[0]) idresidue=fila[23:26] bfactor=fila[60:66] print (idresidue + '\t'+bfactor) bfactors.append(idresidue+'\t'+bfactor+'\n') #i=i+1 #Escribiendo el archivo BFACTOR.dat f = open('bfactor.dat', 'w') #f.write('@ title "B-factors" \n') foo = 'baz "\\"' f.write('@ xaxis label " Residue" \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 3.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label "B-factors (' +"\\"+'cE'+"\\"+'C)"\n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 3.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') f.write('@ s0 line linewidth 2.5\n') f.write('@ s0 symbol 1\n') f.write('@ s0 symbol size 1.000000\n') f.write('@ s0 symbol color 1\n') f.write('@ s0 symbol pattern 1\n') f.write('@ s0 symbol fill color 2\n') f.write('@ s0 symbol fill pattern 1\n') f.write('@ s0 symbol linewidth 1.0\n') f.write('@TYPE xy \n') f.write("".join(bfactors)) f.close() !xmgrace bfactor.dat #Cargando la imagen generada en xmgrace Image(filename='bfactor.png') """ Explanation: B-factors Generando archivo para visualizarlo con XMGRACE End of explanation """ #Inicializando vector bfactors=[] try: file_bfactor = open( 'bfac.pdb' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 print ('Residuo' + '\t'+'bfactor') for linea in file_bfactor.readlines(): fila = linea.strip() sl = fila.split() if (sl[0]=='ATOM'): #print (sl[0]) idresidue=fila[23:26] bfactor=fila[60:66] print (idresidue + '\t'+bfactor) bfactors.append(idresidue+'\t'+bfactor+'\n') #i=i+1 #Escribiendo el archivo BFACTOR.dat f = open('bfactor.dat', 'w') f.write("".join(bfactors)) f.close() data_bfactor=np.loadtxt('bfactor.dat',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes #ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) pl.plot(data_bfactor[:,0], data_bfactor[:,1], '-o', color='black', markersize=25, markerfacecolor='red',markeredgecolor='black',markeredgewidth=3, linewidth = 4, ) pl.xlabel('Residue', fontsize = 40) pl.ylabel('B-factors ('+ r'$\AA$'+')' , fontsize = 40) #pl.title('B-Factors', fontsize=40) pl.xticks(fontsize=30) pl.yticks(fontsize=30) pl.xlim(0, len(data_bfactor[:,1])) """ Explanation: Generando archivo para visualizar con Matplotlib End 
of explanation """ ### Creando el directorio para el análisis del RMSF #Verificando que exista la nueva carpeta para la conversión de trayectorias ruta_ss = nuevaruta+'/estructura' print ( ruta_ss ) if not os.path.exists(ruta_ss): os.makedirs(ruta_ss) print ('Se ha creado la ruta ===>',ruta_ss) else: print ("La ruta "+ruta_ss+" existe..!!!") print ( 'Nos vamos a ....', ruta_ss ) os.chdir( ruta_ss ) """ Explanation: Secondary Structure Se crea la carpeta para cálculo de la estructura End of explanation """ print ('Ejecutando el análisis de esctructura secundaria...') !echo 5 | do_dssp -f ../output.xtc -s ../ionized.pdb -o sec_est.xpm -tu ns print ('\n Convirtiendo el archivo a ps...') !xpm2ps -f sec_est.xpm -by 6 -bx .1 -o est_sec.eps print('\nConvirtiendo a png...') !convert -density 600 est_sec.eps -resize 1024x1024 est_sec.png print ('Cargando el archivo...') Image(filename='est_sec.png', width=1024) """ Explanation: Calculando la estructura secundaria Se necesita contar con el programa dssp en la ruta /usr/local/bin, el cual se enlaza con Gromacs 5 End of explanation """ ### Creando el directorio para el análisis del r-gyro #Verificando que exista la nueva carpeta para la conversión de trayectorias ruta_rgyro = nuevaruta+'/rgyro' print ( ruta_rgyro ) if not os.path.exists(ruta_rgyro): os.makedirs(ruta_rgyro) print ('Se ha creado la ruta ===>',ruta_rgyro) else: print ("La ruta "+ruta_rgyro+" existe..!!!") print ( 'Nos vamos a ....', ruta_rgyro) os.chdir( ruta_rgyro ) """ Explanation: R-GYRATE Se crea una carpeta rgiro para guardar los archivos generados. End of explanation """ print ('Ejecutando el análisis de rgyro...') !echo 3 | g_gyrate -f ../output.xtc -s ../ionized.pdb -o gyrate.xvg """ Explanation: Calculando el r-gyro con la opción (3) - C-alpha Se calcula para los carbonos alfa. 
End of explanation """ #Inicializando vector rgyro=[] try: file_rmsf = open( 'gyrate.xvg' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in file_rmsf.readlines(): fila = linea.strip() sl = fila.split() cadena=sl[0] if (not '#' in cadena) and (not '@' in cadena): num=float(sl[0]) #num2=float(sl[1]) num=num/1000 rgyro.append(repr(num)+'\t'+sl[1]+'\n') i=i+1 #Escribiendo el archivo RGYRO.DAT f = open('rgyro.dat', 'w') #f.write('@ title "Radius of gyration" \n') f.write('@ xaxis label " Time (ns)" \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 3.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label "Rg (nm)" \n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 3.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') f.write('@ s0 line linewidth 2.5\n') f.write('@TYPE xy \n') f.write("".join(rgyro)) f.close() !xmgrace rgyro.dat #Cargando la imagen generada en xmgrace Image(filename='rgyro.png') """ Explanation: Generando el archivo rgyro.dat para su análisis con XMGRACE End of explanation """ data_rgyro=np.loadtxt('gyrate.xvg',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) pl.plot(data_rgyro[:,0]/1000, data_rgyro[:,1], linewidth = 2, color='black') pl.xlabel("Time (ns)", fontsize = 40) pl.ylabel('Rg (nm)', fontsize = 40) #pl.suptitle('Radius of gyration', fontsize=50) pl.xticks(fontsize=30) pl.yticks(fontsize=30) """ Explanation: Ploteando el archivo gyrate.xvg con matplotlib End of explanation """ ### Creando el directorio para el análisis del RMSF #Verificando que exista la nueva carpeta para la conversión de trayectorias ruta_helix = nuevaruta+'/rmsd_helix' print ( ruta_helix ) if not os.path.exists(ruta_helix): os.makedirs(ruta_helix) print ('Se ha creado la ruta ===>',ruta_helix) else: print ("La ruta "+ruta_helix+" existe..!!!") print ( 'Nos vamos a ....', ruta_helix) os.chdir( ruta_helix ) """ Explanation: RMSD Helix Alfa Para realizar este análisis se debe cargar el pdb original de la proteina que se encuentra en la carpeta 01_BUILD. Cargarlo con VMD y dirigirse al Menú EXTENSIONS -> ANALYSIS -> SEQUENCE VIEWER, en la cual se tomará el rango de átomos del campo Struct (H), el cual se proporcionará de la forma "resid X1 to X2" donde X1 es primer átomo de la helix y X2 el último átomo de la helix. 
End of explanation """ num=input('Número de hélices con las que cuenta la proteína:') print (num) if (int(num)==1): indices_ha1=input('Proporciona el rango de índices de la Hélice 1:') print (indices_ha1) r_helix_1=1 r_helix_2=0 r_helix_3=0 r_helix_4=0 if (int(num)==2): indices_ha1=input('Proporciona el rango de índices de la Hélice 1:') print (indices_ha1) indices_ha2=input('Proporciona el rango de índices de la Hélice 2:') print (indices_ha2) r_helix_1=1 r_helix_2=1 r_helix_3=0 r_helix_4=0 if (int(num)==3): indices_ha1=input('Proporciona el rango de índices de la Hélice 1:') print (indices_ha1) indices_ha2=input('Proporciona el rango de índices de la Hélice 2:') print (indices_ha2) indices_ha3=input('Proporciona el rango de índices de la Hélice 3:') print (indices_ha3) r_helix_1=1 r_helix_2=1 r_helix_3=1 r_helix_4=0 if (int(num)==4): indices_ha1=input('Proporciona el rango de índices de la Hélice 1:') print (indices_ha1) indices_ha2=input('Proporciona el rango de índices de la Hélice 2:') print (indices_ha2) indices_ha3=input('Proporciona el rango de índices de la Hélice 3:') print (indices_ha3) indices_ha4=input('Proporciona el rango de índices de la Hélice 4:') print (indices_ha4) r_helix_1=1 r_helix_2=1 r_helix_3=1 r_helix_4=1 #Script para vmd de la Hélice Alfa 2 psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file if (r_helix_1==1): f = open('ha1.tcl', 'w') print(f) f.write('set psfFile '+ psf+' \n') f.write('set dcdFile '+ dcd+' \n') f.write('\nmol load psf $psfFile dcd $dcdFile\n') f.write('set outfile ' +'[open ' +'rmsd_ha1.dat'+' w]\n') f.write('set nf [molinfo top get numframes]\n') f.write('\n#RMSD calculation loop\n') f.write('set f1 [atomselect top "'+indices_ha1+' " frame 0]\n') f.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f.write(' set sel [atomselect top "'+indices_ha1+' " frame $i]\n') f.write(' $sel move [measure fit $sel $f1]\n') f.write(' set time [expr {$i +1}]\n') f.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f.write(' puts $outfile "$time $time"\n') f.write('}\n') f.write('close $outfile') f.close() if (r_helix_2==1): f = open('ha2.tcl', 'w') print(f) f.write('set psfFile '+ psf+' \n') f.write('set dcdFile '+ dcd+' \n') f.write('\nmol load psf $psfFile dcd $dcdFile\n') f.write('set outfile ' +'[open ' +'rmsd_ha2.dat'+' w]\n') f.write('set nf [molinfo top get numframes]\n') f.write('\n#RMSD calculation loop\n') f.write('set f1 [atomselect top "'+indices_ha2+' " frame 0]\n') f.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f.write(' set sel [atomselect top "'+indices_ha2+' " frame $i]\n') f.write(' $sel move [measure fit $sel $f1]\n') f.write(' set time [expr {$i +1}]\n') f.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f.write(' puts $outfile "$time $time"\n') f.write('}\n') f.write('close $outfile') f.close() if (r_helix_3==1): f = open('ha3.tcl', 'w') print(f) f.write('set psfFile '+ psf+' \n') f.write('set dcdFile '+ dcd+' \n') f.write('\nmol load psf $psfFile dcd $dcdFile\n') f.write('set outfile ' +'[open ' +'rmsd_ha3.dat'+' w]\n') f.write('set nf [molinfo top get numframes]\n') f.write('\n#RMSD calculation loop\n') f.write('set f1 [atomselect top "'+indices_ha3+' " frame 0]\n') f.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f.write(' set sel [atomselect top "'+indices_ha3+' " frame $i]\n') f.write(' $sel move [measure fit $sel $f1]\n') f.write(' set time [expr {$i +1}]\n') f.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f.write(' puts $outfile "$time $time"\n') f.write('}\n') 
f.write('close $outfile') f.close() if (r_helix_4==1): f = open('ha4.tcl', 'w') print(f) f.write('set psfFile '+ psf+' \n') f.write('set dcdFile '+ dcd+' \n') f.write('\nmol load psf $psfFile dcd $dcdFile\n') f.write('set outfile ' +'[open ' +'rmsd_ha4.dat'+' w]\n') f.write('set nf [molinfo top get numframes]\n') f.write('\n#RMSD calculation loop\n') f.write('set f1 [atomselect top "'+indices_ha4+' " frame 0]\n') f.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f.write(' set sel [atomselect top "'+indices_ha4+' " frame $i]\n') f.write(' $sel move [measure fit $sel $f1]\n') f.write(' set time [expr {$i +1}]\n') f.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f.write(' puts $outfile "$time $time"\n') f.write('}\n') f.write('close $outfile') f.close() if (r_helix_1==1): #Calculando con VMD hélice 1 !vmd -dispdev text < ha1.tcl if (r_helix_2==1): #Calculando con VMD hélice 2 !vmd -dispdev text < ha2.tcl if (r_helix_3==1): #Calculando con VMD hélice 3 !vmd -dispdev text < ha3.tcl if (r_helix_4==1): #Calculando con VMD hélice 4 !vmd -dispdev text < ha4.tcl if (int(num)==1): #Graficando data_ha1=np.loadtxt('rmsd_ha1.dat',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) #pl.plot(data_ha1[:,0], data_ha1[:,1], linewidth = 3) pl.plot(data_ha1[:,1]*0.02, data_ha1[:,0]/10, linewidth = 3, color='black') pl.xlabel("Time (ns)", fontsize = 40) pl.ylabel('RMSD (nm)', fontsize = 40) #pl.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #pl.title('RMSD Helix Alfa', fontsize=50) pl.xticks(fontsize=30) pl.yticks(fontsize=30) if (int(num)==2): #Graficando data_ha1=np.loadtxt('rmsd_ha1.dat',comments=['#', '@']) data_ha2=np.loadtxt('rmsd_ha2.dat',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) #pl.plot(data_ha1[:,0], data_ha1[:,1], linewidth = 3) pl.plot(data_ha1[:,1]*0.02, data_ha1[:,0]/10, linewidth = 3, color='black') pl.plot(data_ha2[:,1]*0.02, data_ha2[:,0]/10, linewidth = 3, color='red') pl.xlabel("Time (ns)", fontsize = 40) pl.ylabel('RMSD (nm)', fontsize = 40) #pl.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #pl.title('RMSD Helix Alfa', fontsize=50) pl.xticks(fontsize=30) pl.yticks(fontsize=30) if (int(num)==3): #Graficando data_ha1=np.loadtxt('rmsd_ha1.dat',comments=['#', '@']) data_ha2=np.loadtxt('rmsd_ha2.dat',comments=['#', '@']) data_ha3=np.loadtxt('rmsd_ha3.dat',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) #pl.plot(data_ha1[:,0], data_ha1[:,1], linewidth = 3) pl.plot(data_ha1[:,1]*0.02, data_ha1[:,0]/10, linewidth = 3, color='black') pl.plot(data_ha2[:,1]*0.02, data_ha2[:,0]/10, linewidth = 3, color='red') pl.plot(data_ha3[:,1]*0.02, data_ha3[:,0]/10, linewidth = 3, color='green') pl.xlabel("Time (ns)", fontsize = 40) pl.ylabel('RMSD (nm)', fontsize = 40) #pl.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
#pl.title('RMSD Helix Alfa', fontsize=50) pl.xticks(fontsize=30) pl.yticks(fontsize=30) if (int(num)==4): #Graficando data_ha1=np.loadtxt('rmsd_ha1.dat',comments=['#', '@']) data_ha2=np.loadtxt('rmsd_ha2.dat',comments=['#', '@']) data_ha3=np.loadtxt('rmsd_ha3.dat',comments=['#', '@']) data_ha4=np.loadtxt('rmsd_ha4.dat',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f')) #pl.plot(data_ha1[:,0], data_ha1[:,1], linewidth = 3) pl.plot(data_ha1[:,1]*0.02, data_ha1[:,0]/10, linewidth = 3, color='black') pl.plot(data_ha2[:,1]*0.02, data_ha2[:,0]/10, linewidth = 3, color='red') pl.plot(data_ha3[:,1]*0.02, data_ha3[:,0]/10, linewidth = 3, color='green') pl.plot(data_ha4[:,1]*0.02, data_ha4[:,0]/10, linewidth = 3, color='blue') pl.xlabel("Time (ns)", fontsize = 40) pl.ylabel('RMSD (A)', fontsize = 40) #pl.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #pl.title('RMSD Helix Alfa', fontsize=50) pl.xticks(fontsize=30) pl.yticks(fontsize=30) """ Explanation: Entrada de datos Para la entrada se deberá dar con la opción "resid X to X". End of explanation """ ### Creando el directorio para el análisis del SASA ### NOTA: se calcula con gromacs4 ya que arroja bien los resultados comparado con gromacs5 ruta_sasa = nuevaruta+'/sasa' print ( ruta_sasa ) if not os.path.exists(ruta_sasa): os.makedirs(ruta_sasa) print ('Se ha creado la ruta ===>',ruta_sasa) else: print ("La ruta "+ruta_sasa+" existe..!!!") print ( 'Nos vamos a ....', ruta_sasa ) os.chdir( ruta_sasa ) """ Explanation: SASA Creando la estructura de carpeta para el cálculo End of explanation """ print ('Ejecutando el análisis de sasa con Gromacs 4 utilizando la opción 1 (protein)...') !echo 1 1 | /opt/gromacs4/bin/g_sas -f ../output.xtc -s ../ionized.pdb -o solven-accessible-surface.xvg -oa atomic-sas.xvg -or residue-sas.xvg """ Explanation: Ejecutando el análisis de SASA con Gromacs4 End of explanation """ #Inicializando vector sasa_residuo=[] try: residue_sas = open( 'residue-sas.xvg' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in residue_sas.readlines(): fila = linea.strip() sl = fila.split() cadena=sl[0] if (not '#' in cadena) and (not '@' in cadena): print ('Residue =>',cadena) sasa_residuo.append(sl[0]+'\t'+sl[1]+'\n') i=i+1 #Escribiendo el archivo RMSD f = open('sasa-residuo.dat', 'w') #f.write('@ title "Area per residue over the trajectory" \n') f.write('@ xaxis label " Residue " \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 3.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label "Area (nm' +"\\"+'S2'+"\\N"+')"\n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 3.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') f.write('@ s0 line linewidth 2.5\n') f.write('@ s0 symbol 1\n') f.write('@ s0 symbol size 1.000000\n') f.write('@ s0 symbol color 1\n') f.write('@ s0 symbol pattern 1\n') f.write('@ s0 symbol fill color 2\n') f.write('@ s0 symbol fill pattern 1\n') f.write('@ s0 symbol linewidth 1.0\n') f.write('@TYPE xy \n') f.write("".join(sasa_residuo)) f.close() !xmgrace sasa-residuo.dat #Cargando la imagen generada en xmgrace Image(filename='sasa-residuo.png') """ Explanation: Creando el archivo sasa_residuo.dat para salida con XMGRACE End of explanation """ 
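(Aside, not part of the original workflow.) The cells above rebuild essentially the same Grace header by hand for every .xvg file (rmsd.dat, rmsf.dat, rgyro.dat, bfactor.dat, sasa-residuo.dat). Below is a minimal sketch of a reusable helper for that pattern; the function name and the example labels are ours, np.loadtxt already skips the '#'/'@' header lines exactly as the manual parsing loops above do, and scale_x covers the ps-to-ns conversion applied in some of the cells.

import numpy as np

def xvg_to_grace_dat(xvg_path, dat_path, xlabel, ylabel, scale_x=1.0):
    # Read a Gromacs .xvg file, skipping its '#'/'@' header lines.
    data = np.atleast_2d(np.loadtxt(xvg_path, comments=['#', '@']))
    header = [
        '@ xaxis label "{}"\n'.format(xlabel),
        '@ xaxis label char size 1.480000\n',
        '@ xaxis bar linewidth 3.0\n',
        '@ xaxis ticklabel char size 1.480000\n',
        '@ yaxis label "{}"\n'.format(ylabel),
        '@ yaxis label char size 1.480000\n',
        '@ yaxis bar linewidth 3.0\n',
        '@ yaxis ticklabel char size 1.480000\n',
        '@ s0 line linewidth 2.5\n',
        '@TYPE xy\n',
    ]
    # Write the first two columns (x optionally rescaled, e.g. ps -> ns).
    with open(dat_path, 'w') as f:
        f.writelines(header)
        for row in data:
            f.write('{:g}\t{:g}\n'.format(row[0] * scale_x, row[1]))

# Example (labels are illustrative; the Grace escape codes give the superscript):
# xvg_to_grace_dat('residue-sas.xvg', 'sasa-residuo-auto.dat',
#                  'Residue', 'Area (nm\\S2\\N)')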
data_sasa_residue=np.loadtxt('residue-sas.xvg',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f')) pl.plot(data_sasa_residue[:,0], data_sasa_residue[:,1], '-o', color='black', markersize=25, markerfacecolor='red',markeredgecolor='black',markeredgewidth=3, linewidth = 4, ) pl.xlabel("Residue", fontsize = 30) #pl.ylabel('Area (nm2)', fontsize = 30) pl.ylabel('Area ( nm'+ r'$\ ^2$'+')' , fontsize = 40) #pl.title('Area per residue over the trajectory', fontsize=40) pl.xticks(fontsize=30) pl.yticks(fontsize=30) pl.xlim(0, len(data_sasa_residue[:,1])) """ Explanation: Cargando archivo residue-sas.xvg para su visualización en Matplotlib Se genera el gráfico de salida para matplotlib End of explanation """ #Inicializando vector sasa=[] try: sasafile = open( 'solven-accessible-surface.xvg' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in sasafile.readlines(): fila = linea.strip() sl = fila.split() cadena=sl[0] if (not '#' in cadena) and (not '@' in cadena): #print (cadena) num=float(sl[0]) num=num/1000 sasa.append(repr(num)+'\t'+sl[1]+'\t'+sl[2]+'\t'+sl[3]+'\n') i=i+1 cel2=float(sl[2]) print(cel2) #Escribiendo el archivo RMSD f = open('sasa.dat', 'w') #f.write('@ title "Solven Accessible Surface" \n') f.write('@ xaxis label " Time (ns) " \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 3.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label "Area (nm' +"\\"+'S2'+"\\N"+')"\n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 3.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') #f.write('@ s0 legend "Hydrophobic"\n') #if (cel2>0): #f.write('@ s1 legend "Hydrophilic"\n') f.write('@TYPE xy \n') f.write("".join(sasa)) f.close() !xmgrace sasa.dat #Cargando la imagen generada en xmgrace Image(filename='sasa.png') """ Explanation: Creando el archivo sasa.dat para salida con XMGRACE End of explanation """ data_sasa=np.loadtxt('solven-accessible-surface.xvg',comments=['#', '@']) #Engrosar marco fig=pl.figure(figsize=(20, 12), dpi=100, linewidth=3.0) ax = fig.add_subplot(111) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes #ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f')) pl.xlabel("Time (ns)", fontsize = 40) pl.ylabel('Area ( nm'+ r'$\ ^2$'+')' , fontsize = 40) #pl.title('Solvent Accessible Surface', fontsize=50) pl.xticks(fontsize=30) pl.yticks(fontsize=30) dato=data_sasa[:,2] dato2=dato[0] if (dato2>0): pl.plot(data_sasa[:,0]/1000, data_sasa[:,1], linewidth = 2, color='black') pl.plot(data_sasa[:,0]/1000, data_sasa[:,2], linewidth = 2, color='red') else: pl.plot(data_sasa[:,0]/1000, data_sasa[:,1], linewidth = 2, color='black') """ Explanation: Cargando archivo solven-accessible-surface.xvg para graficar con Matplotlib End of explanation """ ### Creando el directorio para el análisis del SASA ### NOTA: se calcula con gromacs4 ya que arroja bien los resultados comparado con gromacs5 ruta_m_rmsd = nuevaruta+'/matriz' print ( ruta_m_rmsd ) if not os.path.exists(ruta_m_rmsd): os.makedirs(ruta_m_rmsd) print ('Se ha creado la ruta ===>',ruta_m_rmsd) else: print ("La ruta "+ruta_m_rmsd+" existe..!!!") print ( 'Nos vamos a ....', ruta_m_rmsd ) os.chdir( ruta_m_rmsd ) print 
('\nCopiando el archivo rmsd_matrix.tcl a '+ruta_m_rmsd) source_file=ruta_scripts+'/rmsd_matriz/rmsd_matrix.tcl' dest_file=ruta_m_rmsd+'/rmsd_matrix.tcl' shutil.copy(source_file,dest_file) #print ( 'Nos vemos a ....', ruta_old_traj ) #os.chdir( ruta_old_traj ) file_dcd=ruta_old_traj+'/'+dcd_file file_psf=ruta_old_traj+'/'+psf_file print (file_dcd) print ('\nEjecutando CATDCD para obtener 100 frames de la trayectoria original....') output_catdcd=!catdcd -o 100.dcd -stride 50 $file_dcd print (output_catdcd.n) """ Explanation: MATRIZ DE RMSD End of explanation """ #Arrancando VMD para cargar el script rmsd_matrix.tcl !vmd 100.dcd $file_psf ruta_matriz=os.getcwd() if os.path.isfile('salida.dat'): print ('El archivo salida.dat existe') else: print ('El archivo salida.dat no existe.. ejecutar desde MATRIZ DE RMSD...') """ Explanation: Cargar el scrit rmsd_matrix con vmd en la nueva trayectoria Arrancar VMD, dirigirse al manú Extensions -> Tk Console, copiar y ejecutar la siguiente secuencia de comandos: tcl source rmsd_matrix.tcl rmsd_matrix -mol top -seltext "name CA" -frames all -o salida.dat exit End of explanation """ #Creando el gráfico data_matriz=np.loadtxt('salida.dat',comments=['#', '@']) print(data_matriz.shape) pl.figure(figsize=(20, 12), dpi=100) imgplot = pl.imshow(data_matriz, origin='lower', cmap=pl.cm.Greens, interpolation='nearest') #imgplot = pl.imshow(data_matriz, origin='lower', cmap=pl.cm.coolwarm, interpolation='nearest') pl.xlabel("Time (ns)", fontsize = 30) pl.ylabel('Time (ns)', fontsize = 30) #pl.suptitle('RMSD', fontsize=50) #pl.title('C-Alpha RMSD matrix', fontsize=40) pl.xticks(fontsize=20) pl.yticks(fontsize=20) pl.xlim(0, 100) pl.ylim(0, 100) pl.colorbar() """ Explanation: Graficando el archivo de salida End of explanation """ ### Creando el directorio para el análisis del RMSF #Verificando que exista la nueva carpeta para la conversión de trayectorias ruta_matriz_dm = nuevaruta+'/matriz_dm' print ( ruta_matriz_dm ) if not os.path.exists(ruta_matriz_dm): os.makedirs(ruta_matriz_dm) print ('Se ha creado la ruta ===>',ruta_matriz_dm) else: print ("La ruta "+ruta_matriz_dm+" existe..!!!") print ( 'Nos vamos a ....', ruta_matriz_dm ) os.chdir( ruta_matriz_dm ) """ Explanation: Matriz de distancia mínima End of explanation """ !echo 4 | g_mdmat -f ../output.xtc -s ../ionized.pdb -mean average -frames frames -dt 10000 """ Explanation: Calculando la matriz de distancia mínima Seleccionar el backbone (opción 4) End of explanation """ !xpm2ps -f frames.xpm -o frames.eps !xpm2ps -f average.xpm -o average.eps print('\nConvirtiendo a png...') !convert -density 600 frames.eps -resize 1024x1024 frames.png !convert -density 600 average.eps -resize 1024x1024 average.png print ('Cargando el archivo average...') Image(filename='average.png', width=800) """ Explanation: Generando los archivos para visualizarlos End of explanation """ ### Creando el directorio para el análisis de la libre energía ruta_f_energy = nuevaruta+'/free_energy' print ( ruta_f_energy ) if not os.path.exists(ruta_f_energy): os.makedirs(ruta_f_energy) print ('Se ha creado la ruta ===>',ruta_f_energy) else: print ("La ruta "+ruta_f_energy+" existe..!!!") print ( 'Nos vamos a ....', ruta_f_energy ) os.chdir( ruta_f_energy ) #Solicita la temperatura t=input('Temperatura a la cual se realizó la simulación:') temperatura=int(t) print ('Temperatura=>',temperatura) """ Explanation: Free Energy Para el cálculo de la energía libre se requiere el valor mínimo y máximo del RMSD y del radio de gyro, así como el valor de la 
temperatura a la cual se realizó la simulación. Estos datos son de entrada para el script del cálculo del mismo. End of explanation """ print ('Ejecutando el análisis de rmsd...') !echo 3 3 | g_rms -f ../output.xtc -s ../ionized.pdb -a avgrp.xvg print ('Ejecutando el análisis de rgyro...') !echo 3 | g_gyrate -f ../output.xtc -s ../ionized.pdb -o gyrate.xvg """ Explanation: Calculando el rmsd y el r-gyro para obtener el mínimo y máximo de cada uno de ellos. End of explanation """ print ('\nCopiando el archivo generateFES.py a '+ruta_f_energy) source_file=ruta_scripts+'/free_energy/generateFES.py' dest_file=ruta_f_energy+'/generateFES.py' shutil.copy(source_file,dest_file) #Cambiando permisos de ejecución !chmod +x generateFES.py """ Explanation: Escribiendo script a /tmp para utilizar en el cálculo End of explanation """ #Cargando valores del RMSD data_rmsd=np.loadtxt('rmsd.xvg',comments=['#', '@']) #Cargnaod valores del R-GYRO data_rgyro=np.loadtxt('gyrate.xvg',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del rmsd min_rmsd=np.amin(data_rmsd[:,1]) max_rmsd=np.amax(data_rmsd[:,1]) print ('Minimo RMSD=>',min_rmsd) print ('Máximo RMSD=>',max_rmsd) #Obteniendo los valores máximo y mínimo del r-gyro min_rgyro=np.amin(data_rgyro[:,1]) max_rgyro=np.amax(data_rgyro[:,1]) print ('Minimo RGYRO=>',min_rgyro) print ('Máximo RGYRO=>',max_rgyro) #Creando los archivos de entrada para el script np.savetxt('rmsd.dat',data_rmsd[:,1], fmt='%1.7f') np.savetxt('rgyro.dat',data_rgyro[:,1], fmt='%1.7f') !paste rgyro.dat rmsd.dat > fes.dat #Ejecutando el script de FES !python generateFES.py fes.dat $min_rgyro $max_rgyro $min_rmsd $max_rmsd 200 200 $temperatura FEES.dat #Cargando el archivo generado para plotear con matplotlib data_fes=np.loadtxt('FEES.dat',comments=['#', '@']) """ Explanation: Realizando los cálculos para la Free Energy End of explanation """ # This loads the magics for gnuplot %load_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "free_energy.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "Rg (nm) set ylabel "RMSD (nm)" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "FEES.dat" with pm3d """ Explanation: Ploteando con GNUplot End of explanation """ ### Creando el directorio para el análisis del PCA ruta_pca = nuevaruta+'/pca' print ( ruta_pca ) if not os.path.exists(ruta_pca): os.makedirs(ruta_pca) print ('Se ha creado la ruta ===>',ruta_pca) else: print ("La ruta "+ruta_pca+" existe..!!!") print ( 'Nos vamos a ....', ruta_pca ) os.chdir( ruta_pca ) #Calculando matriz de covarianza !echo 1 1 | g_covar -s ../ionized.pdb -f ../output.xtc -o eigenvalues.xvg -v eigenvectors.trr -xpma covar.xpm """ Explanation: PCA End of explanation """ !echo 1 1 | g_anaeig -s ../ionized.pdb -f ../output.xtc -v eigenvectors.trr -eig eigenvalues.xvg -first 1 -last 2 -2d 2dproj_1_2.xvg #pcaX, pcaY=np.loadtxt('2dproj_1_2.xvg',comments=['#', '@'], unpack=True) data_pca=np.loadtxt('2dproj_1_2.xvg',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del pca min_pcaX=np.amin(data_pca[:,0]) max_pcaX=np.amax(data_pca[:,0]) print ('Minimo PCA_X=>',min_pcaX) print ('Máximo PCA_X=>',max_pcaX) 
min_pcaY=np.amin(data_pca[:,1]) max_pcaY=np.amax(data_pca[:,1]) print ('Minimo PCA_Y=>',min_pcaY) print ('Máximo PCA_Y=>',max_pcaY) #Creando los archivos de entrada para el script np.savetxt('PCA.dat',data_pca, fmt='%1.5f') #Copiando el script generateFES de la carpeta Free_energy print ('\nCopiando el archivo generateFES.py a '+ruta_pca+ ' desde '+ ruta_f_energy) source_file=ruta_f_energy+'/generateFES.py' dest_file=ruta_pca+'/generateFES.py' shutil.copy(source_file,dest_file) #Ejecutando el script de FES !python generateFES.py PCA.dat $min_pcaX $max_pcaX $min_pcaY $max_pcaY 200 200 $temperatura FEES_PCA.dat """ Explanation: Una vez calculada la matriz el eigenvalues y eigenvectors sirven de entrada para generar el pca. El siguiente comando representa el movimiento del primer y segundo eigenvector. End of explanation """ #Volver a cargar el kernel de gnuplot para limpiar su buffer %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "pca.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "projection on eigenvector 1 (nm)" set ylabel "projection on eigenvector 2 (nm)" set title " " ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "FEES_PCA.dat" with pm3d """ Explanation: Ploteando el archivo con gnuplot End of explanation """ from htmd import * """ Explanation: Análisis de puentes di sulfuro Este aplica para 2 puente, para lo cual se utiliza el software HTMD. End of explanation """ ### Creando el directorio para el análisis de los RMSD de los puentes ruta_rmsd_diedros = nuevaruta+'/rmsd_diedros' print ( ruta_rmsd_diedros ) if not os.path.exists(ruta_rmsd_diedros): os.makedirs(ruta_rmsd_diedros) print ('Se ha creado la ruta ===>',ruta_rmsd_diedros) else: print ("La ruta "+ruta_rmsd_diedros+" existe..!!!") print ( 'Nos vamos a ....', ruta_rmsd_diedros) os.chdir( ruta_rmsd_diedros ) """ Explanation: Creando la ruta Ruta para el análisis de los datos. End of explanation """ # Cargando la molécula mol = Molecule('../ionized.pdb') # Solicitando los datos de entrada px1l=input('Índice del DB1 izquierdo:') px1r=input('Índice del DB1 derecho:') px2l=input('Índice del DB2 izquierdo:') px2r=input('Índice del DB2 derecho:') revisa1=1 revisa2=1 """ Explanation: Cargando de los puentes di sulfuro Para este análisis se deberá revisar el archivo psf_charmm.tcl de la carpeta 01_BUILD, en el cual se tiene la definición de los puentes como la siguiente: patch DISU A:4 A:22 patch DISU A:8 A:18 El número del puente se determinará de acuerdo al orden en que se encuentran definidos en este archivo, por ejemplo, la nota anterior: DB1 4-22 DB2 8-18 La entrada de datos será por los índices del lado izquierdo y derecho respectivamente, con los cuales se creará la estructura completa de cada uno de ellos tomando los valores de los indices para su respectivo análisis. 
End of explanation """ if (revisa1>0): #Obteniendo lado izquierdo del DB1 x1l_name=mol.get('name','resname CYS and noh and resid '+px1l) x1l_index=mol.get('index','resname CYS and noh and resid '+px1l) x1l_resid=mol.get('resid','resname CYS and noh and resid '+px1l) #Obteniendo lado derecho del DB1 x1r_name=mol.get('name','resname CYS and noh and resid '+px1r) x1r_index=mol.get('index','resname CYS and noh and resid '+px1r) x1r_resid=mol.get('resid','resname CYS and noh and resid '+px1r) if (revisa2>0): #Obteniendo el lado izquierdo del DB2 x2l_name=mol.get('name','resname CYS and noh and resid '+px2l) x2l_index=mol.get('index','resname CYS and noh and resid '+px2l) x2l_resid=mol.get('resid','resname CYS and noh and resid '+px2l) #Obteniendo el lado derecho del DB2 x2r_name=mol.get('name','resname CYS and noh and resid '+px2r) x2r_index=mol.get('index','resname CYS and noh and resid '+px2r) x2r_resid=mol.get('resid','resname CYS and noh and resid '+px2r) #Obteniendo la lista de índices de los puentes print ('Generando la lista de los índices para enviarlos') db1x1l=[] db1x2l=[] db1x3m=[] db1x2r=[] db1x1r=[] db1l_name_l=[] db1l_index_l=[] db1r_name_l=[] db1r_index_l=[] db2l_name_l=[] db2l_index_l=[] db2r_name_l=[] db2r_index_l=[] db3l_name_l=[] db3l_index_l=[] db3r_name_l=[] db3r_index_l=[] if (revisa1>0): #Obteniendo los índices del DB1 for i in range(len(x1l_name)): if (x1l_name[i]=='N' or x1l_name[i]=='CA' or x1l_name[i]=='CB' or x1l_name[i]=='SG'): db1l_name_l.append(str(x1l_name[i])) db1l_index_l.append(str(x1l_index[i])) for i in range(len(x1r_name)): if (x1r_name[i]=='N' or x1r_name[i]=='CA' or x1r_name[i]=='CB' or x1r_name[i]=='SG'): db1r_name_l.append(str(x1r_name[i])) db1r_index_l.append(str(x1r_index[i])) print ('DB1 X1L =>',db1l_name_l) print (db1l_index_l) print ('DB1 X1R =>',db1r_name_l) print (db1r_index_l) if (revisa2>0): #Obteniendo los índices del DB2 for i in range(len(x2l_name)): if (x2l_name[i]=='N' or x2l_name[i]=='CA' or x2l_name[i]=='CB' or x2l_name[i]=='SG'): db2l_name_l.append(str(x2l_name[i])) db2l_index_l.append(str(x2l_index[i])) for i in range(len(x2r_name)): if (x2r_name[i]=='N' or x2r_name[i]=='CA' or x2r_name[i]=='CB' or x2r_name[i]=='SG'): db2r_name_l.append(str(x2r_name[i])) db2r_index_l.append(str(x2r_index[i])) print ('DB2 X1L =>',db2l_name_l) print (db2l_index_l) print ('DB2 X1R =>',db2r_name_l) print (db2r_index_l) """ Explanation: Obteniendo los índices de los puentes End of explanation """ #Generando el DB1 completo ordenado filas=8 col=2 DB1_i=[] DB1_N=[] DB2_i=[] DB2_N=[] DB3_i=[] DB3_N=[] for i in range(0,filas): DB1_N.append([' ']) DB1_i.append(['0']) DB2_N.append([' ']) DB2_i.append(['0']) DB3_N.append([' ']) DB3_i.append(['0']) if (revisa1>0): #Cargando índices para el puente 1 for i in range(len(db1l_name_l)): if db1l_name_l[i]=='N': DB1_N[0] = db1l_name_l[i] DB1_i[0]='index '+db1l_index_l[i] if db1l_name_l[i]=='CA': DB1_N[1] = db1l_name_l[i] DB1_i[1]='index '+db1l_index_l[i] if db1l_name_l[i]=='CB': DB1_N[2] = db1l_name_l[i] DB1_i[2]='index '+db1l_index_l[i] if db1l_name_l[i]=='SG': DB1_N[3] = db1l_name_l[i] DB1_i[3]='index '+db1l_index_l[i] for i in range(len(db1r_name_l)): if db1r_name_l[i]=='SG': DB1_N[4] = db1r_name_l[i] DB1_i[4]='index '+db1r_index_l[i] if db1r_name_l[i]=='CB': DB1_N[5] = db1r_name_l[i] DB1_i[5]='index '+db1r_index_l[i] if db1r_name_l[i]=='CA': DB1_N[6] = db1r_name_l[i] DB1_i[6]='index '+db1r_index_l[i] if db1r_name_l[i]=='N': DB1_N[7] = db1r_name_l[i] DB1_i[7]='index '+db1r_index_l[i] print ('Puente DB1 = resid 
'+px1l+':'+px1r) print ('Names DB1=>',DB1_i) print ('Index DB1=>',DB1_N) print ('\n') if (revisa2>0): #Cargando índices para el puente 2 for i in range(len(db2l_name_l)): if db2l_name_l[i]=='N': DB2_N[0] = db2l_name_l[i] DB2_i[0]='index '+db2l_index_l[i] if db2l_name_l[i]=='CA': DB2_N[1] = db2l_name_l[i] DB2_i[1]='index '+db2l_index_l[i] if db2l_name_l[i]=='CB': DB2_N[2] = db2l_name_l[i] DB2_i[2]='index '+db2l_index_l[i] if db2l_name_l[i]=='SG': DB2_N[3] = db2l_name_l[i] DB2_i[3]='index '+db2l_index_l[i] for i in range(len(db2r_name_l)): if db2r_name_l[i]=='SG': DB2_N[4] = db2r_name_l[i] DB2_i[4]='index '+db2r_index_l[i] if db2r_name_l[i]=='CB': DB2_N[5] = db2r_name_l[i] DB2_i[5]='index '+db2r_index_l[i] if db2r_name_l[i]=='CA': DB2_N[6] = db2r_name_l[i] DB2_i[6]='index '+db2r_index_l[i] if db2r_name_l[i]=='N': DB2_N[7] = db2r_name_l[i] DB2_i[7]='index '+db2r_index_l[i] print ('Puente DB2 = resid '+px2l+':'+px2r) print ('Names DB2=>',DB2_i) print ('Index DB2=>',DB2_N) print ('\n') """ Explanation: Ordenando los puentes de la forma ['N', 'CA', 'CB', 'SG', 'SG', 'CB', 'CA', 'N'] End of explanation """ if (revisa1>0): #Creando script para DB1_x1l psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f1 = open('DB1_x1l.tcl', 'w') print(f1) f1.write('set psfFile '+ psf+' \n') f1.write('set dcdFile '+ dcd+' \n') f1.write('\nmol load psf $psfFile dcd $dcdFile\n') f1.write('set outfile ' +'[open ' +'db1_x1l.dat'+' w]\n') f1.write('set nf [molinfo top get numframes]\n') f1.write('\n#RMSD calculation loop\n') f1.write('set f1 [atomselect top "'+DB1_i[0]+' or '+DB1_i[1]+' or '+DB1_i[2]+' or '+DB1_i[3]+' " frame 0]\n') f1.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f1.write(' set sel [atomselect top "'+DB1_i[0]+' or '+DB1_i[1]+' or '+DB1_i[2]+' or '+DB1_i[3]+' " frame $i]\n') f1.write(' $sel move [measure fit $sel $f1]\n') f1.write(' set time [expr {$i +1}]\n') f1.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f1.write(' puts $outfile " $time"\n') f1.write('}\n') f1.write('close $outfile') f1.close() #Creando script para DB1_x2l psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f2 = open('DB1_x2l.tcl', 'w') print(f2) f2.write('set psfFile '+ psf+' \n') f2.write('set dcdFile '+ dcd+' \n') f2.write('\nmol load psf $psfFile dcd $dcdFile\n') f2.write('set outfile ' +'[open ' +'db1_x2l.dat'+' w]\n') f2.write('set nf [molinfo top get numframes]\n') f2.write('\n#RMSD calculation loop\n') f2.write('set f1 [atomselect top "'+DB1_i[1]+' or '+DB1_i[2]+' or '+DB1_i[3]+' or '+DB1_i[4]+' " frame 0]\n') f2.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f2.write(' set sel [atomselect top "'+DB1_i[1]+' or '+DB1_i[2]+' or '+DB1_i[3]+' or '+DB1_i[4]+' " frame $i]\n') f2.write(' $sel move [measure fit $sel $f1]\n') f2.write(' set time [expr {$i +1}]\n') f2.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f2.write(' puts $outfile " $time"\n') f2.write('}\n') f2.write('close $outfile') f2.close() #Creando script para DB1_x3m psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f3 = open('DB1_x3m.tcl', 'w') print(f3) f3.write('set psfFile '+ psf+' \n') f3.write('set dcdFile '+ dcd+' \n') f3.write('\nmol load psf $psfFile dcd $dcdFile\n') f3.write('set outfile ' +'[open ' +'db1_x3m.dat'+' w]\n') f3.write('set nf [molinfo top get numframes]\n') f3.write('\n#RMSD calculation loop\n') f3.write('set f1 [atomselect top "'+DB1_i[2]+' or '+DB1_i[3]+' or '+DB1_i[4]+' or '+DB1_i[5]+' " frame 0]\n') f3.write('for {set i 0} {$i < $nf} 
{incr i 1} {\n') f3.write(' set sel [atomselect top "'+DB1_i[2]+' or '+DB1_i[3]+' or '+DB1_i[4]+' or '+DB1_i[5]+' " frame $i]\n') f3.write(' $sel move [measure fit $sel $f1]\n') f3.write(' set time [expr {$i +1}]\n') f3.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f3.write(' puts $outfile " $time"\n') f3.write('}\n') f3.write('close $outfile') f3.close() #Creando script para DB1_x2r psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f4 = open('DB1_x2r.tcl', 'w') print(f4) f4.write('set psfFile '+ psf+' \n') f4.write('set dcdFile '+ dcd+' \n') f4.write('\nmol load psf $psfFile dcd $dcdFile\n') f4.write('set outfile ' +'[open ' +'db1_x2r.dat'+' w]\n') f4.write('set nf [molinfo top get numframes]\n') f4.write('\n#RMSD calculation loop\n') f4.write('set f1 [atomselect top "'+DB1_i[3]+' or '+DB1_i[4]+' or '+DB1_i[5]+' or '+DB1_i[6]+' " frame 0]\n') f4.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f4.write(' set sel [atomselect top "'+DB1_i[3]+' or '+DB1_i[4]+' or '+DB1_i[5]+' or '+DB1_i[6]+' " frame $i]\n') f4.write(' $sel move [measure fit $sel $f1]\n') f4.write(' set time [expr {$i +1}]\n') f4.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f4.write(' puts $outfile " $time"\n') f4.write('}\n') f4.write('close $outfile') f4.close() #Creando script para DB1_x1r psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f5 = open('DB1_x1r.tcl', 'w') print(f5) f5.write('set psfFile '+ psf+' \n') f5.write('set dcdFile '+ dcd+' \n') f5.write('\nmol load psf $psfFile dcd $dcdFile\n') f5.write('set outfile ' +'[open ' +'db1_x1r.dat'+' w]\n') f5.write('set nf [molinfo top get numframes]\n') f5.write('\n#RMSD calculation loop\n') f5.write('set f1 [atomselect top "'+DB1_i[4]+' or '+DB1_i[5]+' or '+DB1_i[6]+' or '+DB1_i[7]+' " frame 0]\n') f5.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f5.write(' set sel [atomselect top "'+DB1_i[4]+' or '+DB1_i[5]+' or '+DB1_i[6]+' or '+DB1_i[7]+' " frame $i]\n') f5.write(' $sel move [measure fit $sel $f1]\n') f5.write(' set time [expr {$i +1}]\n') f5.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f5.write(' puts $outfile " $time"\n') f5.write('}\n') f5.write('close $outfile') f5.close() if (revisa2>0): ########################################################################################## ## Creando los archivos para DB2 ####################################################################################### #Creando script para DB2_x1l psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f6 = open('DB2_x1l.tcl', 'w') print(f6) f6.write('set psfFile '+ psf+' \n') f6.write('set dcdFile '+ dcd+' \n') f6.write('\nmol load psf $psfFile dcd $dcdFile\n') f6.write('set outfile ' +'[open ' +'db2_x1l.dat'+' w]\n') f6.write('set nf [molinfo top get numframes]\n') f6.write('\n#RMSD calculation loop\n') f6.write('set f1 [atomselect top "'+DB2_i[0]+' or '+DB2_i[1]+' or '+DB2_i[2]+' or '+DB2_i[3]+' " frame 0]\n') f6.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f6.write(' set sel [atomselect top "'+DB2_i[0]+' or '+DB2_i[1]+' or '+DB2_i[2]+' or '+DB2_i[3]+' " frame $i]\n') f6.write(' $sel move [measure fit $sel $f1]\n') f6.write(' set time [expr {$i +1}]\n') f6.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f6.write(' puts $outfile " $time"\n') f6.write('}\n') f6.write('close $outfile') f6.close() #Creando script para DB1_x2l psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f7 = open('DB2_x2l.tcl', 'w') print(f7) f7.write('set 
psfFile '+ psf+' \n') f7.write('set dcdFile '+ dcd+' \n') f7.write('\nmol load psf $psfFile dcd $dcdFile\n') f7.write('set outfile ' +'[open ' +'db2_x2l.dat'+' w]\n') f7.write('set nf [molinfo top get numframes]\n') f7.write('\n#RMSD calculation loop\n') f7.write('set f1 [atomselect top "'+DB2_i[1]+' or '+DB2_i[2]+' or '+DB2_i[3]+' or '+DB2_i[4]+' " frame 0]\n') f7.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f7.write(' set sel [atomselect top "'+DB2_i[1]+' or '+DB2_i[2]+' or '+DB2_i[3]+' or '+DB2_i[4]+' " frame $i]\n') f7.write(' $sel move [measure fit $sel $f1]\n') f7.write(' set time [expr {$i +1}]\n') f7.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f7.write(' puts $outfile " $time"\n') f7.write('}\n') f7.write('close $outfile') f7.close() #Creando script para DB1_x3m psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f8 = open('DB2_x3m.tcl', 'w') print(f8) f8.write('set psfFile '+ psf+' \n') f8.write('set dcdFile '+ dcd+' \n') f8.write('\nmol load psf $psfFile dcd $dcdFile\n') f8.write('set outfile ' +'[open ' +'db2_x3m.dat'+' w]\n') f8.write('set nf [molinfo top get numframes]\n') f8.write('\n#RMSD calculation loop\n') f8.write('set f1 [atomselect top "'+DB2_i[2]+' or '+DB2_i[3]+' or '+DB2_i[4]+' or '+DB2_i[5]+' " frame 0]\n') f8.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f8.write(' set sel [atomselect top "'+DB2_i[2]+' or '+DB2_i[3]+' or '+DB2_i[4]+' or '+DB2_i[5]+' " frame $i]\n') f8.write(' $sel move [measure fit $sel $f1]\n') f8.write(' set time [expr {$i +1}]\n') f8.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f8.write(' puts $outfile " $time"\n') f8.write('}\n') f8.write('close $outfile') f8.close() #Creando script para DB1_x2r psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f9 = open('DB2_x2r.tcl', 'w') print(f9) f9.write('set psfFile '+ psf+' \n') f9.write('set dcdFile '+ dcd+' \n') f9.write('\nmol load psf $psfFile dcd $dcdFile\n') f9.write('set outfile ' +'[open ' +'db2_x2r.dat'+' w]\n') f9.write('set nf [molinfo top get numframes]\n') f9.write('\n#RMSD calculation loop\n') f9.write('set f1 [atomselect top "'+DB2_i[3]+' or '+DB2_i[4]+' or '+DB2_i[5]+' or '+DB2_i[6]+' " frame 0]\n') f9.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f9.write(' set sel [atomselect top "'+DB2_i[3]+' or '+DB2_i[4]+' or '+DB2_i[5]+' or '+DB2_i[6]+' " frame $i]\n') f9.write(' $sel move [measure fit $sel $f1]\n') f9.write(' set time [expr {$i +1}]\n') f9.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f9.write(' puts $outfile " $time"\n') f9.write('}\n') f9.write('close $outfile') f9.close() #Creando script para DB1_x1r psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print(psf) f10 = open('DB2_x1r.tcl', 'w') print(f10) f10.write('set psfFile '+ psf+' \n') f10.write('set dcdFile '+ dcd+' \n') f10.write('\nmol load psf $psfFile dcd $dcdFile\n') f10.write('set outfile ' +'[open ' +'db2_x1r.dat'+' w]\n') f10.write('set nf [molinfo top get numframes]\n') f10.write('\n#RMSD calculation loop\n') f10.write('set f1 [atomselect top "'+DB2_i[4]+' or '+DB2_i[5]+' or '+DB2_i[6]+' or '+DB2_i[7]+' " frame 0]\n') f10.write('for {set i 0} {$i < $nf} {incr i 1} {\n') f10.write(' set sel [atomselect top "'+DB2_i[4]+' or '+DB2_i[5]+' or '+DB2_i[6]+' or '+DB2_i[7]+' " frame $i]\n') f10.write(' $sel move [measure fit $sel $f1]\n') f10.write(' set time [expr {$i +1}]\n') f10.write(' puts -nonewline $outfile "[measure rmsd $sel $f1]"\n') f10.write(' puts $outfile " $time"\n') f10.write('}\n') 
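    # --- Optional sketch (illustrative, not part of the original workflow): the ten
    # nearly identical script-writing blocks in this cell could be produced by one
    # helper such as the following. The helper name and its arguments are assumed,
    # and the Tcl it emits mirrors the template used above.
    def write_rmsd_tcl(tcl_name, dat_name, atom_sels, psf_path, dcd_path):
        # atom_sels is a list of four selections, e.g. DB1_i[0:4] -> ['index 10', ...]
        sel = ' or '.join(atom_sels)
        with open(tcl_name, 'w') as fh:
            fh.write('set psfFile ' + psf_path + ' \n')
            fh.write('set dcdFile ' + dcd_path + ' \n')
            fh.write('\nmol load psf $psfFile dcd $dcdFile\n')
            fh.write('set outfile [open ' + dat_name + ' w]\n')
            fh.write('set nf [molinfo top get numframes]\n')
            fh.write('\n#RMSD calculation loop\n')
            fh.write('set f1 [atomselect top "' + sel + '" frame 0]\n')
            fh.write('for {set i 0} {$i < $nf} {incr i 1} {\n')
            fh.write('    set sel [atomselect top "' + sel + '" frame $i]\n')
            fh.write('    $sel move [measure fit $sel $f1]\n')
            fh.write('    set time [expr {$i +1}]\n')
            fh.write('    puts $outfile "[measure rmsd $sel $f1] $time"\n')
            fh.write('}\n')
            fh.write('close $outfile')
    # Example (hypothetical call): write_rmsd_tcl('DB1_x1l.tcl', 'db1_x1l.dat', DB1_i[0:4], psf, dcd)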
f10.write('close $outfile') f10.close() """ Explanation: Creando los archivos tcl para el cálculo del RMSD de los puentes Se crean los archivos de salida en formato tcl. End of explanation """ if (revisa1>0): #Calculando con VMD rmsd DB1 X1L !vmd -dispdev text < DB1_x1l.tcl #Calculando con VMD DB1 X2L !vmd -dispdev text < DB1_x2l.tcl #Calculando con VMD DB1 X3M !vmd -dispdev text < DB1_x3m.tcl #Calculando con VMD DB1 X2R !vmd -dispdev text < DB1_x2r.tcl #Calculando con VMD DB1 X1R !vmd -dispdev text < DB1_x1r.tcl if (revisa2>0): #Calculando con VMD rmsd DB2 X1L !vmd -dispdev text < DB2_x1l.tcl #Calculando con VMD DB2 X2L !vmd -dispdev text < DB2_x2l.tcl #Calculando con VMD DB2 X3M !vmd -dispdev text < DB2_x3m.tcl #Calculando con VMD DB2 X2R !vmd -dispdev text < DB2_x2r.tcl #Calculando con VMD DB2 X1R !vmd -dispdev text < DB2_x1r.tcl """ Explanation: Ejecutando los archivos rmsd en tcl con vmd Ejecutando los archivos en VMD End of explanation """ escale_y=[] fig = pl.figure(figsize=(25,8)) fig.subplots_adjust(hspace=.4, wspace=0.3) #Formateando los valores de los ejes #Engrosando marcos ax = fig.add_subplot(2,5,1) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,2) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,3) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,4) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,5) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,6) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,7) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,8) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,9) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax = fig.add_subplot(2,5,10) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) if (revisa1>0): #Datos de DB1 data_db1_x1l=np.loadtxt('db1_x1l.dat',comments=['#', '@']) data_db1_x2l=np.loadtxt('db1_x2l.dat',comments=['#', '@']) data_db1_x3m=np.loadtxt('db1_x3m.dat',comments=['#', '@']) data_db1_x2r=np.loadtxt('db1_x2r.dat',comments=['#', '@']) data_db1_x1r=np.loadtxt('db1_x1r.dat',comments=['#', '@']) sub1 = fig.add_subplot(251) # instead of plt.subplot(2, 2, 1) #sub1.set_title('DB1_X1L') sub1.set_xlabel('Time (ns)') sub1.set_ylabel('RMSD (nm)') sub1.plot(data_db1_x1l[:,1]*0.02, data_db1_x1l[:,0]/10, color='black', linewidth = 1, label='DB1_X1L') x1,x2,y1,y2=sub1.axis() escale_y.append(y2) sub2 = fig.add_subplot(252) #sub2.set_title('DB1_X2L') sub2.set_xlabel('Time (ns)') sub2.set_ylabel('RMSD (nm)') sub2.plot(data_db1_x2l[:,1]*0.02, data_db1_x2l[:,0]/10, color='black', linewidth = 1, label='DB1_X2L') x1,x2,y1,y2=sub2.axis() escale_y.append(y2) sub3 = fig.add_subplot(253) #sub3.set_title('DB1_X3M') sub3.set_xlabel('Time (ns)') sub3.set_ylabel('RMSD (nm)') sub3.plot(data_db1_x3m[:,1]*0.02, data_db1_x3m[:,0]/10, color='black', linewidth = 1, label='DB1_X3M') x1,x2,y1,y2=sub3.axis() escale_y.append(y2) sub4 = fig.add_subplot(254) #sub4.set_title('DB1_X2R') sub4.set_xlabel('Time (ns)') sub4.set_ylabel('RMSD (nm)') sub4.plot(data_db1_x2r[:,1]*0.02, data_db1_x2r[:,0]/10, color='black', linewidth 
= 1, label='DB1_X2R') x1,x2,y1,y2=sub4.axis() escale_y.append(y2) sub5 = fig.add_subplot(255) #sub5.set_title('DB1_X1R') sub5.set_xlabel('Time (ns)') sub5.set_ylabel('RMSD (nm)') sub5.plot(data_db1_x1r[:,1]*0.02, data_db1_x1r[:,0]/10, color='black', linewidth = 1, label='DB1_X1R') x1,x2,y1,y2=sub5.axis() escale_y.append(y2) if (revisa2>0): #DAtos de DB2 data_db2_x1l=np.loadtxt('db2_x1l.dat',comments=['#', '@']) data_db2_x2l=np.loadtxt('db2_x2l.dat',comments=['#', '@']) data_db2_x3m=np.loadtxt('db2_x3m.dat',comments=['#', '@']) data_db2_x2r=np.loadtxt('db2_x2r.dat',comments=['#', '@']) data_db2_x1r=np.loadtxt('db2_x1r.dat',comments=['#', '@']) #Ploteando DB2 sub6 = fig.add_subplot(256) #sub6.set_title('DB2_X1L') sub6.set_xlabel('Time (ns)') sub6.set_ylabel('RMSD (nm)') sub6.plot(data_db2_x1l[:,1]*0.02, data_db2_x1l[:,0]/10, color='red', linewidth = 1, label='DB2_X1L') x1,x2,y1,y2=sub6.axis() escale_y.append(y2) sub7 = fig.add_subplot(257) #sub7.set_title('DB2_X2L') sub7.set_xlabel('Time (ns)') sub7.set_ylabel('RMSD (nm)') sub7.plot(data_db2_x2l[:,1]*0.02, data_db2_x2l[:,0]/10, color='red', linewidth = 1, label='DB2_X2L') x1,x2,y1,y2=sub7.axis() escale_y.append(y2) sub8 = fig.add_subplot(258) #sub8.set_title('DB2_X3M') sub8.set_xlabel('Time (ns)') sub8.set_ylabel('RMSD (nm)') sub8.plot(data_db2_x3m[:,1]*0.02, data_db2_x3m[:,0]/10, color='red', linewidth = 1, label='DB2_X3M') x1,x2,y1,y2=sub8.axis() escale_y.append(y2) sub9 = fig.add_subplot(259) #sub9.set_title('DB2_X2R') sub9.set_xlabel('Time (ns)') sub9.set_ylabel('RMSD (nm)') sub9.plot(data_db2_x2r[:,1]*0.02, data_db2_x2r[:,0]/10, color='red', linewidth = 1, label='DB2_X2R') x1,x2,y1,y2=sub9.axis() escale_y.append(y2) sub10 = fig.add_subplot(2,5,10) #sub10.set_title('DB2_X1R') sub10.set_xlabel('Time (ns)') sub10.set_ylabel('RMSD (nm)') sub10.plot(data_db2_x1r[:,1]*0.02, data_db2_x1r[:,0]/10, color='red', linewidth = 1, label='DB2_X1R') x1,x2,y1,y2=sub10.axis() escale_y.append(y2) #escale_y escale_y.sort(reverse=True) escale_y ##Cambiando los ejes de las y sub1.axis((x1,x2,y1,escale_y[0])) sub2.axis((x1,x2,y1,escale_y[0])) sub3.axis((x1,x2,y1,escale_y[0])) sub4.axis((x1,x2,y1,escale_y[0])) sub5.axis((x1,x2,y1,escale_y[0])) sub6.axis((x1,x2,y1,escale_y[0])) sub7.axis((x1,x2,y1,escale_y[0])) sub8.axis((x1,x2,y1,escale_y[0])) sub9.axis((x1,x2,y1,escale_y[0])) sub10.axis((x1,x2,y1,escale_y[0])) """ Explanation: Generando los gráficos RMSD en matplotlib End of explanation """ ### Creando el directorio para el análisis de las distancias de enlace de los puentes ruta_diedros = nuevaruta+'/diedros_intra' print ( ruta_diedros ) if not os.path.exists(ruta_diedros): os.makedirs(ruta_diedros) print ('Se ha creado la ruta ===>',ruta_diedros) else: print ("La ruta "+ruta_diedros+" existe..!!!") print ( 'Nos vamos a ....', ruta_diedros) os.chdir( ruta_diedros ) """ Explanation: FREE ENERGY DIHEDRAL INTRAMOLECULAR Se calculan las distancias de los ángulos diedros para el cálculo de la free energy intramolecular End of explanation """ psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file if (revisa1>0): #Creando script para DB1_x1l d1 = open('dihed_DB1_x1l.tcl', 'w') print(d1) d1.write('set psfFile '+ psf+' \n') d1.write('set dcdFile '+ dcd+' \n') d1.write('\nmol load psf $psfFile dcd $dcdFile\n') d1.write('set outfile ' +'[open ' +'dihed_db1_x1l.dat'+' w]\n') d1.write('set nf [molinfo top get numframes]\n') d1.write(' \n') d1.write('set selatoms1 [[atomselect top "protein and chain A and '+DB1_i[0]+'"] get index]\n') d1.write('set selatoms2 
[[atomselect top "protein and chain A and '+DB1_i[1]+'"] get index]\n') d1.write('set selatoms3 [[atomselect top "protein and chain A and '+DB1_i[2]+'"] get index]\n') d1.write('set selatoms4 [[atomselect top "protein and chain A and '+DB1_i[3]+'"] get index]\n') d1.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d1.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d1.write(' set x [measure dihed $dihed frame $i]\n') d1.write(' set time [expr {$i +1}]\n') d1.write(' puts $outfile "$time $x"\n') d1.write('}\n') d1.close() #Creando script para DB1_x2l d2 = open('dihed_DB1_x2l.tcl', 'w') print(d2) d2.write('set psfFile '+ psf+' \n') d2.write('set dcdFile '+ dcd+' \n') d2.write('\nmol load psf $psfFile dcd $dcdFile\n') d2.write('set outfile ' +'[open ' +'dihed_db1_x2l.dat'+' w]\n') d2.write('set nf [molinfo top get numframes]\n') d2.write(' \n') d2.write('set selatoms1 [[atomselect top "protein and chain A and '+DB1_i[1]+'"] get index]\n') d2.write('set selatoms2 [[atomselect top "protein and chain A and '+DB1_i[2]+'"] get index]\n') d2.write('set selatoms3 [[atomselect top "protein and chain A and '+DB1_i[3]+'"] get index]\n') d2.write('set selatoms4 [[atomselect top "protein and chain A and '+DB1_i[4]+'"] get index]\n') d2.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d2.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d2.write(' set x [measure dihed $dihed frame $i]\n') d2.write(' set time [expr {$i +1}]\n') d2.write(' puts $outfile "$time $x"\n') d2.write('}\n') d2.close() #Creando script para DB1_x3m d3 = open('dihed_DB1_x3m.tcl', 'w') print(d3) d3.write('set psfFile '+ psf+' \n') d3.write('set dcdFile '+ dcd+' \n') d3.write('\nmol load psf $psfFile dcd $dcdFile\n') d3.write('set outfile ' +'[open ' +'dihed_db1_x3m.dat'+' w]\n') d3.write('set nf [molinfo top get numframes]\n') d3.write(' \n') d3.write('set selatoms1 [[atomselect top "protein and chain A and '+DB1_i[2]+'"] get index]\n') d3.write('set selatoms2 [[atomselect top "protein and chain A and '+DB1_i[3]+'"] get index]\n') d3.write('set selatoms3 [[atomselect top "protein and chain A and '+DB1_i[4]+'"] get index]\n') d3.write('set selatoms4 [[atomselect top "protein and chain A and '+DB1_i[5]+'"] get index]\n') d3.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d3.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d3.write(' set x [measure dihed $dihed frame $i]\n') d3.write(' set time [expr {$i +1}]\n') d3.write(' puts $outfile "$time $x"\n') d3.write('}\n') d3.close() #Creando script para DB1_x2r d4 = open('dihed_DB1_x2r.tcl', 'w') print(d4) d4.write('set psfFile '+ psf+' \n') d4.write('set dcdFile '+ dcd+' \n') d4.write('\nmol load psf $psfFile dcd $dcdFile\n') d4.write('set outfile ' +'[open ' +'dihed_db1_x2r.dat'+' w]\n') d4.write('set nf [molinfo top get numframes]\n') d4.write(' \n') d4.write('set selatoms1 [[atomselect top "protein and chain A and '+DB1_i[3]+'"] get index]\n') d4.write('set selatoms2 [[atomselect top "protein and chain A and '+DB1_i[4]+'"] get index]\n') d4.write('set selatoms3 [[atomselect top "protein and chain A and '+DB1_i[5]+'"] get index]\n') d4.write('set selatoms4 [[atomselect top "protein and chain A and '+DB1_i[6]+'"] get index]\n') d4.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d4.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d4.write(' set x 
[measure dihed $dihed frame $i]\n') d4.write(' set time [expr {$i +1}]\n') d4.write(' puts $outfile "$time $x"\n') d4.write('}\n') d4.close() #Creando script para DB1_x1r d5 = open('dihed_DB1_x1r.tcl', 'w') print(d5) d5.write('set psfFile '+ psf+' \n') d5.write('set dcdFile '+ dcd+' \n') d5.write('\nmol load psf $psfFile dcd $dcdFile\n') d5.write('set outfile ' +'[open ' +'dihed_db1_x1r.dat'+' w]\n') d5.write('set nf [molinfo top get numframes]\n') d5.write(' \n') d5.write('set selatoms1 [[atomselect top "protein and chain A and '+DB1_i[4]+'"] get index]\n') d5.write('set selatoms2 [[atomselect top "protein and chain A and '+DB1_i[5]+'"] get index]\n') d5.write('set selatoms3 [[atomselect top "protein and chain A and '+DB1_i[6]+'"] get index]\n') d5.write('set selatoms4 [[atomselect top "protein and chain A and '+DB1_i[7]+'"] get index]\n') d5.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d5.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d5.write(' set x [measure dihed $dihed frame $i]\n') d5.write(' set time [expr {$i +1}]\n') d5.write(' puts $outfile "$time $x"\n') d5.write('}\n') d5.close() if (revisa2>0): ##################################################################### ########## Puente 2 ##########################################3 #Creando script para DB2_x1l d6 = open('dihed_DB2_x1l.tcl', 'w') print(d6) d6.write('set psfFile '+ psf+' \n') d6.write('set dcdFile '+ dcd+' \n') d6.write('\nmol load psf $psfFile dcd $dcdFile\n') d6.write('set outfile ' +'[open ' +'dihed_db2_x1l.dat'+' w]\n') d6.write('set nf [molinfo top get numframes]\n') d6.write(' \n') d6.write('set selatoms1 [[atomselect top "protein and chain A and '+DB2_i[0]+'"] get index]\n') d6.write('set selatoms2 [[atomselect top "protein and chain A and '+DB2_i[1]+'"] get index]\n') d6.write('set selatoms3 [[atomselect top "protein and chain A and '+DB2_i[2]+'"] get index]\n') d6.write('set selatoms4 [[atomselect top "protein and chain A and '+DB2_i[3]+'"] get index]\n') d6.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d6.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d6.write(' set x [measure dihed $dihed frame $i]\n') d6.write(' set time [expr {$i +1}]\n') d6.write(' puts $outfile "$time $x"\n') d6.write('}\n') d6.close() #Creando script para DB2_x2l d7 = open('dihed_DB2_x2l.tcl', 'w') print(d7) d7.write('set psfFile '+ psf+' \n') d7.write('set dcdFile '+ dcd+' \n') d7.write('\nmol load psf $psfFile dcd $dcdFile\n') d7.write('set outfile ' +'[open ' +'dihed_db2_x2l.dat'+' w]\n') d7.write('set nf [molinfo top get numframes]\n') d7.write(' \n') d7.write('set selatoms1 [[atomselect top "protein and chain A and '+DB2_i[1]+'"] get index]\n') d7.write('set selatoms2 [[atomselect top "protein and chain A and '+DB2_i[2]+'"] get index]\n') d7.write('set selatoms3 [[atomselect top "protein and chain A and '+DB2_i[3]+'"] get index]\n') d7.write('set selatoms4 [[atomselect top "protein and chain A and '+DB2_i[4]+'"] get index]\n') d7.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d7.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d7.write(' set x [measure dihed $dihed frame $i]\n') d7.write(' set time [expr {$i +1}]\n') d7.write(' puts $outfile "$time $x"\n') d7.write('}\n') d7.close() #Creando script para DB2_x3m d8 = open('dihed_DB2_x3m.tcl', 'w') print(d8) d8.write('set psfFile '+ psf+' \n') d8.write('set dcdFile '+ dcd+' \n') 
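    # --- Optional cross-check (sketch, illustrative only): the dihedral value that
    # VMD's "measure dihed" writes to the .dat files can also be computed in Python
    # from the Cartesian coordinates of the four atoms (3-component NumPy arrays).
    # The sign convention may differ from VMD's, so compare absolute values first.
    def dihedral_deg(p0, p1, p2, p3):
        p0, p1, p2, p3 = (np.asarray(p, dtype=float) for p in (p0, p1, p2, p3))
        b0 = p0 - p1
        b1 = p2 - p1
        b2 = p3 - p2
        b1 = b1 / np.linalg.norm(b1)
        v = b0 - np.dot(b0, b1) * b1   # component of b0 perpendicular to b1
        w = b2 - np.dot(b2, b1) * b1   # component of b2 perpendicular to b1
        return np.degrees(np.arctan2(np.dot(np.cross(b1, v), w), np.dot(v, w)))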
d8.write('\nmol load psf $psfFile dcd $dcdFile\n') d8.write('set outfile ' +'[open ' +'dihed_db2_x3m.dat'+' w]\n') d8.write('set nf [molinfo top get numframes]\n') d8.write(' \n') d8.write('set selatoms1 [[atomselect top "protein and chain A and '+DB2_i[2]+'"] get index]\n') d8.write('set selatoms2 [[atomselect top "protein and chain A and '+DB2_i[3]+'"] get index]\n') d8.write('set selatoms3 [[atomselect top "protein and chain A and '+DB2_i[4]+'"] get index]\n') d8.write('set selatoms4 [[atomselect top "protein and chain A and '+DB2_i[5]+'"] get index]\n') d8.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d8.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d8.write(' set x [measure dihed $dihed frame $i]\n') d8.write(' set time [expr {$i +1}]\n') d8.write(' puts $outfile "$time $x"\n') d8.write('}\n') d8.close() #Creando script para DB2_x2r d9 = open('dihed_DB2_x2r.tcl', 'w') print(d9) d9.write('set psfFile '+ psf+' \n') d9.write('set dcdFile '+ dcd+' \n') d9.write('\nmol load psf $psfFile dcd $dcdFile\n') d9.write('set outfile ' +'[open ' +'dihed_db2_x2r.dat'+' w]\n') d9.write('set nf [molinfo top get numframes]\n') d9.write(' \n') d9.write('set selatoms1 [[atomselect top "protein and chain A and '+DB2_i[3]+'"] get index]\n') d9.write('set selatoms2 [[atomselect top "protein and chain A and '+DB2_i[4]+'"] get index]\n') d9.write('set selatoms3 [[atomselect top "protein and chain A and '+DB2_i[5]+'"] get index]\n') d9.write('set selatoms4 [[atomselect top "protein and chain A and '+DB2_i[6]+'"] get index]\n') d9.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d9.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d9.write(' set x [measure dihed $dihed frame $i]\n') d9.write(' set time [expr {$i +1}]\n') d9.write(' puts $outfile "$time $x"\n') d9.write('}\n') d9.close() #Creando script para DB2_x1r d10 = open('dihed_DB2_x1r.tcl', 'w') print(d10) d10.write('set psfFile '+ psf+' \n') d10.write('set dcdFile '+ dcd+' \n') d10.write('\nmol load psf $psfFile dcd $dcdFile\n') d10.write('set outfile ' +'[open ' +'dihed_db2_x1r.dat'+' w]\n') d10.write('set nf [molinfo top get numframes]\n') d10.write(' \n') d10.write('set selatoms1 [[atomselect top "protein and chain A and '+DB2_i[4]+'"] get index]\n') d10.write('set selatoms2 [[atomselect top "protein and chain A and '+DB2_i[5]+'"] get index]\n') d10.write('set selatoms3 [[atomselect top "protein and chain A and '+DB2_i[6]+'"] get index]\n') d10.write('set selatoms4 [[atomselect top "protein and chain A and '+DB2_i[7]+'"] get index]\n') d10.write('set dihed [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] [lindex $selatoms4] ]\n') d10.write('for {set i 0} {$i < $nf} {incr i 1} {\n') d10.write(' set x [measure dihed $dihed frame $i]\n') d10.write(' set time [expr {$i +1}]\n') d10.write(' puts $outfile "$time $x"\n') d10.write('}\n') d10.close() """ Explanation: Creación de los archivos tcl para el cálculo de los ángulos diedros End of explanation """ if (revisa1>0): #Calculando con VMD rmsd DB1 X1L !vmd -dispdev text < dihed_DB1_x1l.tcl #Calculando con VMD DB1 X2L !vmd -dispdev text < dihed_DB1_x2l.tcl #Calculando con VMD DB1 X3M !vmd -dispdev text < dihed_DB1_x3m.tcl #Calculando con VMD DB1 X2R !vmd -dispdev text < dihed_DB1_x2r.tcl #Calculando con VMD DB1 X1R !vmd -dispdev text < dihed_DB1_x1r.tcl if (revisa2>0): #Calculando con VMD rmsd DB2 X1L !vmd -dispdev text < dihed_DB2_x1l.tcl #Calculando con VMD DB2 X2L 
!vmd -dispdev text < dihed_DB2_x2l.tcl #Calculando con VMD DB2 X3M !vmd -dispdev text < dihed_DB2_x3m.tcl #Calculando con VMD DB2 X2R !vmd -dispdev text < dihed_DB2_x2r.tcl #Calculando con VMD DB2 X1R !vmd -dispdev text < dihed_DB2_x1r.tcl print ('\nCopiando el archivo generateFES.py a '+ruta_diedros) source_file=ruta_f_energy+'/generateFES.py' dest_file=ruta_diedros+'/generateFES.py' shutil.copy(source_file,dest_file) #Cambiando permisos de ejecución !chmod +x generateFES.py """ Explanation: Ejecutando los archivos de los ángulos diedros tcl generados con VMD End of explanation """ if (revisa1>0): #Cargando valores del DB1_X1L data_db1_x1l=np.loadtxt('dihed_db1_x1l.dat',comments=['#', '@']) #Cargando valores del DB1_X1R data_db1_x1r=np.loadtxt('dihed_db1_x1r.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB1_X1L min_x1l=np.amin(data_db1_x1l[:,1]) max_x1l=np.amax(data_db1_x1l[:,1]) print ('Minimo DB1_X1L=>',min_x1l) print ('Máximo DB1_X1L=>',max_x1l) #Obteniendo los valores máximo y mínimo del DB1_X1R min_x1r=np.amin(data_db1_x1r[:,1]) max_x1r=np.amax(data_db1_x1r[:,1]) print ('Minimo DB1_X1R=>',min_x1r) print ('Máximo DB1_X1R=>',max_x1r) #Creando los archivos de entrada para el script np.savetxt('db1_x1l.dat',data_db1_x1l[:,1], fmt='%1.14f') np.savetxt('db1_x1r.dat',data_db1_x1r[:,1], fmt='%1.14f') !paste db1_x1l.dat db1_x1r.dat > DB1_x1_lr.dat #Ejecutando el script de FES !python generateFES.py DB1_x1_lr.dat $min_x1l $max_x1l $min_x1r $max_x1r 200 200 $temperatura XL1_XR1.dat ################################################################### #Cargando valores del DB1_X2l data_db1_x2l=np.loadtxt('dihed_db1_x2l.dat',comments=['#', '@']) #Cargando valores del DB1_X1R data_db1_x2r=np.loadtxt('dihed_db1_x2r.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB1_X1L min_x2l=np.amin(data_db1_x2l[:,1]) max_x2l=np.amax(data_db1_x2l[:,1]) print ('Minimo DB1_X2L=>',min_x2l) print ('Máximo DB1_X2L=>',max_x2l) #Obteniendo los valores máximo y mínimo del DB1_X1R min_x2r=np.amin(data_db1_x2r[:,1]) max_x2r=np.amax(data_db1_x2r[:,1]) print ('Minimo DB1_X2R=>',min_x2r) print ('Máximo DB1_X2R=>',max_x2r) #Creando los archivos de entrada para el script np.savetxt('db1_x2l.dat',data_db1_x2l[:,1], fmt='%1.14f') np.savetxt('db1_x2r.dat',data_db1_x2r[:,1], fmt='%1.14f') !paste db1_x2l.dat db1_x2r.dat > DB1_x2_lr.dat #Ejecutando el script de FES !python generateFES.py DB1_x2_lr.dat $min_x2l $max_x2l $min_x2r $max_x2r 200 200 $temperatura XL2_XR2.dat ###################################################################################### #Generando los archivos para X3M data_db1_x3m=np.loadtxt('dihed_db1_x3m.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB1_X1L min_x3m=np.amin(data_db1_x3m[:,1]) max_x3m=np.amax(data_db1_x3m[:,1]) print ('Minimo DB1_X3M=>',min_x3m) print ('Máximo DB1_X3M=>',max_x3m) print ('Minimo DB1_X1L=>',min_x1l) print ('Máximo DB1_X1L=>',max_x1l) print ('Minimo DB1_X2L=>',min_x2l) print ('Máximo DB1_X2L=>',max_x2l) print ('Minimo DB1_X1R=>',min_x1r) print ('Máximo DB1_X1R=>',max_x1r) print ('Minimo DB1_X2R=>',min_x2r) print ('Máximo DB1_X2R=>',max_x2r) #Creando los archivos de entrada para el script np.savetxt('db1_x3m.dat',data_db1_x3m[:,1], fmt='%1.14f') !paste db1_x3m.dat db1_x1l.dat > DB1_x3m_x1l.dat !paste db1_x3m.dat db1_x2l.dat > DB1_x3m_x2l.dat !paste db1_x3m.dat db1_x1r.dat > DB1_x3m_x1r.dat !paste db1_x3m.dat db1_x2r.dat > DB1_x3m_x2r.dat #Ejecutando el script de FES !python generateFES.py DB1_x3m_x1l.dat $min_x3m 
$max_x3m $min_x1l $max_x1l 200 200 $temperatura XM3_XL1.dat !python generateFES.py DB1_x3m_x2l.dat $min_x3m $max_x3m $min_x2l $max_x2l 200 200 $temperatura XM3_XL2.dat !python generateFES.py DB1_x3m_x1r.dat $min_x3m $max_x3m $min_x1r $max_x1r 200 200 $temperatura XM3_XR1.dat !python generateFES.py DB1_x3m_x2r.dat $min_x3m $max_x3m $min_x2r $max_x2r 200 200 $temperatura XM3_XR2.dat """ Explanation: Calculando la Free Energy Intramolecular para el Puente 1 End of explanation """ # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db1_xl1_vs_xr1.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^L_1}" set ylabel "{/=30 X@^R_1}" set title "Free Energy Surface Intramolecular DB1" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "XL1_XR1.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db1_xl2_vs_xr2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^L_2}" set ylabel "{/=30 X@^R_2}" set title "Free Energy Surface Intramolecular DB1" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "XL2_XR2.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db1_xm3_vs_xl1.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^L_1}" set title "Free Energy Surface Intramolecular DB1" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "XM3_XL1.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db1_xm3_vs_xl2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^L_2}" set title "Free Energy Surface Intramolecular DB1" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "XM3_XL2.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo 
transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db1_xm3_vs_xr2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^R_2}" set title "Free Energy Surface Intramolecular DB1" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "XM3_XR2.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db1_xm3_vs_xr1.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^R_1}" set title "Free Energy Surface Intramolecular DB1" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "XM3_XR1.dat" with pm3d """ Explanation: Ploteando con GNUPLOT el Puente 1 End of explanation """ if (revisa2>0): #Cargando valores del DB2_X1L data_db2_x1l=np.loadtxt('dihed_db2_x1l.dat',comments=['#', '@']) #Cargando valores del DB1_X1R data_db2_x1r=np.loadtxt('dihed_db2_x1r.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB2_X1L min_db2_x1l=np.amin(data_db2_x1l[:,1]) max_db2_x1l=np.amax(data_db2_x1l[:,1]) print ('Minimo DB2_X1L=>',min_db2_x1l) print ('Máximo DB2_X1L=>',max_db2_x1l) #Obteniendo los valores máximo y mínimo del DB2_X1R min_db2_x1r=np.amin(data_db2_x1r[:,1]) max_db2_x1r=np.amax(data_db2_x1r[:,1]) print ('Minimo DB2_X1R=>',min_db2_x1r) print ('Máximo DB2_X1R=>',max_db2_x1r) #Creando los archivos de entrada para el script np.savetxt('db2_x1l.dat',data_db2_x1l[:,1], fmt='%1.14f') np.savetxt('db2_x1r.dat',data_db2_x1r[:,1], fmt='%1.14f') !paste db2_x1l.dat db2_x1r.dat > DB2_x1_lr.dat #Ejecutando el script de FES !python generateFES.py DB2_x1_lr.dat $min_db2_x1l $max_db2_x1l $min_db2_x1r $max_db2_x1r 200 200 $temperatura DB2_XL1_XR1.dat ################################################################### #Cargando valores del DB2_X2l data_db2_x2l=np.loadtxt('dihed_db2_x2l.dat',comments=['#', '@']) #Cargando valores del DB2_X1R data_db2_x2r=np.loadtxt('dihed_db2_x2r.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB2_X1L min_db2_x2l=np.amin(data_db2_x2l[:,1]) max_db2_x2l=np.amax(data_db2_x2l[:,1]) print ('Minimo DB2_X2L=>',min_db2_x2l) print ('Máximo DB2_X2L=>',max_db2_x2l) #Obteniendo los valores máximo y mínimo del DB2_X1R min_db2_x2r=np.amin(data_db2_x2r[:,1]) max_db2_x2r=np.amax(data_db2_x2r[:,1]) print ('Minimo DB2_X2R=>',min_db2_x2r) print ('Máximo DB2_X2R=>',max_db2_x2r) #Creando los archivos de entrada para el script np.savetxt('db2_x2l.dat',data_db2_x2l[:,1], fmt='%1.14f') np.savetxt('db2_x2r.dat',data_db2_x2r[:,1], fmt='%1.14f') !paste db2_x2l.dat db2_x2r.dat > DB2_x2_lr.dat #Ejecutando el script de FES !python generateFES.py DB2_x2_lr.dat $min_db2_x2l $max_db2_x2l $min_db2_x2r $max_db2_x2r 200 200 $temperatura DB2_XL2_XR2.dat ###################################################################################### #Cargando valores 
del DB2_X3M data_db2_x3m=np.loadtxt('dihed_db2_x3m.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB2_X3M min_db2_x3m=np.amin(data_db2_x3m[:,1]) max_db2_x3m=np.amax(data_db2_x3m[:,1]) print ('Minimo DB2_X3M=>',min_db2_x3m) print ('Máximo DB2_X3M=>',max_db2_x3m) print ('Minimo DB2_X1R=>',min_db2_x1r) print ('Máximo DB2_X1R=>',max_db2_x1r) print ('Minimo DB2_X2R=>',min_db2_x2r) print ('Máximo DB2_X2R=>',max_db2_x2r) print ('Minimo DB2_X1L=>',min_db2_x1l) print ('Máximo DB2_X1L=>',max_db2_x1l) print ('Minimo DB2_X2L=>',min_db2_x2l) print ('Máximo DB2_X2L=>',max_db2_x2l) #Creando los archivos de entrada para el script np.savetxt('db2_x3m.dat',data_db2_x3m[:,1], fmt='%1.14f') !paste db2_x3m.dat db2_x1r.dat > DB2_x3m_x1r.dat !paste db2_x3m.dat db2_x2r.dat > DB2_x3m_x2r.dat !paste db2_x3m.dat db2_x1l.dat > DB2_x3m_x1l.dat !paste db2_x3m.dat db2_x2l.dat > DB2_x3m_x2l.dat #Ejecutando el script de FES !python generateFES.py DB2_x3m_x1r.dat $min_db2_x3m $max_db2_x3m $min_db2_x1r $max_db2_x1r 200 200 $temperatura DB2_XM3_XR1.dat !python generateFES.py DB2_x3m_x2r.dat $min_db2_x3m $max_db2_x3m $min_db2_x2r $max_db2_x2r 200 200 $temperatura DB2_XM3_XR2.dat !python generateFES.py DB2_x3m_x1l.dat $min_db2_x3m $max_db2_x3m $min_db2_x1l $max_db2_x1l 200 200 $temperatura DB2_XM3_XL1.dat !python generateFES.py DB2_x3m_x2l.dat $min_db2_x3m $max_db2_x3m $min_db2_x2l $max_db2_x2l 200 200 $temperatura DB2_XM3_XL2.dat """ Explanation: Calculando la Free Energy intramolecular para el Puente 2 End of explanation """ # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db2_xl1_vs_xr1.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^L_1}" set ylabel "{/=30 X@^R_1}" set title "Free Energy Surface Intramolecular DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB2_XL1_XR1.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db2_xl2_vs_xr2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^L_2}" set ylabel "{/=30 X@^R_2}" set title "Free Energy Surface Intramolecular DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB2_XL2_XR2.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db2_xm3_vs_xl1.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^L_1}" set title "Free 
Energy Surface Intramolecular DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB2_XM3_XL1.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db2_xm3_vs_xl2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^L_2}" set title "Free Energy Surface Intramolecular DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB2_XM3_XL2.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db2_xm3_vs_xr2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^R_2}" set title "Free Energy Surface Intramolecular DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB2_XM3_XR2.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db2_xm3_vs_xr1.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 X@^M_3}" set ylabel "{/=30 X@^R_1}" set title "Free Energy Surface Intramolecular DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB2_XM3_XR1.dat" with pm3d """ Explanation: Ploteando con GNUPLOT el Puente 2 End of explanation """ ############################################ #### Intermolecular DB1- DB2 - X1L ############################################ #Creando el DB1-DB2-X1L !paste db1_x1l.dat db2_x1l.dat > DB1_DB2_x1l.dat print('Minimo DB1-X1L=>',min_x1l) print('Máximo DB1-X1L=>',max_x1l) print('Minimo DB2-X1L=>',min_db2_x1l) print('Máximo DB2-X1L=>',max_db2_x1l) #Ejecutando el script de FES !python generateFES.py DB1_DB2_x1l.dat $min_x1l $max_x1l $min_db2_x1l $max_db2_x1l 200 200 $temperatura DB1_DB2_X1L.dat ######################################### #### Intermolecular DB1- DB2 - X2L ############################################ #Creando el DB1-DB2-X2L !paste db1_x2l.dat db2_x2l.dat > DB1_DB2_x2l.dat print('Minimo DB1-X2L=>',min_x2l) print('Máximo DB1-X2L=>',max_x2l) print('Minimo DB2-X2L=>',min_db2_x2l) print('Máximo DB2-X2L=>',max_db2_x2l) #Ejecutando el script de FES !python generateFES.py DB1_DB2_x2l.dat $min_x2l $max_x2l $min_db2_x2l $max_db2_x2l 200 200 $temperatura DB1_DB2_X2L.dat ############################################ #### Intermolecular DB1- DB2 - X3M 
############################################ #Creando el DB1-DB2-X3M !paste db1_x3m.dat db2_x3m.dat > DB1_DB2_x3m.dat print('Minimo DB1-X3M=>',min_x3m) print('Máximo DB1-X3M=>',max_x3m) print('Minimo DB2-X3M=>',min_db2_x3m) print('Máximo DB2-X3M=>',max_db2_x3m) #Ejecutando el script de FES !python generateFES.py DB1_DB2_x3m.dat $min_x3m $max_x3m $min_db2_x3m $max_db2_x3m 200 200 $temperatura DB1_DB2_X3M.dat ############################################ #### Intermolecular DB1- DB2 - X2R ############################################ #Creando el DB1-DB2-X2R !paste db1_x2r.dat db2_x2r.dat > DB1_DB2_x2r.dat print('Minimo DB1-X2R=>',min_x2r) print('Máximo DB1-X2R=>',max_x2r) print('Minimo DB2-X2R=>',min_db2_x2r) print('Máximo DB2-X2R=>',max_db2_x2r) #Ejecutando el script de FES !python generateFES.py DB1_DB2_x2r.dat $min_x2r $max_x2r $min_db2_x2r $max_db2_x2r 200 200 $temperatura DB1_DB2_X2R.dat ############################################ #### Intermolecular DB1- DB2 - X1R ############################################ #Creando el DB1-DB2-X1R !paste db1_x1r.dat db2_x1r.dat > DB1_DB2_x1r.dat print('Minimo DB1-X1R=>',min_x1r) print('Máximo DB1-X1R=>',max_x1r) print('Minimo DB2-X1R=>',min_db2_x1r) print('Máximo DB2-X1R=>',max_db2_x1r) #Ejecutando el script de FES !python generateFES.py DB1_DB2_x1r.dat $min_x1r $max_x1r $min_db2_x1r $max_db2_x1r 200 200 $temperatura DB1_DB2_X1R.dat """ Explanation: Free Energy Intermolecular End of explanation """ # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "DB1_DB2_X1L.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 DB1 X@^L_1}" set ylabel "{/=30 DB2 X@^L_1}" set title "Free Energy Surface Intermolecular DB1-DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB1_DB2_X1L.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "DB1_DB2_X2L.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 DB1 X@^L_2}" set ylabel "{/=30 DB2 X@^L_2}" set title "Free Energy Surface Intermolecular DB1-DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB1_DB2_X2L.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "DB1_DB2_X3M.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 DB1 X@^M_3}" set ylabel "{/=30 DB2 X@^M_3}" set title "Free Energy Surface Intermolecular DB1-DB2" ##Descomentar la 
siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB1_DB2_X3M.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "DB1_DB2_X2R.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set xyplane 0 set pm3d interpolate 0,0 set xlabel "{/=30 DB1 X@^R_2}" set ylabel "{/=30 DB2 X@^R_2}" set title "Free Energy Surface Intermolecular DB1-DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "DB1_DB2_X2R.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "DB1_DB2_X1R.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 DB1 X@^R_1}" set ylabel "{/=30 DB2 X@^R_1}" set title "Free Energy Surface Intermolecular DB1-DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente set cbrange[8:10] splot "DB1_DB2_X1R.dat" with pm3d """ Explanation: Ploteando la Free Energy Intermolecular puentes DB1 y DB2 End of explanation """ hist_escale_y=[] fig = pl.figure(figsize=(25,8)) fig.subplots_adjust(hspace=.4, wspace=.3) #subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None) #left = 0.125 # the left side of the subplots of the figure #right = 0.9 # the right side of the subplots of the figure #bottom = 0.1 # the bottom of the subplots of the figure #top = 0.9 # the top of the subplots of the figure #wspace = 0.2 # the amount of width reserved for blank space between subplots #hspace = 0.2 # the amount of height reserved for white space between subplots #Formateando los valores de los ejes #Engrosando marcos ax = fig.add_subplot(2,5,1) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) ax = fig.add_subplot(2,5,2) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) ax = fig.add_subplot(2,5,3) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) ax = fig.add_subplot(2,5,4) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) ax = fig.add_subplot(2,5,5) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) #Cargando valores del DB1 data_h_db1_x1l=np.loadtxt('db1_x1l.dat',comments=['#', '@']) data_h_db1_x2l=np.loadtxt('db1_x2l.dat',comments=['#', '@']) data_h_db1_x3m=np.loadtxt('db1_x3m.dat',comments=['#', '@']) data_h_db1_x2r=np.loadtxt('db1_x2r.dat',comments=['#', '@']) data_h_db1_x1r=np.loadtxt('db1_x1r.dat',comments=['#', '@']) #Cargando valores del DB2 
data_h_db2_x1l=np.loadtxt('db2_x1l.dat',comments=['#', '@']) data_h_db2_x2l=np.loadtxt('db2_x2l.dat',comments=['#', '@']) data_h_db2_x3m=np.loadtxt('db2_x3m.dat',comments=['#', '@']) data_h_db2_x2r=np.loadtxt('db2_x2r.dat',comments=['#', '@']) data_h_db2_x1r=np.loadtxt('db2_x1r.dat',comments=['#', '@']) sub1 = fig.add_subplot(251) # instead of plt.subplot(2, 2, 1) sub1.set_xlabel('Angle (Degree) ', fontsize=10) sub1.set_ylabel('P(Angle)') n1, bins1, rectangles1 = sub1.hist(data_h_db1_x1l,100, normed=True, color='black',histtype='step', linewidth=3) n2, bins2, rectangles2 = sub1.hist(data_h_db2_x1l,100, normed=True, color='red',histtype='step', linewidth=3) x1,x2,y1,y2=sub1.axis() hist_escale_y.append(y2) sub2 = fig.add_subplot(252) # instead of plt.subplot(2, 2, 1) sub2.set_xlabel('Angle (Degree) ', fontsize=10) sub2.set_ylabel('P(Angle)') n1, bins1, rectangles1 = sub2.hist(data_h_db1_x2l,100, normed=True, color='black',histtype='step', linewidth=3) n2, bins2, rectangles2 = sub2.hist(data_h_db2_x2l,100, normed=True, color='red',histtype='step', linewidth=3) x1,x2,y1,y2=sub2.axis() hist_escale_y.append(y2) sub3 = fig.add_subplot(253) # instead of plt.subplot(2, 2, 1) sub3.set_xlabel('Angle (Degree) ', fontsize=10) sub3.set_ylabel('P(Angle)') n1, bins1, rectangles1 = sub3.hist(data_h_db1_x3m,100, normed=True, color='black',histtype='step', linewidth=3) n2, bins2, rectangles2 = sub3.hist(data_h_db2_x3m,100, normed=True, color='red',histtype='step', linewidth=3) x1,x2,y1,y2=sub3.axis() hist_escale_y.append(y2) sub4 = fig.add_subplot(254) # instead of plt.subplot(2, 2, 1) sub4.set_xlabel('Angle (Degree) ', fontsize=10) sub4.set_ylabel('P(Angle)') n1, bins1, rectangles1 = sub4.hist(data_h_db1_x2r,100, normed=True, color='black',histtype='step', linewidth=3) n2, bins2, rectangles2 = sub4.hist(data_h_db2_x2r,100, normed=True, color='red',histtype='step', linewidth=3) x1,x2,y1,y2=sub4.axis() hist_escale_y.append(y2) sub5 = fig.add_subplot(255) # instead of plt.subplot(2, 2, 1) sub5.set_xlabel('Angle (Degree) ', fontsize=10) sub5.set_ylabel('P(Angle)') n1, bins1, rectangles1 = sub5.hist(data_h_db1_x1r,100, normed=True, color='black',histtype='step', linewidth=3) n2, bins2, rectangles2 = sub5.hist(data_h_db2_x1r,100, normed=True, color='red',histtype='step', linewidth=3) x1,x2,y1,y2=sub5.axis() hist_escale_y.append(y2) #escale_y hist_escale_y.sort(reverse=True) hist_escale_y ##Cambiando los ejes de las y sub1.axis((x1,x2,y1,hist_escale_y[0])) sub2.axis((x1,x2,y1,hist_escale_y[0])) sub3.axis((x1,x2,y1,hist_escale_y[0])) sub4.axis((x1,x2,y1,hist_escale_y[0])) sub5.axis((x1,x2,y1,hist_escale_y[0])) """ Explanation: Calcular los histogramas de los diedros End of explanation """ ### Creando el directorio para el análisis de las distancias de enlace de los puentes INTERMOLECULAR ruta_bonds_puentes = nuevaruta+'/bonds_puentes' print ( ruta_bonds_puentes ) if not os.path.exists(ruta_bonds_puentes): os.makedirs(ruta_bonds_puentes) print ('Se ha creado la ruta ===>',ruta_bonds_puentes) else: print ("La ruta "+ruta_bonds_puentes+" existe..!!!") print ( 'Nos vamos a ....', ruta_bonds_puentes) os.chdir( ruta_bonds_puentes ) """ Explanation: Ángulos de Enlace de los puentes Intermolecular End of explanation """ print ('\nCopiando el archivo generateFES.py a '+ruta_bonds_puentes) source_file=ruta_scripts+'/free_energy/generateFES.py' dest_file=ruta_bonds_puentes+'/generateFES.py' shutil.copy(source_file,dest_file) #Cambiando permisos de ejecución !chmod +x generateFES.py """ Explanation: Copiando el archivo de 
generación de FES End of explanation """ psf=ruta_old_traj+'/'+psf_file dcd=ruta_old_traj+'/'+dcd_file print ('Puente DB1=>',DB1_N) print ('Puente DB1=>',DB1_i) print ('Puente DB2=>',DB2_N) print ('Puente DB2=>',DB2_i) puente=2 if (int(puente)==2): #Creando script para Bond X1 Left b1 = open('bond_DB1_left.tcl', 'w') print(b1) b1.write('set psfFile '+ psf+' \n') b1.write('set dcdFile '+ dcd+' \n') b1.write('\nmol load psf $psfFile dcd $dcdFile\n') b1.write('set outfile ' +'[open ' +'bond_db1_left.dat'+' w]\n') b1.write('set nf [molinfo top get numframes]\n') b1.write(' \n') b1.write('set selatoms1 [[atomselect top "protein and chain A and '+DB1_i[1]+'"] get index]\n') b1.write('set selatoms2 [[atomselect top "protein and chain A and '+DB1_i[2]+'"] get index]\n') b1.write('set selatoms3 [[atomselect top "protein and chain A and '+DB1_i[3]+'"] get index]\n') b1.write('set angle [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] ]\n') b1.write('for {set i 0} {$i < $nf} {incr i 1} {\n') b1.write(' set x [measure angle $angle frame $i]\n') b1.write(' set time [expr {$i +1}]\n') b1.write(' puts $outfile "$time $x"\n') b1.write('}\n') b1.close() #Creando script para Bond X1 Right b2 = open('bond_DB1_right.tcl', 'w') print(b2) b2.write('set psfFile '+ psf+' \n') b2.write('set dcdFile '+ dcd+' \n') b2.write('\nmol load psf $psfFile dcd $dcdFile\n') b2.write('set outfile ' +'[open ' +'bond_db1_right.dat'+' w]\n') b2.write('set nf [molinfo top get numframes]\n') b2.write(' \n') b2.write('set selatoms1 [[atomselect top "protein and chain A and '+DB1_i[4]+'"] get index]\n') b2.write('set selatoms2 [[atomselect top "protein and chain A and '+DB1_i[5]+'"] get index]\n') b2.write('set selatoms3 [[atomselect top "protein and chain A and '+DB1_i[6]+'"] get index]\n') b2.write('set angle [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] ]\n') b2.write('for {set i 0} {$i < $nf} {incr i 1} {\n') b2.write(' set x [measure angle $angle frame $i]\n') b2.write(' set time [expr {$i +1}]\n') b2.write(' puts $outfile "$time $x"\n') b2.write('}\n') b2.close() #Creando script para Bond DB2 X1 Left b3 = open('bond_DB2_left.tcl', 'w') print(b3) b3.write('set psfFile '+ psf+' \n') b3.write('set dcdFile '+ dcd+' \n') b3.write('\nmol load psf $psfFile dcd $dcdFile\n') b3.write('set outfile ' +'[open ' +'bond_db2_left.dat'+' w]\n') b3.write('set nf [molinfo top get numframes]\n') b3.write(' \n') b3.write('set selatoms1 [[atomselect top "protein and chain A and '+DB2_i[1]+'"] get index]\n') b3.write('set selatoms2 [[atomselect top "protein and chain A and '+DB2_i[2]+'"] get index]\n') b3.write('set selatoms3 [[atomselect top "protein and chain A and '+DB2_i[3]+'"] get index]\n') b3.write('set angle [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] ]\n') b3.write('for {set i 0} {$i < $nf} {incr i 1} {\n') b3.write(' set x [measure angle $angle frame $i]\n') b3.write(' set time [expr {$i +1}]\n') b3.write(' puts $outfile "$time $x"\n') b3.write('}\n') b3.close() #Creando script para Bond DB2 X1 Right b4 = open('bond_DB2_right.tcl', 'w') print(b4) b4.write('set psfFile '+ psf+' \n') b4.write('set dcdFile '+ dcd+' \n') b4.write('\nmol load psf $psfFile dcd $dcdFile\n') b4.write('set outfile ' +'[open ' +'bond_db2_right.dat'+' w]\n') b4.write('set nf [molinfo top get numframes]\n') b4.write(' \n') b4.write('set selatoms1 [[atomselect top "protein and chain A and '+DB2_i[4]+'"] get index]\n') b4.write('set selatoms2 [[atomselect top "protein and chain A and '+DB2_i[5]+'"] get 
index]\n') b4.write('set selatoms3 [[atomselect top "protein and chain A and '+DB2_i[6]+'"] get index]\n') b4.write('set angle [list [lindex $selatoms1] [lindex $selatoms2] [lindex $selatoms3] ]\n') b4.write('for {set i 0} {$i < $nf} {incr i 1} {\n') b4.write(' set x [measure angle $angle frame $i]\n') b4.write(' set time [expr {$i +1}]\n') b4.write(' puts $outfile "$time $x"\n') b4.write('}\n') b4.close() """ Explanation: Generando los archivos Tcl para el cálculo de los ángulos. End of explanation """ #Calculando con VMD bond DB1 Left !vmd -dispdev text < bond_DB1_left.tcl #Calculando con VMD bond DB1 Right !vmd -dispdev text < bond_DB1_right.tcl #Calculando con VMD bond DB2 Left !vmd -dispdev text < bond_DB2_left.tcl #Calculando con VMD bond DB2 Right !vmd -dispdev text < bond_DB2_right.tcl """ Explanation: Ejecutando los archivos tcl generados con VMD End of explanation """ #Cargando valores del DB1 data_bond_db1_left=np.loadtxt('bond_db1_left.dat',comments=['#', '@']) #Cargando valores del DB1_X1R data_bond_db1_right=np.loadtxt('bond_db1_right.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB1 Left min_bond1_left=np.amin(data_bond_db1_left[:,1]) max_bond1_left=np.amax(data_bond_db1_left[:,1]) print ('Minimo DB1_Left=>',min_bond1_left) print ('Máximo DB1_Left=>',max_bond1_left) #Obteniendo los valores máximo y mínimo del DB1 Right min_bond1_right=np.amin(data_bond_db1_right[:,1]) max_bond1_right=np.amax(data_bond_db1_right[:,1]) print ('Minimo DB1_Right=>',min_bond1_right) print ('Máximo DB1_Right=>',max_bond1_right) #Creando los archivos de entrada para el script np.savetxt('bond_DB1_left.dat',data_bond_db1_left[:,1], fmt='%1.14f') np.savetxt('bond_DB1_right.dat',data_bond_db1_right[:,1], fmt='%1.14f') !paste bond_DB1_left.dat bond_DB1_right.dat > angles_DB1.dat #Ejecutando el script de FES !python generateFES.py angles_DB1.dat $min_bond1_left $max_bond1_left $min_bond1_right $max_bond1_right 200 200 $temperatura Angles_DB1.dat ###################################################################3 #Cargando valores del DB2 data_bond_db2_left=np.loadtxt('bond_db2_left.dat',comments=['#', '@']) #Cargando valores del DB1_X1R data_bond_db2_right=np.loadtxt('bond_db2_right.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del DB2 Left min_bond2_left=np.amin(data_bond_db2_left[:,1]) max_bond2_left=np.amax(data_bond_db2_left[:,1]) print ('Minimo DB2_Left=>',min_bond2_left) print ('Máximo DB2_Left=>',max_bond2_left) #Obteniendo los valores máximo y mínimo del DB2 Right min_bond2_right=np.amin(data_bond_db2_right[:,1]) max_bond2_right=np.amax(data_bond_db2_right[:,1]) print ('Minimo DB2_Right=>',min_bond2_right) print ('Máximo DB2_Right=>',max_bond2_right) #Creando los archivos de entrada para el script np.savetxt('bond_DB2_left.dat',data_bond_db2_left[:,1], fmt='%1.14f') np.savetxt('bond_DB2_right.dat',data_bond_db2_right[:,1], fmt='%1.14f') !paste bond_DB2_left.dat bond_DB2_right.dat > angles_DB2.dat #Ejecutando el script de FES !python generateFES.py angles_DB2.dat $min_bond2_left $max_bond2_left $min_bond2_right $max_bond2_right 200 200 $temperatura Angles_DB2.dat """ Explanation: Calculando la Free Energy de los Bonds de los puentes End of explanation """ # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db1_a1_a2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 
'#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 C@^1_{/Symbol a}}-{/=30 C@^1_{/Symbol b}}-{/=30 S@^1_{/Symbol g}}" set ylabel "{/=30 C@^2_{/Symbol a}}-{/=30 C@^2_{/Symbol b}}-{/=30 S@^2_{/Symbol g}}" set title "Free Energy Surface Angles DB1" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "Angles_DB1.dat" with pm3d # This loads the magics for gnuplot %reload_ext gnuplot_kernel #Configurando la salida para GNUplot %gnuplot inline pngcairo transparent enhanced font "arial,20" fontscale 1.0 size 1280,960; set zeroaxis;; %%gnuplot set output "db2_a1_a2.png" set palette model RGB set palette defined ( 0 '#000090',\ 1 '#000fff',\ 2 '#0090ff',\ 3 '#0fffee',\ 4 '#90ff70',\ 5 '#ffee00',\ 6 '#ff7000',\ 7 '#ee0000',\ 8 '#7f0000') set view map set dgrid3d set pm3d interpolate 0,0 set xlabel "{/=30 C@^1_{/Symbol a}}-{/=30 C@^1_{/Symbol b}}-{/=30 S@^1_{/Symbol g}}" set ylabel "{/=30 C@^2_{/Symbol a}}-{/=30 C@^2_{/Symbol b}}-{/=30 S@^2_{/Symbol g}}" set title "Free Energy Surface Angles DB2" ##Descomentar la siguiente línea de código en caso de que la escala comience con valor de 1 y ejecutar nuevamente #set cbrange[8:10] splot "Angles_DB2.dat" with pm3d """ Explanation: Ploteando la Free Energy de los ángulos con gnuplot End of explanation """ bonds_escale_y=[] #Cargando valores del DB1 data_h_db1_left=np.loadtxt('bond_DB1_left.dat',comments=['#', '@']) data_h_db1_right=np.loadtxt('bond_DB1_right.dat',comments=['#', '@']) #Cargando valores del DB2 data_h_db2_left=np.loadtxt('bond_DB2_left.dat',comments=['#', '@']) data_h_db2_right=np.loadtxt('bond_DB2_right.dat',comments=['#', '@']) #Engrosar marco figb=pl.figure(figsize=(12, 10), dpi=100, linewidth=3.0) figb.subplots_adjust(hspace=.5) ax = figb.add_subplot(221) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) ax = figb.add_subplot(222) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) ax = figb.add_subplot(223) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) ax = figb.add_subplot(224) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(4) #Formateando los valores de los ejes ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) bond1 = figb.add_subplot(221) # instead of plt.subplot(2, 2, 1) #bond1.set_title('CA1 - CB1 - SY1') bond1.set_xlabel('Angle (Degree)') bond1.set_ylabel('P (Angle)') n, bins, rectangles = bond1.hist(data_h_db1_left,100, normed=True, color='black',histtype='step',linewidth=3) x1,x2,y1,y2=bond1.axis() bonds_escale_y.append(y2) bond2 = figb.add_subplot(222) # instead of plt.subplot(2, 2, 1) #bond2.set_title('CA2 - CB2 - SY2') bond2.set_xlabel('Angle (Degree)') bond2.set_ylabel('P (Angle)') n, bins, rectangles = bond2.hist(data_h_db1_right,100, normed=True, color='black',histtype='step', linewidth=3) x1,x2,y1,y2=bond2.axis() bonds_escale_y.append(y2) bond3 = figb.add_subplot(223) # instead of plt.subplot(2, 2, 1) #bond3.set_title('CA1 - CB1 - SY1') bond3.set_xlabel('Angle (Degree)') bond3.set_ylabel('P (Angle)') n, bins, rectangles = bond3.hist(data_h_db2_left,100, normed=True, color='red',histtype='step', linewidth=3) x1,x2,y1,y2=bond3.axis() bonds_escale_y.append(y2) bond4 = figb.add_subplot(224) # instead of plt.subplot(2, 2, 1) #bond4.set_title('CA2 - CB2 - SY2') bond4.set_xlabel('Angle (Degree)') 
bond4.set_ylabel('P (Angle)') n, bins, rectangles = bond4.hist(data_h_db2_right,100, normed=True, color='red',histtype='step', linewidth=3) x1,x2,y1,y2=bond4.axis() bonds_escale_y.append(y2) #escale_y bonds_escale_y.sort(reverse=True) bonds_escale_y ##Cambiando los ejes de las y sub1.axis((x1,x2,y1,bonds_escale_y[0])) sub2.axis((x1,x2,y1,bonds_escale_y[0])) sub3.axis((x1,x2,y1,bonds_escale_y[0])) sub4.axis((x1,x2,y1,bonds_escale_y[0])) """ Explanation: Calculando los histogramas de los bonds End of explanation """ ### Creando el directorio para el análisis de los puentes ruta_clusters = nuevaruta+'/clusters' print ( ruta_clusters ) if not os.path.exists(ruta_clusters): os.makedirs(ruta_clusters) print ('Se ha creado la ruta ===>',ruta_clusters) else: print ("La ruta "+ruta_clusters+" existe..!!!") print ( 'Nos vamos a ....', ruta_clusters) os.chdir( ruta_clusters ) """ Explanation: Generación de clusters Crear la nueva ruta para calcular los clusters End of explanation """ !echo 1 1 | g_cluster -f ../output.xtc -s ../ionized.pdb -method gromos -cl out.pdb -g out.log -cutoff 0.2 """ Explanation: Calculando los clusters con la opción (1= Protein) End of explanation """ !vmd out.pdb """ Explanation: Cargando los clusters para su visualización en VMD Se cargan los clusters en VMD y se guardan sus coordenadas para cada uno de ellos haciendo uso de VMD End of explanation """ ### Creando el directorio para el análisis de colorByRMSF ruta_colorByRMSF = nuevaruta+'/colorByRMSF' print ( ruta_colorByRMSF ) if not os.path.exists(ruta_colorByRMSF): os.makedirs(ruta_colorByRMSF) print ('Se ha creado la ruta ===>',ruta_colorByRMSF) else: print ("La ruta "+ruta_colorByRMSF+" existe..!!!") print ( 'Nos vamos a ....', ruta_colorByRMSF) os.chdir( ruta_colorByRMSF ) """ Explanation: colorByRMSF Creando la carpeta para salida de datos End of explanation """ print ('\nCopiando el archivo colorByRMSF.vmd a '+ruta_colorByRMSF) source_file=ruta_scripts+'/colorByRMSF/colorByRMSF.vmd' dest_file=ruta_colorByRMSF+'/colorByRMSF.vmd' shutil.copy(source_file,dest_file) """ Explanation: Copiando el archivo a la carpeta de datos End of explanation """ print ('Ejecutando el análisis de rmsf...') !echo 1 | g_rmsf -f ../output.xtc -s ../ionized.pdb -oq bfac.pdb -o rmsf.xvg #Calculando el mínimo y máximo del rmsf #Cargando valores del RMSF data_rmsf_gcolor=np.loadtxt('rmsf.xvg',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del RMSF min_rmsf_gcolor=np.amin(data_rmsf_gcolor[:,1]) max_rmsf_gcolor=np.amax(data_rmsf_gcolor[:,1]) print ('Minimo_RMSF=>',min_rmsf_gcolor) print ('Máximo_RMSF=>',max_rmsf_gcolor) """ Explanation: Calculando el RMSF para el análisis de la proteína con la opción (1) Protein End of explanation """ # Cargando el pdb con VMD !vmd ../ionized.pdb """ Explanation: Cargar el scrit colorByRMSF.vmd en VMD Arrancar VMD, dirigirse al menú Extensions -> Tk Console, copiar y ejecutar la siguiente secuencia de comandos en el cual pondremos los valores del Mínimo_RMSF y Máximo_RMSF calculado en la celda anterior: tcl source colorByRMSF.vmd colorByRMSF top rmsf.xvg Mínimo_RMSF Máximo_RMSF ESCALA DE COLOR Dirigirse al menú Extensions -> Visualization -> Color Scale Bar y cambiar los valores de los siguientes campos: 1. Colocar el valor calculado de Mínimo_RMSF en el campo Mínimum scale value 2. Colocar el valor calculado de Máximo_RMSF en el campo Maximum scale value. 3. Seleccionar el color Black en el campo Color of labels. 
CAMBIAR EL COLOR DE FONDO Dirigirse al menú Graphics -> Colors , y realizar las siguientes selecciones: 1. Categories seleccionar Display 2. Names seleccionar Background 3. Colors seleccionar 8 White REMOVER EJE X,Y,Z Dirigirse al menú Display -> Axes -> Off, con el cual eliminaremos el eje de X,Y,Z. End of explanation """ print ( 'Nos vamos a ....', ruta_colorByRMSF ) os.chdir( ruta_colorByRMSF ) """ Explanation: Graficando B-Factors con Chimera End of explanation """ #Inicializando vector rmsf=[] rmsf_x=[] rmsf_y=[] try: file_Bfactor = open( 'bfac.pdb' ) new_bfactor=open('bfac_new.pdb','w') except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in file_Bfactor.readlines(): fila = linea.strip() sl = fila.split() cadena=sl[0] if (cadena=='ATOM'): if (len(sl)==12): new_bfactor.write(linea) else: x=linea[0:60] tempFactor=linea[60:66] #print (x) #print(tempFactor) y=fila[67:] #print (y) enviar=x+' '+tempFactor+y new_bfactor.write(enviar+'\n') #print(enviar) else: #print (linea) new_bfactor.write(linea) new_bfactor.close() """ Explanation: Adecuando archivo bfac.pdb para obtener la columna de B-factors End of explanation """ !gedit bfac_new.pdb """ Explanation: Revisando la estructura del archivo generado. Revisar que los campos se encuentren completamente alineados en la estructura de los campos. Guardar y salir. End of explanation """ #Inicializando vector bfactors_color=[] try: file_bfactor_color = open( 'bfac_new.pdb' ) except IOError: print ('No se pudo abrir el archivo o no existe·..') i=0 for linea in file_bfactor_color.readlines(): fila = linea.strip() sl = fila.split() if (sl[0]=='ATOM'): #print (sl[0]) idresidue=fila[23:26] bfactor=fila[60:66] #print (idresidue + '\t'+bfactor) bfactors_color.append(idresidue+'\t'+bfactor+'\n') #i=i+1 #Escribiendo el archivo BFACTOR.dat f = open('protein_bfactor.dat', 'w') #f.write('@ title "B-factors" \n') f.write('@ xaxis label " Residue" \n') f.write('@ xaxis label char size 1.480000\n') f.write('@ xaxis bar linewidth 5.0\n') f.write('@ xaxis ticklabel char size 1.480000\n') f.write('@ yaxis label "B-factors (' +"\\"+'cE'+"\\"+'C)"\n') f.write('@ yaxis label char size 1.480000\n') f.write('@ yaxis bar linewidth 5.0\n') f.write('@ yaxis ticklabel char size 1.480000\n') f.write('@ s0 line linewidth 7\n') f.write('@ s0 symbol 1\n') f.write('@ s0 symbol size 1.000000\n') f.write('@ s0 symbol color 1\n') f.write('@ s0 symbol pattern 1\n') f.write('@ s0 symbol fill color 2\n') f.write('@ s0 symbol fill pattern 1\n') f.write('@ s0 symbol linewidth 1.0\n') f.write('@TYPE xy \n') f.write("".join(bfactors_color)) f.close() !xmgrace protein_bfactor.dat #Cargando la imagen generada en xmgrace Image(filename='protein_bfactor.png') #Calculando el mínimo y máximo del rmsf #Cargando valores del RMSF data_bfactor_color=np.loadtxt('protein_bfactor.dat',comments=['#', '@']) #Obteniendo los valores máximo y mínimo del RMSF min_bfactor_color=np.amin(data_bfactor_color[:,1]) max_bfactor_color=np.amax(data_bfactor_color[:,1]) print ('Minimo_B-Factor=>',min_bfactor_color) print ('Máximo_B-Factor=>',max_bfactor_color) """ Explanation: Generando el archivo de Bfactors para todos los átomos FALTA ADECUAR PARA SACAR EL MAYOR POR RESIDUO End of explanation """ !chimera bfac_new.pdb """ Explanation: Cargando el archivo pdb con Chimera para realizar la coloración de Bfactors End of explanation """ ##Cargando la imagen generada print ('Cargando el archivo...') Image(filename='image.png') """ Explanation: Instrucciones para generar la imagen de 
B-factors ESTABLECER EL MODO DE VISUALIZACIÓN 1. Seleccionar del menú principal Presets -> Interactive 2 (all atoms). 2. Seleccionar del menú principal Actions -> Surface -> Show. 3. Ajustar el tamaño de la ventana principal. 4. Ajustar el tamaño y posición de la figura haciendo uso de la tecla CTRL+ Button wheel mouse. COLOREAR LOS B-FACTORS Seleccionar Tools -> Depiction -> Render by Attribute. Nos desplegará una ventana Render/Select by Attribute. 1. Del campo Attribute seleccionar bfactor. 2. En el histograma que se muestra, seleccionar la barra blanca y cambiar el color de blanco a amarillo en el campo color. 3. Pulsar el botón Apply para visualizar los cambios de coloración. 4. Pulsar OK para finalizar. FONDO BLANCO Para aplicar el fondo blanco: 1. Seleccionar del menú principal Presets->Publication_1. SALVAR LA POSICIÓN DE LA IMAGEN Una vez que se ha obtenido la imagen coloreada, ajustar la visualización rotando la imagen, con la finalidad de dejar los espacios adecuados para la inclusión de las etiquetas y la barra de color. Para salvar la posición final de la imagen: 1. Seleccionar Favorites -> Command Line. 2. En la línea de comando teclear savepos p1 Si por alguna razón movemos la posición, para restaurarla hacer lo siguiente: 1. Seleccionar Favorites -> Command Line. 2. En la línea de comando teclear reset p1 TITULO Y BARRA DE COLOR Seleccionar del menú principal Tools -> Utilities -> Color Key. El cual desplegará la ventana 2D Labels/Color Key. Para desplegar la barra de color: Seleccionar la pestaña Color Key. Cambiar el color blanco por amarillo pulsando en el botón correspondiente. Cambiar la palabra min por el valor mínimo calculado del bfactor. Cambiar la palabra max por el valor máximo calculado del bfactor. Dar click con el mouse en la parte inferior de la imagen en donde se desea visualizar la escala. Arrastrar el mouse para definir el largo y ancho de la escala. Para desplegar el título de la barra: Seleccionar la pestaña Labels. Dar click en la parte superior de barra de color para incrustar el título. Escribir el título de la barra como B-Factors(Å). Para ajustar el tamaño de letra, en el campo Font size cambiar el valor adecuado. Para desplegar el título de la imagen: Seleccionar la pestaña Labels. Dar click en la parte superior de la imagenr para incrustar el título. Escribir el título con el nombre correspondiente. Ajustar el tamaño de letra, en el campo Font size cambiar el valor adecuado. Para el título en negrita, en el campo Font style seleccionar bold. Notas: 1. Si desea cambiar una etiqueta de posición, deberá estar en la pestaña Labels, mantener pulsado el botón izquierdo del mouse sobre la etiqueta y moverla a la posición deseada. 2. Si desea eliminar una etiqueta, deberá seleccionarla en el campo de Labels y desmarcar la opción Show. SALVAR LA IMAGEN Seleccionar del menú principal File -> Save Image. El cual desplegará la ventana Save image, en el cual en el campo File name dar el nombre de image.png. SALVAR LA SESIÓN DE QUIMERA Seleccionar del menú principal File -> Save Session as. El cual desplegará la ventana Choose Session Save File, en el cual en el campo File name colocar el nombre con la extensión .py. 
End of explanation """ ### Creando el directorio para el análisis del SASA en el directorio de VMD print ('Nos vamos a ', ruta) os.chdir( ruta ) output_find=!find /usr/local -maxdepth 2 -type d -name vmd print (output_find) ruta_vmd=output_find[0] print (ruta_vmd) ruta_vmd_sasa = ruta_vmd+'/plugins/noarch/tcl/iceVMD1.0' print ( ruta_vmd_sasa ) if not os.path.exists(ruta_vmd_sasa): os.makedirs(ruta_vmd_sasa) print ('Se ha creado la ruta ===>',ruta_vmd_sasa) else: print ("La ruta "+ruta_vmd_sasa+" existe..!!!") print ( 'Nos vamos a ....', ruta_vmd_sasa ) os.chdir( ruta_vmd_sasa ) #Copiando los archivos generados a la carpeta plugins de VMD print ('\nCopiando los archivos generados a '+ruta_vmd_sasa) source_file=ruta_scripts+'/iceVMD1.0/colorplot.tcl' dest_file=ruta_vmd_sasa+'/colorplot.tcl' shutil.copy(source_file,dest_file) source_file=ruta_scripts+'/iceVMD1.0/multiplot.tcl' dest_file=ruta_vmd_sasa+'/multiplot.tcl' shutil.copy(source_file,dest_file) source_file=ruta_scripts+'/iceVMD1.0/pkgIndex.tcl' dest_file=ruta_vmd_sasa+'/pkgIndex.tcl' shutil.copy(source_file,dest_file) source_file=ruta_scripts+'/iceVMD1.0/vmdICE.tcl' dest_file=ruta_vmd_sasa+'/vmdICE.tcl' shutil.copy(source_file,dest_file) print('\nArchivos copiados.. Regresando a... '+nuevaruta) os.chdir( nuevaruta ) ### Creando el directorio para la graficación del sasa ruta_sasaColor = nuevaruta+'/sasaColor' print ( ruta_sasaColor ) if not os.path.exists(ruta_sasaColor): os.makedirs(ruta_sasaColor) print ('Se ha creado la ruta ===>',ruta_sasaColor) else: print ("La ruta "+ruta_sasaColor+" existe..!!!") print ( 'Nos vamos a ....', ruta_sasaColor ) os.chdir( ruta_sasaColor ) print ('\nCopiando el archivo de configuracion a '+ruta_sasaColor) source_file=ruta_scripts+'/iceVMD1.0/vmdrc' dest_file=ruta_sasaColor+'/.vmdrc' shutil.copy(source_file,dest_file) """ Explanation: Graficando SASA End of explanation """ !vmd ../ionized.psf ../output.xtc """ Explanation: Coloreando el SASA Arrancar VMD. Ventana vmdICE Dirigirse al menú Extensions -> Analysis -> vmdICE, se presentará una ventana y se deberán cambiar los valores de los siguientes campos: 1. To: Colocar el rango máximo de frames de la trayectoria. 2. Selection for Calculation: agregar a chain A and protein. 3. Pulsar en el botón SASA Single Atom y esperar a que termine el cálculo. CAMBIAR EL COLOR DE FONDO Dirigirse al menú Graphics -> Colors , y realizar las siguientes selecciones: 1. Categories seleccionar Display 2. Names seleccionar Background 3. Colors seleccionar 8 White CAMBIAR RESOLUCIÓN DE ESFERAS Dirigirse al menú Graphics - Representations, y en el campo Sphere Resolution cambiamos al valor de 50. ROTAR LA IMAGEN PARA PRESENTAR UNA MEJOR VISTA Y GUARDARLA. 
End of explanation """ #Borrando los archivos del vmd !rm -r $ruta_vmd_sasa """ Explanation: Restaurando configuración default de VMD End of explanation """ ### Creando el directorio para la graficación del rgyro ruta_gyroColor = nuevaruta+'/color_rgyro' print ( ruta_gyroColor ) if not os.path.exists(ruta_gyroColor): os.makedirs(ruta_gyroColor) print ('Se ha creado la ruta ===>',ruta_gyroColor) else: print ("La ruta "+ruta_gyroColor+" existe..!!!") print ( 'Nos vamos a ....', ruta_gyroColor ) os.chdir( ruta_gyroColor ) print ('\nCopiando el script colorRgyro.tcl a '+ruta_gyroColor) source_file=ruta_scripts+'/colorRgyro/colorRgyro.tcl' dest_file=ruta_gyroColor+'/colorRgyro.tcl' shutil.copy(source_file,dest_file) """ Explanation: Graficando el RGYRO End of explanation """ !vmd ../ionized.psf ../output.xtc """ Explanation: Coloreando el RGYRO Arrancar VMD, dirigirse al manú Extensions -> Tk Console, copiar y ejecutar la siguiente secuencia de comandos: tcl source colorRgyro.tcl CAMBIAR EL COLOR DE FONDO Dirigirse al menú Graphics -> Colors , y realizar las siguientes selecciones: 1. Categories seleccionar Display 2. Names seleccionar Background 3. Colors seleccionar 8 White ROTAR LA IMAGEN PARA PRESENTAR UNA MEJOR VISTA Y GUARDARLA. End of explanation """
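# Illustrative sketch (not part of the original analysis): generate a Tcl script that
# computes the radius of gyration of the protein for every frame, following the same
# write-a-Tcl-script pattern used above for the bond angles. It assumes the psf/dcd
# variables defined earlier and VMD's built-in "measure rgyr" command; the file names
# rgyro_per_frame.tcl and rgyro_per_frame.dat are only placeholders.
rg = open('rgyro_per_frame.tcl', 'w')
rg.write('set psfFile '+ psf +' \n')
rg.write('set dcdFile '+ dcd +' \n')
rg.write('\nmol load psf $psfFile dcd $dcdFile\n')
rg.write('set outfile [open rgyro_per_frame.dat w]\n')
rg.write('set nf [molinfo top get numframes]\n')
rg.write('set sel [atomselect top "protein and chain A"]\n')
rg.write('for {set i 0} {$i < $nf} {incr i 1} {\n')
rg.write('    $sel frame $i\n')
rg.write('    $sel update\n')
rg.write('    set r [measure rgyr $sel]\n')
rg.write('    set time [expr {$i + 1}]\n')
rg.write('    puts $outfile "$time $r"\n')
rg.write('}\n')
rg.write('close $outfile\n')
rg.close()
# Run the generated script with VMD in text mode
!vmd -dispdev text < rgyro_per_frame.tcl
"""
Explanation: Sketch (added for illustration): generating a per-frame radius-of-gyration Tcl script with the same Python pattern used above for the bond-angle scripts. The psf/dcd variables, the "protein and chain A" selection and VMD's measure rgyr command are assumed from the earlier cells; the output file names are placeholders.
End of explanation
"""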
no-fire/line-follower
line-follower/src/v1/convnet_regression_layer_play.ipynb
mit
#Create references to important directories we will use over and over import os, sys DATA_HOME_DIR = '/home/nathan/olin/spring2017/line-follower/line-follower/data' #import modules import numpy as np from glob import glob from PIL import Image from tqdm import tqdm from scipy.ndimage import zoom from keras.models import Sequential from keras.metrics import categorical_crossentropy, categorical_accuracy from keras.layers.convolutional import * from keras.preprocessing import image from keras.layers.core import Flatten, Dense from keras.optimizers import Adam from keras.layers.normalization import BatchNormalization from matplotlib import pyplot as plt import seaborn as sns %matplotlib inline import bcolz """ Explanation: Line Follower - CompRobo17 This notebook will show the general procedure to use our project data directories and how to do a regression task using convnets Imports and Directories End of explanation """ %cd $DATA_HOME_DIR path = DATA_HOME_DIR train_path=path + '/sun_apr_16_office_full_line_1' valid_path=path + '/sun_apr_16_office_full_line_2' """ Explanation: Create paths to data directories End of explanation """ def resize_vectorized4D(data, new_size=(64, 64)): """ A vectorized implementation of 4d image resizing Args: data (4D array): The images you want to resize new_size (tuple): The desired image size Returns: (4D array): The resized images """ fy, fx = np.asarray(new_size, np.float32) / data.shape[1:3] return zoom(data, (1, fy, fx, 1), order=1) # order is the order of spline interpolation def lowerHalfImage(array): """ Returns the lower half rows of an image Args: array (array): the array you want to extract the lower half from Returns: The lower half of the array """ return array[round(array.shape[0]/2):,:,:] def folder_to_numpy(image_directory_full): """ Read sorted pictures (by filename) in a folder to a numpy array. We have hardcoded the extraction of the lower half of the images as that is the relevant data USAGE: data_folder = '/train/test1' X_train = folder_to_numpy(data_folder) Args: data_folder (str): The relative folder from DATA_HOME_DIR Returns: picture_array (np array): The numpy array in tensorflow format """ # change directory print ("Moving to directory: " + image_directory_full) os.chdir(image_directory_full) # read in filenames from directory g = glob('*.png') if len(g) == 0: g = glob('*.jpg') print ("Found {} pictures".format(len(g))) # sort filenames g.sort() # open and convert images to numpy array - then extract the lower half of each image print("Starting pictures to numpy conversion") picture_arrays = np.array([lowerHalfImage(np.array(Image.open(image_path))) for image_path in g]) # reshape to tensorflow format # picture_arrays = picture_arrays.reshape(*picture_arrays.shape, 1) print ("Shape of output: {}".format(picture_arrays.shape)) # return array return picture_arrays return picture_arrays.astype('float32') def flip4DArray(array): """ Produces the mirror images of a 4D image array """ return array[..., ::-1,:] #[:,:,::-1] also works but is 50% slower def concatCmdVelFlip(array): """ Concatentaes and returns Cmd Vel array """ return np.concatenate((array, array*-1)) # multiply by negative 1 for opposite turn def save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w') c.flush() def load_array(fname): return bcolz.open(fname)[:] """ Explanation: Helper Functions Throughout the notebook, we will take advantage of helper functions to cleanly process our data. 
End of explanation """ %cd $train_path Y_train = np.genfromtxt('cmd_vel.csv', delimiter=',')[:,1] # only use turning angle Y_train = np.concatenate((Y_train, Y_train*-1)) X_train = folder_to_numpy(train_path + '/raw') X_train = np.concatenate((X_train, flip4DArray(X_train))) """ Explanation: Data Because we are using a CNN and unordered pictures, we can flip our data and concatenate it on the end of all training and validation data to make sure we don't bias left or right turns. Training Data Extract and store the training data in X_train and Y_train End of explanation """ X_train.shape, Y_train.shape """ Explanation: Test the shape of the arrays: X_train: (N, 240, 640, 3) Y_train: (N,) End of explanation """ %cd /tmp img = Image.fromarray(X_train[0], 'RGB') img.save("temp.jpg") image.load_img("temp.jpg") """ Explanation: Visualize the training data, currently using a hacky method to display the numpy matrix as this is being run over a remote server and I can't view new windows End of explanation """ %cd $valid_path Y_valid = np.genfromtxt('cmd_vel.csv', delimiter=',')[:,1] Y_valid = np.concatenate((Y_valid, Y_valid*-1)) X_valid = folder_to_numpy(valid_path + '/raw') X_valid = np.concatenate((X_valid, flip4DArray(X_valid))) """ Explanation: Validation Data Follow the same steps for as the training data for the validation data. End of explanation """ X_valid.shape, Y_valid.shape """ Explanation: Test the shape of the arrays: X_valid: (N, 240, 640, 3) Y_valid: (N,) End of explanation """ img_rows, img_cols = (64, 64) print(img_rows) print(img_cols) X_train = resize_vectorized4D(X_train, (img_rows, img_cols)) X_valid = resize_vectorized4D(X_valid, (img_rows, img_cols)) print(X_train.shape) print(X_valid.shape) """ Explanation: Resize Data When we train the network, we don't want to be dealing with (240, 640, 3) images as they are way too big. Instead, we will resize the images to something more managable, like (64, 64, 3) or (128, 128, 3). In terms of network predictive performance, we are not concerned with the change in aspect ratio, but might want to test a (24, 64, 3) images for faster training End of explanation """ %cd /tmp img = Image.fromarray(X_train[np.random.randint(0, X_train.shape[0])], 'RGB') img.save("temp.jpg") image.load_img("temp.jpg") """ Explanation: Visualize newly resized image. End of explanation """ gen = image.ImageDataGenerator( # rescale=1. / 255 # normalize data between 0 and 1 ) """ Explanation: Batches gen allows us to normalize and augment our images. We will just use it to rescale the images. 
End of explanation """ train_generator = gen.flow(X_train, Y_train)#, batch_size=batch_size, shuffle=True) valid_generator = gen.flow(X_valid, Y_valid)#, batch_size=batch_size, shuffle=True) # get_batches(train_path, batch_size=batch_size, # target_size=in_shape, # gen=gen) # val_batches = get_batches(valid_path, batch_size=batch_size, # target_size=in_shape, # gen=gen) data, category = next(train_generator) print ("Shape of data: {}".format(data[0].shape)) %cd /tmp img = Image.fromarray(data[np.random.randint(0, data.shape[0])].astype('uint8'), 'RGB') img.save("temp.jpg") image.load_img("temp.jpg") """ Explanation: Next, create the train and valid generators, these are shuffle and have a batch size of 32 by default End of explanation """ in_shape = (img_rows, img_cols, 3) """ Explanation: Convnet Constants End of explanation """ def get_model(): model = Sequential([ Convolution2D(32,3,3, border_mode='same', activation='relu', input_shape=in_shape), MaxPooling2D(), Convolution2D(64,3,3, border_mode='same', activation='relu'), MaxPooling2D(), Convolution2D(128,3,3, border_mode='same', activation='relu'), MaxPooling2D(), Flatten(), Dense(2048, activation='relu'), Dense(1024, activation='relu'), Dense(512, activation='relu'), Dense(1) ]) model.compile(loss='mean_absolute_error', optimizer='adam') return model model = get_model() model.summary() """ Explanation: Model Our test model will use a VGG like structure with a few changes. We are removing the final activation function. We will also use either mean_absolute_error or mean_squared_error as our loss function for regression purposes. End of explanation """ # history = model.fit_generator(train_generator, # samples_per_epoch=train_generator.n, # nb_epoch=2500, # validation_data=valid_generator, # nb_val_samples=valid_generator.n, # verbose=True) # %cd $DATA_HOME_DIR # model.save_weights('epoche_2500.h5') %cd $DATA_HOME_DIR model.load_weights('epoche_2500.h5') len(model.layers) model.pop() len(model.layers) model.compile(loss='mean_absolute_error', optimizer='adam') model.summary() X_train_features = model.predict(X_train) X_valid_features = model.predict(X_valid) %cd $train_path save_array("X_train_features.b", X_train_features) %cd $valid_path save_array("X_train_features.b", X_valid_features) X_train_features[9] def get_model_lstm(): model = Sequential([ Convolution2D(32,3,3, border_mode='same', activation='relu', input_shape=in_shape), MaxPooling2D(), Convolution2D(64,3,3, border_mode='same', activation='relu'), MaxPooling2D(), Convolution2D(128,3,3, border_mode='same', activation='relu'), MaxPooling2D(), Flatten(), Dense(2048, activation='relu'), Dense(1024, activation='relu'), Dense(512, activation='relu'), Dense(1) ]) model.compile(loss='mean_absolute_error', optimizer='adam') return model """ Explanation: Train End of explanation """ val_plot = np.convolve(history.history['val_loss'], np.repeat(1/10, 10), mode='valid') train_plot = np.convolve(history.history['loss'], np.repeat(1/10, 10), mode='valid') sns.tsplot(val_plot) X_preds = model.predict(X_valid).reshape(X_valid.shape[0],) for i in range(len(X_valid)): print("{:07f} | {:07f}".format(Y_valid[i], X_preds[i])) X_train_preds = model.predict(X_train).reshape(X_train.shape[0],) for i in range(len(X_train_preds)): print("{:07f} | {:07f}".format(Y_train[i], X_train_preds[i])) """ Explanation: Visualize Training End of explanation """ X_preds.shape X_train_preds.shape np.savetxt("X_train_valid.csv", X_preds, fmt='%.18e', delimiter=',', newline='\n') np.savetxt("X_train_preds.csv", 
X_train_preds, fmt='%.18e', delimiter=',', newline='\n')
"""
Explanation: Notes
* 32 by 32 images are too low a resolution for regression
* 64 by 64 seemed to work really well
* A moving-average plot of val_loss over time is a nice way to judge convergence
* It can take up to 2000 epochs to reach a good minimum
End of explanation
"""
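# Illustrative sketch (an assumption, not from the original notebook): the cells above
# pop the final layer and save 512-dimensional feature vectors per frame, while
# get_model_lstm is still just a copy of the CNN. One possible next step is to window
# the per-frame features into short sequences and train a small recurrent regressor
# on top of them. The window length and layer sizes below are arbitrary placeholders.
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM

def make_sequences(features, targets, window=5):
    """Stack consecutive feature vectors into overlapping windows.

    features: (N, D) array of per-frame CNN features
    targets:  (N,) array of steering commands
    returns:  (N - window + 1, window, D) sequences and the target of each window's last frame
    """
    seqs = np.stack([features[i:i + window]
                     for i in range(len(features) - window + 1)])
    return seqs, targets[window - 1:]

def get_feature_lstm(window=5, feature_dim=512):
    model = Sequential([
        LSTM(64, input_shape=(window, feature_dim)),
        Dense(32, activation='relu'),
        Dense(1),
    ])
    model.compile(loss='mean_absolute_error', optimizer='adam')
    return model

# Example usage with the features computed above:
# X_seq, Y_seq = make_sequences(X_train_features, Y_train, window=5)
# lstm_model = get_feature_lstm()
# lstm_model.fit(X_seq, Y_seq, nb_epoch=10, batch_size=32)
"""
Explanation: Sketch (added, hedged): one possible way to use the saved 512-dimensional features with a recurrent model, since get_model_lstm above is still just a copy of the CNN. The windowing scheme, layer sizes and training call are assumptions, not part of the original experiment.
End of explanation
"""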
regata/dbda2e_py
chapters/7.ipynb
mit
import numpy as np from scipy.stats import beta as betad %matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') from dbda2e_utils import plotPost # Specify the data, to be used in the likelihood function. myData = np.concatenate((np.repeat(0,6), np.repeat(1,14))) # myData = [] # Exercicse 7.3 # myData = [0,1,1] # Exercicse 7.3 # Define the Bernoulli likelihood function, p(D|theta). def likelihood(theta, data): z = sum(data) N = len(data) pDataGivenTheta = theta**z * (1-theta)**(N-z) # The theta values passed into this function are generated at random, # and therefore might be inadvertently greater than 1 or less than 0. # The likelihood for theta > 1 or for theta < 0 is zero: if np.isscalar(theta): if (theta > 1) or (theta < 0): pDataGivenTheta = 0.0 else: pDataGivenTheta[(theta > 1) | (theta < 0)] = 0 return pDataGivenTheta # Define the prior density function. def prior(theta): pTheta = betad.pdf(theta, 1 ,1) # pTheta = (np.cos(4*np.pi*theta) + 1) ** 2 / 1.5 # Exercicse 7.3 # The theta values passed into this function are generated at random, # and therefore might be inadvertently greater than 1 or less than 0. # The prior for theta > 1 or for theta < 0 is zero: if np.isscalar(theta): if (theta > 1) or (theta < 0): pTheta = 0.0 else: pTheta[(theta > 1) | (theta < 0)] = 0 return pTheta # Define the relative probability of the target distribution, # as a function of vector theta. For our application, this # target distribution is the unnormalized posterior distribution. def targetRelProb(theta, data): targetRelProb = likelihood(theta, data) * prior(theta) return targetRelProb # Specify standard deviation of proposal distribution: proposalSD = [0.02, 0.2, 2.0][1] # Specify the length of the trajectory, i.e., the number of jumps to try: trajLength = 50000 # arbitrary large number # Initialize the vector that will store the results: trajectory = np.zeros((trajLength,)) # Specify where to start the trajectory: trajectory[0] = 0.01 # arbitrary value # trajectory[0] = 0.99 # Exercicse 7.3 # Specify the burn-in period: burnIn = int(np.ceil(0.0 * trajLength)) # arbitrary number, less than trajLength # Initialize accepted, rejected counters, just to monitor performance: nAccepted = 0 nRejected = 0 # Now generate the random walk. The 't' index is time or trial in the walk. # Specify seed to reproduce same random walk: np.random.seed(47405) for t in range(trajLength-1): currentPosition = trajectory[t] # Use the proposal distribution to generate a proposed jump. proposedJump = np.random.normal(loc=0.0, scale=proposalSD) # Compute the probability of accepting the proposed jump. pratio = targetRelProb(currentPosition + proposedJump, myData) / targetRelProb(currentPosition, myData) probAccept = min(1.0, pratio) # Generate a random uniform value from the interval [0,1] to # decide whether or not to accept the proposed jump. if np.random.uniform() < probAccept: trajectory[t+1] = currentPosition + proposedJump if t > burnIn: nAccepted += 1 else: # reject the proposed jump, stay at current position trajectory[t+1] = currentPosition # increment the rejected counter, just to monitor performance if t > burnIn: nRejected += 1 # Extract the post-burnIn portion of the trajectory. acceptedTraj = trajectory[(burnIn+1): len(trajectory)] # End of Metropolis algorithm. f, axs = plt.subplots(3,1,figsize=(10,15)) title = 'Prpsl.SD = %.2f' % proposalSD plotPost(acceptedTraj, axs[0], title) # Trajectory, a.k.a. 
trace plot, end of chain: idxToPlot = range((trajLength-100), trajLength) axs[1].plot(trajectory[idxToPlot] , idxToPlot, marker='.', color='cornflowerblue') axs[1].set_xlim([0.0, 1.0]) axs[1].set_title('End of Chain') axs[1].set_ylabel('Step in Chain') axs[1].set_xlabel(r'$\theta$') acc_ratio_text = r'$\frac{N_{acc}}{N_{pro}} = %.3f$' % (nAccepted/len(acceptedTraj)) axs[1].annotate(acc_ratio_text, xy=(0.05, 0.85), xycoords='axes fraction', fontsize=16) # Trajectory, a.k.a. trace plot, beginning of chain: idxToPlot = range(100) axs[2].plot(trajectory[idxToPlot] , idxToPlot, marker='.', color='cornflowerblue') axs[2].set_xlim([0.0, 1.0]) axs[2].set_title('Beginning of Chain') axs[2].set_ylabel('Step in Chain') axs[2].set_xlabel(r'$\theta$') plt.show() """ Explanation: Markov Chain Monte Carlo Exercise 7.1 Exercise 7.2 Exercise 7.3 Exercise 7.1. Purpose: Experiment with the Metropolis algorithm as displayed in Figure 7.4. Python version of BernMetrop.R End of explanation """ f, axs = plt.subplots(2,1,figsize=(10,10)) maxlags = 60 axs[0].acorr(acceptedTraj - acceptedTraj.mean(), maxlags=maxlags, color='cornflowerblue', linewidth=2.0) axs[0].set_xlim([-1, maxlags/2]) axs[0].set_xlabel('Lag') axs[0].set_ylabel('ACF') axs[0].set_title('Series acceptedTraj') Len = len(acceptedTraj) Lag = 10 trajHead = acceptedTraj[0:(Len-Lag)] trajTail = acceptedTraj[Lag:Len] axs[1].plot(trajHead , trajTail, '.', markersize=2, color='cornflowerblue') title = 'Prpsl.SD = %.2f, lag = %d, cor = %.3f' % (proposalSD, Lag, np.corrcoef(trajHead, trajTail)[0,1]) axs[1].set_title(title) plt.show() """ Explanation: Exercise 7.2 Purpose: To explore the autocorrelation function in Figure 7.12. End of explanation """ theta = np.linspace(0, 1, num=501) pTheta = (np.cos(4*np.pi*theta) + 1) ** 2 / 1.5 plt.plot(theta, pTheta, color='cornflowerblue') plt.xlabel(r'$\theta$') plt.ylabel('(cos(4*pi*theta) + 1)^2 / 1.5') plt.show() """ Explanation: Exercise 7.3 Purpose: Using a multimodal prior with the Metropolis algorithm, and seeing how chains can transition across modes or get stuck within them. Part B End of explanation """
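# Illustrative sketch (not part of the original exercises): estimate the effective
# sample size (ESS) of the chain from its autocorrelation, which summarizes the
# effect of the proposal SD explored above. The lag-summation cutoff (stop at the
# first negative autocorrelation) is a common heuristic, assumed here.
def effective_sample_size(chain, max_lag=200):
    chain = np.asarray(chain, dtype=float)
    n = len(chain)
    centered = chain - chain.mean()
    var = centered.var()
    rho_sum = 0.0
    for lag in range(1, min(max_lag, n - 1)):
        rho = np.mean(centered[:-lag] * centered[lag:]) / var
        if rho < 0.0:   # truncate the sum at the first negative autocorrelation
            break
        rho_sum += rho
    return n / (1.0 + 2.0 * rho_sum)

print('ESS of acceptedTraj: %.1f out of %d samples'
      % (effective_sample_size(acceptedTraj), len(acceptedTraj)))
"""
Explanation: Sketch (added for illustration): an effective-sample-size estimate computed from the chain's autocorrelation, complementing the ACF plots of Exercise 7.2. The truncation rule used in the sum is an assumption; it is not part of the book's exercises.
End of explanation
"""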
aleph314/K2
Regex/python_regex_problems.ipynb
gpl-3.0
urls = ['http://www.domain.com', 'https://somedomain.com', 'http://my-domain-123.net', 'https://google.com', 'http://www.foo.com', 'https://bar-baz3.com', 'ftp://domain2.com']
import re
# A stricter pattern that also checks for some alphanumeric text after the // followed
# by at least one ".alphanumeric" group could be 'https://\w+(\.\w+)+' (note the escaped dot).
# I don't check that the URL ends with .com, .net or similar, since there are too many options.
[m.string for u in urls for m in [re.search('https://', u)] if m]
"""
Explanation: Regex Problems
1. Efficiently get a list of all complete URLs that use the https protocol
End of explanation
"""
[m.group(1) for u in urls for m in [re.search('https?://(\w+(\.\w+)+)', u)] if m]
"""
Explanation: 2. Get the domains (without protocols, but including the extension, e.g. .com) for URLs with either the http or the https protocol.
End of explanation
"""
languages = ['Ar','It','it','En','En_gb','jp','en_GB','EN_IE','en-NZ','en','es','ES-es']
sum([1 for c in languages for m in [re.search('^en', c, re.IGNORECASE)] if m])
"""
Explanation: 3. Below is a list of language codes. Determine how many are some form of English.
English codes will start with En, en, or EN.
End of explanation
"""
[m.group(1).lower() + ('-' + m.group(4).upper() if m.group(2) else '')\
 for c in languages for m in [re.search('(^[A-Za-z]{2})(([-_])([A-Za-z]{2}$))?', c)] if m]
"""
Explanation: 4. Fix all language codes so that they are formatted as follows:
the first two letters are lower case,
codes with region endings use a hyphen and not an underscore,
and the region endings are upper case.
End of explanation
"""
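# Illustrative sketch (not part of the original problems): the same normalization as
# problem 4, written as a small reusable function with a pre-compiled pattern, plus a
# quick check against a few hand-written expectations. The function name and the
# expected values in the asserts are assumptions based on the rules stated above.
import re

LANG_CODE = re.compile(r'^([A-Za-z]{2})(?:[-_]([A-Za-z]{2}))?$')

def normalize_language_code(code):
    m = LANG_CODE.match(code)
    if m is None:
        raise ValueError('not a language code: %r' % code)
    base, region = m.groups()
    return base.lower() + ('-' + region.upper() if region else '')

assert normalize_language_code('En_gb') == 'en-GB'
assert normalize_language_code('ES-es') == 'es-ES'
assert normalize_language_code('jp') == 'jp'
[normalize_language_code(c) for c in languages]
"""
Explanation: Sketch (added for illustration): the problem-4 normalization packaged as a function with a compiled pattern and a few sanity-check assertions, as an alternative to the one-line comprehension above. Names and expected values are assumptions, not part of the original exercise.
End of explanation
"""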
brujonildo/randomNonlinearDynamics
approximatingRatesOfChangeFromData.ipynb
cc0-1.0
import scipy as sc import matplotlib.pylab as gr %matplotlib inline """ Explanation: Calculating rates of change from data Marco Arieli Herrera-Valdez Laboratory of computational physiology and quantitative imaging Facultad de Ciencias, Universidad Nacional Autónoma de México Consider an experimental paradigm in which a variable $y$ is experimentally obtained and it varies as a function of $x$, which is manipulated experimentally. The lines below show how to calculate the rate of change of $y$ as a function of $x$. The role of experimental noise is illustrated. End of explanation """ # x and noise x= sc.arange(0,1, 0.05) noiseAmp= 0.025; noise = noiseAmp * sc.randn(len(x)) # Functional form for y k= 15.0; a=0.5; y = 1/(1+ sc.exp(k*(a-x))) yN = noise + 1/(1+ sc.exp(k*(a-x))) # gr.plot(x,yN,'b.',label=r"$(x,y)$") gr.plot(x,y,'g') gr.legend(loc="upper left",fontsize=16) """ Explanation: Now let us create a data set with some normally distributed noise that will represent data sets obtained experimentally from varying, for instance, the amount of $x$ and measuring the resulting $y$. End of explanation """ # Define a function to calculate the slopes of secant lines for the graph of y as a function of x def secantSlope(x,y,side="left"): """ Function to calculate the slopes of secant lines for the graph of y as a function of x """ s = sc.zeros(len(y)) if side=="left": s[1:] = (y[1:]-y[:-1])/(x[1:]-x[:-1]) s[0] = (y[1]-y[0])/(x[1]-x[0]) if side=="right": s[:-1] = (y[1:]-y[:-1])/(x[1:]-x[:-1]) s[0] = (y[-1]-y[-2])/(x[-1]-x[-2]) return s """ Explanation: Approximation for the derivative and inflection points To analyze the rate of change of $y$ with respect to $x$ and examine the shape of the curve describing the graph of $y$ as a function of $x$, it is necesary to calculate approximations of the derivatives $\partial_x y$ and $\partial_x^2 y$. To do so, let us define two arrays in which we can store the calculated rates of change. The rate of change of $y$ with respect to $x$ The data representing the relationship between $x$ and $y$ is an array of points of the form $(x_i,y_i)$, $i\in \left{0,...,n-1\right}$. For a first approach, let $h_i= x_{i+1}-x_i$. Then the slopes $s_i$ of the secant lines at each pair of points can be approximated by letting \begin{eqnarray} \partial_x y \approx s_i &=& \frac{y_{i+1}-y_{i}}{x_{i+1}-x_{i}} \end{eqnarray} Approximations for data sets with "large enough" samples Let us build an approximation to the derivative by averaging the upper and lower slopes of the secant lines around each point. That is, averaging \begin{eqnarray} \partial_x y \approx& =& \frac{s_{i+1}+s_i}{2} \ &=& \frac{1}{2}\left( \frac{y_{i+1}-y_{i}}{x_{i+1}-x_{i}} + \frac{y_i-y_{i-1}}{x_{i}-x_{i-1}} \right) \end{eqnarray} End of explanation """ # Setup the arrays for dy sL = secantSlope(x,y,side="left") sLN = secantSlope(x,yN,side="left") sR = secantSlope(x,y,side="right") sRN = secantSlope(x,yN,side="right") # Central approximations for the derivatives dy = (sL+sR)/2.0 dyN = (sLN+sRN)/2.0 print(dyN) """ Explanation: Now use the function that calculates the slope of the secant line (defined above) and calculate different approximations for the derivatives. 
End of explanation """ f= gr.figure(figsize=(13,5)); gr.ioff() ax=list(); r=1; c=2; ax.append(f.add_subplot(r,c,1)) ax.append(f.add_subplot(r,c,2)) ax[0].plot(x,dyN,'wo',markeredgecolor="r") ax[0].plot(x,dyN,'r--') ax[0].plot(x,dy,'r',label=r"$(x, \partial_x y)$") ax[0].plot(x,yN,'wo',markeredgecolor="b") ax[0].plot(x,y,'b',label=r"$(x,y)$") ax[1].plot(y,dyN,'r--') ax[1].plot(y,dy,'r',label=r"$(y,\partial_x y)$") ax[1].plot(y,dyN,'wo',markeredgecolor="r") ax[0].legend(fontsize=16,loc="upper left") ax[1].legend(fontsize=16,loc="upper right"); ax[0].set_ylim(dyN.min()-0.1,0.1+dyN.max()) ax[1].set_ylim(dyN.min()-0.1,0.1+dyN.max()) ax[1].set_xlim(y.min()-0.1,0.1+y.max()) #ax[1].set_xlim(-0.1,0.1) gr.ion(); gr.draw() print(r"The maximum value of the derivative of y with respect to x is %g"%dyN.max()) """ Explanation: Now let us illustrate the variations in the central approximation to $\partial_x y$ introduced by noise. The continuous curves illustrate the behaviour of the derivative in the absence of noise. The dashed curves show the behaviour of the derivative with noise. End of explanation """ # Arrays for the second derivative ("noisy data" and representative function) d2y = sc.arange(len(y)) d2yN = sc.arange(len(yN)) d2yN[1:]= (dyN[1:]- dyN[:-1])/(x[1:]-x[:-1]) d2y[1:]= (dy[1:]- dy[:-1])/(x[1:]-x[:-1]) print((dyN[1:]- dyN[:-1])/(x[1:]-x[:-1])) """ Explanation: Changing the noise amplitude yields different values for the maximum, which can vary widely. Change the noise amplitude to check this fact. The second derivative and the inflection points The graph of $y$ as a function of $x$ seems to have an inflection point somewhere between its upper and lower branches, around the values where $y$ increases linearly. An inflection point could be found in that range by calculating an approximation to $\partial^y_x$, the second derivative of $y$ with respect to $x$, and finding the points where that second derivative is closer to zero. To do so, we calculate approximations to the second derivative by treating the slopes of the secant lines from the data as representative points for the derivative, and using such points to calculate the slopes of secant lines. The inflection points for $y$ would be those points for which $$0=\partial^2_x y = \partial_x \left( \partial_x y \right). $$ That is, inflection points are such that the first derivative has a local maximum or a local minimum. For the data, the second-order slopes of the secant lines (slopes of secant lines from the slopes of secant lines) would almost never take the value of 0. However, we can try to estimate if there are values of $x$ for which the second-order slopes are close to zero. Another way to find inflection points, is to find the points at which the derivative has a maximum, or a minimum for the interval of interest. End of explanation """
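# Illustrative sketch (not part of the original notebook): locate the approximate
# inflection point either as the x where the central estimate of the derivative is
# largest, or as the x where the (noise-free) second difference changes sign. Using
# zero-initialized float arrays here avoids the integer truncation that would occur
# when assigning float slopes into an array created with sc.arange.
d2y_f = sc.zeros(len(y))
d2y_f[1:] = (dy[1:] - dy[:-1]) / (x[1:] - x[:-1])

# x at which the first-derivative approximation is maximal
x_max_slope = x[dy.argmax()]

# x near the first sign change of the second difference
sign_change = sc.where(d2y_f[1:-1] * d2y_f[2:] < 0)[0]
x_inflection = x[sign_change[0] + 1] if len(sign_change) > 0 else None

print("x with maximal derivative:", x_max_slope)
print("x near the sign change of the second derivative:", x_inflection)
"""
Explanation: Sketch (added for illustration): two simple estimates of the inflection point discussed above, one from the maximum of the first-derivative approximation and one from the sign change of the second difference. The use of zero-initialized float arrays and the indexing convention for the sign change are assumptions.
End of explanation
"""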
Graphitenet/Fun-CSS-Java-Clock
റെൻസോർഫ്ളോ_.ipynb
gpl-2.0
print ("Gods name is Jehova") """ Explanation: <a href="https://colab.research.google.com/github/Graphitenet/Fun-CSS-Java-Clock/blob/master/%E0%B4%B1%E0%B5%86%E0%B5%BB%E0%B4%B8%E0%B5%8B%E0%B5%BC%E0%B4%AB%E0%B5%8D%E0%B4%B3%E0%B5%8B_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> End of explanation """ import tensorflow as tf """ Explanation: റെൻസോർഫ്ളോ വിളിക്കുന്നു tf എന്നു പേരിടുന്നു End of explanation """ mnist = tf.keras.datasets.mnist """ Explanation: ഡേറ്റയെ വിളിക്കുന്നു.... ചിത്രങ്ങൾ ...കൈയ്യെഴുത്ത് അക്കങ്ങളുടെ മെനിസ്റ്റ് ഡാറ്റാബേസ്, 60,000 ഉദാഹരണങ്ങൾ, ഒപ്പം 10,000 സാമ്പിളുകൾ End of explanation """ (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) """ Explanation: ഡേറ്റയെ തയ്യാറാകുക End of explanation """
google-research/google-research
diffusion_distillation/diffusion_distillation.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Download the diffusion_distillation repository !apt-get -qq install subversion !svn checkout https://github.com/google-research/google-research/trunk/diffusion_distillation !pip install -r diffusion_distillation/diffusion_distillation/requirements.txt --quiet import os import time import requests import functools import jax from jax.config import config import jax.numpy as jnp import flax from matplotlib import pyplot as plt import numpy as onp import tensorflow.compat.v2 as tf tf.enable_v2_behavior() from diffusion_distillation import diffusion_distillation # configure JAX to use the TPU if 'TPU_DRIVER_MODE' not in globals(): url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver_nightly' resp = requests.post(url) time.sleep(5) TPU_DRIVER_MODE = 1 config.FLAGS.jax_xla_backend = "tpu_driver" config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR'] print(config.FLAGS.jax_backend_target) """ Explanation: Progressive Distillation for Fast Sampling of Diffusion Models Code and model checkpoints for the <a href="https://openreview.net/forum?id=TIdIXIpzhoI">ICLR 2022 paper</a> by Tim Salimans and Jonathan Ho. Make sure to use a TPU when running this notebook, enabled via Runtime -> Change runtime type -> Hardware accelerator <a href="https://colab.research.google.com/github/google-research/google-research/blob/master/diffusion_distillation/diffusion_distillation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> abstract: Diffusion models have recently shown great promise for generative modeling, outperforming GANs on perceptual quality and autoregressive models at density estimation. A remaining downside is their slow sampling time: generating high quality samples takes many hundreds or thousands of model evaluations. Here we make two contributions to help eliminate this downside: First, we present new parameterizations of diffusion models that provide increased stability when using few sampling steps. Second, we present a method to distill a trained deterministic diffusion sampler, using many steps, into a new diffusion model that takes half as many sampling steps. We then keep progressively applying this distillation procedure to our model, halving the number of required sampling steps each time. On standard image generation benchmarks like CIFAR-10, ImageNet, and LSUN, we start out with state-of-the-art samplers taking as many as 8192 steps, and are able to distill down to models taking as few as 4 steps without losing much perceptual quality; achieving, for example, a FID of 3.0 on CIFAR-10 in 4 steps. Finally, we show that the full progressive distillation procedure does not take more time than it takes to train the original model, thus representing an efficient solution for generative modeling using diffusion at both train and test time. 
This notebook is intended as an easy way to get started with the Progressive Distillation algorithm. Reproducing the results from the paper exactly can be done using the hyperparameters in the provided config files, but this requires running at a larger scale and for longer than is practical in a notebook. Trained model checkpoints are provided and can be loaded with this notebook. End of explanation """ # create model config = diffusion_distillation.config.cifar_base.get_config() model = diffusion_distillation.model.Model(config) # init params state = jax.device_get(model.make_init_state()) state = flax.jax_utils.replicate(state) # JIT compile training step train_step = functools.partial(model.step_fn, jax.random.PRNGKey(0), True) train_step = functools.partial(jax.lax.scan, train_step) # for substeps train_step = jax.pmap(train_step, axis_name='batch', donate_argnums=(0,)) # build input pipeline total_bs = config.train.batch_size device_bs = total_bs // jax.device_count() train_ds = model.dataset.get_shuffled_repeated_dataset( split='train', batch_shape=( jax.local_device_count(), # for pmap config.train.substeps, # for lax.scan over multiple substeps device_bs, # batch size per device ), local_rng=jax.random.PRNGKey(0), augment=True) train_iter = diffusion_distillation.utils.numpy_iter(train_ds) # run training for step in range(10): batch = next(train_iter) state, metrics = train_step(state, batch) metrics = jax.device_get(flax.jax_utils.unreplicate(metrics)) metrics = jax.tree_map(lambda x: float(x.mean(axis=0)), metrics) print(metrics) """ Explanation: Train a new diffusion model End of explanation """ # create model config = diffusion_distillation.config.cifar_distill.get_config() model = diffusion_distillation.model.Model(config) # load the teacher params model.load_teacher_state(config.distillation.teacher_checkpoint_path) # init student state init_params = diffusion_distillation.utils.copy_pytree(model.teacher_state.ema_params) optim = model.make_optimizer_def().create(init_params) state = diffusion_distillation.model.TrainState( step=model.teacher_state.step, optimizer=optim, ema_params=diffusion_distillation.utils.copy_pytree(init_params), num_sample_steps=model.teacher_state.num_sample_steps//2) # build input pipeline total_bs = config.train.batch_size device_bs = total_bs // jax.device_count() train_ds = model.dataset.get_shuffled_repeated_dataset( split='train', batch_shape=( jax.local_device_count(), # for pmap config.train.substeps, # for lax.scan over multiple substeps device_bs, # batch size per device ), local_rng=jax.random.PRNGKey(0), augment=True) train_iter = diffusion_distillation.utils.numpy_iter(train_ds) steps_per_distill_iter = 10 # number of distillation steps per iteration of progressive distillation end_num_steps = 4 # eventual number of sampling steps we want to use while state.num_sample_steps >= end_num_steps: # compile training step train_step = functools.partial(model.step_fn, jax.random.PRNGKey(0), True) train_step = functools.partial(jax.lax.scan, train_step) # for substeps train_step = jax.pmap(train_step, axis_name='batch', donate_argnums=(0,)) # train the student against the teacher model print('distilling teacher using %d sampling steps into student using %d steps' % (model.teacher_state.num_sample_steps, state.num_sample_steps)) state = flax.jax_utils.replicate(state) for step in range(steps_per_distill_iter): batch = next(train_iter) state, metrics = train_step(state, batch) metrics = jax.device_get(flax.jax_utils.unreplicate(metrics)) metrics = 
jax.tree_map(lambda x: float(x.mean(axis=0)), metrics) print(metrics) # student becomes new teacher for next distillation iteration model.teacher_state = jax.device_get( flax.jax_utils.unreplicate(state).replace(optimizer=None)) # reset student optimizer for next distillation iteration init_params = diffusion_distillation.utils.copy_pytree(model.teacher_state.ema_params) optim = model.make_optimizer_def().create(init_params) state = diffusion_distillation.model.TrainState( step=model.teacher_state.step, optimizer=optim, ema_params=diffusion_distillation.utils.copy_pytree(init_params), num_sample_steps=model.teacher_state.num_sample_steps//2) """ Explanation: Distill a trained diffusion model End of explanation """ # list all available distilled checkpoints !gsutil ls gs://gresearch/diffusion-distillation # create imagenet model config = diffusion_distillation.config.imagenet64_base.get_config() model = diffusion_distillation.model.Model(config) # load distilled checkpoint for 8 sampling steps loaded_params = diffusion_distillation.checkpoints.restore_from_path('gs://gresearch/diffusion-distillation/imagenet_8', target=None)['ema_params'] # fix possible flax version errors ema_params = jax.device_get(model.make_init_state()).ema_params loaded_params = flax.core.unfreeze(loaded_params) loaded_params = jax.tree_map( lambda x, y: onp.reshape(x, y.shape) if hasattr(y, 'shape') else x, loaded_params, flax.core.unfreeze(ema_params)) loaded_params = flax.core.freeze(loaded_params) del ema_params # sample from the model imagenet_classes = {'malamute': 249, 'siamese': 284, 'great_white': 2, 'speedboat': 814, 'reef': 973, 'sports_car': 817, 'race_car': 751, 'model_t': 661, 'truck': 867} labels = imagenet_classes['truck'] * jnp.ones((4,), dtype=jnp.int32) samples = model.samples_fn(rng=jax.random.PRNGKey(0), labels=labels, params=loaded_params, num_steps=8) samples = jax.device_get(samples).astype(onp.uint8) # visualize samples padded_samples = onp.pad(samples, ((0,0), (1,1), (1,1), (0,0)), mode='constant', constant_values=255) nrows = int(onp.sqrt(padded_samples.shape[0])) ncols = padded_samples.shape[0]//nrows _, height, width, channels = padded_samples.shape img_grid = padded_samples.reshape(nrows, ncols, height, width, channels).swapaxes(1,2).reshape(height*nrows, width*ncols, channels) img = plt.imshow(img_grid) plt.axis('off') """ Explanation: Load a distilled model checkpoint and sample from it End of explanation """
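# Illustrative sketch (not part of the original notebook): sample a few of the class
# labels defined above and write each grid of samples to a PNG file. The class choices,
# RNG seed, grid-assembly helper and output file names are placeholders; model.samples_fn
# and loaded_params are reused exactly as in the cell above.
def to_grid(samples):
  padded = onp.pad(samples, ((0, 0), (1, 1), (1, 1), (0, 0)),
                   mode='constant', constant_values=255)
  nrows = int(onp.sqrt(padded.shape[0]))
  ncols = padded.shape[0] // nrows
  _, height, width, channels = padded.shape
  return padded.reshape(nrows, ncols, height, width, channels).swapaxes(1, 2).reshape(
      height * nrows, width * ncols, channels)

for name in ['malamute', 'sports_car', 'reef']:
  labels = imagenet_classes[name] * jnp.ones((4,), dtype=jnp.int32)
  samples = model.samples_fn(rng=jax.random.PRNGKey(1), labels=labels,
                             params=loaded_params, num_steps=8)
  samples = jax.device_get(samples).astype(onp.uint8)
  plt.imsave('%s_8steps.png' % name, to_grid(samples))
"""
Explanation: Sketch (added for illustration): sampling a few more ImageNet classes with the 8-step distilled checkpoint and saving each grid to disk. The grid helper simply repackages the plotting code from the previous cell; the chosen classes, RNG seed and file names are assumptions.
End of explanation
"""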
liganega/Gongsu-DataSci
notebooks/GongSu10_Tuples.ipynb
gpl-3.0
t = (3, 50, "yellow") print(t) type(t) l = [3, 50, "yellow"] l type(l) """ Explanation: 튜플 활용 주요 내용 파이썬에 내장되어 있는 컬렉션 자료형 중에서 튜플에 대해 알아 본다. 튜플(tuples): 리스트와 비슷. 하지만 수정 불가능(immutable). * 사용 형태: 소괄호 사용 even_numbers_tuple = (2, 4, 6, 8, 10) todays_datatypes_tuple = ('list', 'tuple', 'dictionary') 특징: 임의의 자료형 값들을 섞어서 항목으로 사용 가능 mixed_tuple = (1, 'abs', [2.1, 4.5]) 인덱스 또는 슬라이싱을 이용하여 각각의 항목에 또는 여러 개의 항목에 대한 정보를 활용할 수 있다. 사용법은 문자열의 경우와 동일. 튜플은 수정 불가능하다. 즉, 불변 자료형이다. 튜플 자료형은 불변 자료형이라서 메소드가 별로 없다. 많이 사용되는 두 개이다. count(): 튜플에 포함된 특정 항목이 몇 번 나타나는지 세어 줌. index(): 특정 항목의 인덱스가 몇 번인지 확인해 줌. 오늘의 주요 예제 Byun_Sato.txt 파일에는 변사또 학생의 개인 신상정보가 아래와 같이 들어 있다. ``` 학생들의 중요 개인정보이며, 배포 금지함. Name: Byun Sato Date of Birth: 95.4.28 Email: [email protected] Department: Computer Student ID: 201700251003 ``` 파일의 내용을 읽어서 아래와 같은 형식으로 리턴하는 함수를 구현하고자 한다. {'Date of Birth': (1995, 4, 28), 'Department': 'Computer', 'Email': '[email protected]', 'Name': 'Byun Sato', 'Student ID': '201700251003'} 튜플의 기본 활용 오늘의 주요 예제의 문제를 해결하려면, 문자열과 사전 자료형과 더불어 튜플에 대해 알아 보아야 한다. 튜플은 순서쌍이라고도 불리며, 리스트와 99% 비슷한 용도를 가진다. 리스트와 다른 점은 튜플이 불변 자료형이라는 것 뿐이다. 물론, 튜플이 불변 자료형이기에 리스트 자료형이 갖고 있는 다양한 메소드를 갖지 못한다. End of explanation """ t[1] t[-1] t[:2] t[: : 2] """ Explanation: 튜플의 경우 인덱싱과 슬라이싱은 문자열 또는 리스트에서의 활용과 100% 동일 End of explanation """ a = 10, 20, 30 type(a) print(a) """ Explanation: 튜플을 사용할 때 소괄호를 생략해도 된다. 하지만 기본적으로 소괄호를 사용한다. End of explanation """ t[1] = 5 """ Explanation: 튜플은 불변 자료형이다. 리스트와는 달리 인덱싱을 사용하여 튜플 특정 원소의 값을 변경할 수 없다. End of explanation """ So_Ritgun_dob = (1996, 12, 16) """ Explanation: 튜플 자료형 활용 예제 1 절대로 변경되지 않거나 변경되어서는 안되는 값들을 저장할 때 사용 예를 들어, 생년월일, 학과 전공 등등. End of explanation """ a, b = 1, 2 a """ Explanation: 튜플 자료형 활용 예제 2 여러 개의 변수들에 여러 개의 값들을 한 줄에 동시에 할당하기 위해 사용 End of explanation """ a, b = b, a a """ Explanation: 튜플을 이용하면 두 변수에 할당된 값을 스왑(swap)하는 것이 매우 간단하다. End of explanation """ def f(x): return x**2, x**3 """ Explanation: 주의: C, C#, Java 등에서 앞서의 예제와 같은 스왑기능을 구현하려면 포인터를 사용해야 한다. 튜플 자료형 활용 예제 3 여러 개의 값들을 리턴하는 함수를 정의할 때 사용 함수의 리턴값은 무조건 하나이다. 예를 들어, 2를 입력 받아서 2의 제곱과 2의 세제곱을 동시에 리턴하는 함수는 정의할 수 없다. 하지만, 두 개의 값을 튜플로 묶어서 하나의 값으로 리턴할 수는 있다. 아래 함수는 입력받은 값의 제곱과 세제곱을 튜플로 묶어서 리턴한다. 주의: 소괄호 기호는 생략이 가능하다는 것에 주의한다. End of explanation """ a, b = f(2) a """ Explanation: 이제 아래와 같이 리턴값 각각의 항목에 변수를 할당하여 사용할 수 있다. End of explanation """ a = ('Hello', 'World') a[0] = 'Hi' a = ('Hello', 'World') a[1][0] = 'w' """ Explanation: 불변성(immutability)대 가변성(mutability) 튜플과 문자열은 불변성 자료형이다. 즉, 이미 생성된 값들을 절대 수정할 수 없다. 예를 들어, 아래와 같이 튜플의 특정 항목을 대체하려거나 문자열의 일부를 다른 문자열로 대체하려는 시도는 오류를 발생시킨다. End of explanation """ b = ('Hi', a[1]) b b = ('Hi',) + (a[1],) b """ Explanation: 만약에 튜플의 특정 항목 또는 문자열의 일부를 다른 문자열로 대체하고 싶다면, 기존의 값들을 이용하여 새로운 튜플과 문자열을 생성해야 한다. End of explanation """ a = (a[0], 'w' + a[1][1:]) a """ Explanation: 주의: 길이가 1인 튜플에는 반드시 콤마를 사용해야 한다. 그렇지 않으면 튜플로 간주하지 않는다. End of explanation """ import glob import string """ Explanation: 비유해서 설명하면, 아파트가 문자열 또는 튜플 자료형이라면 아파트를 수선할 수는 없고, 대신에 기존 아파트를 부신 다음에 새로 원하는 대로 지어야 함을 의미한다. 튜플과 사전 활용 예제 튜플과 사전을 함께 활용하면 학생들의 신상정보 등을 저장할 때 유용하다. 예를 들어 '학생이름', '학번', '생일' 등을 키로 사용하고, 키값으로는 진짜 나이, 학번, 생일 등을 저장할 수 있다. 아래 코드는 특정 디렉토리에 저장된 학생들의 신상정보 파일을 모두 읽어들여서 활용하는 프로그램을 위한 함수들을 구현하고 있다. End of explanation """ def std_record_list(dir): """ 지정된 디렉토리에 포함된 모든 학생들의 신상정보 파일명을 읽어드림. 입력값: 디렉토리 이름 - 문자열 이용. 리턴값: 학생들 신상정보 파일이름으로 구성된 리스트 """ files = glob.glob(dir + '/*.txt') return sorted(files) """ Explanation: 먼저 std_record_list 함수는 지정된 디렉토리에 포함된 모든 학생들의 신상정보 파일명을 읽어드린다. glob 모듈의 glob 함수의 활용을 기억해 두면 좋다. 
End of explanation """ filenames = std_record_list('data/Students_Records/') filenames """ Explanation: 위 함수를 활용하여, 'data/Students_Record' 디렉토리에 있는 모든 파일들의 이름을 확인할 수 있다. 주의: glob() 함수의 리턴값은 해당 디렉토리에 저장된 파일을 임의의 순서대로 확인하여 리스트를 만든다. 따라서, 이름 순서대로 리스트를 얻기 위해서 sorted() 함수를 활용하였다. End of explanation """ def date_of_birth(date_birth): ''' 생년월일 정보를 (년, 월, 일) 형식으로 변경하는 함수 입력값: * 생년월일 정보 문자열 - "년.월.일" 리턴값: * 생년월일 정보 튜플 - (년, 월, 일) ''' year, month, day = date_birth.split('.') year = int(year) + 1900 month = int(month) day = int(day) ymd = (year, month, day) return ymd date_of_birth("2017.09.27") """ Explanation: date_of_birth 함수는 생년월일 정보를 (년, 월, 일) 형식으로 변경하는 함수이다. End of explanation """ def record_getter(filename): ''' 지정된 학생의 신상정보를 리스트로 출력함. 각 항목은 항목명과 내용의 튜플로 구성됨 입력값: 파일명을 가리키는 경로 리턴값: 학생들의 신상정보의 각 항목을 담은 리스트 ''' std_data = [] a_file = open(filename, u"r") for line in a_file.readlines(): if line[0] == '#' or line in string.whitespace: continue else: item, value = line.split(':') item = item.strip() value = value.strip() if item.strip() == 'Date of Birth': value = date_of_birth(value) std_data.append((item, value)) return std_data """ Explanation: record_getter 함수는 지정된 학생의 신상정보를 리스트에 담아 리턴한다. 리스트의 각각의 항목은 항목명과 항목내용으로 구성된 튜플들이다. End of explanation """ record_getter('data/Students_Records/Byun_Sato.txt') """ Explanation: 예를 들어 Byun_Sato 학생의 신상정보가 아래와 같이 확인된다. End of explanation """ filenames = std_record_list('data/Students_Records/') So_data = record_getter(filenames[2]) So_data """ Explanation: 이제 위 코드를 한 군데 모아서 아래와 같이 각각의 학생의 정보를 얻을 수 있다. 아래 코드는 세 번째 학생의 정보를 확인한다. End of explanation """ for i in range( len(So_data) ): if So_data[i][0] == 'Department': print("전공은", So_data[i][1], "입니다.") break """ Explanation: 정보를 확인할 때는 튜플보다 사전이 효율적이다. 위 코드는 학생들의 신상정보의 정리해서 잘 보여준다. 하지만 소속학과, 생년월일 등에 대한 구체적인 정보를 추출하는 일은 좀 번거롭다. 예를 들어, So Ritgun 학생의 소속학과를 확인하려면 다음과 같이 해야 한다. End of explanation """ So_data_dict = {} for i in range( len(So_data) ): So_data_dict[So_data[i][0]] = So_data[i][1] So_data_dict """ Explanation: 그런데 사전을 이용하면 보다 쉽게 할 수 있다. 먼저 So_data를 사전으로 만들어보자. End of explanation """ So_data_dict['Department'] So_data_dict['Email'] """ Explanation: 그러면 소속학과 또는 좋아하는 색깔 등을 확인하는 일이 매우 쉽다. End of explanation """ So_data_dict['Residence'] = 'Anseong' So_data_dict """ Explanation: 주의: 하나의 항목의 키값을 변경하거나 새로운 (키, 값) 항목을 추가하려면 아래 형식을 이용한다. 사전이름[키] = 키값 반면에 여러 항목을 사전에 추가하려면 update() 메소드를 이용한다. End of explanation """ So_data_dict.update({'Grade': '2', 'Semester': '2'}) So_data_dict """ Explanation: 주의: 순서는 전혀 중요하지 않다. End of explanation """ del So_data_dict['Residence'] So_data_dict print(So_data_dict.pop('Grade')) print(So_data_dict.pop('Semester')) So_data_dict So_data_dict['Date of Birth'] So_data_dict['Name'] """ Explanation: 항목을 삭제하려면 del 함수 또는 pop() 메소드를 사용한다. 존재하지 않는 key를 이용할 경우 어떤 일이 일어나는지 확인하라. End of explanation """ def record_getter(filename): ''' 지정된 학생의 신상정보를 리스트로 출력함. 각 항목은 항목명과 내용의 튜플로 구성됨 입력값: 파일명을 가리키는 경로 리턴값: 학생들의 신상정보의 각 항목을 담은 사전 자료형 ''' std_data = {} a_file = open(filename, u"r") for line in a_file.readlines(): if line[0] == '#' or line in string.whitespace: continue else: item, value = line.split(':') item = item.strip() value = value.strip() if item.strip() == 'Date of Birth': value = date_of_birth(value) std_data[item] = value return std_data record_getter('data/Students_Records/Byun_Sato.txt') """ Explanation: 이제 사전 자료형을 이용하여 record_getter 함수를 수정하자. 
End of explanation """ filenames = std_record_list('data/Students_Records/') all_records = [] for file in filenames: data = record_getter(file) all_records.append(data) all_records """ Explanation: 아래 코드에서 all_records 변수에는 모든 학생의 신상정보를 리스트로 담고 있다. 각 항목은 각 학생의 신상정보를 담은 사전 자료형이다. End of explanation """ all_records[1]['Department'] """ Explanation: 이런 식으로 예를 들어 두 번째 학생이 소속학과를 다음처럼 확인 가능하다. End of explanation """ all_records[0]['Name'] """ Explanation: 또는 첫 번째 학생의 이름을 확인한다. End of explanation """
AllenDowney/ThinkBayes2
examples/world_cup02_soln.ipynb
mit
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import classes from thinkbayes2 from thinkbayes2 import Pmf, Suite import thinkbayes2 import thinkplot import numpy as np from scipy.special import gamma """ Explanation: Think Bayes This notebook presents example code and exercise solutions for Think Bayes. Copyright 2018 Allen B. Downey MIT License: https://opensource.org/licenses/MIT End of explanation """ class Soccer2(thinkbayes2.Suite): """Represents hypotheses about goal-scoring rates.""" def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: goal rate in goals per game data: goals scored in a game """ # FILL THIS IN! return 1 # Solution from scipy.stats import poisson class Soccer2(thinkbayes2.Suite): """Represents hypotheses about goal-scoring rates.""" def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: goal rate in goals per game data: goals scored in a game """ return poisson.pmf(data, hypo) """ Explanation: World Cup problem, part two In the final match of the 2014 FIFA World Cup, Germany defeated Argentina 1-0. How much evidence does this victory provide that Germany had the better team? What is the probability that Germany would win a rematch? Scoring in games like soccer and hockey can be modeled by a Poisson process, which assumes that each team, against a given opponent, will score goals at some goal-scoring rate, $\lambda$, and that this rate does not vary; in other words, the probability of scoring a goal is about the same at any point during the game. Based on this modeling decision, we can answer the questions by Defining a prior distribution for each team's goal-scoring rate against the other, Updating the prior based on the outcome of the game, Using the posterior distributions to compute the probability that Germany's goal-scoring rate is higher. Generating a predictive distribution for the number of goals each team would score in a rematch. I'll start with Step 2. Step 2: Updating If goal-scoring is a Poisson process, the distribution of goals per game is Poisson with parameter $\lambda$. To compute the distribution of $\lambda$ we can define a new class that inherits from thinkbayes2.Suite and provides an appropriate Likelihood function: End of explanation """ from thinkbayes2 import MakeGammaPmf xs = np.linspace(0, 8, 101) pmf = MakeGammaPmf(xs, 1.3) thinkplot.Pdf(pmf) thinkplot.decorate(xlabel='Goal-scoring rate (λ)', ylabel='PMF') pmf.Mean() suite = Soccer2(pmf); germany = suite.Copy(label='Germany') argentina = suite.Copy(label='Argentina') thinkplot.Pdf(germany) thinkplot.Pdf(argentina) thinkplot.decorate(xlabel='Goal-scoring rate (λ)', ylabel='PMF') pmf.Mean() """ Explanation: Likelihood computes the likelihood of data given hypo, where data is an observed number of goals, and hypo is a hypothetical goal-scoring rate in goals per game. We can compute the likelihood of the data by evaluating the Poisson probability mass function (PMF). Now we can get back to Step 1. Step 1: Constructing the prior Before the game starts, what should we believe about each team's goal scoring rate against each other? 
We could use previous tournament results to construct the priors, but to keep things simple, I'll just use the average goal-scoring rate from all matches in the tournament, which was 2.67 goals per game (total for both teams). To construct the prior, I use a gamma distribution with a mean of 1.34 goals per game. End of explanation """ germany = suite.Copy(label='Germany') argentina = suite.Copy(label='Argentina') germany.Update(1) argentina.Update(0) print('posterior mean Germany', germany.Mean()) print('posterior mean Argentina', argentina.Mean()) """ Explanation: According to this prior, the goal-scoring rates are always greater than zero, with the most likely value (a priori) near 0.5. Goal scoring rates greater than 5 are considered unlikely. Step 3: Comparing posteriors The next step is to compute the posteriors for the two teams: End of explanation """ thinkplot.Pdf(germany) thinkplot.Pdf(argentina) thinkplot.decorate(xlabel='Goal-scoring rate (λ)', ylabel='PMF') """ Explanation: Update invokes the likelihood function for each hypothetical value of $\lambda$ and updates the distribution accordingly. Since both teams scored fewer goals than the prior mean (1.4), we expect both posterior means to be lower. Here are the posteriors: End of explanation """ post_prob = germany.ProbGreater(argentina) print('posterior prob Germany > Argentina', post_prob) """ Explanation: To answer the first question, "How much evidence does this victory provide that Germany had the better team?", we can compute the posterior probability that Germany had a higher goal-scoring rate: End of explanation """ prior_odds = 1 post_odds = post_prob / (1 - post_prob) print('posterior odds Germany > Argentina', post_odds) k = post_odds / prior_odds print('Bayes factor', k) """ Explanation: Based on the prior distributions, we would have said that Germany had a 50% chance of having the better team, or 1:1 odds. Based on the posteriors, we would say that Germany has a 70% chance. We can use the ratio of the prior and posterior odds to compute the Bayes factor, which measures the strength of the evidence. End of explanation """ # Solution gdr_goals = poisson.rvs(germany.Sample(1000)) arg_goals = poisson.rvs(argentina.Sample(1000)) np.mean(gdr_goals > arg_goals) # Solution np.mean(gdr_goals == arg_goals) # Solution np.mean(gdr_goals < arg_goals) """ Explanation: The Bayes factor is about 2.3, which is generally considered weak evidence. Now on to Step 4. Step 4: Comparing posterior distributions Exercise: Write a few lines of code to Choose a random value of lam from the posterior distribution of each team. Choose a random number of goals for each team, conditioned on the value of lam you chose. Run that "simulation" many times and accumulate the distribution of wins, losses, and ties. Use the results to estimate the probability that Germany would win a rematch. End of explanation """ # Solution def PredictiveDist(suite, duration=1, label='pred'): """Computes the distribution of goals scored in a game. 
returns: new Pmf (mixture of Poissons) """ metapmf = thinkbayes2.Pmf() for lam, prob in suite.Items(): pred = thinkbayes2.MakePoissonPmf(lam * duration, 10) metapmf[pred] = prob mix = thinkbayes2.MakeMixture(metapmf, label=label) return mix germany_pred = PredictiveDist(germany, label='germany') argentina_pred = PredictiveDist(argentina, label='argentina'); thinkplot.Hist(germany_pred, width=0.45, align='right') thinkplot.Hist(argentina_pred, width=0.45, align='left') thinkplot.decorate(xlabel='Predicted # goals', ylabel='Pmf') """ Explanation: Instead of running simulations, you could compute the posterior predictive distributions explicitly. Write a function called PredictiveDist that takes the posterior distribution of $\lambda$ and a duration (in units of games). It should loop through the hypotheses in suite, compute the predictive distribution of goals for each hypothesis, and assemble a "meta-Pmf" which is a Pmf that maps from each predictive distribution to its probability. Finally, it should use MakeMixture to compute the mixture of the predictive distributions. End of explanation """ win = germany_pred.ProbGreater(argentina_pred) lose = germany_pred.ProbLess(argentina_pred) tie = 1 - (win + lose) print('Posterior prob Germany wins rematch', win) print('Posterior prob tie', tie) print('Posterior prob Argentina wins rematch', lose) """ Explanation: Using the predictive distributions, we can compute probabilities for the outcomes of a rematch. End of explanation """
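As a sanity check on the mixture-based probabilities above, roughly the same numbers can be recovered by direct simulation from the posterior distributions, reusing only objects already defined in this notebook (`germany`, `argentina`, `poisson`, `np`). This is a Monte Carlo estimate, so expect it to agree only to within sampling error.

```
# Monte Carlo cross-check of the rematch probabilities (approximate)
n_sims = 100000
gdr_lam = germany.Sample(n_sims)      # goal-scoring rates drawn from the posterior
arg_lam = argentina.Sample(n_sims)
gdr_goals = poisson.rvs(gdr_lam)      # goals drawn conditional on each rate
arg_goals = poisson.rvs(arg_lam)

print('simulated P(Germany wins)  ', np.mean(gdr_goals > arg_goals))
print('simulated P(tie)           ', np.mean(gdr_goals == arg_goals))
print('simulated P(Argentina wins)', np.mean(gdr_goals < arg_goals))
```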
DataPilot/notebook-miner
summary_of_work/Description.ipynb
apache-2.0
# Load the filenames hw_filenames = np.load('homework_file_names.npy') # Load the notebooks into a data structure hw_notebooks = [[NotebookMiner(filename) for filename in temp[:80]] for temp in hw_filenames] # For each homework, load all notebooks into the corpus. The second argument serves as a tag # for each notebook added. corpus = Features(hw_notebooks[0], 'hw1') corpus.add_notebooks(hw_notebooks[1], 'hw2') corpus.add_notebooks(hw_notebooks[2], 'hw3') corpus.add_notebooks(hw_notebooks[3], 'hw4') corpus.add_notebooks(hw_notebooks[4], 'hw5') """ Explanation: Description of experiments on homework corpus We have five homeworks, with a minimum of 80 notebooks per homework. Given a notebook, we want to know if we can we predict which homework it came from. We test with three different feature sets, use Random Forest with 400 iterations and depth 2 as our classifier, and use 10 fold cross validation. Generating the corpus The following is the code used to generate the corpus, along with comments describing the code. This is done using a pipeline object inspired by sklearn. End of explanation """ ''' Step 1: GetASTFeatures: This class is responsible for getting additional features for each cell of our notebook. The original information is the source code, so this function adds some features related to the AST, including the actual AST. ''' gastf = GetASTFeatures() ''' Step 2: ResampleByNode: This class resamples the notebooks using the AST feature that was created. Each cell has n distinct trees in the AST (corresponding to n top level lines of code), so this class splits each cell into n different parts, so we can perform more fine grained operations. ''' rbn = ResampleByNode() ''' Step 3: GetImports: This class works on the ASTs to normalize variable names, gather information about the imports from the notebook, and the functions called in each line of code. ''' gi = GetImports() ''' Pipeline: The pipeline collects the above classes and runs our corpus through each one sequentially. ''' pipe = Pipeline([gastf, rbn, gi]) corpus = pipe.transform(corpus) """ Explanation: Baseline End of explanation """ gastf = GetASTFeatures() rbn = ResampleByNode() gi = GetImports() ''' Step 4: ASTGraphReducer: This is the class responsible for generating the templates from the ASTs. For each AST, it creates a feature that corresponds to the template that 'covers' that line of code (or None if the AST was not able to be reduced). ''' agr = ASTGraphReducer(a, threshold=8, split_call=False) pipe = Pipeline([gastf, rbn, gi, agr]) corpus = pipe.transform(corpus) """ Explanation: RESULTS For the baseline, we used the function names that were gathered from 'GetImports' as our features (1687 different functions called throughout the corpus), using CountVectorizer from sklearn, and got a mean accuracy of .32 using 10-fold cross validation. Simple Templates Now, we have the intuition that it is not only important which functions are called, but also which functions are called near each other, so we created an encoding system for lines of code to get a higher level feature. This method involves iteratively collapsing the leaves of each AST based on commonly occuring leaves until only one node is left in the tree. End of explanation """ gastf = GetASTFeatures() rbn = ResampleByNode() gi = GetImports() agr = ASTGraphReducer(a, threshold=8, split_call=False) ''' Step 5: FrequentItemsets: This class is responsible for computing the frequent itemsets. 
''' fi = FrequentItemsets() pipe = Pipeline([gastf, rbn, gi, agr, fi]) corpus = pipe.transform(corpus) """ Explanation: RESULTS For this classifer, we used the templates from 'ASTGraphReducer' as our features (we found 1188 different templates), using CountVectorizer from sklearn, and got a mean accuracy of .35 using 10-fold cross validation. Higher Order Templates While the templates improved on our baseline, we wanted to take further advantage of locality by creating higher order templates corresponding to commonly co-occuring simple templates. In order to do this, we took advantage of the natural split of the notebooks by cell, and found frequent itemsets using the cells as the buckets and the templates as the items. Then, for each notebook, we determined which frequent itemsets appeared for any cell in that notebook, and used the list of distinct frequent itemsets as our features. End of explanation """
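For readers who want to see what the evaluation protocol described above (CountVectorizer features, a Random Forest with 400 trees and depth 2, 10-fold cross-validation) looks like in code, here is a rough sklearn sketch. The corpus accessors are not shown in this summary, so the synthetic `notebook_docs`/`notebook_labels` below are placeholders standing in for the real per-notebook token strings and homework tags.

```
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score

# Tiny synthetic stand-in: a few "notebooks" per homework, each represented by a
# space-separated string of feature tokens (function names or template ids).
rng = np.random.RandomState(0)
base_tokens = {'hw1': 'load plot mean', 'hw2': 'fit predict score', 'hw3': 'groupby merge join'}
notebook_docs, notebook_labels = [], []
for label, tokens in base_tokens.items():
    for _ in range(20):
        notebook_docs.append(tokens + ' ' + rng.choice(['print', 'len', 'range']))
        notebook_labels.append(label)

clf = make_pipeline(
    CountVectorizer(),
    RandomForestClassifier(n_estimators=400, max_depth=2, random_state=0))
scores = cross_val_score(clf, notebook_docs, notebook_labels, cv=10)
print('mean 10-fold accuracy:', scores.mean())
```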
navaro1/deep-learning
first-neural-network/Your_first_neural_network.ipynb
mit
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. 
End of explanation """ # Save data for approximately the last 21 days test_data = data[-21*24:] # Now remove the test data from the data set data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. End of explanation """ # Hold out the last 60 days or so of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** -0.5, (self.input_nodes, self.hidden_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** -0.5, (self.hidden_nodes, self.output_nodes)) self.lr = learning_rate #### TODO: Set self.activation_function to your implemented sigmoid function #### # # Note: in Python, you can define a function with a lambda expression, # as shown below. self.activation_function = lambda x: 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation. ### If the lambda code above is not something you're familiar with, # You can uncomment out the following three lines and put your # implementation there instead. # # def sigmoid(x): # return 0 # Replace 0 with your sigmoid calculation here # self.activation_function = sigmoid def train(self, features, targets): ''' Train the network on batch of features and targets. Arguments --------- features: 2D array, each row is one data record, each column is a feature targets: 1D array of target values ''' n_records = features.shape[0] delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape) delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape) for X, y in zip(features, targets): #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer - Replace these values with your calculations. hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer # TODO: Output layer - Replace these values with your calculations. final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer final_outputs = final_inputs # signals from final output layer #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error - Replace this value with your calculations. error = y - final_outputs # Output layer error is the difference between desired target and actual output. 
hidden_error = np.dot(error, self.weights_hidden_to_output.T) output_error_term = error hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs) # Weight step (hidden to output) delta_weights_h_o += output_error_term * hidden_outputs[:, None] # Weight step (input to hidden) delta_weights_i_h += hidden_error_term * X[:, None] # TODO: Update the weights - Replace these values with your calculations. self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records def run(self, features): ''' Run a forward pass through the network with input features Arguments --------- features: 1D array of feature values ''' #### Implement the forward pass here #### # TODO: Hidden layer - replace these values with the appropriate calculations. hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer # TODO: Output layer - Replace these values with the appropriate calculations. final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer final_outputs = final_inputs # signals from final output layer return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. <img src="assets/neural_network.png" width=300px> The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. 
End of explanation """ import unittest inputs = np.array([[0.5, -0.2, 0.1]]) targets = np.array([[0.4]]) test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]]) test_w_h_o = np.array([[0.3], [-0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328], [-0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, -0.20185996], [0.39775194, 0.50074398], [-0.29887597, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: Unit tests Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project. End of explanation """ import sys ### Set the hyperparameters here ### iterations = 1000000 learning_rate = 0.0025 hidden_nodes = 25 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train': [], 'validation': []} for ii in range(iterations): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt'] network.train(X, y) # Printing out the training progress train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values) val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values) sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... Validation loss: " + str(val_loss)[:5]) sys.stdout.flush() losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() _ = plt.ylim() """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. 
You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of iterations This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model will not generalize well to other data; this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features).T*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. End of explanation """
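To put a number next to the prediction plot above, the short snippet below computes the test-set error in the original (unscaled) units. It reuses only variables already defined in this notebook (`network`, `test_features`, `test_targets`, `scaled_features`, `MSE`), so it is meant to run after the cells above.

```
# Test-set error in original units (assumes the variables defined above are in scope).
mean, std = scaled_features['cnt']
test_predictions = network.run(test_features).T * std + mean
test_actual = test_targets['cnt'].values * std + mean
print('Test MSE (original units):', MSE(test_predictions, test_actual))
```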
PyDataMadrid2016/Conference-Info
workshops_materials/20160408_1100_Pandas_for_beginners/tutorial/EN - Tutorial 02 - IO.ipynb
mit
# First, imports import os import datetime as dt import pandas as pd import numpy as np import matplotlib.pyplot as plt from IPython.display import display np.random.seed(19760812) %matplotlib inline ipath = os.path.join('Datos', 'mast.txt') wind = pd.read_csv(ipath) wind.head(3) wind = pd.read_csv(ipath, sep = "\s*") # When we work with text separated by whitespaces we can use the keyword delim_whitespace: # wind = pd.read_csv(path, delim_whitespace = True) wind.head(3) cols = ['Date', 'time', 'wspd', 'wspd_max', 'wdir', 'x1', 'x2', 'x3', 'x4', 'x5', 'wspd_std'] wind = pd.read_csv(ipath, sep = "\s*", names = cols) wind.head(3) cols = ['Date', 'time', 'wspd', 'wspd_max', 'wdir', 'x1', 'x2', 'x3', 'x4', 'x5', 'wspd_std'] wind = pd.read_csv(ipath, sep = "\s*", names = cols, parse_dates = [[0, 1]]) wind.head(3) """ Explanation: In pandas we have several possibilities to read data and several possibilities to write data. Let's read some wind data In the Datos folder you can find a file mast.txt with the following format: 130904 0000 2.21 2.58 113.5 999.99 999.99 99.99 9999.99 9999.99 0.11 130904 0010 1.69 2.31 99.9 999.99 999.99 99.99 9999.99 9999.99 0.35 130904 0020 1.28 1.50 96.0 999.99 999.99 99.99 9999.99 9999.99 0.08 130904 0030 1.94 2.39 99.2 999.99 999.99 99.99 9999.99 9999.99 0.26 130904 0040 2.17 2.67 108.4 999.99 999.99 99.99 9999.99 9999.99 0.23 130904 0050 2.25 2.89 105.0 999.99 999.99 99.99 9999.99 9999.99 0.35 ... We can read in the following manner: End of explanation """ cols = ['Date', 'time', 'wspd', 'wspd_max', 'wdir', 'x1', 'x2', 'x3', 'x4', 'x5', 'wspd_std'] wind = pd.read_csv(ipath, sep = "\s*", names = cols, parse_dates = [[0, 1]], index_col = 0) wind.head(3) cols = ['Date', 'time', 'wspd', 'wspd_max', 'wdir', 'x1', 'x2', 'x3', 'x4', 'x5', 'wspd_std'] wind = pd.read_csv(ipath, sep = "\s*", names = cols, parse_dates = {'timestamp': [0, 1]}, index_col = 0) wind.head(3) # The previous code is equivalent to cols = ['Date', 'time', 'wspd', 'wspd_max', 'wdir', 'x1', 'x2', 'x3', 'x4', 'x5', 'wspd_std'] wind = pd.read_csv(ipath, sep = "\s*", names = cols, parse_dates = [[0, 1]], index_col = 0) wind.index.name = 'Timestamp' wind.head(3) # in the previous cell code you can change 0's and 1's on # parse_dates and index_col with the names of the columns # test it!!! help(pd.read_csv) """ Explanation: <div class="alert alert-danger"> <p>Depending of your operative system dates can be right or not. Don't worry now about this. Later we will work on this.</p> </div> End of explanation """ tmp = pd.DataFrame([1,10,100, 1000], index = [1,1,2,2], columns = ['values']) tmp print(tmp['values'][1], tmp['values'][2], sep = '\n') """ Explanation: With very few lines of code we read a text file with data separated by whitespaces, we transformed two columns to have dates and that dates are now the index (we only can have one record each time),... ¡¡Warning!! repeated indexes <br> <div class="alert alert-danger"> <h3>Note:</h3> <p>Nothing prevents from having repeated indexes. 
Take care as it can be a source of errors.</p> </div> End of explanation """ # An example with error in dates: index = [ '01/01/2015 00:00', '02/01/2015 00:00', '03/01/2015 00:00', '04/01/2015 00:00', '05/01/2015 00:00', '06/01/2015 00:00', '07/01/2015 00:00', '08/01/2015 00:00', '09/01/2015 00:00', '10/01/2015 00:00', '11/01/2015 00:00', '12/01/2015 00:00', '13/01/2015 00:00', '14/01/2015 00:00', '15/01/2015 00:00' ] values = np.random.randn(len(index)) tmp = pd.DataFrame(values, index = pd.to_datetime(index), columns = ['col1']) display(tmp) tmp.plot.line(figsize = (12, 6)) """ Explanation: Warning!! when you convert to dates from strings <br> <div class="alert alert-danger"> <h3>Note:</h3> <p>If you let pandas parse the dates, take care and write tests, as it is easy to find errors in the <b>automagic</b> conversion.</p> </div> End of explanation """ import datetime as dt import io def dateparser(date): date, time = date.split() DD, MM, YY = date.split('/') hh, mm = time.split(':') return dt.datetime(int(YY), int(MM), int(DD), int(hh), int(mm)) virtual_file = io.StringIO("""01/01/2015 00:00, 1 02/01/2015 00:00, 2 03/01/2015 00:00, 3 04/01/2015 00:00, 4 05/01/2015 00:00, 5 06/01/2015 00:00, 6 07/01/2015 00:00, 7 08/01/2015 00:00, 8 09/01/2015 00:00, 9 10/01/2015 00:00, 10 11/01/2015 00:00, 11 12/01/2015 00:00, 12 13/01/2015 00:00, 13 14/01/2015 00:00, 14 15/01/2015 00:00, 15 """) tmp_wrong = pd.read_csv(virtual_file, parse_dates = [0], index_col = 0, names = ['Date', 'values']) virtual_file = io.StringIO("""01/01/2015 00:00, 1 02/01/2015 00:00, 2 03/01/2015 00:00, 3 04/01/2015 00:00, 4 05/01/2015 00:00, 5 06/01/2015 00:00, 6 07/01/2015 00:00, 7 08/01/2015 00:00, 8 09/01/2015 00:00, 9 10/01/2015 00:00, 10 11/01/2015 00:00, 11 12/01/2015 00:00, 12 13/01/2015 00:00, 13 14/01/2015 00:00, 14 15/01/2015 00:00, 15 """) tmp_right = pd.read_csv(virtual_file, parse_dates = True, index_col = 0, names = ['Date', 'values'], date_parser = dateparser) display(tmp_wrong) display(tmp_right) """ Explanation: To avoid the previous error we can write our own date parser and pass it to, for instance, pd.read_csv: End of explanation """ opath = os.path.join('Datos', 'mast_2.csv') #wind.to_csv(opath) wind.iloc[0:100].to_csv(opath) """ Explanation: Let's save the result in csv format End of explanation """ #wind.to_json(opath.replace('csv', 'json')) wind.iloc[0:100].to_json(opath.replace('csv', 'json')) """ Explanation: ... or in json format End of explanation """ # If you have a lot of data I do not recommend this, it is slow #wind.to_html(opath.replace('csv', 'html')) wind.iloc[0:100].to_html(opath.replace('csv', 'html')) """ Explanation: ... or to an HTML table End of explanation """ writer = pd.ExcelWriter(opath.replace('csv', 'xlsx')) #wind.to_excel(writer, sheet_name= "Mi hoja 1") wind.iloc[0:100].to_excel(writer, sheet_name= "Mi hoja 1") writer.save() # Now that we have files with json, html, xlsx,..., formats you can practice what we have learned opening them # using the pd.read_* functions """ Explanation: ... or to an xlsx format Here you should have xlsxwriter, openpyxl, xlrd/xlwt,..., installed. End of explanation """
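Following the closing comment above, a quick round-trip check could look like the sketch below. It assumes the previous cells have already written the CSV/JSON/XLSX files into the Datos folder and that an Excel reader engine (xlrd or openpyxl, depending on your pandas version) is installed.

```
# Read the files back and eyeball a few rows of each.
wind_csv = pd.read_csv(opath, index_col=0, parse_dates=True)
wind_json = pd.read_json(opath.replace('csv', 'json'))
wind_xlsx = pd.read_excel(opath.replace('csv', 'xlsx'), index_col=0)

print(wind_csv.head(3))
print(wind_json.head(3))
print(wind_xlsx.head(3))
```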
planet-os/notebooks
nasa-opennex/Example of Kolkata warming from Webinar.ipynb
mit
%matplotlib inline import numpy as np import pandas as pd import urllib2 import matplotlib.pyplot as plt import matplotlib matplotlib.style.use('ggplot') plt.rcParams['figure.figsize'] = (10.0, 8.0) """ Explanation: Compare Climate Scenarios 1. Preliminaries End of explanation """ def load_data(unique_id): data = pd.read_csv(urllib2.urlopen("http://opennex/dataset/%s/data.csv" % (unique_id))) for col in ['Model', 'Scenario', 'Variable']: data[col] = data[col].astype('category') data['Date'] = data['Date'].astype('datetime64') data['Temperature'] = data['Value'] - 273.15 return data """ Explanation: 2. Load the data We have a function that pulls the data that we chose in the UI directly into the program using the container that we're connected to by docker-compose End of explanation """ data = load_data("Ky3KN") """ Explanation: Replace the argument below with the unique ID of the dataset that you've chosen in the web UI. End of explanation """ data.shape data.apply(lambda x: [x.unique()]) """ Explanation: 3. Examine the data End of explanation """ colors = {'historical':'black', 'rcp45':'green', 'rcp85':'red'} def do_graph(df): model = df.loc[1,'Model'] df['Month'] = df['Date'].map(lambda d: "%d-%02d-01" % (d.year, d.month)).astype('datetime64') by_month = df.groupby(['Month', 'Scenario']).aggregate(np.mean).reset_index() by_month['Year'] = by_month['Month'].map(lambda d: "%d-01-01" % (d.year)).astype('datetime64') by_year = by_month.groupby(['Year', 'Scenario']).aggregate(max).loc[:,['Temperature']] groups = by_year.reset_index().set_index('Year').groupby('Scenario') for key, grp in groups: plt.plot(grp.index, grp['Temperature'], color=colors[key], label=key) plt.legend(loc='best') plt.title("Maximum mean temperature for warmest month using model %s" % (model)) plt.xlabel("Year") plt.ylabel("Temperature (Celsius)") plt.show() do_graph(data) """ Explanation: 4. Graph the data End of explanation """
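The aggregation inside `do_graph` (daily temperatures → monthly means → the warmest month of each year) can be tried independently of the OpenNEX container on a small synthetic frame, as sketched below; the random data is only there to make the example runnable and ignores the Scenario grouping.

```
# Demonstrate the monthly-mean / warmest-month-per-year aggregation on synthetic data.
dates = pd.date_range('2010-01-01', '2012-12-31', freq='D')
synthetic = pd.DataFrame(
    {'Temperature': 25 + 5 * np.sin(2 * np.pi * dates.dayofyear / 365.0)
                    + np.random.randn(len(dates))},
    index=dates)

monthly_mean = synthetic['Temperature'].resample('M').mean()
warmest_month_per_year = monthly_mean.groupby(monthly_mean.index.year).max()
print(warmest_month_per_year)
```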
datactive/bigbang
examples/git-analysis/Git Collection.ipynb
mit
url = "http://mail.python.org/pipermail/scipy-dev/" arx = Archive(url,archive_dir="../archives") repo = repo_loader.get_repo("bigbang") full_info = repo.commit_data; act = arx.data.groupby("Date").size(); act = act.resample("D", how=np.sum) act = act[act.index.year <= 2014] act_week = act.resample("W", how=np.sum) print((full_info["Parent Commit"])) """ Explanation: One of the newest features of BigBang is the ability to analyze git info for each project. For now, we mostly just look at commits over time. We can also analyze individual committers to run cohort visualization. First, make sure that you've collected git and mail data. For now, we are looking at scipy, but you can analyze any git repo you'd like by loading its info. Below, we load the mail and git data into data tables. End of explanation """ fig = plt.figure(figsize=(10, 7.5)); commits_per_day = repo.commits_per_day() commits_per_week = repo.commits_per_week() commits_per_day.plot() fig = plt.figure(figsize=(10, 7.5)); commits_per_week.plot() """ Explanation: The code below graphs the commits per day and commits per week for scipy. As we will see later, consolidating commits into larger time periods allows for smoother graphs. As you can see, the weekly graph is slightly smoother. We will find some more ways to smoothen these lines. End of explanation """ fig = plt.figure(figsize=(10, 7.5)); simp = 5 convulation_array = [1.0/(simp) for n in range(simp)]; c_array = np.convolve(commits_per_week, convulation_array, "same") e_array = np.convolve(act_week, convulation_array, "same"); plt.plot(act_week.index, e_array) # The Blue plt.plot(commits_per_week.index, c_array) # The Green fig.axes[0].xaxis_date() """ Explanation: With some convolution, the two jutted graphs make much more sense. This graphs commits per week and emails per week. The fact that we have the git and mail data for the same project lets us analyze the relationship between emails and commits. We can look at whether or not weeks where there is a lot of emailing are followed by weeks of many commits. We can even go down to the individual level and analyze each commiter/emailer with questions like "Is a person less likely to commit if they email a lot?" End of explanation """ plt.figure(figsize=(10, 7.5)); df = repo.by_committer(); if (len(df > 20)): df = df[len(df)-20:] df.plot(kind="bar") """ Explanation: This is the top 20 (or fewer) committers to a project. An interesting question to answer for the future would be whether or not these committers are more likely to be in the same cohort. 
End of explanation """ n = 5 import numpy as np def first_commit_fn(df): if (len(df) < 1): return; else: return df dataFrame = full_info commits_by_time = dataFrame.groupby(["Committer Name", dataFrame['Time'].map(lambda x: x.toordinal()/100)], sort=True).size(); time = dataFrame.groupby(dataFrame['Time'].map(lambda x: x.toordinal()/100)).size().order(); first_commits = dataFrame.groupby("Committer Name").min().sort("Time"); commits_by_time = (commits_by_time.reindex(index = time.index.values, level=1, fill_value=0)) cohorts = np.array_split(first_commits, n); convulation_array = [.1,.1,.1,.1,.1,.1,.1,.1,.1,.1]; cohort_activity = [(commits_by_time.loc[cohort.index.values].sum(None, False, 1, False)).reindex(index = time.index.values) for cohort in cohorts]; for i in range(len(cohort_activity)): cohort_activity[i] = np.convolve(cohort_activity[i], convulation_array) to_graph = pd.DataFrame(cohort_activity).transpose() to_graph.plot(kind="bar",stacked=True, linewidth=0) byCommitter = repo.by_committer(); totalCohortCommits = []; for cohort in cohorts: cohortPeople = byCommitter.reindex(cohort.index); totalCohortCommits.append(cohortPeople.sum()) commitsPerCohort = pd.DataFrame(totalCohortCommits); commitsPerCohort.transpose().plot(kind="bar") """ Explanation: Below, one can see cohort visualization of commits. Each cohort is a group of commiters that started working on the project around the same time. The first cohort is the first 1/5th of people to start committing, the second cohort is the second 1/5th of people to start committing, and so on. For Scipy, the first cohort of commiters tends to dominate, while the second has recently taken some more charge. End of explanation """
NervanaSystems/neon_course
answers/05 Model Architectures-ANSWER_KEY.ipynb
apache-2.0
from neon.callbacks.callbacks import Callbacks from neon.initializers import Gaussian from neon.layers import GeneralizedCost, Affine, BranchNode, Multicost, SingleOutputTree from neon.models import Model from neon.optimizers import GradientDescentMomentum from neon.transforms import Rectlin, Logistic, Softmax from neon.transforms import CrossEntropyBinary, CrossEntropyMulti, Misclassification from neon.backends import gen_backend """ Explanation: Model Architectures: Part 1 Neon supports the ability to build more complex models than just a linear list of layers. In this series of notebooks, you will implement several models and understand how data should be passed when a model may have multiple inputs/outputs. Tree Models Neon supports models with a main trunk that includes branch points to leaf nodes. In this scenario, the models takes a single input but produces multiple outputs that can be matched against multiple targets. For example, consider the below topology: cost1 cost3 | / m_l4 b2_l2 | / | ___b2_l1 |/ m_l3 cost2 | / m_l2 b1_l2 | / | ___b1_l1 |/ | m_l1 | | data Suppose we wanted to apply this model to the MNIST dataset. The MNIST data iterator returns, for each minibatch, a tuple of tensors (X, Y). Since there are multiple outputs, the single target labels Y are used to match against all these outputs. Alternatively, we could write a custom iterator that yields for each minibatch, a nested tuple (X, (Y1, Y2, Y3)). Then, each target label will mapped to its respective output layer. We will guide you through implementing such a branching model. We first import all the needed ingredients: End of explanation """ be = gen_backend(batch_size=128) from neon.data import MNIST mnist = MNIST(path='data/') train_set = mnist.train_iter valid_set = mnist.valid_iter """ Explanation: We also set up the backend and load the data. End of explanation """ # define common parameters as dictionary (see above) init_norm = Gaussian(loc=0.0, scale=0.01) normrelu = dict(init=init_norm, activation=Rectlin()) normsigm = dict(init=init_norm, activation=Logistic(shortcut=True)) normsoft = dict(init=init_norm, activation=Softmax()) # define your branch nodes b1 = BranchNode(name="b1") b2 = BranchNode(name="b2") # define the main trunk (cost1 above) p1 = [Affine(nout=100, name="m_l1", **normrelu), b1, Affine(nout=32, name="m_l2", **normrelu), Affine(nout=16, name="m_l3", **normrelu), b2, Affine(nout=10, name="m_l4", **normsoft)] # define the branch (cost2) p2 = [b1, Affine(nout=16, name="b1_l1", **normrelu), Affine(nout=10, name="b1_l2", **normsoft)] # define the branch (cost3) p3 = [b2, Affine(nout=16, name="b2_l1", **normrelu), Affine(nout=10, name="b2_l2", **normsoft)] # build the model as a Tree alphas = [1, 0.25, 0.25] model = Model(layers=SingleOutputTree([p1, p2, p3], alphas=alphas)) """ Explanation: Now its your turn! Set up the branch nodes and layer structure above. Some tips: - Use Affine layers. - You can choose your hidden unit sizes, just make sure that the three final output layers have 10 units for the 10 categories in the MNIST dataset. - The three final output layers should also use Softmax activation functions to ensure that the probability sums to 1. 
As a reminder, to define a single layer, we need a weight initialization and an activation function: ``` define a layers layer1 = Affine(nout=100, init=Gaussian(0.01), activation=Rectlin()) alternative, you can take advantage of common parameters by constructing a dictionary: normrelu = dict(init=init_norm, activation=Rectlin()) pass the dictionary to the layers as keyword arguments using the ** syntax. layer1 = Affine(nout=100, normrelu) layer2 = Affine(nout=10, normrelu) ``` To set up a simple Tree: ``` define a branch mode b1 = BranchNode() define the main trunk path1 = [layer1, b1, layer2] define the branch path2 = [b1, layer3] build the model as a Tree alphas are the weights given to the branches of Tree during backpropagation. model = Model(layers=SingleOutputTree([path1, path2]), alphas = [1, 1]) ``` We have included below skeleton of the code for you to fill out to build the model above. End of explanation """ cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()), GeneralizedCost(costfunc=CrossEntropyMulti()), GeneralizedCost(costfunc=CrossEntropyMulti())]) """ Explanation: Now let's fit our model! First, set up multiple costs for each of the three branches using MultiCost: End of explanation """ model.initialize(train_set, cost) print model """ Explanation: To test that your model was constructed properly, we first initialize the model with a dataset (so that it configures the layer shapes appropriately) and a cost, then print the model. End of explanation """ # setup optimizer optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9) # setup standard fit callbacks callbacks = Callbacks(model, eval_set=valid_set, eval_freq=1) model.fit(train_set, optimizer=optimizer, num_epochs=10, cost=cost, callbacks=callbacks) """ Explanation: Then, we set up the remaining components and run fit! End of explanation """
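After fitting, it is natural to check the error rate on the held-out set. The one-liner below is a sketch of how that is usually done in neon examples with the `Misclassification` metric imported at the top of this notebook; it assumes `Model.eval` accepts the `SingleOutputTree` model here the same way it accepts a plain sequential model, which you should verify against your neon version.

```
# Sketch: validation error of the (single) inference-time output of the tree model.
error_rate = model.eval(valid_set, metric=Misclassification())
print('Validation misclassification error = %.1f%%' % (float(error_rate) * 100))
```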
tensorflow/docs-l10n
site/ja/io/tutorials/colorspace.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow IO Authors. End of explanation """ !pip install tensorflow-io """ Explanation: Color Space Conversions <table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/io/tutorials/colorspace"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> View on TensorFlow.org</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/io/tutorials/colorspace.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Run in Google Colab</a></td> <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/io/tutorials/colorspace.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td> <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/io/tutorials/colorspace.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">Download notebook</a></td> </table> Overview In computer vision, the choice of color space can have a significant impact on model performance. RGB is the most common color space, but in many cases switching to another color space such as YUV, YCbCr, or XYZ (CIE) improves model performance. The tensorflow-io package provides a list of color space conversion APIs that can be used to prepare and augment image data. Setup Install the required packages, and restart the runtime End of explanation """ !curl -o sample.jpg -L https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg !ls -ls sample.jpg """ Explanation: Download a sample image The example image used in this tutorial is a cat in the snow, but you can replace it with any JPEG image. Download the image as follows and save it to local disk as sample.jpg. End of explanation """ import tensorflow as tf import tensorflow_io as tfio image = tf.image.decode_jpeg(tf.io.read_file('sample.jpg')) print(image.shape, image.dtype) """ Explanation: Usage Read an image file Read the image and decode it into a uint8 tensor of shape (213, 320, 3). End of explanation """ import matplotlib.pyplot as plt plt.figure() plt.imshow(image) plt.axis('off') plt.show() """ Explanation: The image can be displayed as follows. End of explanation """ grayscale = tfio.experimental.color.rgb_to_grayscale(image) print(grayscale.shape, grayscale.dtype) # use tf.squeeze to remove last channel for plt.imshow to display: plt.figure() plt.imshow(tf.squeeze(grayscale, axis=-1), cmap='gray') plt.axis('off') plt.show() """ Explanation: Convert RGB to grayscale An RGB image can be converted to Grayscale with tfio.experimental.color.rgb_to_grayscale, reducing the number of channels from 3 to 1. End of explanation """ bgr = tfio.experimental.color.rgb_to_bgr(image) print(bgr.shape, bgr.dtype) plt.figure() plt.imshow(bgr) plt.axis('off') plt.show() """ Explanation: Convert RGB to BGR Some image software and camera manufacturers prefer BGR; the conversion can be done with tfio.experimental.color.rgb_to_bgr. End of explanation """ # convert to float32 image_float32 = tf.cast(image, tf.float32) / 255.0 xyz_float32 = tfio.experimental.color.rgb_to_xyz(image_float32) # convert back uint8 xyz = tf.cast(xyz_float32 * 255.0, tf.uint8) print(xyz.shape, xyz.dtype) plt.figure() plt.imshow(xyz) plt.axis('off') plt.show() """ Explanation: Convert RGB to CIE XYZ CIE XYZ (or CIE 1931
XYZ)は、多くの画像処理プログラムで使用されている一般的な色空間です。以下ではtfio.experimental.color.rgb_to__xyzを使用して、RGB からCIE XYZに変換しています。tfio.experimental.color.rgb_to_xyzは[0, 1]の範囲の浮動小数点入力を想定しているため、追加の前処理が必要なので注意してください。 End of explanation """ ycbcr = tfio.experimental.color.rgb_to_ycbcr(image) print(ycbcr.shape, ycbcr.dtype) plt.figure() plt.imshow(ycbcr, cmap='gray') plt.axis('off') plt.show() """ Explanation: RGB から YCbCr に変換する 最後に、多くのビデオシステムではYCbCrがデフォルトの色空間です。YCbCrへの変換は、tfio.experimental.color.rgb_to_ycbcrを使用して行います。 End of explanation """ y, cb, cr = ycbcr[:,:,0], ycbcr[:,:,1], ycbcr[:,:,2] # Y' component plt.figure() plt.imshow(y, cmap='gray') plt.axis('off') plt.show() # Cb component plt.figure() plt.imshow(cb, cmap='gray') plt.axis('off') plt.show() # Cr component plt.figure() plt.imshow(cr, cmap='gray') plt.axis('off') plt.show() """ Explanation: さらに面白いことに、YCbCrは各成分が知覚的に意味のある情報を持つY'(ルマ)、Cb(青色差クロマ)、Cr(赤色差クロマ)という成分に分解することができます。 End of explanation """
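# Not part of the original tutorial: a minimal sketch showing that the same
# conversion ops can be mapped over a tf.data input pipeline. It assumes the
# `image` tensor and the tensorflow / tensorflow_io imports from the cells
# above; the single-element dataset here is only for illustration.
dataset = tf.data.Dataset.from_tensors(image)
dataset = dataset.map(tfio.experimental.color.rgb_to_ycbcr)

for converted in dataset.take(1):
    print(converted.shape, converted.dtype)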
mne-tools/mne-tools.github.io
0.19/_downloads/4a39dd4a31cad8a0e098b02526b9c3d3/plot_covariance_whitening_dspm.ipynb
bsd-3-clause
# Author: Denis A. Engemann <[email protected]> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne import io from mne.datasets import spm_face from mne.minimum_norm import apply_inverse, make_inverse_operator from mne.cov import compute_covariance print(__doc__) """ Explanation: Demonstrate impact of whitening on source estimates This example demonstrates the relationship between the noise covariance estimate and the MNE / dSPM source amplitudes. It computes source estimates for the SPM faces data and compares proper regularization with insufficient regularization based on the methods described in [1]. The example demonstrates that improper regularization can lead to overestimation of source amplitudes. This example makes use of the previous, non-optimized code path that was used before implementing the suggestions presented in [1]. This example does quite a bit of processing, so even on a fast machine it can take a couple of minutes to complete. <div class="alert alert-danger"><h4>Warning</h4><p>Please do not copy the patterns presented here for your own analysis, this is example is purely illustrative.</p></div> References .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in covariance estimation and spatial whitening of MEG and EEG signals, vol. 108, 328-342, NeuroImage. End of explanation """ data_path = spm_face.data_path() subjects_dir = data_path + '/subjects' raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds' raw = io.read_raw_ctf(raw_fname % 1) # Take first run # To save time and memory for this demo, we'll just use the first # 2.5 minutes (all we need to get 30 total events) and heavily # resample 480->60 Hz (usually you wouldn't do either of these!) raw = raw.crop(0, 150.).load_data() picks = mne.pick_types(raw.info, meg=True, exclude='bads') raw.filter(None, 20.) events = mne.find_events(raw, stim_channel='UPPT001') event_ids = {"faces": 1, "scrambled": 2} tmin, tmax = -0.2, 0.5 baseline = (None, 0) reject = dict(mag=3e-12) # Make forward trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif' src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif' bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif' forward = mne.make_forward_solution(raw.info, trans, src, bem) del src # inverse parameters conditions = 'faces', 'scrambled' snr = 3.0 lambda2 = 1.0 / snr ** 2 clim = dict(kind='value', lims=[0, 2.5, 5]) """ Explanation: Get data End of explanation """ samples_epochs = 5, 15, method = 'empirical', 'shrunk' colors = 'steelblue', 'red' evokeds = list() stcs = list() methods_ordered = list() for n_train in samples_epochs: # estimate covs based on a subset of samples # make sure we have the same number of conditions. 
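    # (The two event arrays are interleaved and re-sorted by sample time below
    # so that each condition contributes exactly n_train epochs to the
    # covariance estimates computed for this training-set size.)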
events_ = np.concatenate([events[events[:, 2] == id_][:n_train] for id_ in [event_ids[k] for k in conditions]]) events_ = events_[np.argsort(events_[:, 0])] epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks, baseline=baseline, preload=True, reject=reject, decim=8) epochs_train.equalize_event_counts(event_ids) assert len(epochs_train) == 2 * n_train # We know some of these have too few samples, so suppress warning # with verbose='error' noise_covs = compute_covariance( epochs_train, method=method, tmin=None, tmax=0, # baseline only return_estimators=True, rank=None, verbose='error') # returns list # prepare contrast evokeds = [epochs_train[k].average() for k in conditions] del epochs_train, events_ # do contrast # We skip empirical rank estimation that we introduced in response to # the findings in reference [1] to use the naive code path that # triggered the behavior described in [1]. The expected true rank is # 274 for this dataset. Please do not do this with your data but # rely on the default rank estimator that helps regularizing the # covariance. stcs.append(list()) methods_ordered.append(list()) for cov in noise_covs: inverse_operator = make_inverse_operator(evokeds[0].info, forward, cov, loose=0.2, depth=0.8) assert len(inverse_operator['sing']) == 274 # sanity check stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM", pick_ori=None) for e in evokeds) stc = stc_a - stc_b methods_ordered[-1].append(cov['method']) stcs[-1].append(stc) del inverse_operator, evokeds, cov, noise_covs, stc, stc_a, stc_b del raw, forward # save some memory """ Explanation: Estimate covariances End of explanation """ fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 5)) for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))): # compute stc based on worst and best ax_dynamics = axes[1] for stc, ax, method, kind, color in zip(stcs[ni], axes[::2], methods_ordered[ni], ['best', 'worst'], colors): brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim, initial_time=0.175, background='w', foreground='k') brain.show_view('ven') im = brain.screenshot() brain.close() ax.axis('off') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.imshow(im) ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2)) # plot spatial mean stc_mean = stc.data.mean(0) ax_dynamics.plot(stc.times * 1e3, stc_mean, label='{0} ({1})'.format(method, kind), color=color) # plot spatial std stc_var = stc.data.std(0) ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var, stc_mean + stc_var, alpha=0.2, color=color) # signal dynamics worst and best ax_dynamics.set(title='{0} epochs'.format(n_train * 2), xlabel='Time (ms)', ylabel='Source Activation (dSPM)', xlim=(tmin * 1e3, tmax * 1e3), ylim=(-3, 3)) ax_dynamics.legend(loc='upper left', fontsize=10) fig.subplots_adjust(hspace=0.2, left=0.01, right=0.99, wspace=0.03) """ Explanation: Show the resulting source estimates End of explanation """
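# Not part of the original example: for a real analysis you would normally let
# MNE select and regularize the covariance estimator automatically and then
# check the whitening visually. The helper below is only a sketch -- `epochs`
# is a hypothetical Epochs object with a pre-stimulus baseline, and it reuses
# the compute_covariance import from the top of this example.
def check_whitening(epochs):
    """Estimate a regularized noise covariance and plot the whitened evoked."""
    noise_cov = compute_covariance(epochs, tmax=0., method='auto', rank=None)
    evoked = epochs.average()
    return evoked.plot_white(noise_cov)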
tiagoantao/biopython-notebook
notebooks/09 - Accessing NCBIs Entrez databases.ipynb
mit
from Bio import Entrez Entrez.email = "[email protected]" """ Explanation: Source of the materials: Biopython cookbook (adapted) <font color='red'>Status: Draft</font> Accessing NCBI’s Entrez databases Entrez Guidelines EInfo: Obtaining information about the Entrez databases ESearch: Searching the Entrez databases EPost: Uploading a list of identifiers EFetch: Downloading full records from Entrez History and WebEnv Specialized parsers Examples Entrez (http://www.ncbi.nlm.nih.gov/Entrez) is a data retrieval system that provides users access to NCBI’s databases such as PubMed, GenBank, GEO, and many others. You can access Entrez from a web browser to manually enter queries, or you can use Biopython’s Bio.Entrez module for programmatic access to Entrez. The latter allows you for example to search PubMed or download GenBank records from within a Python script. The Bio.Entrez module makes use of the Entrez Programming Utilities (also known as EUtils), consisting of eight tools that are described in detail on NCBI’s page at http://www.ncbi.nlm.nih.gov/entrez/utils/. Each of these tools corresponds to one Python function in the Bio.Entrez module, as described in the sections below. This module makes sure that the correct URL is used for the queries, and that not more than one request is made every three seconds, as required by NCBI. The output returned by the Entrez Programming Utilities is typically in XML format. To parse such output, you have several options: Use Bio.Entrez’s parser to parse the XML output into a Python object; Use the DOM (Document Object Model) parser in Python’s standard library; Use the SAX (Simple API for XML) parser in Python’s standard library; Read the XML output as raw text, and parse it by string searching and manipulation. For the DOM and SAX parsers, see the Python documentation. The parser in Bio.Entrez is discussed below. NCBI uses DTD (Document Type Definition) files to describe the structure of the information contained in XML files. Most of the DTD files used by NCBI are included in the Biopython distribution. The Bio.Entrez parser makes use of the DTD files when parsing an XML file returned by NCBI Entrez. Occasionally, you may find that the DTD file associated with a specific XML file is missing in the Biopython distribution. In particular, this may happen when NCBI updates its DTD files. If this happens, Entrez.read will show a warning message with the name and URL of the missing DTD file. The parser will proceed to access the missing DTD file through the internet, allowing the parsing of the XML file to continue. However, the parser is much faster if the DTD file is available locally. For this purpose, please download the DTD file from the URL in the warning message and place it in the directory ...site-packages/Bio/Entrez/DTDs, containing the other DTD files. If you don’t have write access to this directory, you can also place the DTD file in ~/.biopython/Bio/Entrez/DTDs, where ~ represents your home directory. Since this directory is read before the directory ...site-packages/Bio/Entrez/DTDs, you can also put newer versions of DTD files there if the ones in ...site-packages/Bio/Entrez/DTDs become outdated. Alternatively, if you installed Biopython from source, you can add the DTD file to the source code’s Bio/Entrez/DTDs directory, and reinstall Biopython. This will install the new DTD file in the correct location together with the other DTD files. 
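If you use the per-user location mentioned above (~/.biopython/Bio/Entrez/DTDs), a minimal sketch for preparing that directory looks like this (the DTD file name is just a placeholder for whatever the warning message reports):
```python
import os

# Per-user directory searched by the Bio.Entrez parser, as described above.
dtd_dir = os.path.join(os.path.expanduser("~"), ".biopython", "Bio", "Entrez", "DTDs")
os.makedirs(dtd_dir, exist_ok=True)

# Save the DTD downloaded from the URL in the warning message here, e.g.:
print(os.path.join(dtd_dir, "missing_file.dtd"))
```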
The Entrez Programming Utilities can also generate output in other formats, such as the Fasta or GenBank file formats for sequence databases, or the MedLine format for the literature database, discussed in Section Specialized parsers. Entrez Guidelines Before using Biopython to access the NCBI’s online resources (via Bio.Entrez or some of the other modules), please read the NCBI’s Entrez User Requirements. If the NCBI finds you are abusing their systems, they can and will ban your access! To paraphrase: For any series of more than 100 requests, do this at weekends or outside USA peak times. This is up to you to obey. Use the http://eutils.ncbi.nlm.nih.gov address, not the standard NCBI Web address. Biopython uses this web address. Make no more than three requests every seconds (relaxed from at most one request every three seconds in early 2009). This is automatically enforced by Biopython. Use the optional email parameter so the NCBI can contact you if there is a problem. You can either explicitly set this as a parameter with each call to Entrez (e.g. include <span>email=“[email protected]”</span> in the argument list), or you can set a global email address: End of explanation """ from Bio import Entrez Entrez.tool = "MyLocalScript" """ Explanation: <span>Bio.Entrez</span> will then use this email address with each call to Entrez. The <span>example.com</span> address is a reserved domain name specifically for documentation (RFC 2606). Please DO NOT use a random email – it’s better not to give an email at all. The email parameter will be mandatory from June 1, 2010. In case of excessive usage, NCBI will attempt to contact a user at the e-mail address provided prior to blocking access to the E-utilities. If you are using Biopython within some larger software suite, use the tool parameter to specify this. You can either explicitly set the tool name as a parameter with each call to Entrez (e.g. include <span>tool=“MyLocalScript”</span> in the argument list), or you can set a global tool name: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.einfo() result = handle.read() print(result) """ Explanation: The tool parameter will default to Biopython. For large queries, the NCBI also recommend using their session history feature (the WebEnv session cookie string, see Section History and WebEnv). This is only slightly more complicated. In conclusion, be sensible with your usage levels. If you plan to download lots of data, consider other options. For example, if you want easy access to all the human genes, consider fetching each chromosome by FTP as a GenBank file, and importing these into your own BioSQL database (see Section [sec:BioSQL]). EInfo: Obtaining information about the Entrez databases einfo source EInfo provides field index term counts, last update, and available links for each of NCBI’s databases. In addition, you can use EInfo to obtain a list of all database names accessible through the Entrez utilities. The variable result now contains a list of databases in XML format: End of explanation """ from Bio import Entrez handle = Entrez.einfo() record = Entrez.read(handle) """ Explanation: Since this is a fairly simple XML file, we could extract the information it contains simply by string searching. 
Using Bio.Entrez’s parser instead, we can directly parse this XML file into a Python object: End of explanation """ record.keys() """ Explanation: Now record is a dictionary with exactly one key: End of explanation """ record["DbList"] """ Explanation: The values stored in this key is the list of database names shown in the XML above: End of explanation """ from Bio import Entrez handle = Entrez.einfo(db="pubmed") record = Entrez.read(handle) record["DbInfo"]["Description"] record['DbInfo'].keys() handle = Entrez.einfo(db="pubmed") record = Entrez.read(handle) record["DbInfo"]["Description"] record["DbInfo"]["Count"] record["DbInfo"]["LastUpdate"] """ Explanation: For each of these databases, we can use EInfo again to obtain more information: End of explanation """ for field in record["DbInfo"]["FieldList"]: print("%(Name)s, %(FullName)s, %(Description)s" % field) """ Explanation: Try record["DbInfo"].keys() for other information stored in this record. One of the most useful is a list of possible search fields for use with ESearch: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.esearch(db="pubmed", term="biopython") record = Entrez.read(handle) record["IdList"] record """ Explanation: That’s a long list, but indirectly this tells you that for the PubMed database, you can do things like Jones[AUTH] to search the author field, or Sanger[AFFL] to restrict to authors at the Sanger Centre. This can be very handy - especially if you are not so familiar with a particular database. ESearch: Searching the Entrez databases To search any of these databases, we use Bio.Entrez.esearch(). For example, let’s search in PubMed for publications related to Biopython: End of explanation """ handle = Entrez.esearch(db="nucleotide", term="Cypripedioideae[Orgn] AND matK[Gene]") record = Entrez.read(handle) record["Count"] record["IdList"] """ Explanation: In this output, you see seven PubMed IDs (including 19304878 which is the PMID for the Biopython application), which can be retrieved by EFetch (see section EFetch: Downloading full records from Entrez). You can also use ESearch to search GenBank. Here we’ll do a quick search for the matK gene in Cypripedioideae orchids (see Section [sec:entrez-einfo] about EInfo for one way to find out which fields you can search in each Entrez database): End of explanation """ # nlmcatalog # handle = Entrez.esearch(db="nlmcatalog", term="computational") # record = Entrez.read(handle) # record["Count"] handle = Entrez.esearch(db="nlmcatalog", term="biopython[Journal]", RetMax='20') record = Entrez.read(handle) print("{} computational Journals found".format(record["Count"])) print("The first 20 are\n{}".format(record['IdList'])) """ Explanation: Each of the IDs (126789333, 37222967, 37222966, …) is a GenBank identifier. See section EFetch: Downloading full records from Entrez for information on how to actually download these GenBank records. Note that instead of a species name like Cypripedioideae[Orgn], you can restrict the search using an NCBI taxon identifier, here this would be txid158330[Orgn]. This isn’t currently documented on the ESearch help page - the NCBI explained this in reply to an email query. You can often deduce the search term formatting by playing with the Entrez web interface. For example, including complete[prop] in a genome search restricts to just completed genomes. 
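To illustrate, here is the same kind of search using the taxon-identifier form of the organism restriction (a small sketch; it simply repeats the esearch call above with txid158330[Orgn] in place of the species name):
```python
from Bio import Entrez
Entrez.email = "[email protected]"  # Always tell NCBI who you are

handle = Entrez.esearch(db="nucleotide", term="txid158330[Orgn] AND matK[Gene]")
record = Entrez.read(handle)
print(record["Count"])
```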
As a final example, let’s get a list of computational journal titles: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are id_list = ["19304878", "18606172", "16403221", "16377612", "14871861", "14630660"] print(Entrez.epost("pubmed", id=",".join(id_list)).read()) """ Explanation: Again, we could use EFetch to obtain more information for each of these journal IDs. ESearch has many useful options — see the ESearch help page for more information. EPost: Uploading a list of identifiers EPost uploads a list of UIs for use in subsequent search strategies; see the EPost help page for more information. It is available from Biopython through the Bio.Entrez.epost() function. To give an example of when this is useful, suppose you have a long list of IDs you want to download using EFetch (maybe sequences, maybe citations – anything). When you make a request with EFetch your list of IDs, the database etc, are all turned into a long URL sent to the server. If your list of IDs is long, this URL gets long, and long URLs can break (e.g. some proxies don’t cope well). Instead, you can break this up into two steps, first uploading the list of IDs using EPost (this uses an “HTML post” internally, rather than an “HTML get”, getting round the long URL problem). With the history support, you can then refer to this long list of IDs, and download the associated data with EFetch. Let’s look at a simple example to see how EPost works – uploading some PubMed identifiers: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are id_list = ["19304878", "18606172", "16403221", "16377612", "14871861", "14630660"] search_results = Entrez.read(Entrez.epost("pubmed", id=",".join(id_list))) webenv = search_results["WebEnv"] query_key = search_results["QueryKey"] """ Explanation: The returned XML includes two important strings, QueryKey and WebEnv which together define your history session. You would extract these values for use with another Entrez call such as EFetch: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.esummary(db="nlmcatalog", term="[journal]", id="101660833") record = Entrez.read(handle) info = record[0]['TitleMainList'][0] print("Journal info\nid: {}\nTitle: {}".format(record[0]["Id"], info["Title"])) """ Explanation: Section History and WebEnv shows how to use the history feature. ESummary: Retrieving summaries from primary IDs ESummary retrieves document summaries from a list of primary IDs (see the ESummary help page for more information). In Biopython, ESummary is available as Bio.Entrez.esummary(). Using the search result above, we can for example find out more about the journal with ID 30367: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.efetch(db="nucleotide", id="186972394", rettype="gb", retmode="text") print(handle.read()) """ Explanation: EFetch: Downloading full records from Entrez EFetch is what you use when you want to retrieve a full record from Entrez. This covers several possible databases, as described on the main EFetch Help page. For most of their databases, the NCBI support several different file formats. Requesting a specific file format from Entrez using Bio.Entrez.efetch() requires specifying the rettype and/or retmode optional arguments. 
The different combinations are described for each database type on the pages linked to on NCBI efetch webpage (e.g. literature, sequences and taxonomy). One common usage is downloading sequences in the FASTA or GenBank/GenPept plain text formats (which can then be parsed with Bio.SeqIO, see Sections [sec:SeqIO_GenBank_Online] and EFetch: Downloading full records from Entrez). From the Cypripedioideae example above, we can download GenBank record 186972394 using Bio.Entrez.efetch: End of explanation """ from Bio import Entrez, SeqIO handle = Entrez.efetch(db="nucleotide", id="186972394", rettype="gb", retmode="text") record = SeqIO.read(handle, "genbank") handle.close() print(record) """ Explanation: The arguments rettype="gb" and retmode="text" let us download this record in the GenBank format. Note that until Easter 2009, the Entrez EFetch API let you use “genbank” as the return type, however the NCBI now insist on using the official return types of “gb” or “gbwithparts” (or “gp” for proteins) as described on online. Also not that until Feb 2012, the Entrez EFetch API would default to returning plain text files, but now defaults to XML. Alternatively, you could for example use rettype="fasta" to get the Fasta-format; see the EFetch Sequences Help page for other options. Remember – the available formats depend on which database you are downloading from - see the main EFetch Help page. If you fetch the record in one of the formats accepted by Bio.SeqIO (see Chapter [chapter:Bio.SeqIO]), you could directly parse it into a SeqRecord: End of explanation """ import os from Bio import SeqIO from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are filename = "gi_186972394.gbk" if not os.path.isfile(filename): # Downloading... with Entrez.efetch(db="nucleotide",id="186972394",rettype="gb", retmode="text") as net_handle: with open(filename, "w") as out_handle: out_handle.write(net_handle.read()) print("Saved") print("Parsing...") record = SeqIO.read(filename, "genbank") print(record) """ Explanation: Note that a more typical use would be to save the sequence data to a local file, and then parse it with Bio.SeqIO. This can save you having to re-download the same file repeatedly while working on your script, and places less load on the NCBI’s servers. For example: End of explanation """ from Bio import Entrez handle = Entrez.efetch(db="nucleotide", id="186972394", retmode="xml") record = Entrez.read(handle) handle.close() record[0]["GBSeq_definition"] record[0]["GBSeq_source"] """ Explanation: To get the output in XML format, which you can parse using the Bio.Entrez.read() function, use retmode="xml": End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" pmid = "19304878" record = Entrez.read(Entrez.elink(dbfrom="pubmed", id=pmid)) print(record[0].keys()) print('The record is from the {} database.'.format(record[0]["DbFrom"])) print('The IdList is {}.'.format(record[0]["IdList"])) """ Explanation: So, that dealt with sequences. For examples of parsing file formats specific to the other databases (e.g. the MEDLINE format used in PubMed), see Section Specialized parsers. If you want to perform a search with Bio.Entrez.esearch(), and then download the records with Bio.Entrez.efetch(), you should use the WebEnv history feature – see Section History and WebEnv. ELink: Searching for related items in NCBI Entrez ELink, available from Biopython as Bio.Entrez.elink(), can be used to find related items in the NCBI Entrez databases. 
For example, you can use this to find nucleotide entries for an entry in the gene database, and other cool stuff. Let's use ELink to find articles related to the Biopython application note published in Bioinformatics in 2009. The PubMed ID of this article is 19304878:
End of explanation
"""
from Bio import Entrez
Entrez.email = "[email protected]"
pmid = "19304878"
record = Entrez.read(Entrez.elink(dbfrom="pubmed", id=pmid))

print(record[0].keys())
print('The record is from the {} database.'.format(record[0]["DbFrom"]))
print('The IdList is {}.'.format(record[0]["IdList"]))
"""
Explanation: The record variable consists of a Python list, one for each database in which we searched. Since we specified only one PubMed ID to search for, record contains only one item. This item is a dictionary containing information about our search term, as well as all the related items that were found:
The "LinkSetDb" key contains the search results, stored as a list consisting of one item for each target database. In our search results, we only find hits in the PubMed database (although sub-divided into categories):
End of explanation
"""
print('There are {} search results'.format(len(record[0]["LinkSetDb"])))
for linksetdb in record[0]["LinkSetDb"]:
    print(linksetdb["DbTo"], linksetdb["LinkName"], len(linksetdb["Link"]))
"""
Explanation: The actual search results are stored under the "Link" key. In total, 110 items were found under standard search. Let's now look at the first search result:
End of explanation
"""
record[0]["LinkSetDb"][0]["Link"][0]
"""
Explanation: This is the article we searched for, which doesn't help us much, so let's look at the second search result:
End of explanation
"""
record[0]["LinkSetDb"][0]["Link"][1]
"""
Explanation: This paper, with PubMed ID 14630660, is about the Biopython PDB parser. We can use a loop to print out all PubMed IDs:
End of explanation
"""
for link in record[0]["LinkSetDb"][0]["Link"]:
    print(link["Id"])
"""
Explanation: Now that was nice, but personally I am often more interested in finding out whether a paper has been cited. Well, ELink can do that too – at least for journals in Pubmed Central (see Section [sec:elink-citations]).
For help on ELink, see the ELink help page. There is an entire sub-page just for the link names, describing how different databases can be cross referenced.
EGQuery: Global Query - counts for search terms
EGQuery provides counts for a search term in each of the Entrez databases (i.e. a global query). This is particularly useful to find out how many items your search terms would find in each database without actually performing lots of separate searches with ESearch (see the example in [subsec:entrez_example_genbank] below).
In this example, we use Bio.Entrez.egquery() to obtain the counts for "Biopython":
End of explanation
"""
from Bio import Entrez
Entrez.email = "[email protected]"  # Always tell NCBI who you are
handle = Entrez.egquery(term="biopython")
record = Entrez.read(handle)
for row in record["eGQueryResult"]:
    print(row["DbName"], row["Count"])
"""
Explanation: See the EGQuery help page for more information.
ESpell: Obtaining spelling suggestions
ESpell retrieves spelling suggestions. In this example, we use Bio.Entrez.espell() to obtain the correct spelling of Biopython:
End of explanation
"""
from Bio import Entrez
Entrez.email = "[email protected]"  # Always tell NCBI who you are
handle = Entrez.espell(term="biopythooon")
record = Entrez.read(handle)
record["Query"]
record["CorrectedQuery"]
"""
Explanation: See the ESpell help page for more information.
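In a script you might wrap this in a tiny helper (a sketch only, reusing the same espell call as above and assuming Entrez is imported with an email address set):
```python
def suggest_spelling(term):
    """Return NCBI's suggested spelling for a search term, or the term itself."""
    handle = Entrez.espell(term=term)
    record = Entrez.read(handle)
    return record.get("CorrectedQuery", term)
```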
The main use of this is for GUI tools to provide automatic suggestions for search terms. Parsing huge Entrez XML files The Entrez.read function reads the entire XML file returned by Entrez into a single Python object, which is kept in memory. To parse Entrez XML files too large to fit in memory, you can use the function Entrez.parse. This is a generator function that reads records in the XML file one by one. This function is only useful if the XML file reflects a Python list object (in other words, if Entrez.read on a computer with infinite memory resources would return a Python list). For example, you can download the entire Entrez Gene database for a given organism as a file from NCBI’s ftp site. These files can be very large. As an example, on September 4, 2009, the file Homo_sapiens.ags.gz, containing the Entrez Gene database for human, had a size of 116576 kB. This file, which is in the ASN format, can be converted into an XML file using NCBI’s gene2xml program (see NCBI’s ftp site for more information): ``` gene2xml -b T -i Homo_sapiens.ags -o Homo_sapiens.xml ``` The resulting XML file has a size of 6.1 GB. Attempting Entrez.read on this file will result in a MemoryError on many computers. The XML file Homo_sapiens.xml consists of a list of Entrez gene records, each corresponding to one Entrez gene in human. Entrez.parse retrieves these gene records one by one. You can then print out or store the relevant information in each record by iterating over the records. For example, this script iterates over the Entrez gene records and prints out the gene numbers and names for all current genes: <font color='red'>TODO: need alternate example, download option or ...</font> python from Bio import Entrez handle = open("Homo_sapiens.xml") records = Entrez.parse(handle) python for record in records: status = record['Entrezgene_track-info']['Gene-track']['Gene-track_status'] if status.attributes['value']=='discontinued': continue geneid = record['Entrezgene_track-info']['Gene-track']['Gene-track_geneid'] genename = record['Entrezgene_gene']['Gene-ref']['Gene-ref_locus'] print(geneid, genename) This will print: ``` 1 A1BG 2 A2M 3 A2MP 8 AA 9 NAT1 10 NAT2 11 AACP 12 SERPINA3 13 AADAC 14 AAMP 15 AANAT 16 AARS 17 AAVS1 ... ``` Handling errors Three things can go wrong when parsing an XML file: The file may not be an XML file to begin with; The file may end prematurely or otherwise be corrupted; The file may be correct XML, but contain items that are not represented in the associated DTD. The first case occurs if, for example, you try to parse a Fasta file as if it were an XML file: End of explanation """ from Bio import Entrez handle = open("data/einfo3.xml", 'rb') record = Entrez.read(handle, validate=False) """ Explanation: Here, the parser didn’t find the &lt;?xml ... tag with which an XML file is supposed to start, and therefore decides (correctly) that the file is not an XML file. When your file is in the XML format but is corrupted (for example, by ending prematurely), the parser will raise a CorruptedXMLError. 
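If you want to handle such truncated downloads gracefully, one option is to catch the exception explicitly (a sketch; the file name here is hypothetical):
```python
from Bio import Entrez
from Bio.Entrez.Parser import CorruptedXMLError

try:
    with open("possibly_truncated.xml", "rb") as handle:  # hypothetical file
        record = Entrez.read(handle)
except CorruptedXMLError as err:
    print("The XML download looks incomplete; consider re-fetching it:", err)
```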
Here is an example of an XML file that ends prematurely: ```xml <?xml version="1.0"?> <!DOCTYPE eInfoResult PUBLIC "-//NLM//DTD eInfoResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eInfo_020511.dtd"> <eInfoResult> <DbList> <DbName>pubmed</DbName> <DbName>protein</DbName> <DbName>nucleotide</DbName> <DbName>nuccore</DbName> <DbName>nucgss</DbName> <DbName>nucest</DbName> <DbName>structure</DbName> <DbName>genome</DbName> <DbName>books</DbName> <DbName>cancerchromosomes</DbName> <DbName>cdd</DbName> ``` which will generate the following traceback: ```python ExpatError Traceback (most recent call last) /Users/vincentdavis/anaconda/envs/py35/lib/python3.5/site-packages/Bio/Entrez/Parser.py in read(self, handle) 214 try: --> 215 self.parser.ParseFile(handle) 216 except expat.ExpatError as e: ExpatError: syntax error: line 1, column 0 During handling of the above exception, another exception occurred: NotXMLError Traceback (most recent call last) <ipython-input-63-ac0523d72453> in <module>() ----> 1 Entrez.read(handle) /Users/vincentdavis/anaconda/envs/py35/lib/python3.5/site-packages/Bio/Entrez/init.py in read(handle, validate) 419 from .Parser import DataHandler 420 handler = DataHandler(validate) --> 421 record = handler.read(handle) 422 return record 423 /Users/vincentdavis/anaconda/envs/py35/lib/python3.5/site-packages/Bio/Entrez/Parser.py in read(self, handle) 223 # We have not seen the initial <!xml declaration, so probably 224 # the input data is not in XML format. --> 225 raise NotXMLError(e) 226 try: 227 return self.object NotXMLError: Failed to parse the XML data (syntax error: line 1, column 0). Please make sure that the input data are in XML format. ``` Note that the error message tells you at what point in the XML file the error was detected. The third type of error occurs if the XML file contains tags that do not have a description in the corresponding DTD file. This is an example of such an XML file: ```xml <?xml version="1.0"?> <!DOCTYPE eInfoResult PUBLIC "-//NLM//DTD eInfoResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eInfo_020511.dtd"> <eInfoResult> <DbInfo> <DbName>pubmed</DbName> <MenuName>PubMed</MenuName> <Description>PubMed bibliographic record</Description> <Count>20161961</Count> <LastUpdate>2010/09/10 04:52</LastUpdate> <FieldList> <Field> ... </Field> </FieldList> <DocsumList> <Docsum> <DsName>PubDate</DsName> <DsType>4</DsType> <DsTypeName>string</DsTypeName> </Docsum> <Docsum> <DsName>EPubDate</DsName> ... </DbInfo> </eInfoResult> ``` In this file, for some reason the tag &lt;DocsumList&gt; (and several others) are not listed in the DTD file eInfo_020511.dtd, which is specified on the second line as the DTD for this XML file. 
By default, the parser will stop and raise a ValidationError if it cannot find some tag in the DTD: python from Bio import Entrez handle = open("data/einfo3.xml", 'rb') record = Entrez.read(handle) ```python ValidationError Traceback (most recent call last) <ipython-input-65-cfb96ec3d2ca> in <module>() 1 from Bio import Entrez 2 handle = open("data/einfo3.xml", 'rb') ----> 3 record = Entrez.read(handle) /Users/vincentdavis/anaconda/envs/py35/lib/python3.5/site-packages/Bio/Entrez/init.py in read(handle, validate) 419 from .Parser import DataHandler 420 handler = DataHandler(validate) --> 421 record = handler.read(handle) 422 return record 423 /Users/vincentdavis/anaconda/envs/py35/lib/python3.5/site-packages/Bio/Entrez/Parser.py in read(self, handle) 213 raise IOError("Can't parse a closed handle") 214 try: --> 215 self.parser.ParseFile(handle) 216 except expat.ExpatError as e: 217 if self.parser.StartElementHandler: -------src-dir--------/Python-3.5.1/Modules/pyexpat.c in StartElement() /Users/vincentdavis/anaconda/envs/py35/lib/python3.5/site-packages/Bio/Entrez/Parser.py in startElementHandler(self, name, attrs) 348 # Element not found in DTD 349 if self.validating: --> 350 raise ValidationError(name) 351 else: 352 # this will not be stored in the record ValidationError: Failed to find tag 'DocsumList' in the DTD. To skip all tags that are not represented in the DTD, please call Bio.Entrez.read or Bio.Entrez.parse with validate=False. ``` Optionally, you can instruct the parser to skip such tags instead of raising a ValidationError. This is done by calling Entrez.read or Entrez.parse with the argument validate equal to False: End of explanation """ from Bio import Medline with open("data/pubmed_result1.txt") as handle: record = Medline.read(handle) """ Explanation: Of course, the information contained in the XML tags that are not in the DTD are not present in the record returned by Entrez.read. Specialized parsers The Bio.Entrez.read() function can parse most (if not all) XML output returned by Entrez. Entrez typically allows you to retrieve records in other formats, which may have some advantages compared to the XML format in terms of readability (or download size). To request a specific file format from Entrez using Bio.Entrez.efetch() requires specifying the rettype and/or retmode optional arguments. The different combinations are described for each database type on the NCBI efetch webpage. One obvious case is you may prefer to download sequences in the FASTA or GenBank/GenPept plain text formats (which can then be parsed with Bio.SeqIO, see Sections [sec:SeqIO_GenBank_Online] and EFetch: Downloading full records from Entrez). For the literature databases, Biopython contains a parser for the MEDLINE format used in PubMed. Parsing Medline records {#subsec:entrez-and-medline} You can find the Medline parser in Bio.Medline. Suppose we want to parse the file pubmed_result1.txt, containing one Medline record. You can find this file in Biopython’s Tests\Medline directory. The file looks like this: ``` PMID- 12230038 OWN - NLM STAT- MEDLINE DA - 20020916 DCOM- 20030606 LR - 20041117 PUBM- Print IS - 1467-5463 (Print) VI - 3 IP - 3 DP - 2002 Sep TI - The Bio* toolkits--a brief overview. PG - 296-302 AB - Bioinformatics research is often difficult to do with commercial software. The Open Source BioPerl, BioPython and Biojava projects provide toolkits with ... 
``` We first open the file and then parse it: End of explanation """ record["PMID"] record["AB"] """ Explanation: The record now contains the Medline record as a Python dictionary: End of explanation """ help(record) """ Explanation: The key names used in a Medline record can be rather obscure; use End of explanation """ from Bio import Medline with open("data/pubmed_result2.txt") as handle: for record in Medline.parse(handle): print(record["TI"]) """ Explanation: for a brief summary. To parse a file containing multiple Medline records, you can use the parse function instead: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.esearch(db="pubmed", term="biopython") record = Entrez.read(handle) record["IdList"] """ Explanation: Instead of parsing Medline records stored in files, you can also parse Medline records downloaded by Bio.Entrez.efetch. For example, let’s look at all Medline records in PubMed related to Biopython: End of explanation """ idlist = record["IdList"] handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text") """ Explanation: We now use Bio.Entrez.efetch to download these Medline records: End of explanation """ from Bio import Medline records = Medline.parse(handle) for record in records: print(record["AU"]) """ Explanation: Here, we specify rettype="medline", retmode="text" to obtain the Medline records in plain-text Medline format. Now we use Bio.Medline to parse these records: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.esearch(db="pubmed", term="biopython") record = Entrez.read(handle) idlist = record["IdList"] handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="xml") records = Entrez.read(handle) for record in records: print(record["MedlineCitation"]["Article"]["ArticleTitle"]) """ Explanation: For comparison, here we show an example using the XML format: End of explanation """ from Bio import Geo handle = open("data/GSE16.txt") records = Geo.parse(handle) for record in records: print(record) """ Explanation: Note that in both of these examples, for simplicity we have naively combined ESearch and EFetch. In this situation, the NCBI would expect you to use their history feature, as illustrated in Section History and WebEnv. Parsing GEO records GEO (Gene Expression Omnibus) is a data repository of high-throughput gene expression and hybridization array data. The Bio.Geo module can be used to parse GEO-formatted data. The following code fragment shows how to parse the example GEO file GSE16.txt into a record and print the record: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.esearch(db="gds", term="GSE16") record = Entrez.read(handle) record["Count"] record["IdList"] """ Explanation: You can search the “gds” database (GEO datasets) with ESearch: End of explanation """ # from Bio import UniGene # input = open("data/myunigenefile.data") # record = UniGene.read(input) """ Explanation: From the Entrez website, UID “200000016” is GDS16 while the other hit “100000028” is for the associated platform, GPL28. Unfortunately, at the time of writing the NCBI don’t seem to support downloading GEO files using Entrez (not as XML, nor in the Simple Omnibus Format in Text (SOFT) format). 
However, it is actually pretty straight forward to download the GEO files by FTP or HTTP from http://ftp.ncbi.nih.gov/pub/geo/ instead. In this case you might want http://ftp.ncbi.nih.gov/pub/geo/DATA/SOFT/by_series/GSE16/GSE16_family.soft.gz (a compressed file, see the Python module gzip). Parsing UniGene records UniGene is an NCBI database of the transcriptome, with each UniGene record showing the set of transcripts that are associated with a particular gene in a specific organism. A typical UniGene record looks like this: ``` ID Hs.2 TITLE N-acetyltransferase 2 (arylamine N-acetyltransferase) GENE NAT2 CYTOBAND 8p22 GENE_ID 10 LOCUSLINK 10 HOMOL YES EXPRESS bone| connective tissue| intestine| liver| liver tumor| normal| soft tissue/muscle tissue tumor| adult RESTR_EXPR adult CHROMOSOME 8 STS ACC=PMC310725P3 UNISTS=272646 STS ACC=WIAF-2120 UNISTS=44576 STS ACC=G59899 UNISTS=137181 ... STS ACC=GDB:187676 UNISTS=155563 PROTSIM ORG=10090; PROTGI=6754794; PROTID=NP_035004.1; PCT=76.55; ALN=288 PROTSIM ORG=9796; PROTGI=149742490; PROTID=XP_001487907.1; PCT=79.66; ALN=288 PROTSIM ORG=9986; PROTGI=126722851; PROTID=NP_001075655.1; PCT=76.90; ALN=288 ... PROTSIM ORG=9598; PROTGI=114619004; PROTID=XP_519631.2; PCT=98.28; ALN=288 SCOUNT 38 SEQUENCE ACC=BC067218.1; NID=g45501306; PID=g45501307; SEQTYPE=mRNA SEQUENCE ACC=NM_000015.2; NID=g116295259; PID=g116295260; SEQTYPE=mRNA SEQUENCE ACC=D90042.1; NID=g219415; PID=g219416; SEQTYPE=mRNA SEQUENCE ACC=D90040.1; NID=g219411; PID=g219412; SEQTYPE=mRNA SEQUENCE ACC=BC015878.1; NID=g16198419; PID=g16198420; SEQTYPE=mRNA SEQUENCE ACC=CR407631.1; NID=g47115198; PID=g47115199; SEQTYPE=mRNA SEQUENCE ACC=BG569293.1; NID=g13576946; CLONE=IMAGE:4722596; END=5'; LID=6989; SEQTYPE=EST; TRACE=44157214 ... SEQUENCE ACC=AU099534.1; NID=g13550663; CLONE=HSI08034; END=5'; LID=8800; SEQTYPE=EST // ``` This particular record shows the set of transcripts (shown in the SEQUENCE lines) that originate from the human gene NAT2, encoding en N-acetyltransferase. The PROTSIM lines show proteins with significant similarity to NAT2, whereas the STS lines show the corresponding sequence-tagged sites in the genome. To parse UniGene files, use the Bio.UniGene module: <font color='red'>TODO: Need a working example</font> End of explanation """ # record.ID # record.title """ Explanation: The record returned by UniGene.read is a Python object with attributes corresponding to the fields in the UniGene record. For example, End of explanation """ # record.sts[0].acc # record.sts[0].unists """ Explanation: The EXPRESS and RESTR_EXPR lines are stored as Python lists of strings: ``` ['bone', 'connective tissue', 'intestine', 'liver', 'liver tumor', 'normal', 'soft tissue/muscle tissue tumor', 'adult'] ``` Specialized objects are returned for the STS, PROTSIM, and SEQUENCE lines, storing the keys shown in each line as attributes: End of explanation """ # from Bio import UniGene # input = open("unigenerecords.data") # records = UniGene.parse(input) # for record in records: # print(record.ID) """ Explanation: and similarly for the PROTSIM and SEQUENCE lines. 
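For instance, a tiny helper along these lines (a sketch only; it relies on the sts attributes demonstrated above, since the notebook's UniGene example data is still marked as TODO) would print every sequence-tagged site in a record:
```python
def list_sts(record):
    """Print the accession and UniSTS identifier of each STS line."""
    for sts in record.sts:
        print(sts.acc, sts.unists)
```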
To parse a file containing more than one UniGene record, use the parse function in Bio.UniGene: <font color='red'>TODO: Need a working example</font> End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.egquery(term="orchid") record = Entrez.read(handle) for row in record["eGQueryResult"]: if row["DbName"]=="pubmed": print(row["Count"]) """ Explanation: Using a proxy Normally you won’t have to worry about using a proxy, but if this is an issue on your network here is how to deal with it. Internally, Bio.Entrez uses the standard Python library urllib for accessing the NCBI servers. This will check an environment variable called http_proxy to configure any simple proxy automatically. Unfortunately this module does not support the use of proxies which require authentication. You may choose to set the http_proxy environment variable once (how you do this will depend on your operating system). Alternatively you can set this within Python at the start of your script, for example: ``` import os os.environ["http_proxy"] = "http://proxyhost.example.com:8080" ``` See the urllib documentation for more details. Examples PubMed and Medline {#subsec:pub_med} If you are in the medical field or interested in human issues (and many times even if you are not!), PubMed (http://www.ncbi.nlm.nih.gov/PubMed/) is an excellent source of all kinds of goodies. So like other things, we’d like to be able to grab information from it and use it in Python scripts. In this example, we will query PubMed for all articles having to do with orchids (see section [sec:orchids] for our motivation). We first check how many of such articles there are: End of explanation """ handle = Entrez.esearch(db="pubmed", term="orchid", retmax=463) record = Entrez.read(handle) idlist = record["IdList"] print("The first 10 Id's containing all of the PubMed IDs of articles related to orchids:\n {}".format(idlist[:10])) """ Explanation: Now we use the Bio.Entrez.efetch function to download the PubMed IDs of these 463 articles: End of explanation """ from Bio import Medline handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline") records = Medline.parse(handle) """ Explanation: Now that we’ve got them, we obviously want to get the corresponding Medline records and extract the information from them. Here, we’ll download the Medline records in the Medline flat-file format, and use the Bio.Medline module to parse them: End of explanation """ records = list(records) """ Explanation: NOTE - We’ve just done a separate search and fetch here, the NCBI much prefer you to take advantage of their history support in this situation. See Section History and WebEnv. Keep in mind that records is an iterator, so you can iterate through the records only once. 
If you want to save the records, you can convert them to a list: End of explanation """ for record in records: print("title:", record.get("TI", "?")) print("authors:", record.get("AU", "?")) print("source:", record.get("SO", "?")) print("") """ Explanation: Let’s now iterate over the records to print out some information about each record: End of explanation """ search_author = "Waits T" for record in records: if not "AU" in record: continue if search_author in record["AU"]: print("Author %s found: %s" % (search_author, record["SO"])) """ Explanation: The output for this looks like: ``` title: Sex pheromone mimicry in the early spider orchid (ophrys sphegodes): patterns of hydrocarbons as the key mechanism for pollination by sexual deception [In Process Citation] authors: ['Schiestl FP', 'Ayasse M', 'Paulus HF', 'Lofstedt C', 'Hansson BS', 'Ibarra F', 'Francke W'] source: J Comp Physiol [A] 2000 Jun;186(6):567-74 ``` Especially interesting to note is the list of authors, which is returned as a standard Python list. This makes it easy to manipulate and search using standard Python tools. For instance, we could loop through a whole bunch of entries searching for a particular author with code like the following: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.egquery(term="Cypripedioideae") record = Entrez.read(handle) for row in record["eGQueryResult"]: if row["DbName"]=="nuccore": print(row["Count"]) """ Explanation: Hopefully this section gave you an idea of the power and flexibility of the Entrez and Medline interfaces and how they can be used together. Searching, downloading, and parsing Entrez Nucleotide records {#subsec:entrez_example_genbank} Here we’ll show a simple example of performing a remote Entrez query. In section [sec:orchids] of the parsing examples, we talked about using NCBI’s Entrez website to search the NCBI nucleotide databases for info on Cypripedioideae, our friends the lady slipper orchids. Now, we’ll look at how to automate that process using a Python script. In this example, we’ll just show how to connect, get the results, and parse them, with the Entrez module doing all of the work. First, we use EGQuery to find out the number of results we will get before actually downloading them. EGQuery will tell us how many search results were found in each of the databases, but for this example we are only interested in nucleotides: End of explanation """ from Bio import Entrez handle = Entrez.esearch(db="nucleotide", term="Cypripedioideae", retmax=814) record = Entrez.read(handle) """ Explanation: So, we expect to find 814 Entrez Nucleotide records (this is the number I obtained in 2008; it is likely to increase in the future). If you find some ridiculously high number of hits, you may want to reconsider if you really want to download all of them, which is our next step: End of explanation """ print(record.keys()) """ Explanation: Here, record is a Python dictionary containing the search results and some auxiliary information. Just for information, let’s look at what is stored in this dictionary: End of explanation """ print(record["Count"]) """ Explanation: First, let’s check how many results were found: End of explanation """ len(record["IdList"]) """ Explanation: which is the number we expected. 
The 814 results are stored in record['IdList']: End of explanation """ record["IdList"][:5] """ Explanation: Let’s look at the first five results: End of explanation """ idlist = ",".join(record["IdList"][:5]) print(idlist) handle = Entrez.efetch(db="nucleotide", id=idlist, retmode="xml") records = Entrez.read(handle) len(records) """ Explanation: [sec:entrez-batched-efetch] We can download these records using efetch. While you could download these records one by one, to reduce the load on NCBI’s servers, it is better to fetch a bunch of records at the same time, shown below. However, in this situation you should ideally be using the history feature described later in Section History and WebEnv. End of explanation """ print(records[0].keys()) print(records[0]["GBSeq_primary-accession"]) print(records[0]["GBSeq_other-seqids"]) print(records[0]["GBSeq_definition"]) print(records[0]["GBSeq_organism"]) """ Explanation: Each of these records corresponds to one GenBank record. End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.egquery(term="Opuntia AND rpl16") record = Entrez.read(handle) for row in record["eGQueryResult"]: if row["DbName"]=="nuccore": print(row["Count"]) """ Explanation: You could use this to quickly set up searches – but for heavy usage, see Section History and WebEnv. Searching, downloading, and parsing GenBank records {#sec:entrez-search-fetch-genbank} The GenBank record format is a very popular method of holding information about sequences, sequence features, and other associated sequence information. The format is a good way to get information from the NCBI databases at http://www.ncbi.nlm.nih.gov/. In this example we’ll show how to query the NCBI databases,to retrieve the records from the query, and then parse them using Bio.SeqIO - something touched on in Section [sec:SeqIO_GenBank_Online]. For simplicity, this example does not take advantage of the WebEnv history feature – see Section History and WebEnv for this. First, we want to make a query and find out the ids of the records to retrieve. Here we’ll do a quick search for one of our favorite organisms, Opuntia (prickly-pear cacti). We can do quick search and get back the GIs (GenBank identifiers) for all of the corresponding records. First we check how many records there are: End of explanation """ handle = Entrez.esearch(db="nuccore", term="Opuntia AND rpl16") record = Entrez.read(handle) gi_list = record["IdList"] gi_list """ Explanation: Now we download the list of GenBank identifiers: End of explanation """ gi_str = ",".join(gi_list) handle = Entrez.efetch(db="nuccore", id=gi_str, rettype="gb", retmode="text") """ Explanation: Now we use these GIs to download the GenBank records - note that with older versions of Biopython you had to supply a comma separated list of GI numbers to Entrez, as of Biopython 1.59 you can pass a list and this is converted for you: End of explanation """ text = handle.read() print(text) """ Explanation: If you want to look at the raw GenBank files, you can read from this handle and print out the result: End of explanation """ from Bio import SeqIO handle = Entrez.efetch(db="nuccore", id=gi_str, rettype="gb", retmode="text") records = SeqIO.parse(handle, "gb") """ Explanation: In this case, we are just getting the raw records. 
To get the records in a more Python-friendly form, we can use Bio.SeqIO to parse the GenBank data into SeqRecord objects, including SeqFeature objects (see Chapter [chapter:Bio.SeqIO]): End of explanation """ for record in records: print("%s, length %i, with %i features" \ % (record.name, len(record), len(record.features))) """ Explanation: We can now step through the records and look at the information we are interested in: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" # Always tell NCBI who you are handle = Entrez.esearch(db="Taxonomy", term="Cypripedioideae") record = Entrez.read(handle) record["IdList"] record["IdList"][0] """ Explanation: Using these automated query retrieval functionality is a big plus over doing things by hand. Although the module should obey the NCBI’s max three queries per second rule, the NCBI have other recommendations like avoiding peak hours. See Section [sec:entrez-guidelines]. In particular, please note that for simplicity, this example does not use the WebEnv history feature. You should use this for any non-trivial search and download work, see Section History and WebEnv. Finally, if plan to repeat your analysis, rather than downloading the files from the NCBI and parsing them immediately (as shown in this example), you should just download the records once and save them to your hard disk, and then parse the local file. Finding the lineage of an organism Staying with a plant example, let’s now find the lineage of the Cypripedioideae orchid family. First, we search the Taxonomy database for Cypripedioideae, which yields exactly one NCBI taxonomy identifier: End of explanation """ handle = Entrez.efetch(db="Taxonomy", id="158330", retmode="xml") records = Entrez.read(handle) """ Explanation: Now, we use efetch to download this entry in the Taxonomy database, and then parse it: End of explanation """ records[0].keys() """ Explanation: Again, this record stores lots of information: End of explanation """ records[0]["Lineage"] """ Explanation: We can get the lineage directly from this record: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" search_handle = Entrez.esearch(db="nucleotide",term="Opuntia[orgn] and rpl16", usehistory="y") search_results = Entrez.read(search_handle) search_handle.close() """ Explanation: The record data contains much more than just the information shown here - for example look under LineageEx instead of Lineage and you’ll get the NCBI taxon identifiers of the lineage entries too. Using the history and WebEnv Often you will want to make a series of linked queries. Most typically, running a search, perhaps refining the search, and then retrieving detailed search results. You can do this by making a series of separate calls to Entrez. However, the NCBI prefer you to take advantage of their history support - for example combining ESearch and EFetch. Another typical use of the history support would be to combine EPost and EFetch. You use EPost to upload a list of identifiers, which starts a new history session. You then download the records with EFetch by referring to the session (instead of the identifiers). Searching for and downloading sequences using the history Suppose we want to search and download all the Opuntia rpl16 nucleotide sequences, and store them in a FASTA file. As shown in Section [sec:entrez-search-fetch-genbank], we can naively combine Bio.Entrez.esearch() to get a list of GI numbers, and then call Bio.Entrez.efetch() to download them all. 
However, the approved approach is to run the search with the history feature. Then, we can fetch the results by reference to the search results - which the NCBI can anticipate and cache. To do this, call Bio.Entrez.esearch() as normal, but with the additional argument of usehistory="y", End of explanation """ gi_list = search_results["IdList"] count = int(search_results["Count"]) assert count == len(gi_list) print("The WebEnv is {}".format(search_results["WebEnv"])) print("The QueryKey is {}".format(search_results["QueryKey"])) """ Explanation: When you get the XML output back, it will still include the usual search results. However, you also get given two additional pieces of information, the WebEnv session cookie, and the QueryKey: End of explanation """ from Bio import Entrez Entrez.email = "[email protected]" pmid = "14630660" results = Entrez.read(Entrez.elink(dbfrom="pubmed", db="pmc", LinkName="pubmed_pmc_refs", from_uid=pmid)) pmc_ids = [link["Id"] for link in results[0]["LinkSetDb"][0]["Link"]] pmc_ids """ Explanation: Having stored these values in variables <span>session_cookie</span> and <span>query_key</span> we can use them as parameters to Bio.Entrez.efetch() instead of giving the GI numbers as identifiers. While for small searches you might be OK downloading everything at once, it is better to download in batches. You use the <span>retstart</span> and <span>retmax</span> parameters to specify which range of search results you want returned (starting entry using zero-based counting, and maximum number of results to return). Sometimes you will get intermittent errors from Entrez, HTTPError 5XX, we use a try except pause retry block to address this. For example, ``` from Bio import Entrez import time try: from urllib.error import HTTPError # for Python 3 except ImportError: from urllib2 import HTTPError # for Python 2 batch_size = 3 out_handle = open("orchid_rpl16.fasta", "w") for start in range(0, count, batch_size): end = min(count, start+batch_size) print("Going to download record %i to %i" % (start+1, end)) attempt = 1 while attempt <= 3: try: fetch_handle = Entrez.efetch(db="nucleotide", rettype="fasta", retmode="text", retstart=start, retmax=batch_size, webenv=webenv, query_key=query_key) except HTTPError as err: if 500 <= err.code <= 599: print("Received error from server %s" % err) print("Attempt %i of 3" % attempt) attempt += 1 time.sleep(15) else: raise data = fetch_handle.read() fetch_handle.close() out_handle.write(data) out_handle.close() ``` For illustrative purposes, this example downloaded the FASTA records in batches of three. Unless you are downloading genomes or chromosomes, you would normally pick a larger batch size. 
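Once the batched download has finished, the saved file can be sanity-checked with Bio.SeqIO (a small sketch reusing the orchid_rpl16.fasta file name from the code above):
```python
from Bio import SeqIO

count = 0
for seq_record in SeqIO.parse("orchid_rpl16.fasta", "fasta"):
    count += 1
print("Downloaded %i rpl16 sequences" % count)
```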
Searching for and downloading abstracts using the history Here is another history example, searching for papers published in the last year about the Opuntia, and then downloading them into a file in MedLine format: ``` from Bio import Entrez import time try: from urllib.error import HTTPError # for Python 3 except ImportError: from urllib2 import HTTPError # for Python 2 Entrez.email = "[email protected]" search_results = Entrez.read(Entrez.esearch(db="pubmed", term="Opuntia[ORGN]", reldate=365, datetype="pdat", usehistory="y")) count = int(search_results["Count"]) print("Found %i results" % count) batch_size = 10 out_handle = open("recent_orchid_papers.txt", "w") for start in range(0,count,batch_size): end = min(count, start+batch_size) print("Going to download record %i to %i" % (start+1, end)) attempt = 1 while attempt <= 3: try: fetch_handle = Entrez.efetch(db="pubmed",rettype="medline", retmode="text",retstart=start, retmax=batch_size, webenv=search_results["WebEnv"], query_key=search_results["QueryKey"]) except HTTPError as err: if 500 <= err.code <= 599: print("Received error from server %s" % err) print("Attempt %i of 3" % attempt) attempt += 1 time.sleep(15) else: raise data = fetch_handle.read() fetch_handle.close() out_handle.write(data) out_handle.close() ``` At the time of writing, this gave 28 matches - but because this is a date dependent search, this will of course vary. As described in Section [subsec:entrez-and-medline] above, you can then use Bio.Medline to parse the saved records. Searching for citations {#sec:elink-citations} Back in Section [sec:elink] we mentioned ELink can be used to search for citations of a given paper. Unfortunately this only covers journals indexed for PubMed Central (doing it for all the journals in PubMed would mean a lot more work for the NIH). Let’s try this for the Biopython PDB parser paper, PubMed ID 14630660: End of explanation """ results2 = Entrez.read(Entrez.elink(dbfrom="pmc", db="pubmed", LinkName="pmc_pubmed", from_uid=",".join(pmc_ids))) pubmed_ids = [link["Id"] for link in results2[0]["LinkSetDb"][0]["Link"]] pubmed_ids """ Explanation: Great - eleven articles. But why hasn’t the Biopython application note been found (PubMed ID 19304878)? Well, as you might have guessed from the variable names, there are not actually PubMed IDs, but PubMed Central IDs. Our application note is the third citing paper in that list, PMCID 2682512. So, what if (like me) you’d rather get back a list of PubMed IDs? Well we can call ELink again to translate them. This becomes a two step process, so by now you should expect to use the history feature to accomplish it (Section History and WebEnv). But first, taking the more straightforward approach of making a second (separate) call to ELink: End of explanation """
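"""
Explanation: As a quick illustrative check (a sketch added here; it is not part of the original text, and the exact hits will change over time), you can confirm whether the Biopython application note now shows up in the translated list of PubMed IDs:
```
print("19304878" in pubmed_ids)
```
End of explanation
"""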
flaviocordova/udacity_deep_learn_project
image-classification/dlnd_image_classification.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import problem_unittests as tests import tarfile cifar10_dataset_folder_path = 'cifar-10-batches-py' # Use Floyd's cifar-10 dataset if present floyd_cifar10_location = '/input/cifar-10/python.tar.gz' if isfile(floyd_cifar10_location): tar_gz_path = floyd_cifar10_location else: tar_gz_path = 'cifar-10-python.tar.gz' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(tar_gz_path): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar: urlretrieve( 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', tar_gz_path, pbar.hook) if not isdir(cifar10_dataset_folder_path): with tarfile.open(tar_gz_path) as tar: tar.extractall() tar.close() tests.test_folder_path(cifar10_dataset_folder_path) """ Explanation: Image Classification In this project, you'll classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images. Get the Data Run the following cell to download the CIFAR-10 dataset for python. End of explanation """ %matplotlib inline %config InlineBackend.figure_format = 'retina' import helper import numpy as np # Explore the dataset batch_id = 1 sample_id = 5 helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id) """ Explanation: Explore the Data The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. Each batch contains the labels and images that are one of the following: * airplane * automobile * bird * cat * deer * dog * frog * horse * ship * truck Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for a image and label pair in the batch. Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions. End of explanation """ def normalize(x): """ Normalize a list of sample image data in the range of 0 to 1 : x: List of image data. The image shape is (32, 32, 3) : return: Numpy array of normalize data """ return x / 255 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_normalize(normalize) """ Explanation: Implement Preprocess Functions Normalize In the cell below, implement the normalize function to take in image data, x, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as x. End of explanation """ def one_hot_encode(x): """ One hot encode a list of sample labels. Return a one-hot encoded vector for each label. 
: x: List of sample Labels : return: Numpy array of one-hot encoded labels """ result = np.zeros((len(x), 10)) for i in range(len(x)): result[i, x[i]] = 1 return result """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_one_hot_encode(one_hot_encode) """ Explanation: One-hot encode Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the one_hot_encode function. The input, x, are a list of labels. Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to one_hot_encode. Make sure to save the map of encodings outside the function. Hint: Don't reinvent the wheel. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode) """ Explanation: Randomize Data As you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset. Preprocess all the data and save it Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import pickle import problem_unittests as tests import helper # Load the Preprocessed Validation data valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb')) """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ import tensorflow as tf def neural_net_image_input(image_shape): """ Return a Tensor for a batch of image input : image_shape: Shape of the images : return: Tensor for image input. """ # TODO: Implement Function new_dim = a = (None,) + image_shape return tf.placeholder(tf.float32, shape=new_dim, name='x') def neural_net_label_input(n_classes): """ Return a Tensor for a batch of label input : n_classes: Number of classes : return: Tensor for label input. """ # TODO: Implement Function return tf.placeholder(tf.float32, shape=(None, n_classes), name='y') def neural_net_keep_prob_input(): """ Return a Tensor for keep probability : return: Tensor for keep probability. """ # TODO: Implement Function return tf.placeholder(tf.float32, name='keep_prob') """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tf.reset_default_graph() tests.test_nn_image_inputs(neural_net_image_input) tests.test_nn_label_inputs(neural_net_label_input) tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input) """ Explanation: Build the network For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project. Note: If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. 
In the next couple of problems, you'll have the option to use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup. However, if you would like to get the most out of this course, try to solve all the problems without using anything from the TF Layers packages. You can still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the conv2d class, tf.layers.conv2d, you would want to use the TF Neural Network version of conv2d, tf.nn.conv2d. Let's begin! Input The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions * Implement neural_net_image_input * Return a TF Placeholder * Set the shape using image_shape with batch size set to None. * Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder. * Implement neural_net_label_input * Return a TF Placeholder * Set the shape using n_classes with batch size set to None. * Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder. * Implement neural_net_keep_prob_input * Return a TF Placeholder for dropout keep probability. * Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder. These names will be used at the end of the project to load your saved model. Note: None for shapes in TensorFlow allow for a dynamic size. End of explanation """ def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides): """ Apply convolution then max pooling to x_tensor :param x_tensor: TensorFlow Tensor :param conv_num_outputs: Number of outputs for the convolutional layer :param conv_ksize: kernal size 2-D Tuple for the convolutional layer :param conv_strides: Stride 2-D Tuple for convolution :param pool_ksize: kernal size 2-D Tuple for pool :param pool_strides: Stride 2-D Tuple for pool : return: A tensor that represents convolution and max pooling of x_tensor """ PADDING = 'SAME' weight_dim = conv_ksize + (x_tensor.get_shape()[3].value, conv_num_outputs) weights = tf.Variable(tf.truncated_normal(weight_dim, mean=0, stddev=0.1)) bias = tf.Variable(tf.zeros(conv_num_outputs)) strides_dim = [1, conv_strides[0], conv_strides[1], 1] conv_layer = tf.nn.conv2d(x_tensor, weights, strides=strides_dim, padding=PADDING) conv_layer = tf.nn.bias_add(conv_layer, bias) conv_layer = tf.nn.relu(conv_layer) conv_layer = tf.nn.max_pool(conv_layer, \ [1, pool_ksize[0], pool_ksize[1], 1], \ [1, pool_strides[0], pool_strides[1],1], \ PADDING) # TODO: Implement Function return conv_layer """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_con_pool(conv2d_maxpool) """ Explanation: Convolution and Max Pooling Layer Convolution layers have a lot of success with images. For this code cell, you should implement the function conv2d_maxpool to apply convolution then max pooling: * Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor. * Apply a convolution to x_tensor using weight and conv_strides. * We recommend you use same padding, but you're welcome to use any padding. * Add bias * Add a nonlinear activation to the convolution. * Apply Max Pooling using pool_ksize and pool_strides. 
* We recommend you use same padding, but you're welcome to use any padding. Note: You can't use TensorFlow Layers or TensorFlow Layers (contrib) for this layer, but you can still use TensorFlow's Neural Network package. You may still use the shortcut option for all the other layers. End of explanation """ def flatten(x_tensor): """ Flatten x_tensor to (Batch Size, Flattened Image Size) : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions. : return: A tensor of size (Batch Size, Flattened Image Size). """ # TODO: Implement Function new_len = x_tensor.get_shape()[1].value * x_tensor.get_shape()[2].value * x_tensor.get_shape()[3].value new_t = tf.reshape(x_tensor, [-1, new_len]) return new_t """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_flatten(flatten) """ Explanation: Flatten Layer Implement the flatten function to change the dimension of x_tensor from a 4-D tensor to a 2-D tensor. The output should be the shape (Batch Size, Flattened Image Size). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. End of explanation """ def fully_conn(x_tensor, num_outputs): """ Apply a fully connected layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. """ # TODO: Implement Function num_inputs = x_tensor.get_shape()[1].value W = tf.Variable(tf.random_normal([num_inputs, num_outputs], mean=0, stddev=0.1)) B = tf.Variable(tf.zeros(num_outputs)) layer = tf.add(tf.matmul(x_tensor, W), B) layer = tf.nn.relu(layer) return layer """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_fully_conn(fully_conn) """ Explanation: Fully-Connected Layer Implement the fully_conn function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. End of explanation """ def output(x_tensor, num_outputs): """ Apply a output layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. """ # TODO: Implement Function num_inputs = x_tensor.get_shape()[1].value W = tf.Variable(tf.random_normal([num_inputs, num_outputs], mean=0, stddev=0.1)) B = tf.Variable(tf.random_normal([num_outputs])) output = tf.add(tf.matmul(x_tensor, W), B) return output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_output(output) """ Explanation: Output Layer Implement the output function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. Note: Activation, softmax, or cross entropy should not be applied to this. End of explanation """ def conv_net(x, keep_prob): """ Create a convolutional neural network model : x: Placeholder tensor that holds image data. : keep_prob: Placeholder tensor that hold dropout keep probability. 
: return: Tensor that represents logits """ # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers # Play around with different number of outputs, kernel size and stride # Function Definition from Above: # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides) layers = [{'outputs':16, 'o_ksize':(2, 2), 'o_strides':(1, 1), 'p_ksize':(2,2), 'p_strides':(1,1)}, {'outputs':32, 'o_ksize':(4, 4), 'o_strides':(1, 1), 'p_ksize':(2,2), 'p_strides':(2,2)}, {'outputs':64, 'o_ksize':(4, 4), 'o_strides':(1, 1), 'p_ksize':(2,2), 'p_strides':(2,2)}] for l in layers: x = conv2d_maxpool(x, l['outputs'], l['o_ksize'], l['o_strides'], l['p_ksize'], l['p_strides']) #print('layer: ', x) # TODO: Apply a Flatten Layer # Function Definition from Above: # flatten(x_tensor) x = flatten(x) # TODO: Apply 1, 2, or 3 Fully Connected Layers # Play around with different number of outputs # Function Definition from Above: # fully_conn(x_tensor, num_outputs) x = fully_conn(x, 128) # add dropout !!! x = tf.nn.dropout(x, keep_prob=keep_prob) # TODO: Apply an Output Layer # Set this to the number of classes # Function Definition from Above: # output(x_tensor, num_outputs) # TODO: return output return output(x, 10) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ ############################## ## Build the Neural Network ## ############################## # Remove previous weights, bias, inputs, etc.. tf.reset_default_graph() # Inputs x = neural_net_image_input((32, 32, 3)) y = neural_net_label_input(10) keep_prob = neural_net_keep_prob_input() # Model logits = conv_net(x, keep_prob) # Name logits Tensor, so that is can be loaded from disk after training logits = tf.identity(logits, name='logits') # Loss and Optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) optimizer = tf.train.AdamOptimizer().minimize(cost) # Accuracy correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy') tests.test_conv_net(conv_net) """ Explanation: Create Convolutional Model Implement the function conv_net to create a convolutional neural network model. The function takes in a batch of images, x, and outputs logits. Use the layers you created above to create this model: Apply 1, 2, or 3 Convolution and Max Pool layers Apply a Flatten Layer Apply 1, 2, or 3 Fully Connected Layers Apply an Output Layer Return the output Apply TensorFlow's Dropout to one or more layers in the model using keep_prob. End of explanation """ def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch): """ Optimize the session on a batch of images and labels : session: Current TensorFlow session : optimizer: TensorFlow optimizer function : keep_probability: keep probability : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data """ # TODO: Implement Function session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob:keep_probability}) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_train_nn(train_neural_network) """ Explanation: Train the Neural Network Single Optimization Implement the function train_neural_network to do a single optimization. 
The optimization should use optimizer to optimize in session with a feed_dict of the following: * x for image input * y for labels * keep_prob for keep probability for dropout This function will be called for each batch, so tf.global_variables_initializer() has already been called. Note: Nothing needs to be returned. This function is only optimizing the neural network. End of explanation """ def print_stats(session, feature_batch, label_batch, cost, accuracy): """ Print information about loss and validation accuracy : session: Current TensorFlow session : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data : cost: TensorFlow cost function : accuracy: TensorFlow accuracy function """ # TODO: Implement Function # Calculate Training and Validation accuracy # valid_acc = sess.run(accuracy , feed_dict={x: valid_features, y: valid_labels, keep_prob: 0.5}) global_accuracy = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels ,keep_prob:1.0}) # loss = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 0.5}) # kept keep_prob global_cost = session.run(cost, feed_dict={x: feature_batch, y: label_batch ,keep_prob:1.0}) print('Accuracy: {:.3g},\t Cost: {:.3g}'.format(global_accuracy, global_cost)) # batch_accuracy = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels ,keep_prob:1.0}) # batch_cost = session.run(cost, feed_dict={x: valid_features, y: valid_labels ,keep_prob:1.0}) # Log batches # print('\tBATCH Accuracy: {:.3g},\t Cost: {:.3g}'.format(batch_accuracy, batch_cost)) """ Explanation: Show Stats Implement the function print_stats to print loss and validation accuracy. Use the global variables valid_features and valid_labels to calculate validation accuracy. Use a keep probability of 1.0 to calculate the loss and validation accuracy. End of explanation """ # TODO: Tune Parameters epochs = 15 batch_size = 256 keep_probability = .5 """ Explanation: Hyperparameters Tune the following parameters: * Set epochs to the number of iterations until the network stops learning or start overfitting * Set batch_size to the highest number that your machine has memory for. Most people set them to common sizes of memory: * 64 * 128 * 256 * ... * Set keep_probability to the probability of keeping a node using dropout End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ print('Checking the Training on a Single Batch...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) # Training cycle for epoch in range(epochs): batch_i = 1 for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i)) #, end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) """ Explanation: Train on a Single CIFAR-10 Batch Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section. 
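Before kicking off training, it can also help to sanity-check the shapes of one preprocessed batch (a sketch; it assumes the project's helper.load_preprocess_training_batch yields (features, labels) pairs, as in the training loops below):
```
import helper
batch_features, batch_labels = next(iter(helper.load_preprocess_training_batch(1, batch_size)))
print(batch_features.shape)  # expected: (at most batch_size, 32, 32, 3)
print(batch_labels.shape)    # expected: (at most batch_size, 10)
```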
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ save_model_path = './image_classification' print('Training...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) # Training cycle for epoch in range(epochs): # Loop over all batches n_batches = 5 for batch_i in range(1, n_batches + 1): for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) # Save Model saver = tf.train.Saver() save_path = saver.save(sess, save_model_path) """ Explanation: Fully Train the Model Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ %matplotlib inline %config InlineBackend.figure_format = 'retina' import tensorflow as tf import pickle import helper import random # Set batch size if not already set try: if batch_size: pass except NameError: batch_size = 64 save_model_path = './image_classification' n_samples = 4 top_n_predictions = 3 def test_model(): """ Test the saved model against the test dataset """ test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb')) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load model loader = tf.train.import_meta_graph(save_model_path + '.meta') loader.restore(sess, save_model_path) # Get Tensors from loaded model loaded_x = loaded_graph.get_tensor_by_name('x:0') loaded_y = loaded_graph.get_tensor_by_name('y:0') loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') loaded_logits = loaded_graph.get_tensor_by_name('logits:0') loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0') # Get accuracy in batches for memory limitations test_batch_acc_total = 0 test_batch_count = 0 for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size): test_batch_acc_total += sess.run( loaded_acc, feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0}) test_batch_count += 1 print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count)) # Print Random Samples random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples))) random_test_predictions = sess.run( tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions), feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0}) helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions) test_model() """ Explanation: Checkpoint The model has been saved to disk. Test Model Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters. End of explanation """
astro4dev/OAD-Data-Science-Toolkit
Teaching Materials/Machine Learning/Supervised Learning/Examples/PPC/Predicting_Pulsar_Candidates.ipynb
gpl-3.0
# For numerical stuff
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 7.0)
# Some preprocessing utilities
from sklearn.cross_validation import train_test_split # Data splitting
from sklearn.utils import shuffle
# The different classifiers
from sklearn.neighbors import KNeighborsClassifier # Nearest Neighbor - Analogizer
from sklearn.naive_bayes import GaussianNB # Bayesian Classifier - Bayesian
from sklearn.neural_network import MLPClassifier # Neural Network - Connectionist
# Model result function
from sklearn.metrics import classification_report,accuracy_score
"""
Explanation: Build simple models to predict pulsar candidates
In this notebook we will look at building machine learning models to predict pulsar candidates. The data comes from Rob Lyon at Manchester and is publicly available. For more information, check out https://figshare.com/articles/HTRU2/3080389/1
Let's start with the basic imports.
End of explanation
"""
data = pd.read_csv('Data/pulsar.csv')

# Show some information
print ('Dataset has %d rows and %d columns including features and labels'%(data.shape[0],data.shape[1]))
"""
Explanation: Load dataset
The data is a CSV file with each column a feature and each row a sample of a positive or negative candidate.
The class label is the last column, where "1" corresponds to a true pulsar candidate and "0" to a false candidate.
End of explanation
"""
print (data.columns.values[0:-1])
"""
Explanation: Let's print the feature names:
End of explanation
"""
ax = plt.figure().gca(projection='3d')
ax.scatter3D(data['std_pf'], data['mean_dm'], data['mean_int_pf'],c=data['class'],alpha=.25)
ax.set_xlabel('std_pf')
ax.set_ylabel('mean_dm')
ax.set_zlabel('mean_int_pf')
"""
Explanation: Do a scatter plot
End of explanation
"""
# Let's shuffle the rows of the data 10 times
for i in range(10):
    data = shuffle(data)

# Now split the dataset into separate variables for features and labels
features = data.ix[:,data.columns != 'class'].values # All columns except class
labels = data['class'].values # Class labels
"""
Explanation: Get the features and labels
End of explanation
"""
# Do a 70 - 30 split of the whole data for training and testing
# The last argument specifies the fraction of samples for testing
train_data,test_data,train_labels,test_labels = train_test_split(features,labels,test_size=.3)

#Print some info
print ('Number of training data points : %d'%(train_data.shape[0]))
print ('Number of testing data points : %d'%(test_data.shape[0]))
"""
Explanation: Split data to training and validation sets
End of explanation
"""
# K nearest neighbor
knn = KNeighborsClassifier()
knn.fit(train_data,train_labels)
"""
Explanation: Let's do the training on different algorithms
We will be using the following algorithms:
k-Nearest Neighbours (KNN) [ https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm ]
Naive Bayes Classifier [ https://en.wikipedia.org/wiki/Naive_Bayes_classifier ]
Multilayer Neural Network [ https://en.wikipedia.org/wiki/Multilayer_perceptron ]
Let's start with the default model parameters for each classifier.
Check the link above each block for the function definition.
Scikit KNN
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
End of explanation
"""
# Naive Bayes
nb = GaussianNB()
nb.fit(train_data,train_labels)
"""
Explanation: Scikit Naive Bayes
http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html
End of explanation
"""
# MLP
mlp = MLPClassifier(solver='sgd',hidden_layer_sizes=(5, 1))
mlp.fit(train_data,train_labels)
"""
Explanation: Scikit MLP
https://en.wikipedia.org/wiki/Multilayer_perceptron
End of explanation
"""
# Pretty function to test a model and print accuracy score
def evaluate(model,modelname,test_data,test_labels):
    predictions = model.predict(test_data) # Do the actual prediction
    print('====================================================')
    print('Classification Report for %s'%modelname)
    print('====================================================')
    print(classification_report(test_labels,predictions,target_names=['Non Pulsar','Pulsar']))
    print('\n The model is %f accurate' %(accuracy_score(test_labels,predictions)*100))
    print('====================================================\n\n')

# Making some stuff easy
models =[knn,nb,mlp]
model_names =['KNN','Naive Bayes','Neural Network']
"""
Explanation: Fancy function to print results for model evaluation
End of explanation
"""
for i in range(0,3):
    evaluate(models[i],model_names[i],test_data,test_labels)
"""
Explanation: Now let's test each classifier and display its accuracy
End of explanation
"""
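"""
Explanation: As a possible extension (an illustrative sketch, not part of the original notebook), a confusion matrix shows where each classifier goes wrong in more detail than accuracy alone:
```
from sklearn.metrics import confusion_matrix

for model, name in zip(models, model_names):
    print(name)
    print(confusion_matrix(test_labels, model.predict(test_data)))
```
End of explanation
"""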
max-ionov/rucoref
notebooks/first-mention.ipynb
lgpl-3.0
%cd '/Users/max/Projects/Coreference/' %cd 'rucoref' from anaphoralib.corpora import rueval from anaphoralib.tagsets import multeast from anaphoralib.experiments.base import BaseClassifier from anaphoralib import utils from anaphoralib.experiments import utils as exp_utils %cd '..' from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from imblearn.over_sampling import BorderlineSMOTE import numpy as np %matplotlib inline lists_dir = 'CICLing-2016/wordlists' texts_dir = 'Corpus-2015/Tokens.txt' gs_dir = 'Corpus-2015/Groups.txt' tagset = multeast random_state = 42 """ Explanation: Experiment for the paper "Features for discourse-new referent detection in Russian Replication of CICLing-2016 paper (Toldova and Ionov 2016) To reproduce this experiment you will need: 1. RuCor corpus (from 2015-10-29) 2. Python modules: * scikit-learn (v. 0.22.1) * imbalanced-learn (v. 0.6.2) * matplotlib (v. 3.1.3) 2. anaphoralib Python module Since anaphoralib is in an early stage of development, there is no way to install it yet, so in order to import it, you should cd to the folder with the module. Paths to the corpus should be updated accordingly. End of explanation """ rucoref = rueval.RuCorefCorpus(multeast, rueval) exp_utils.load_corpus(rucoref, texts_dir, gs_dir) rucoref.groups[0][:30] rucoref.print_stats() rucoref.create_indices() """ Explanation: Reading the texts from GS and matching them to actual texts Loading chains and GS End of explanation """ def load_list(filename): data = set() with open(filename, encoding='utf-8') as inp_file: for line in inp_file: data.add(line.strip('\r\n')) return data import os wordlists = {} for filename in os.listdir(lists_dir): wordlists[filename.replace('.txt', '')] = load_list(os.path.join(lists_dir, filename)) print(wordlists.keys()) """ Explanation: Loading special lists Special lists load from the directory stored in lists_dir End of explanation """ import collections word_index = [] group_index = [] for i, text in enumerate(rucoref.texts): word_index.append(collections.defaultdict(set)) group_index.append(collections.defaultdict(set)) for word in text: word_index[-1]['_'.join(word.lemma)].add(word.offset) for group in rucoref.groups[i]: for g in group.iter_groups(): group_index[-1]['_'.join(g.lemma)].add(g.offset) print('\n'.join(list(group_index[0].keys())[:30])) """ Explanation: Building indices and dictionaries Building additional indices (of all words and all groups): End of explanation """ adjectives = set() for text in rucoref.texts: for word in text: if tagset.pos_filters['adj'](word) and (len(word.tag) < 7 or word.tag[6] == 'f'): adjectives.add('_'.join(word.lemma)) adjectives = list(adjectives) adjectives pronouns = set() for text in rucoref.texts: for word in text: if tagset.pos_filters['pronoun'](word): pronouns.add('_'.join(word.lemma)) pronouns = list(pronouns) pronouns """ Explanation: Building sets of adjectives and pronouns for feature selection: End of explanation """ import re class FirstMentionClassifier(BaseClassifier): def __init__(self): super(FirstMentionClassifier, self).__init__() self.feat_zones_ = ('struct', 'string', 'lists') self.stats = {'str_matches_before', 'head_matches_before', 'n_adj', 'len_np'} self.rx_lat = re.compile('[A-Za-z]') self.pronouns = {u"его", u"ее", u"её", u"ей", u"ему", u"ею", u"им", u"ими", u"их", u"которая", u"которого", u"которое", u"которой", u"котором", u"которому", u"которую", u"которые", u"который", u"которым", u"которыми", u"которых", u"него", u"нее", 
u"неё", u"ней", u"нем", u"нём", u"нему", u"нею", u"ним", u"ними", u"них", u"он", u"она", u"они", u"оно", u"свое", u"своё", u"своего", u"своей", u"своем", u"своём", u"своему", u"своею", u"свой", u"свои", u"своим", u"своими", u"своих", u"свою", u"своя", u"себе", u"себя", u"собой", u"собою"} self.clear_stats() def get_feature_vector(self, corpus, group, i_text, save_feature_names=False): if save_feature_names: self.feature_names_ = [] vctr = [] group_lemma = '_'.join(group.lemma) group_occurrences = group_index[i_text][group_lemma] if group_lemma in group_index[i_text] else [] head_index = group.head head_lemma = group.lemma[group.head] head_occurrences = word_index[i_text][head_lemma] if head_lemma in word_index[i_text] else [] head_offset = group.head_offset group_words = group.words if group.type != 'word' else [group] str_matches_before = sum(1 for occ in group_occurrences if occ < group.offset) head_matches_before = sum(1 for occ in head_occurrences if occ < group.offset) adj_in_group = [word for word in group_words[:head_index+1] if tagset.pos_filters['adj'](word)] self.stats['str_matches_before'].append(str_matches_before) self.stats['head_matches_before'].append(head_matches_before) self.stats['n_adj'].append("{}: {}".format(len(adj_in_group), group_lemma)) self.stats['len_np'].append("{}: {}".format(len(group_words), group_lemma)) if 'string' in self.feat_zones_: vctr.append(('str_match_before=0', str_matches_before == 0)) vctr.append(('str_match_before<2', str_matches_before < 2)) vctr.append(('str_match_before<3', str_matches_before < 3)) vctr.append(('str_match_before>2', str_matches_before > 2)) vctr.append(('head_match_before=0', head_matches_before == 0)) vctr.append(('head_match_before<2', head_matches_before < 2)) vctr.append(('head_match_before<3', head_matches_before < 3)) vctr.append(('head_match_before>2', head_matches_before > 2)) vctr.append(('uppercase', all(word.isupper() and len(word) > 1 for word in group.wordform))) #vctr.append(('capitalized', any(word[0].isupper() and len(group.wordform) > 1 for word in group.wordform[1:]))) vctr.append(('latin', any(self.rx_lat.search(word) for word in group.wordform))) vctr.append(('is_proper_noun', corpus.tagset.pos_filters['properNoun'](group))) #vctr.append(('is_pronoun', group.lemma[0] in pronouns)) vctr.append(('is_pronoun', group.wordform[0] in self.pronouns)) #vctr.append(('is_pronoun', multeast.pos_filters['pronoun'](group) or group.wordform[0] in pronouns)) self.n_pronouns += 1 if 'struct' in self.feat_zones_: i_word = corpus.words_index[i_text][group.offset] left_word = corpus.texts[i_text][i_word - 1] if i_word > 0 else None right_word = corpus.texts[i_text][i_word + len(group.wordform) + 1] \ if i_word + len(group.wordform) + 1 < len(corpus.texts[i_text]) else None vctr.append(('conj', bool((left_word and corpus.tagset.pos_filters['conj'](left_word)) or (right_word and corpus.tagset.pos_filters['conj'](right_word))))) vctr.append(('len_np<2', len(group.tags) < 2)) vctr.append(('len_np>2', len(group.tags) > 2)) vctr.append(('n_adj=0', len(adj_in_group) == 0)) vctr.append(('n_adj>1', len(adj_in_group) > 1)) vctr.append(('n_adj>2', len(adj_in_group) > 2)) if 'lists' in self.feat_zones_: for l in wordlists: vctr.append(('in_list_{}'.format(l), any(lemma in wordlists[l] for lemma in group.lemma[:head_index+1]))) if save_feature_names: self.feature_names_ = [feat[0] for feat in vctr] return [int(feat[1]) for feat in vctr] def prepare_data(self, corpus, random_state=42, test_size=0.3, feature_zones=None): if feature_zones: 
self.feat_zones_ = feature_zones self.n_pronouns = 0 self.stats['class'] = [] self.groups = [] self.x_data = [] self.y_data = [] self.cur_data_ = 'Binary, filtered singletons' self.class_names_ = ('non-first', 'first') save_features = True for i_text, text in enumerate(corpus.texts): for i, mention in enumerate(corpus.mentions[i_text]): if i not in rucoref.gs_index[i_text]: continue cur_gs_group_id = corpus.gs_index[i_text][i] cur_chain = corpus.gs[i_text]['chains'][corpus.chains_index[i_text][cur_gs_group_id]] self.y_data.append(self.class_names_.index('first') if cur_gs_group_id == cur_chain[0] else self.class_names_.index('non-first')) group = corpus.heads_index[i_text][mention.offset] self.x_data.append(self.get_feature_vector(corpus, group, i_text, save_features)) self.groups.append(group) self.stats['class'].append(self.class_names_[self.y_data[-1]]) save_features = False pronoun_index = self.feature_names_.index('is_pronoun') if self.x_data[-1][pronoun_index]: self.x_data.pop() self.y_data.pop() self.groups.pop() for key in self.stats: self.stats[key].pop() continue del self.x_data[-1][pronoun_index] super(FirstMentionClassifier, self).prepare_data(corpus, random_state, test_size) del self.feature_names_[pronoun_index] class_numbers = [sum(1 for item in self.y_data if item == cur_class) for cur_class in range(len(self.class_names_))] self.ratio = float(min(class_numbers) / float(max(class_numbers))) """ Explanation: Creating a classifier End of explanation """ first_mention_clf = FirstMentionClassifier() first_mention_clf.prepare_data(rucoref, random_state=random_state, test_size=0.3) first_mention_clf.stats.keys() """ Explanation: Training and testing End of explanation """ def baseline_predict(data): y_pred = np.zeros(len(data)) for i, row in enumerate(data): y_pred[i] = row[0] == 1 return y_pred first_mention_clf.test(y_pred=baseline_predict(first_mention_clf.x_data_test), test_name='baseline') """ Explanation: Baseline Baseline condition: NP is a first mention if there is no such exact string in the text before End of explanation """ first_mention_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string',)) clf = RandomForestClassifier(n_estimators=500, random_state=random_state) sampler = BorderlineSMOTE(sampling_strategy=first_mention_clf.ratio, kind='borderline-1', random_state=random_state) first_mention_clf.fit(clf, sampler) first_mention_clf.test(test_name='string features') first_mention_clf.print_stats() """ Explanation: String features End of explanation """ first_mention_clf = FirstMentionClassifier() first_mention_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct')) clf = RandomForestClassifier(n_estimators=500, random_state=random_state) sampler = BorderlineSMOTE(sampling_strategy=first_mention_clf.ratio, kind='borderline-1', random_state=random_state) first_mention_clf.fit(clf, sampler) first_mention_clf.test(test_name='string+struct features') """ Explanation: String + Struct features End of explanation """ first_mention_clf = FirstMentionClassifier() first_mention_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists')) clf = RandomForestClassifier(n_estimators=500, random_state=random_state) sampler = BorderlineSMOTE(sampling_strategy=first_mention_clf.ratio, kind='borderline-1', random_state=random_state) first_mention_clf.fit(clf, sampler) first_mention_clf.test(test_name='all features') """ Explanation: All features End of explanation """ first_mention_clf = 
FirstMentionClassifier() first_mention_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists')) regr = LogisticRegression(random_state=random_state, max_iter=250) sampler = BorderlineSMOTE(sampling_strategy=first_mention_clf.ratio, kind='borderline-1', random_state=random_state) first_mention_clf.fit(regr, sampler) for i, feat_name in enumerate(first_mention_clf.feature_names_): print('{}: {:.4f}'.format(feat_name, regr.coef_[0,i])) """ Explanation: Calculating feature importances End of explanation """ import sklearn.feature_extraction.text adj_vectorizer = sklearn.feature_extraction.text.CountVectorizer(vocabulary=adjectives) pron_vectorizer = sklearn.feature_extraction.text.CountVectorizer(vocabulary=pronouns) def additional_features(data, vectorizer): additional_features = np.zeros(shape=(len(data), len(vectorizer.vocabulary))) for i, row in enumerate(data): additional_features[i,:] = vectorizer.transform([u' '.join(row.lemma)]).toarray() return additional_features from sklearn.preprocessing import MinMaxScaler def rank_to_dict(ranks, names, order=1): minmax = MinMaxScaler() ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0] ranks = map(lambda x: round(x, 4), ranks) return dict(zip(names, ranks )) add_data_x = additional_features(first_mention_clf.groups_train, adj_vectorizer) adj_clf = RandomForestClassifier(random_state=random_state) adj_clf.fit(add_data_x, first_mention_clf.y_data_train) ranks = rank_to_dict(adj_clf.feature_importances_, adj_vectorizer.vocabulary) for feat_name in sorted(ranks, key=lambda f: ranks[f], reverse=True): print(feat_name, ranks[feat_name]) """ Explanation: Additional actions Counting feature importances for bag-of-adjectives classifier End of explanation """ %matplotlib inline import matplotlib.pyplot as plt import matplotlib import seaborn as sns import anaphoralib.experiments.utils first_mention_clf = FirstMentionClassifier() first_mention_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists')) feature_distributions = {} for feat_name in first_mention_clf.stats: feature_distributions[feat_name] = {cls: [] for cls in first_mention_clf.class_names_ + ('total',)} for i, elem in enumerate(first_mention_clf.stats['class']): feature_distributions[feat_name][elem].append(first_mention_clf.stats[feat_name][i]) feature_distributions[feat_name]['total'].append(first_mention_clf.stats[feat_name][i]) import os anaphoralib.experiments.utils.latexify(columns=2) for feat_name in feature_distributions: if feat_name == 'class': continue anaphoralib.experiments.utils.plot_feature_distribution(feature_distributions[feat_name], range(7), first_mention_clf.class_names_, x_label=feat_name.replace('_', '\\_'), filename=os.path.join('CICLing-2016', feat_name)) from sklearn.model_selection import learning_curve from sklearn.metrics import make_scorer, f1_score from sklearn.utils import shuffle first_mention_clf = FirstMentionClassifier() first_mention_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists')) clf = RandomForestClassifier(n_estimators=500, random_state=random_state) shuffled_x_data, shuffled_y_data = shuffle(first_mention_clf.x_data, first_mention_clf.y_data, random_state=random_state) train_sizes_abs, train_scores, test_scores = learning_curve(clf, shuffled_x_data, shuffled_y_data, cv=3, scoring=make_scorer(f1_score, pos_label=1)) anaphoralib.experiments.utils.latexify(columns=2) 
anaphoralib.experiments.utils.plot_learning_curve(train_sizes_abs, train_scores, test_scores, score_name='f1', filename=os.path.join('CICLing-2016', 'learning_curve_plot')) """ Explanation: Getting feature distributions End of explanation """
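"""
Explanation: A small convenience when reading the logistic-regression coefficients printed earlier (an illustrative sketch, not part of the original experiment): sorting them by absolute value makes the strongest features easier to spot.
```
for name, coef in sorted(zip(first_mention_clf.feature_names_, regr.coef_[0]),
                         key=lambda pair: abs(pair[1]), reverse=True):
    print('{}: {:.4f}'.format(name, coef))
```
End of explanation
"""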
enbanuel/phys202-2015-work
assignments/assignment04/MatplotlibExercises.ipynb
mit
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
"""
Explanation: Visualization 1: Matplotlib Basics Exercises
End of explanation
"""
x = np.random.rand(100)
y = np.random.rand(100)

plt.scatter(x, y, color='orange', s=70, alpha=0.8)
plt.xlabel('X')
plt.ylabel('Y')
plt.title('X v. Y Scatter')
"""
Explanation: Scatter plots
Learn how to use Matplotlib's plt.scatter function to make a 2d scatter plot.
Generate random data using np.random.randn.
Style the markers (color, size, shape, alpha) appropriately.
Include an x and y label and title.
End of explanation
"""
x = np.random.rand(100)

plt.hist(x, bins=25, color='green')
plt.xlabel('X')
plt.ylabel('Y(X)')
plt.title('Random Histogram')
"""
Explanation: Histogram
Learn how to use Matplotlib's plt.hist function to make a 1d histogram.
Generate random data using np.random.randn.
Figure out how to set the number of histogram bins and other style options.
Include an x and y label and title.
End of explanation
"""
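"""
Explanation: Note that the exercises ask for np.random.randn (normally distributed data), while the solutions above use np.random.rand (uniform on [0, 1)). A variant of the histogram that follows the exercise text might look like this (an illustrative sketch):
```
x = np.random.randn(1000)

plt.hist(x, bins=30, color='green', edgecolor='black', alpha=0.8)
plt.xlabel('X')
plt.ylabel('Count')
plt.title('Normally Distributed Histogram')
```
End of explanation
"""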
Kaggle/learntools
notebooks/sql/raw/tut4.ipynb
apache-2.0
#$HIDE_INPUT$ from google.cloud import bigquery # Create a "Client" object client = bigquery.Client() # Construct a reference to the "nhtsa_traffic_fatalities" dataset dataset_ref = client.dataset("nhtsa_traffic_fatalities", project="bigquery-public-data") # API request - fetch the dataset dataset = client.get_dataset(dataset_ref) # Construct a reference to the "accident_2015" table table_ref = dataset_ref.table("accident_2015") # API request - fetch the table table = client.get_table(table_ref) # Preview the first five lines of the "accident_2015" table client.list_rows(table, max_results=5).to_dataframe() """ Explanation: Introduction So far, you've learned how to use several SQL clauses. For instance, you know how to use SELECT to pull specific columns from a table, along with WHERE to pull rows that meet specified criteria. You also know how to use aggregate functions like COUNT(), along with GROUP BY to treat multiple rows as a single group. Now you'll learn how to change the order of your results using the ORDER BY clause, and you'll explore a popular use case by applying ordering to dates. To illustrate what you'll learn in this tutorial, we'll work with a slightly modified version of our familiar pets table. ORDER BY ORDER BY is usually the last clause in your query, and it sorts the results returned by the rest of your query. Notice that the rows are not ordered by the ID column. We can quickly remedy this with the query below. The ORDER BY clause also works for columns containing text, where the results show up in alphabetical order. You can reverse the order using the DESC argument (short for 'descending'). The next query sorts the table by the Animal column, where the values that are last in alphabetic order are returned first. Dates Next, we'll talk about dates, because they come up very frequently in real-world databases. There are two ways that dates can be stored in BigQuery: as a DATE or as a DATETIME. The DATE format has the year first, then the month, and then the day. It looks like this: YYYY-[M]M-[D]D YYYY: Four-digit year [M]M: One or two digit month [D]D: One or two digit day So 2019-01-10 is interpreted as January 10, 2019. The DATETIME format is like the date format ... but with time added at the end. EXTRACT Often you'll want to look at part of a date, like the year or the day. You can do this with EXTRACT. We'll illustrate this with a slightly different table, called pets_with_date. The query below returns two columns, where column Day contains the day corresponding to each entry the Date column from the pets_with_date table: SQL is very smart about dates, and we can ask for information beyond just extracting part of the cell. For example, this query returns one column with just the week in the year (between 1 and 53) for each date in the Date column: You can find all the functions you can use with dates in BigQuery in this documentation under "Date and time functions". Example: Which day of the week has the most fatal motor accidents? Let's use the US Traffic Fatality Records database, which contains information on traffic accidents in the US where at least one person died. We'll investigate the accident_2015 table. Here is a view of the first few rows. (We have hidden the corresponding code. To take a peek, click on the "Code" button below.) 
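For reference, the example queries for the pets table described above are not included as code cells in this raw notebook; rough sketches of what they might look like are below (the project, dataset, and table names here are placeholders, not real BigQuery resources):
```
# Sort the pets table so the Animal values that are last in alphabetical order come first
sort_query = (
    "SELECT ID, Name, Animal "
    "FROM `hypothetical_project.pet_records.pets` "
    "ORDER BY Animal DESC"
)

# Pull the day and the week of the year out of the Date column
extract_query = (
    "SELECT Name, EXTRACT(DAY FROM Date) AS Day, EXTRACT(WEEK FROM Date) AS Week "
    "FROM `hypothetical_project.pet_records.pets_with_date`"
)
```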
End of explanation """ # Query to find out the number of accidents for each day of the week query = """ SELECT COUNT(consecutive_number) AS num_accidents, EXTRACT(DAYOFWEEK FROM timestamp_of_crash) AS day_of_week FROM `bigquery-public-data.nhtsa_traffic_fatalities.accident_2015` GROUP BY day_of_week ORDER BY num_accidents DESC """ """ Explanation: Let's use the table to determine how the number of accidents varies with the day of the week. Since: - the consecutive_number column contains a unique ID for each accident, and - the timestamp_of_crash column contains the date of the accident in DATETIME format, we can: - EXTRACT the day of the week (as day_of_week in the query below) from the timestamp_of_crash column, and - GROUP BY the day of the week, before we COUNT the consecutive_number column to determine the number of accidents for each day of the week. Then we sort the table with an ORDER BY clause, so the days with the most accidents are returned first. End of explanation """ # Set up the query (cancel the query if it would use too much of # your quota, with the limit set to 1 GB) safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**9) query_job = client.query(query, job_config=safe_config) # API request - run the query, and convert the results to a pandas DataFrame accidents_by_day = query_job.to_dataframe() # Print the DataFrame accidents_by_day """ Explanation: As usual, we run it as follows: End of explanation """
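"""
Explanation: The day_of_week values come back as plain numbers. As a follow-up sketch (not part of the original tutorial), BigQuery's DAYOFWEEK runs from 1 (Sunday) to 7 (Saturday), so mapping the numbers to names makes the result easier to read:
```
day_names = {1: 'Sunday', 2: 'Monday', 3: 'Tuesday', 4: 'Wednesday',
             5: 'Thursday', 6: 'Friday', 7: 'Saturday'}
accidents_by_day['day_of_week'].map(day_names)
```
End of explanation
"""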
GoogleCloudPlatform/python-docs-samples
notebooks/tutorials/cloud-ml-engine/Training and prediction with scikit-learn.ipynb
apache-2.0
!gcloud services enable ml.googleapis.com !gcloud services enable compute.googleapis.com """ Explanation: Training and prediction with scikit-learn This notebook demonstrates how to use AI Platform to train a simple classification model using scikit-learn, and then deploy the model to get predictions. You train the model to predict a person's income level based on the Census Income data set. Before you jump in, let’s cover some of the different tools you’ll be using: AI Platform is a managed service that enables you to easily build machine learning models that work on any type of data, of any size. Cloud Storage is a unified object storage for developers and enterprises, from live data serving to data analytics/ML to data archiving. Cloud SDK is a command line tool which allows you to interact with Google Cloud products. This notebook introduces several gcloud and gsutil commands, which are part of the Cloud SDK. Note that shell commands in a notebook must be prepended with a !. Set up your environment Enable the required APIs In order to use AI Platform, confirm that the required APIs are enabled: End of explanation """ BUCKET_NAME = 'your-new-bucket' """ Explanation: Create a storage bucket Buckets are the basic containers that hold your data. Everything that you store in Cloud Storage must be contained in a bucket. You can use buckets to organize your data and control access to your data. Start by defining a globally unique name. For more information about naming buckets, see Bucket name requirements. End of explanation """ !gsutil mb gs://$BUCKET_NAME/ """ Explanation: In the examples below, the BUCKET_NAME variable is referenced in the commands using $. Create the new bucket with the gsutil mb command: End of explanation """ !mkdir census_training """ Explanation: About the data The Census Income Data Set that this sample uses for training is provided by the UC Irvine Machine Learning Repository. Census data courtesy of: Lichman, M. (2013). UCI Machine Learning Repository http://archive.ics.uci.edu/ml. Irvine, CA: University of California, School of Information and Computer Science. This dataset is publicly available for anyone to use under the following terms provided by the Dataset Source - http://archive.ics.uci.edu/ml - and is provided "AS IS" without any warranty, express or implied, from Google. Google disclaims all liability for any damages, direct or indirect, resulting from the use of the dataset. The data used in this tutorial is located in a public Cloud Storage bucket: gs://cloud-samples-data/ml-engine/sklearn/census_data/ The training file is adult.data (download) and the evaluation file is adult.test (download). The evaluation file is not used in this tutorial. Create training application package The easiest (and recommended) way to create a training application package is to use gcloud to package and upload the application when you submit your training job. This method allows you to create a very simple file structure with only two files. 
For this tutorial, the file structure of your training application package should appear similar to the following: census_training/ __init__.py train.py Create a directory locally: End of explanation """ !touch ./census_training/__init__.py """ Explanation: Create a blank file named __init__.py: End of explanation """ %%writefile ./census_training/train.py import argparse import pickle import pandas as pd from google.cloud import storage from sklearn.ensemble import RandomForestClassifier from sklearn.feature_selection import SelectKBest from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.preprocessing import LabelBinarizer parser = argparse.ArgumentParser() parser.add_argument("--bucket-name", help="The bucket name", required=True) arguments, unknown = parser.parse_known_args() bucket_name = arguments.bucket_name # Define the format of your input data, including unused columns. # These are the columns from the census data files. COLUMNS = ( 'age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income-level' ) # Categorical columns are columns that need to be turned into a numerical value # to be used by scikit-learn CATEGORICAL_COLUMNS = ( 'workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country' ) # Create a Cloud Storage client to download the census data storage_client = storage.Client() # Download the data public_bucket = storage_client.bucket('cloud-samples-data') blob = public_bucket.blob('ml-engine/sklearn/census_data/adult.data') blob.download_to_filename('adult.data') # Load the training census dataset with open("./adult.data", "r") as train_data: raw_training_data = pd.read_csv(train_data, header=None, names=COLUMNS) # Removing the whitespaces in categorical features for col in CATEGORICAL_COLUMNS: raw_training_data[col] = raw_training_data[col].apply(lambda x: str(x).strip()) # Remove the column we are trying to predict ('income-level') from our features # list and convert the DataFrame to a lists of lists train_features = raw_training_data.drop("income-level", axis=1).values.tolist() # Create our training labels list, convert the DataFrame to a lists of lists train_labels = (raw_training_data["income-level"] == " >50K").values.tolist() # Since the census data set has categorical features, we need to convert # them to numerical values. We'll use a list of pipelines to convert each # categorical column and then use FeatureUnion to combine them before calling # the RandomForestClassifier. categorical_pipelines = [] # Each categorical column needs to be extracted individually and converted to a # numerical value. To do this, each categorical column will use a pipeline that # extracts one feature column via SelectKBest(k=1) and a LabelBinarizer() to # convert the categorical value to a numerical one. A scores array (created # below) will select and extract the feature column. The scores array is # created by iterating over the columns and checking if it is a # categorical column. for i, col in enumerate(COLUMNS[:-1]): if col in CATEGORICAL_COLUMNS: # Create a scores array to get the individual categorical column. 
# Example: # data = [ # 39, 'State-gov', 77516, 'Bachelors', 13, 'Never-married', # 'Adm-clerical', 'Not-in-family', 'White', 'Male', 2174, 0, # 40, 'United-States' # ] # scores = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # # Returns: [['State-gov']] # Build the scores array scores = [0] * len(COLUMNS[:-1]) # This column is the categorical column we want to extract. scores[i] = 1 skb = SelectKBest(k=1) skb.scores_ = scores # Convert the categorical column to a numerical value lbn = LabelBinarizer() r = skb.transform(train_features) lbn.fit(r) # Create the pipeline to extract the categorical feature categorical_pipelines.append( ( 'categorical-{}'.format(i), Pipeline([ ('SKB-{}'.format(i), skb), ('LBN-{}'.format(i), lbn)]) ) ) # Create pipeline to extract the numerical features skb = SelectKBest(k=6) # From COLUMNS use the features that are numerical skb.scores_ = [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0] categorical_pipelines.append(("numerical", skb)) # Combine all the features using FeatureUnion preprocess = FeatureUnion(categorical_pipelines) # Create the classifier classifier = RandomForestClassifier() # Transform the features and fit them to the classifier classifier.fit(preprocess.transform(train_features), train_labels) # Create the overall model as a single pipeline pipeline = Pipeline([("union", preprocess), ("classifier", classifier)]) # Create the model file # It is required to name the model file "model.pkl" if you are using pickle model_filename = "model.pkl" with open(model_filename, "wb") as model_file: pickle.dump(pipeline, model_file) # Upload the model to Cloud Storage bucket = storage_client.bucket(bucket_name) blob = bucket.blob(model_filename) blob.upload_from_filename(model_filename) """ Explanation: Save training code in one Python file in the census_training directory. The following cell writes a training file to the census_training directory. The training file performs the following operations: + Loads the data into a pandas DataFrame that can be used by scikit-learn + Fits the model is against the training data + Exports the model with the Python pickle library The following model training code is not executed within this notebook. Instead, it is saved to a Python file and packaged as a Python module that runs on AI Platform after you submit the training job. End of explanation """ import time # Define a timestamped job name JOB_NAME = "census_training_{}".format(int(time.time())) # Submit the training job: !gcloud ai-platform jobs submit training $JOB_NAME \ --job-dir gs://$BUCKET_NAME/census_job_dir \ --package-path ./census_training \ --module-name census_training.train \ --region us-central1 \ --runtime-version=1.12 \ --python-version=3.5 \ --scale-tier BASIC \ --stream-logs \ -- \ --bucket-name $BUCKET_NAME """ Explanation: Submit the training job In this section, you use gcloud ai-platform jobs submit training to submit your training job. The -- argument passed to the command is a separator; anything after the separator will be passed to the Python code as input arguments. For more information about the arguments preceeding the separator, run the following: gcloud ai-platform jobs submit training --help The argument given to the python script is --bucket-name. The --bucket-name argument is used to specify the name of the bucket to save the model file. 
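To make the separator concrete, here is a minimal sketch (not part of the tutorial) of how the arguments after -- reach your code: AI Platform forwards them to your Python module unchanged, and the module picks them up with argparse, just as train.py does above:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--bucket-name", help="The bucket name", required=True)
# simulate the user arguments that appear after the `--` separator
args, unknown = parser.parse_known_args(["--bucket-name", "your-new-bucket"])
print(args.bucket_name)  # -> 'your-new-bucket'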
End of explanation """ !gsutil ls gs://$BUCKET_NAME/ """ Explanation: Verify model file in Cloud Storage View the contents of the destination model directory to verify that your model file has been uploaded to Cloud Storage. Note: The model can take a few minutes to train and show up in Cloud Storage. End of explanation """ MODEL_NAME = "CensusPredictor" VERSION_NAME = "census_predictor_{}".format(int(time.time())) """ Explanation: Serve the model Once the model is successfully created and trained, you can serve it. A model can have different versions. In order to serve the model, create a model and version in AI Platform. Define the model and version names: End of explanation """ !gcloud ai-platform models create $MODEL_NAME --regions us-central1 """ Explanation: Create the model in AI Platform: End of explanation """ !gcloud ai-platform versions create $VERSION_NAME \ --model=$MODEL_NAME \ --framework=scikit-learn \ --origin=gs://$BUCKET_NAME/ \ --python-version=3.5 \ --runtime-version=1.12 """ Explanation: Create a version that points to your model file in Cloud Storage: End of explanation """ # Define a name for the input file INPUT_FILE = "./census_training/input.json" %%writefile $INPUT_FILE [25, "Private", 226802, "11th", 7, "Never-married", "Machine-op-inspct", "Own-child", "Black", "Male", 0, 0, 40, "United-States"] [38, "Private", 89814, "HS-grad", 9, "Married-civ-spouse", "Farming-fishing", "Husband", "White", "Male", 0, 0, 50, "United-States"] [28, "Local-gov", 336951, "Assoc-acdm", 12, "Married-civ-spouse", "Protective-serv", "Husband", "White", "Male", 0, 0, 40, "United-States"] [44, "Private", 160323, "Some-college", 10, "Married-civ-spouse", "Machine-op-inspct", "Husband", "Black", "Male", 7688, 0, 40, "United-States"] [18, "?", 103497, "Some-college", 10, "Never-married", "?", "Own-child", "White", "Female", 0, 0, 30, "United-States"] [34, "Private", 198693, "10th", 6, "Never-married", "Other-service", "Not-in-family", "White", "Male", 0, 0, 30, "United-States"] [29, "?", 227026, "HS-grad", 9, "Never-married", "?", "Unmarried", "Black", "Male", 0, 0, 40, "United-States"] [63, "Self-emp-not-inc", 104626, "Prof-school", 15, "Married-civ-spouse", "Prof-specialty", "Husband", "White", "Male", 3103, 0, 32, "United-States"] [24, "Private", 369667, "Some-college", 10, "Never-married", "Other-service", "Unmarried", "White", "Female", 0, 0, 40, "United-States"] [55, "Private", 104996, "7th-8th", 4, "Married-civ-spouse", "Craft-repair", "Husband", "White", "Male", 0, 0, 10, "United-States"] """ Explanation: Make predictions Format data for prediction Before you send an online prediction request, you must format your test data to prepare it for use by the AI Platform prediction service. Make sure that the format of your input instances matches what your model expects. Create an input.json file with each input instance on a separate line. The following example uses ten data instances. Note that the format of input instances needs to match what your model expects. In this example, the Census model requires 14 features, so your input must be a matrix of shape (num_instances, 14). End of explanation """ !gcloud ai-platform predict --model $MODEL_NAME --version \ $VERSION_NAME --json-instances $INPUT_FILE """ Explanation: Send the online prediction request The prediction results return True if the person's income is predicted to be greater than $50,000 per year, and False otherwise. 
The output of the command below may appear similar to the following: [False, False, False, True, False, False, False, False, False, False] End of explanation """ # Delete the model version !gcloud ai-platform versions delete $VERSION_NAME --model=$MODEL_NAME --quiet # Delete the model !gcloud ai-platform models delete $MODEL_NAME --quiet # Delete the bucket and contents !gsutil rm -r gs://$BUCKET_NAME # Delete the local files created by the tutorial !rm -rf census_training """ Explanation: Clean up To delete all resources you created in this tutorial, run the following commands: End of explanation """
lionell/university-labs
num_methods/first/lab1.ipynb
mit
import sympy as sp                 # SymPy is used throughout this notebook
from sympy.plotting import plot    # `plot` is used to visualise the functions below

EPS = sp.Rational("1e-3")
x = sp.Symbol("x")
"""
Explanation: Laboratory work No. 1
<img src="http://civil.engr.siu.edu/cheval/engr351/Images/ENGR351.jpg" width="500px" height="300px" >
Problem statement
We are given a function $f(x)$ and need to find a root of it, i.e. at least one value of the parameter $x = x_0$ for which $f(x_0) = 0$. If no such value exists, return $null$.
We will consider three different methods for solving this problem: the bisection (dichotomy) method, Newton's method, and the simple iteration method.
Each of these methods has its own advantages and drawbacks, so there is no single best method for this task.
To begin, let us introduce a couple of standard notations: $\epsilon$ and $x$ as symbols of the SymPy library
End of explanation
"""
fun = x * x * x - 2 * x
plot(fun, (x, -2, 2))
"""
Explanation: Let us define the function $fun$ whose root we are going to look for
End of explanation
"""
der = sp.diff(fun, x)
plot(der, (x, -2, 2))
"""
Explanation: And its derivative $der$, which some of the methods need in order to work correctly
End of explanation
"""
def dih(a, b, f=fun, eps=EPS):
    print("[{}; {}]".format(a, b))
    if f.subs(x, a) * f.subs(x, b) > 0:
        return None
    if a > b:
        a, b = b, a
    if (b - a).evalf() <= eps / sp.Integer(2):  # stop once the bracket is shorter than eps / 2
        return a
    m = a + (b - a) / sp.Integer(2)
    if f.subs(x, a) * f.subs(x, m) <= 0:
        return dih(a, m, f, eps)
    else:
        return dih(m, b, f, eps)

res = dih(a=-5, b=sp.Rational('-0.1'))
"Result {}".format(sp.N(res))
"""
Explanation: Bisection (dichotomy) method
The method halves the interval under consideration at every iteration. A necessary condition for applying it is $f(a) \cdot f(b) \le 0$.
Algorithm
Put $l = a, r = b$; then the invariant $f(l) \cdot f(r) \le 0$ holds. Let us show that it is preserved at every iteration.
At each iteration the midpoint $m = \large\frac{l + r}{2}$ is chosen and the condition $f(l) \cdot f(m) \le 0$ is checked. If it holds, the root lies in $[l; m]$; otherwise the root has to be sought in $[m; r]$. We then call the search function recursively on the chosen subinterval.
End of explanation
"""
def newton(x0, f=fun, d=der, eps=EPS):
    x1 = x0 - f.subs(x, x0) / d.subs(x, x0)
    print(x1)
    while sp.Abs(x1 - x0).evalf() > eps / sp.Integer(2):  # iterate until successive approximations are eps/2-close
        x0, x1 = x1, x1 - f.subs(x, x1) / d.subs(x, x1)
        print(x1)
    return x1

res = newton(x0=sp.Rational("0.7"))
"Result {}".format(sp.N(res, 10))
"""
Explanation: Newton's method
The method consists in iterating over tangent lines: starting from an initial guess $x_0$, we repeatedly apply $x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}$ until two consecutive approximations differ by at most $\frac{\epsilon}{2}$.
End of explanation
"""
alpha = sp.Symbol("alpha")
h = x - fun * alpha
h

def simple(x0, alpha, f=fun, eps=EPS):
    h = x - alpha * f
    x1 = h.subs(x, x0)
    print("[{}; {}]".format(x0, x1))
    while abs(x1 - x0) > eps / sp.Integer(2):  # same stopping rule as above
        x0, x1 = x1, h.subs(x, x1)
        print("[{}; {}]".format(x0, x1))
    return x1

res = simple(x0=-3, alpha=1/10)
"Result {}".format(sp.N(res, 10))
"""
Explanation: Simple (fixed-point) iteration method
End of explanation
"""
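An optional cross-check that is not part of the original lab: SymPy can compute the exact roots of $fun = x^3 - 2x$ directly, so the approximate results returned by the three methods above can be compared against $0$ and $\pm\sqrt{2}$:

print(sp.solve(fun, x))   # the exact roots are 0, -sqrt(2) and sqrt(2)
print(sp.N(sp.sqrt(2)))   # ~1.41421356, for comparison with the printed approximations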
kit-cel/wt
wt/vorlesung/ch7_9/random_walk.ipynb
gpl-2.0
# importing import numpy as np import matplotlib.pyplot as plt import matplotlib # showing figures inline %matplotlib inline # plotting options font = {'size' : 20} plt.rc('font', **font) plt.rc('text', usetex=True) matplotlib.rc('figure', figsize=(18, 6) ) """ Explanation: Content and Objective Realizations of a random walk are generated Histogram of values at a given time is shown (thereby demonstrating the central limit theorem) Acf is determined to show that the process is not stationary Import End of explanation """ # Function for Generating Poisson Processes def get_Random_Walk( N ): """ Function generating a Random Walk by adding-up changes distributed uniformly on {+1,-1} IN: N, number of steps/changes to be simulated OUT: X, random walk """ # sample steps uniformly out of { -1, +1 } steps = - 1 + 2 * np.random.randint( 2, size = N - 1 ) # alternative solution #steps = np.random.choice( [-1, 1], size = N - 1 ) # process by summing up increments X = np.append( 0, np.cumsum( steps ) ) return X """ Explanation: Function for Random Walks End of explanation """ # increase N_real if multiple realization should be plotted N_steps = 50 N_trials = 1000 # initialize array for storing multiple processes X_random_walk = np.zeros( ( N_trials, N_steps + 1 ) ) # loop for realizations for n in range( N_trials ): # get realizations of Poisson process X_random_walk[ n, : ] = get_Random_Walk( N_steps + 1) # plotting for n in np.arange( N_trials ): plt.plot( X_random_walk[ n, :], linewidth = 2.0 ) plt.grid( True ) plt.xlabel('$n$') plt.ylabel('$S_n$') plt.margins(.1) # extract end-points results_end = X_random_walk[ : , -1 ] # get histogram num_bins = 2 * N_steps + 1 # ----> EXERCISE: Can you justify the choice of num_bins?! width = 2 bins = np.linspace( -N_steps, N_steps, num_bins, endpoint=True) r_hist = np.histogram( results_end, bins = bins, density = True ) plt.barh( r_hist[1][:-1], 0 + r_hist[0] / np.sum(r_hist[0]) * N_trials / 20 , width, left=N_steps+0.1, color = '#ff7f0e' ) """ Explanation: Showing a Bunch of Realizations for a Random Walk End of explanation """ # sample time and tau vector t = np.arange( 0, N_steps ) tau_acf = np.arange( - N_steps, N_steps ) tau_acf_pos = np.arange( 0, N_steps ) # initialize array for storing multiple processes X_random_walk = np.zeros( ( N_trials, 3 * N_trials ) ) # loop for realizations for n in range( N_trials ): # get realizations of random walk # NOTE: zeros are padded in order to avoid border effects X_random_walk[ n, : len(t) ] = get_Random_Walk( N_steps ) # initialize empty two-dim array (t and tau) acf_2d = np.zeros( ( len(t), len(tau_acf) ) ) # loop for all times for ind_t, val_t in enumerate( t ): # loop for all delays for ind_tau, val_tau in enumerate( tau_acf_pos ): # get acf at according index/time/delay corr = [ X_random_walk[ _n, ind_t + ind_tau ] * X_random_walk[ _n, ind_t ] for _n in range( N_trials ) ] # assign acf # NOTE: Negative shifts are given by acf( -tau ) = acf( tau ) for real-valued processes acf_2d[ ind_t, + ind_tau + len(t) ] = np.sum( corr ) / N_trials acf_2d[ ind_t, - ind_tau + len(t) ] = np.sum( corr ) / N_trials # parameters for meshing T, Tau_acf = np.meshgrid( tau_acf, t ) # plotting plt.contourf( T, Tau_acf , acf_2d[ : , : ] ) plt.xlabel('$\ell$') plt.ylabel('$k$') plt.title('$\\varphi_{XX}(k,\ell)$') plt.colorbar(); """ Explanation: Determining ACF of the Random Walk End of explanation """
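A complementary check that is not part of the original notebook: for steps drawn uniformly from $\{-1, +1\}$ the theoretical variance of $S_n$ equals $n$, so the empirical variance across trials (reusing the zero-padded array X_random_walk filled in the ACF cell above) should grow roughly linearly in $n$ — another way of seeing that the random walk is not stationary.

for n in ( 10, 20, 40 ):
    # columns 0 .. N_steps-1 hold S_0 .. S_{N_steps-1}; the remaining columns are zero padding
    print( n, np.var( X_random_walk[ :, n ] ) )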
tensorflow/docs
site/en/tutorials/text/image_captioning.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2018 The TensorFlow Authors. End of explanation """ import tensorflow as tf # You'll generate plots of attention in order to see which parts of an image # your model focuses on during captioning import matplotlib.pyplot as plt import collections import random import numpy as np import os import time import json from PIL import Image """ Explanation: Image captioning with visual attention <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/text/image_captioning"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/text/image_captioning.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/image_captioning.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/text/image_captioning.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Given an image like the example below, your goal is to generate a caption such as "a surfer riding on a wave". Image Source; License: Public Domain To accomplish this, you'll use an attention-based model, which enables us to see what parts of the image the model focuses on as it generates a caption. The model architecture is similar to Show, Attend and Tell: Neural Image Caption Generation with Visual Attention. This notebook is an end-to-end example. When you run the notebook, it downloads the MS-COCO dataset, preprocesses and caches a subset of images using Inception V3, trains an encoder-decoder model, and generates captions on new images using the trained model. In this example, you will train a model on a relatively small amount of data—the first 30,000 captions for about 20,000 images (because there are multiple captions per image in the dataset). 
End of explanation """ # Download caption annotation files annotation_folder = '/annotations/' if not os.path.exists(os.path.abspath('.') + annotation_folder): annotation_zip = tf.keras.utils.get_file('captions.zip', cache_subdir=os.path.abspath('.'), origin='http://images.cocodataset.org/annotations/annotations_trainval2014.zip', extract=True) annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json' os.remove(annotation_zip) # Download image files image_folder = '/train2014/' if not os.path.exists(os.path.abspath('.') + image_folder): image_zip = tf.keras.utils.get_file('train2014.zip', cache_subdir=os.path.abspath('.'), origin='http://images.cocodataset.org/zips/train2014.zip', extract=True) PATH = os.path.dirname(image_zip) + image_folder os.remove(image_zip) else: PATH = os.path.abspath('.') + image_folder """ Explanation: Download and prepare the MS-COCO dataset You will use the MS-COCO dataset to train your model. The dataset contains over 82,000 images, each of which has at least 5 different caption annotations. The code below downloads and extracts the dataset automatically. Caution: large download ahead. You'll use the training set, which is a 13GB file. End of explanation """ with open(annotation_file, 'r') as f: annotations = json.load(f) # Group all captions together having the same image ID. image_path_to_caption = collections.defaultdict(list) for val in annotations['annotations']: caption = f"<start> {val['caption']} <end>" image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (val['image_id']) image_path_to_caption[image_path].append(caption) image_paths = list(image_path_to_caption.keys()) random.shuffle(image_paths) # Select the first 6000 image_paths from the shuffled set. # Approximately each image id has 5 captions associated with it, so that will # lead to 30,000 examples. train_image_paths = image_paths[:6000] print(len(train_image_paths)) train_captions = [] img_name_vector = [] for image_path in train_image_paths: caption_list = image_path_to_caption[image_path] train_captions.extend(caption_list) img_name_vector.extend([image_path] * len(caption_list)) print(train_captions[0]) Image.open(img_name_vector[0]) """ Explanation: Optional: limit the size of the training set To speed up training for this tutorial, you'll use a subset of 30,000 captions and their corresponding images to train your model. Choosing to use more data would result in improved captioning quality. End of explanation """ def load_image(image_path): img = tf.io.read_file(image_path) img = tf.io.decode_jpeg(img, channels=3) img = tf.keras.layers.Resizing(299, 299)(img) img = tf.keras.applications.inception_v3.preprocess_input(img) return img, image_path """ Explanation: Preprocess the images using InceptionV3 Next, you will use InceptionV3 (which is pretrained on Imagenet) to classify each image. You will extract features from the last convolutional layer. First, you will convert the images into InceptionV3's expected format by: * Resizing the image to 299px by 299px * Preprocess the images using the preprocess_input method to normalize the image so that it contains pixels in the range of -1 to 1, which matches the format of the images used to train InceptionV3. 
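As an optional sanity check that is not part of the original tutorial, you can run load_image on one of the selected files and confirm the expected shape and value range:

img, path = load_image(train_image_paths[0])
print(path)
print(img.shape)  # expected: (299, 299, 3)
print(float(tf.reduce_min(img)), float(tf.reduce_max(img)))  # values roughly within [-1, 1]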
End of explanation """ image_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet') new_input = image_model.input hidden_layer = image_model.layers[-1].output image_features_extract_model = tf.keras.Model(new_input, hidden_layer) """ Explanation: Initialize InceptionV3 and load the pretrained Imagenet weights Now you'll create a tf.keras model where the output layer is the last convolutional layer in the InceptionV3 architecture. The shape of the output of this layer is 8x8x2048. You use the last convolutional layer because you are using attention in this example. You don't perform this initialization during training because it could become a bottleneck. You forward each image through the network and store the resulting vector in a dictionary (image_name --> feature_vector). After all the images are passed through the network, you save the dictionary to disk. End of explanation """ # Get unique images encode_train = sorted(set(img_name_vector)) # Feel free to change batch_size according to your system configuration image_dataset = tf.data.Dataset.from_tensor_slices(encode_train) image_dataset = image_dataset.map( load_image, num_parallel_calls=tf.data.AUTOTUNE).batch(16) for img, path in image_dataset: batch_features = image_features_extract_model(img) batch_features = tf.reshape(batch_features, (batch_features.shape[0], -1, batch_features.shape[3])) for bf, p in zip(batch_features, path): path_of_feature = p.numpy().decode("utf-8") np.save(path_of_feature, bf.numpy()) """ Explanation: Caching the features extracted from InceptionV3 You will pre-process each image with InceptionV3 and cache the output to disk. Caching the output in RAM would be faster but also memory intensive, requiring 8 * 8 * 2048 floats per image. At the time of writing, this exceeds the memory limitations of Colab (currently 12GB of memory). Performance could be improved with a more sophisticated caching strategy (for example, by sharding the images to reduce random access disk I/O), but that would require more code. The caching will take about 10 minutes to run in Colab with a GPU. If you'd like to see a progress bar, you can: Install tqdm: !pip install tqdm Import tqdm: from tqdm import tqdm Change the following line: for img, path in image_dataset: to: for img, path in tqdm(image_dataset): End of explanation """ caption_dataset = tf.data.Dataset.from_tensor_slices(train_captions) # We will override the default standardization of TextVectorization to preserve # "<>" characters, so we preserve the tokens for the <start> and <end>. def standardize(inputs): inputs = tf.strings.lower(inputs) return tf.strings.regex_replace(inputs, r"!\"#$%&\(\)\*\+.,-/:;=?@\[\\\]^_`{|}~", "") # Max word count for a caption. max_length = 50 # Use the top 5000 words for a vocabulary. vocabulary_size = 5000 tokenizer = tf.keras.layers.TextVectorization( max_tokens=vocabulary_size, standardize=standardize, output_sequence_length=max_length) # Learn the vocabulary from the caption data. tokenizer.adapt(caption_dataset) # Create the tokenized vectors cap_vector = caption_dataset.map(lambda x: tokenizer(x)) # Create mappings for words to indices and indices to words. 
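# (word_to_index below turns a token string into its integer id, and index_to_word inverts
# that mapping so predicted ids can be decoded back into words when captions are generated.)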
word_to_index = tf.keras.layers.StringLookup( mask_token="", vocabulary=tokenizer.get_vocabulary()) index_to_word = tf.keras.layers.StringLookup( mask_token="", vocabulary=tokenizer.get_vocabulary(), invert=True) """ Explanation: Preprocess and tokenize the captions You will transform the text captions into integer sequences using the TextVectorization layer, with the following steps: Use adapt to iterate over all captions, split the captions into words, and compute a vocabulary of the top 5,000 words (to save memory). Tokenize all captions by mapping each word to its index in the vocabulary. All output sequences will be padded to length 50. Create word-to-index and index-to-word mappings to display results. End of explanation """ img_to_cap_vector = collections.defaultdict(list) for img, cap in zip(img_name_vector, cap_vector): img_to_cap_vector[img].append(cap) # Create training and validation sets using an 80-20 split randomly. img_keys = list(img_to_cap_vector.keys()) random.shuffle(img_keys) slice_index = int(len(img_keys)*0.8) img_name_train_keys, img_name_val_keys = img_keys[:slice_index], img_keys[slice_index:] img_name_train = [] cap_train = [] for imgt in img_name_train_keys: capt_len = len(img_to_cap_vector[imgt]) img_name_train.extend([imgt] * capt_len) cap_train.extend(img_to_cap_vector[imgt]) img_name_val = [] cap_val = [] for imgv in img_name_val_keys: capv_len = len(img_to_cap_vector[imgv]) img_name_val.extend([imgv] * capv_len) cap_val.extend(img_to_cap_vector[imgv]) len(img_name_train), len(cap_train), len(img_name_val), len(cap_val) """ Explanation: Split the data into training and testing End of explanation """ # Feel free to change these parameters according to your system's configuration BATCH_SIZE = 64 BUFFER_SIZE = 1000 embedding_dim = 256 units = 512 num_steps = len(img_name_train) // BATCH_SIZE # Shape of the vector extracted from InceptionV3 is (64, 2048) # These two variables represent that vector shape features_shape = 2048 attention_features_shape = 64 # Load the numpy files def map_func(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')+'.npy') return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train)) # Use map to load the numpy files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( map_func, [item1, item2], [tf.float32, tf.int64]), num_parallel_calls=tf.data.AUTOTUNE) # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE) """ Explanation: Create a tf.data dataset for training Your images and captions are ready! Next, let's create a tf.data dataset to use for training your model. End of explanation """ class BahdanauAttention(tf.keras.Model): def __init__(self, units): super(BahdanauAttention, self).__init__() self.W1 = tf.keras.layers.Dense(units) self.W2 = tf.keras.layers.Dense(units) self.V = tf.keras.layers.Dense(1) def call(self, features, hidden): # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim) # hidden shape == (batch_size, hidden_size) # hidden_with_time_axis shape == (batch_size, 1, hidden_size) hidden_with_time_axis = tf.expand_dims(hidden, 1) # attention_hidden_layer shape == (batch_size, 64, units) attention_hidden_layer = (tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))) # score shape == (batch_size, 64, 1) # This gives you an unnormalized score for each image feature. 
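    # (self.V projects the tanh-activated sum down to a single scalar per spatial location;
    # the softmax below then normalizes these 64 scalars into attention weights.)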
score = self.V(attention_hidden_layer) # attention_weights shape == (batch_size, 64, 1) attention_weights = tf.nn.softmax(score, axis=1) # context_vector shape after sum == (batch_size, hidden_size) context_vector = attention_weights * features context_vector = tf.reduce_sum(context_vector, axis=1) return context_vector, attention_weights class CNN_Encoder(tf.keras.Model): # Since you have already extracted the features and dumped it # This encoder passes those features through a Fully connected layer def __init__(self, embedding_dim): super(CNN_Encoder, self).__init__() # shape after fc == (batch_size, 64, embedding_dim) self.fc = tf.keras.layers.Dense(embedding_dim) def call(self, x): x = self.fc(x) x = tf.nn.relu(x) return x class RNN_Decoder(tf.keras.Model): def __init__(self, embedding_dim, units, vocab_size): super(RNN_Decoder, self).__init__() self.units = units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = tf.keras.layers.GRU(self.units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') self.fc1 = tf.keras.layers.Dense(self.units) self.fc2 = tf.keras.layers.Dense(vocab_size) self.attention = BahdanauAttention(self.units) def call(self, x, features, hidden): # defining attention as a separate model context_vector, attention_weights = self.attention(features, hidden) # x shape after passing through embedding == (batch_size, 1, embedding_dim) x = self.embedding(x) # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size) x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1) # passing the concatenated vector to the GRU output, state = self.gru(x) # shape == (batch_size, max_length, hidden_size) x = self.fc1(output) # x shape == (batch_size * max_length, hidden_size) x = tf.reshape(x, (-1, x.shape[2])) # output shape == (batch_size * max_length, vocab) x = self.fc2(x) return x, state, attention_weights def reset_state(self, batch_size): return tf.zeros((batch_size, self.units)) encoder = CNN_Encoder(embedding_dim) decoder = RNN_Decoder(embedding_dim, units, tokenizer.vocabulary_size()) optimizer = tf.keras.optimizers.Adam() loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_mean(loss_) """ Explanation: Model Fun fact: the decoder below is identical to the one in the example for Neural Machine Translation with Attention. The model architecture is inspired by the Show, Attend and Tell paper. In this example, you extract the features from the lower convolutional layer of InceptionV3 giving us a vector of shape (8, 8, 2048). You squash that to a shape of (64, 2048). This vector is then passed through the CNN Encoder (which consists of a single Fully connected layer). The RNN (here GRU) attends over the image to predict the next word. 
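As a quick, optional sanity check that is not part of the original tutorial, a single decoder step on one batch (reusing the dataset, encoder, decoder and word_to_index defined above) confirms the expected shapes before training starts:

example_img_batch, example_cap_batch = next(iter(dataset))
example_features = encoder(example_img_batch)  # (batch_size, 64, embedding_dim)
hidden = decoder.reset_state(batch_size=example_cap_batch.shape[0])
dec_input = tf.expand_dims([word_to_index('<start>')] * example_cap_batch.shape[0], 1)
predictions, hidden, attention_weights = decoder(dec_input, example_features, hidden)
print(example_features.shape, predictions.shape, attention_weights.shape)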
End of explanation """ checkpoint_path = "./checkpoints/train" ckpt = tf.train.Checkpoint(encoder=encoder, decoder=decoder, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) start_epoch = 0 if ckpt_manager.latest_checkpoint: start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1]) # restoring the latest checkpoint in checkpoint_path ckpt.restore(ckpt_manager.latest_checkpoint) """ Explanation: Checkpoint End of explanation """ # adding this in a separate cell because if you run the training cell # many times, the loss_plot array will be reset loss_plot = [] @tf.function def train_step(img_tensor, target): loss = 0 # initializing the hidden state for each batch # because the captions are not related from image to image hidden = decoder.reset_state(batch_size=target.shape[0]) dec_input = tf.expand_dims([word_to_index('<start>')] * target.shape[0], 1) with tf.GradientTape() as tape: features = encoder(img_tensor) for i in range(1, target.shape[1]): # passing the features through the decoder predictions, hidden, _ = decoder(dec_input, features, hidden) loss += loss_function(target[:, i], predictions) # using teacher forcing dec_input = tf.expand_dims(target[:, i], 1) total_loss = (loss / int(target.shape[1])) trainable_variables = encoder.trainable_variables + decoder.trainable_variables gradients = tape.gradient(loss, trainable_variables) optimizer.apply_gradients(zip(gradients, trainable_variables)) return loss, total_loss EPOCHS = 20 for epoch in range(start_epoch, EPOCHS): start = time.time() total_loss = 0 for (batch, (img_tensor, target)) in enumerate(dataset): batch_loss, t_loss = train_step(img_tensor, target) total_loss += t_loss if batch % 100 == 0: average_batch_loss = batch_loss.numpy()/int(target.shape[1]) print(f'Epoch {epoch+1} Batch {batch} Loss {average_batch_loss:.4f}') # storing the epoch end loss value to plot later loss_plot.append(total_loss / num_steps) if epoch % 5 == 0: ckpt_manager.save() print(f'Epoch {epoch+1} Loss {total_loss/num_steps:.6f}') print(f'Time taken for 1 epoch {time.time()-start:.2f} sec\n') plt.plot(loss_plot) plt.xlabel('Epochs') plt.ylabel('Loss') plt.title('Loss Plot') plt.show() """ Explanation: Training You extract the features stored in the respective .npy files and then pass those features through the encoder. The encoder output, hidden state(initialized to 0) and the decoder input (which is the start token) is passed to the decoder. The decoder returns the predictions and the decoder hidden state. The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss. Use teacher forcing to decide the next input to the decoder. Teacher forcing is the technique where the target word is passed as the next input to the decoder. The final step is to calculate the gradients and apply it to the optimizer and backpropagate. 
End of explanation """ def evaluate(image): attention_plot = np.zeros((max_length, attention_features_shape)) hidden = decoder.reset_state(batch_size=1) temp_input = tf.expand_dims(load_image(image)[0], 0) img_tensor_val = image_features_extract_model(temp_input) img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3])) features = encoder(img_tensor_val) dec_input = tf.expand_dims([word_to_index('<start>')], 0) result = [] for i in range(max_length): predictions, hidden, attention_weights = decoder(dec_input, features, hidden) attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy() predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy() predicted_word = tf.compat.as_text(index_to_word(predicted_id).numpy()) result.append(predicted_word) if predicted_word == '<end>': return result, attention_plot dec_input = tf.expand_dims([predicted_id], 0) attention_plot = attention_plot[:len(result), :] return result, attention_plot def plot_attention(image, result, attention_plot): temp_image = np.array(Image.open(image)) fig = plt.figure(figsize=(10, 10)) len_result = len(result) for i in range(len_result): temp_att = np.resize(attention_plot[i], (8, 8)) grid_size = max(int(np.ceil(len_result/2)), 2) ax = fig.add_subplot(grid_size, grid_size, i+1) ax.set_title(result[i]) img = ax.imshow(temp_image) ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent()) plt.tight_layout() plt.show() # captions on the validation set rid = np.random.randint(0, len(img_name_val)) image = img_name_val[rid] real_caption = ' '.join([tf.compat.as_text(index_to_word(i).numpy()) for i in cap_val[rid] if i not in [0]]) result, attention_plot = evaluate(image) print('Real Caption:', real_caption) print('Prediction Caption:', ' '.join(result)) plot_attention(image, result, attention_plot) """ Explanation: Caption! The evaluate function is similar to the training loop, except you don't use teacher forcing here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output. Stop predicting when the model predicts the end token. And store the attention weights for every time step. End of explanation """ image_url = 'https://tensorflow.org/images/surf.jpg' image_extension = image_url[-4:] image_path = tf.keras.utils.get_file('image'+image_extension, origin=image_url) result, attention_plot = evaluate(image_path) print('Prediction Caption:', ' '.join(result)) plot_attention(image_path, result, attention_plot) # opening the image Image.open(image_path) """ Explanation: Try it on your own images For fun, below you're provided a method you can use to caption your own images with the model you've just trained. Keep in mind, it was trained on a relatively small amount of data, and your images may be different from the training data (so be prepared for weird results!) End of explanation """
ajayrfhp/dvd
examples/.ipynb_checkpoints/MNIST-checkpoint.ipynb
mit
import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=False) img = mnist.train.images[123] img = np.reshape(img,(28,28)) plt.imshow(img, cmap = 'gray') plt.show() img = np.reshape(img,(28,28,1)) print img.shape, 'label = ', mnist.train.labels[123] from dvd import dvd img_embedding = dvd.get_embedding_x(img) print img_embedding.shape """ Explanation: MNIST with transfer learning. First let us build a MNIST logistic regression classifier. We will then get better feature embeddings for images by using dvd library. This involves transfer learning. We will compare simple classifier with transfer learnt model for accuracy score. End of explanation """ from sklearn import linear_model from sklearn.metrics import accuracy_score clf = linear_model.LogisticRegression() clf.fit(mnist.train.images, mnist.train.labels) preds = clf.predict(mnist.test.images) print accuracy_score(preds, mnist.test.labels) """ Explanation: Simple logistic Regression End of explanation """ train = np.reshape(mnist.train.images, (mnist.train.images.shape[0],28,28)) print 'initial training shape = ', train.shape train = dvd.get_embedding_X(train) print 'training shape after embedding =', train.shape test = np.reshape(mnist.test.images, (mnist.test.images.shape[0],28,28)) test = dvd.get_embedding_X(test) """ Explanation: Lets get VGG embeddings for train and test input images and convert them to transfer learnt space. End of explanation """ from sklearn import linear_model from sklearn.metrics import accuracy_score clf = linear_model.LogisticRegression() clf.fit(train, mnist.train.labels) preds = clf.predict(test) print accuracy_score(preds, mnist.test.labels) """ Explanation: Model with transfer learnt features End of explanation """
mortada/notebooks
blog/unbiased_variance_estimator.ipynb
apache-2.0
%matplotlib inline import matplotlib.pyplot as plt from IPython.core.pylabtools import figsize figsize(15, 5) import pandas as pd import numpy as np np.random.seed(42) N = 100000 # size of population population = pd.Series(np.random.randint(1, 11, N)) """ Explanation: Variance Estimation In statistics we know that the mean and variance of a population $Y$ are defined to be: \begin{equation} \left{ \begin{aligned} \text{Mean}(Y) &= \mu = \frac{1}{N} \sum_{i=1}^{N} Y_i \ \text{Var}(Y) &= \sigma^2 = \frac{1}{N} \sum_{i=1}^{N} (Y_i - \mu)^2 \ \end{aligned} \right. \end{equation} where $N$ is the size of the population. <!-- PELICAN_END_SUMMARY --> Given the population $Y$, we can draw a sample $X$ and compute statistics for $X$: \begin{equation} \left{ \begin{aligned} \text{Mean}(X) &= \bar{X} = \frac{1}{n} \sum_{j=1}^{n} X_j \ \text{Var}(X) &= s^2 = \frac{1}{n - 1} \sum_{j=1}^{n} (X_j - \bar{X})^2 \ \end{aligned} \right. \end{equation} where lowercase $n$ is the size of the sample, typically a much smaller number than $N$. One detail that is often not clearly explained in introductory statistics is why we should divide by $n - 1$ instead of $n$ in the calculation for the sample variance. Why divide by n - 1? It turns out that we should divide by $n - 1$ because dividing by $n$ would give us a biased estimator of the population variance. Let's look at a concrete example before diving into the math for why. Let's say we have a population of 100,000 data points. These can represent, for instance, a movie rating for each of 100,000 people. End of explanation """ population.mean() ((population - population.mean()) ** 2).sum() / N """ Explanation: We can easily calculate the population mean and population variance: End of explanation """ population.var(ddof=0) """ Explanation: Note that we are dividing by $N$ in the variance calculation, also that in numpy or pandas this is the same as simply using the method var with ddof=0 End of explanation """ samples = {} n = 30 # size of each sample num_samples = 500 # we are drawing 500 samples, each with size n for i in range(num_samples): samples[i] = population.sample(n).reset_index(drop=True) samples = pd.DataFrame(samples) samples.T.tail() """ Explanation: where ddof=0 means to divide by $N$, and ddof=1 means to divide by $N - 1$. Simulation As usual in statistics, the population parameters are often unknown. But we can estimate them by drawing samples from the population. Here we are drawing a random sample of size $30$. As of version 0.16.1, pandas has a convenient Series.sample() function for this: End of explanation """ df = pd.DataFrame({'estimated mean': pd.expanding_mean(samples.mean()), 'actual population mean': pd.Series(population.mean(), index=samples.columns)}) df.plot(ylim=(4.5, 6.5)) """ Explanation: As we expect, if we average all the sample means we can see that the it is a good estimate for the true population mean: End of explanation """ df = pd.DataFrame({'biased var estimate (divide by n)': pd.expanding_mean(samples.var(ddof=0)), 'unbiased var estimate (divide by n - 1)': pd.expanding_mean(samples.var(ddof=1)), 'actual population var': pd.Series(population.var(ddof=0), index=samples.columns)}) df.plot(ylim=(6.5, 10.5)) """ Explanation: Now let's compare the results we would get by using the biased estimator (dividing by $n$) and the unbiased estimator (dividing by $n-1$) End of explanation """
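A short sketch of the algebra behind these plots (not part of the original post): for $n$ i.i.d. draws with mean $\mu$ and variance $\sigma^2$,

$$\mathbb{E}\left[\sum_{j=1}^{n}(X_j - \bar{X})^2\right] = \sum_{j=1}^{n}\mathbb{E}[X_j^2] - n\,\mathbb{E}[\bar{X}^2] = n(\sigma^2 + \mu^2) - n\left(\frac{\sigma^2}{n} + \mu^2\right) = (n - 1)\,\sigma^2,$$

so dividing the sum of squared deviations by $n - 1$ gives an estimator whose expectation is exactly $\sigma^2$, while dividing by $n$ underestimates it by the factor $(n - 1)/n$ — which is why the biased curve sits systematically below the unbiased one in the last plot.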
yandexdataschool/Practical_RL
week09_policy_II/td3_and_sac/hw-continuous-control_pytorch.ipynb
unlicense
!git clone https://github.com/benelot/pybullet-gym lib/pybullet-gym !pip install -e lib/pybullet-gym import gym import numpy as np import pybulletgym """ Explanation: Continuous Control In this notebook you will solve continuous control environment using either Twin Delayed DDPG (TD3) or Soft Actor-Critic (SAC). Both are off-policy algorithms that are current go-to algorithms for continuous control tasks. Select one of these two algorithms (TD3 or SAC) to implement. Both algorithms are extensions of basic Deep Deterministic Policy Gradient (DDPG) algorithm, and DDPG is kind of "DQN with another neural net approximating greedy policy", and all that differs is a set of stabilization tricks: * TD3 trains deterministic policy, while SAC uses stochastic policy. This means that for SAC you can solve exploration-exploitation trade-off by simple sampling from policy, while in TD3 you will have to add noise to your actions. * TD3 proposes to stabilize targets by adding a clipped noise to actions, which slightly prevents overestimation. In SAC, we formally switch to formalism of Maximum Entropy RL and add entropy bonus into our value function. Also both algorithms utilize a twin trick: train two critics and use pessimistic targets by taking minimum from two proposals. Standard trick with target networks is also necessary. We will go through all these tricks step-by-step. SAC is probably less clumsy scheme than TD3, but requires a bit more code to implement. More detailed description of algorithms can be found in Spinning Up documentation: * on DDPG * on TD3 * on SAC Environment For now, let's start with our environment. To run the environment you will need to install pybullet-gym which unlike MuJoCo does not require you to have a license. Recently there were some weird troubles with pybullet :(, if nothing works try ver.2.5.6 : pip install pybullet==2.5.6 To install the library: End of explanation """ env = gym.make("AntPyBulletEnv-v0") # we want to look inside env.render() # examples of states and actions print("observation space: ", env.observation_space, "\nobservations:", env.reset()) print("action space: ", env.action_space, "\naction_sample: ", env.action_space.sample()) """ Explanation: First, we will create an instance of the environment. In pybullet-gym, if render is called before the first reset, then you will (hopefully) see the visualisation of 3d physic environment. End of explanation """ class RandomActor(): def get_action(self, states): assert len(states.shape) == 1, "can't work with batches" return env.action_space.sample() s = env.reset() rewards_per_step = [] actor = RandomActor() for i in range(10000): a = actor.get_action(s) s, r, done, _ = env.step(a) rewards_per_step.append(r) if done: s = env.reset() print("done: ", i) """ Explanation: Let's run random policy and see how it looks. End of explanation """ rewards_per_step[100:110] """ Explanation: So, basically most episodes are 1000 steps long (then happens termination by time), though sometimes we are terminated earlier if simulation discovers some obvious reasons to think that we crashed our ant. Important thing about continuous control tasks like this is that we receive non-trivial signal at each step: End of explanation """ env.close() """ Explanation: This dense signal will guide our optimizations. It also partially explains why off-policy algorithms are more effective and sample-efficient than on-policy algorithms like PPO: 1-step targets are already quite informative. 
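To make the "dense signal" point slightly more concrete, one can summarize the random rollout collected above (a small addition, not part of the original assignment):

print("mean reward per step under the random policy:", np.mean(rewards_per_step))
print("std of reward per step:", np.std(rewards_per_step))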
End of explanation """ from logger import TensorboardSummaries as Summaries env = gym.make("AntPyBulletEnv-v0") env = Summaries(env, "MyFirstWalkingAnt"); state_dim = env.observation_space.shape[0] # dimension of state space (28 numbers) action_dim = env.action_space.shape[0] # dimension of action space (8 numbers) """ Explanation: We will add only one wrapper to our environment to simply write summaries, mainly, the total reward during an episode. End of explanation """ import torch import torch.nn as nn DEVICE = "cuda" if torch.cuda.is_available() else "cpu" class Critic(nn.Module): def __init__(self, state_dim, action_dim): super().__init__() <YOUR CODE> def get_qvalues(self, states, actions): ''' input: states - tensor, (batch_size x features) actions - tensor, (batch_size x actions_dim) output: qvalues - tensor, critic estimation, (batch_size) ''' qvalues = <YOUR CODE> assert len(qvalues.shape) == 1 and qvalues.shape[0] == states.shape[0] return qvalues """ Explanation: Models Let's start with critic model. On the one hand, it will function as an approximation of $Q^*(s, a)$, on the other hand it evaluates current actor $\pi$ and can be viewed as $Q^{\pi}(s, a)$. This critic will take both state $s$ and action $a$ as input and output a scalar value. Recommended architecture is 3-layered MLP. Danger: when models have a scalar output it is a good rule to squeeze it to avoid unexpected broadcasting, since [batch_size, 1] broadcasts with many tensor sizes. End of explanation """ # template for TD3; template for SAC is below class TD3_Actor(nn.Module): def __init__(self, state_dim, action_dim): super().__init__() <YOUR CODE> def get_action(self, states, std_noise=0.1): ''' Used to collect data by interacting with environment, so your have to add some noise to actions. input: states - numpy, (batch_size x features) output: actions - numpy, (batch_size x actions_dim) ''' # no gradient computation is required here since we will use this only for interaction with torch.no_grad(): actions = <YOUR CODE> assert isinstance(actions, (list,np.ndarray)), "convert actions to numpy to send into env" assert actions.max() <= 1. and actions.min() >= -1, "actions must be in the range [-1, 1]" return actions def get_best_action(self, states): ''' Will be used to optimize actor. Requires differentiable w.r.t. parameters actions. input: states - PyTorch tensor, (batch_size x features) output: actions - PyTorch tensor, (batch_size x actions_dim) ''' actions = <YOUR CODE> assert actions.requires_grad, "you must be able to compute gradients through actions" return actions def get_target_action(self, states, std_noise=0.2, clip_eta=0.5): ''' Will be used to create target for critic optimization. Returns actions with added "clipped noise". input: states - PyTorch tensor, (batch_size x features) output: actions - PyTorch tensor, (batch_size x actions_dim) ''' # no gradient computation is required here since we will use this only for interaction with torch.no_grad(): actions = <YOUR CODE> # actions can fly out of [-1, 1] range after added noise return actions.clamp(-1, 1) """ Explanation: Next, let's define a policy, or an actor $\pi$. Use architecture, similar to critic (3-layered MLP). The output depends on algorithm: For TD3, model deterministic policy. You should output action_dim numbers in range $[-1, 1]$. 
Unfortunately, deterministic policies lead to problems with stability and exploration, so we will need three "modes" of how this policy can be operating: * First one - greedy - is a simple feedforward pass through network that will be used to train the actor. * Second one - exploration mode - is when we need to add noise (e.g. Gaussian) to our actions to collect more diverse data. * Third mode - "clipped noised" - will be used when we will require a target for critic, where we need to somehow "noise" our actor output, but not too much, so we add clipped noise to our output: $$\pi_{\theta}(s) + \varepsilon, \quad \varepsilon = \operatorname{clip}(\epsilon, -0.5, 0.5), \epsilon \sim \mathcal{N}(0, \sigma^2 I)$$ End of explanation """ # template for SAC from torch.distributions import Normal class SAC_Actor(nn.Module): def __init__(self, state_dim, action_dim): super().__init__() <YOUR CODE> def apply(self, states): ''' For given batch of states samples actions and also returns its log prob. input: states - PyTorch tensor, (batch_size x features) output: actions - PyTorch tensor, (batch_size x action_dim) log_prob - PyTorch tensor, (batch_size) ''' <YOUR CODE> return actions, log_prob def get_action(self, states): ''' Used to interact with environment by sampling actions from policy input: states - numpy, (batch_size x features) output: actions - numpy, (batch_size x actions_dim) ''' # no gradient computation is required here since we will use this only for interaction with torch.no_grad(): # hint: you can use `apply` method here actions = <YOUR CODE> assert isinstance(actions, (list,np.ndarray)), "convert actions to numpy to send into env" assert actions.max() <= 1. and actions.min() >= -1, "actions must be in the range [-1, 1]" return actions """ Explanation: For SAC, model gaussian policy. This means policy distribution is going to be multivariate normal with diagonal covariance. The policy head will predict the mean and covariance, and it should be guaranteed that covariance is non-negative. Important: the way you model covariance strongly influences optimization procedure, so here are some options: let $f_{\theta}$ be the output of covariance head, then: * use exponential function $\sigma(s) = \exp(f_{\theta}(s))$ * transform output to $[-1, 1]$ using tanh, then project output to some interval $[m, M]$, where $m = -20$, $M = 2$ and then use exponential function. This will guarantee the range of modeled covariance is adequate. So, the resulting formula is: $$\sigma(s) = \exp^{m + 0.5(M - m)(\tanh(f_{\theta}(s)) + 1)}$$ * softplus operation $\sigma(s) = \log(1 + \exp^{f_{\theta}(s)})$ seems to work poorly here. o_O Note: torch.distributions.Normal already has everything you will need to work with such policy after you modeled mean and covariance, i.e. sampling via reparametrization trick (see rsample method) and compute log probability (see log_prob method). There is one more problem with gaussian distribution. We need to force our actions to be in $[-1, 1]$ bound. To achieve this, model unbounded gaussian $\mathcal{N}(\mu_{\theta}(s), \sigma_{\theta}(s)^2I)$, where $\mu$ can be arbitrary. 
Then every time you have samples $u$ from this gaussian policy, squash it using $\operatorname{tanh}$ function to get a sample from $[-1, 1]$: $$u \sim \mathcal{N}(\mu, \sigma^2I)$$ $$a = \operatorname{tanh}(u)$$ Important: after that you are required to use change of variable formula every time you compute likelihood (see appendix C in paper on SAC for details): $$\log p(a \mid \mu, \sigma) = \log p(u \mid \mu, \sigma) - \sum_{i = 1}^D \log (1 - \operatorname{tanh}^2(u_i)),$$ where $D$ is action_dim. In practice, add something like 1e-6 inside logarithm to protect from computational instabilities. End of explanation """ class ReplayBuffer(): def __init__(self, size): """ Create Replay buffer. Parameters ---------- size: int Max number of transitions to store in the buffer. When the buffer overflows the old memories are dropped. Note: for this assignment you can pick any data structure you want. If you want to keep it simple, you can store a list of tuples of (s, a, r, s') in self._storage However you may find out there are faster and/or more memory-efficient ways to do so. """ self._storage = [] self._maxsize = size # OPTIONAL: YOUR CODE def __len__(self): return len(self._storage) def add(self, obs_t, action, reward, obs_tp1, done): ''' Make sure, _storage will not exceed _maxsize. Make sure, FIFO rule is being followed: the oldest examples has to be removed earlier ''' data = (obs_t, action, reward, obs_tp1, done) storage = self._storage maxsize = self._maxsize <YOUR CODE> # add data to storage def sample(self, batch_size): """Sample a batch of experiences. Parameters ---------- batch_size: int How many transitions to sample. Returns ------- obs_batch: np.array batch of observations act_batch: np.array batch of actions executed given obs_batch rew_batch: np.array rewards received as results of executing act_batch next_obs_batch: np.array next set of observations seen after executing act_batch done_mask: np.array done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode and 0 otherwise. """ storage = self._storage <YOUR CODE> # randomly generate batch_size integers # to be used as indexes of samples <YOUR CODE> # collect <s,a,r,s',done> for each index return <YOUR CODE> # <states>, <actions>, <rewards>, <next_states>, <is_done> exp_replay = ReplayBuffer(10) for _ in range(30): exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False) obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample( 5) assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is" def play_and_record(initial_state, agent, env, exp_replay, n_steps=1): """ Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer. Whenever game ends, add record with done=True and reset the game. It is guaranteed that env has done=False when passed to this function. :returns: return sum of rewards over time and the state in which the env stays """ s = initial_state sum_rewards = 0 # Play the game for n_steps as per instructions above for t in range(n_steps): # select action using policy with exploration a = <YOUR CODE> ns, r, done, _ = env.step(a) exp_replay.add(s, a, r, ns, done) s = env.reset() if done else ns sum_rewards += r return sum_rewards, s #testing your code. 
exp_replay = ReplayBuffer(2000) actor = <YOUR ACTOR CLASS>(state_dim, action_dim).to(DEVICE) state = env.reset() play_and_record(state, actor, env, exp_replay, n_steps=1000) # if you're using your own experience replay buffer, some of those tests may need correction. # just make sure you know what your code does assert len(exp_replay) == 1000, "play_and_record should have added exactly 1000 steps, "\ "but instead added %i" % len(exp_replay) is_dones = list(zip(*exp_replay._storage))[-1] assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\ "Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]" % ( np.mean(is_dones), len(exp_replay)) for _ in range(100): obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample( 10) assert obs_batch.shape == next_obs_batch.shape == (10,) + (state_dim,) assert act_batch.shape == ( 10, action_dim), "actions batch should have shape (10, 8) but is instead %s" % str(act_batch.shape) assert reward_batch.shape == ( 10,), "rewards batch should have shape (10,) but is instead %s" % str(reward_batch.shape) assert is_done_batch.shape == ( 10,), "is_done batch should have shape (10,) but is instead %s" % str(is_done_batch.shape) assert [int(i) in (0, 1) for i in is_dones], "is_done should be strictly True or False" print("Well done!") """ Explanation: ReplayBuffer The same as in DQN. You can copy code from your DQN assignment, just check that it works fine with continuous actions (probably it is). Let's recall the interface: * exp_replay.add(obs, act, rw, next_obs, done) - saves (s,a,r,s',done) tuple into the buffer * exp_replay.sample(batch_size) - returns observations, actions, rewards, next_observations and is_done for batch_size random samples. * len(exp_replay) - returns number of elements stored in replay buffer. End of explanation """ gamma=0.99 # discount factor max_buffer_size = 10**5 # size of experience replay start_timesteps = 5000 # size of experience replay when start training timesteps_per_epoch=1 # steps in environment per step of network updates batch_size=128 # batch size for all optimizations max_grad_norm=10 # max grad norm for all optimizations tau=0.005 # speed of updating target networks policy_update_freq=<> # frequency of actor update; vanilla choice is 2 for TD3 or 1 for SAC alpha=0.1 # temperature for SAC # iterations passed n_iterations = 0 """ Explanation: Initialization Let's start initializing our algorithm. Here is our hyperparameters: End of explanation """ # experience replay exp_replay = ReplayBuffer(max_buffer_size) """ Explanation: Here is our experience replay: End of explanation """ # models to train actor = <YOUR ACTOR CLASS>(state_dim, action_dim).to(DEVICE) critic1 = Critic(state_dim, action_dim).to(DEVICE) critic2 = Critic(state_dim, action_dim).to(DEVICE) """ Explanation: Here is our models: two critics and one actor. 
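For instance, if you chose TD3 above, the <YOUR ACTOR CLASS> placeholder would presumably be instantiated as

actor = TD3_Actor(state_dim, action_dim).to(DEVICE)

and as SAC_Actor(state_dim, action_dim).to(DEVICE) if you chose SAC.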
End of explanation """ # target networks: slow-updated copies of actor and two critics target_critic1 = Critic(state_dim, action_dim).to(DEVICE) target_critic2 = Critic(state_dim, action_dim).to(DEVICE) target_actor = TD3_Actor(state_dim, action_dim).to(DEVICE) # comment this line if you chose SAC # initialize them as copies of original models target_critic1.load_state_dict(critic1.state_dict()) target_critic2.load_state_dict(critic2.state_dict()) target_actor.load_state_dict(actor.state_dict()) # comment this line if you chose SAC """ Explanation: To stabilize training, we will require target networks - slow updating copies of our models. In TD3, both critics and actor have their copies, in SAC it is assumed that only critics require target copies while actor is always used fresh. End of explanation """ def update_target_networks(model, target_model): for param, target_param in zip(model.parameters(), target_model.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) """ Explanation: In continuous control, target networks are usually updated using exponential smoothing: $$\theta^{-} \leftarrow \tau \theta + (1 - \tau) \theta^{-},$$ where $\theta^{-}$ are target network weights, $\theta$ - fresh parameters, $\tau$ - hyperparameter. This util function will do it: End of explanation """ # optimizers: for every model we have opt_actor = torch.optim.Adam(actor.parameters(), lr=3e-4) opt_critic1 = torch.optim.Adam(critic1.parameters(), lr=3e-4) opt_critic2 = torch.optim.Adam(critic2.parameters(), lr=3e-4) # just to avoid writing this code three times def optimize(name, model, optimizer, loss): ''' Makes one step of SGD optimization, clips norm with max_grad_norm and logs everything into tensorboard ''' loss = loss.mean() optimizer.zero_grad() loss.backward() grad_norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) optimizer.step() # logging env.writer.add_scalar(name, loss.item(), n_iterations) env.writer.add_scalar(name + "_grad_norm", grad_norm.item(), n_iterations) """ Explanation: Finally, we will have three optimization procedures to train our three models, so let's welcome our three Adams: End of explanation """ def compute_critic_target(rewards, next_states, is_done): ''' Important: use target networks for this method! Do not use "fresh" models except fresh policy in SAC! input: rewards - PyTorch tensor, (batch_size) next_states - PyTorch tensor, (batch_size x features) is_done - PyTorch tensor, (batch_size) output: critic target - PyTorch tensor, (batch_size) ''' with torch.no_grad(): critic_target = <YOUR CODE> assert not critic_target.requires_grad, "target must not require grad." assert len(critic_target.shape) == 1, "dangerous extra dimension in target?" return critic_target """ Explanation: Critic target computation Finally, let's discuss our losses for critic and actor. To train both critics we would like to minimize MSE using 1-step targets: for one sampled transition $(s, a, r, s')$ it should look something like this: $$y(s, a) = r + \gamma V(s').$$ How do we evaluate next state and compute $V(s')$? Well, technically Monte-Carlo estimation looks simple: $$V(s') \approx Q(s', a')$$ where (important!) $a'$ is a sample from our current policy $\pi(a' \mid s')$. But out actor $\pi$ will be actually trained to search for actions $a'$ where our critic gives big estimates, and this straightforward approach leads to serious overesimation issues. We require some hacks. 
First, we will use target networks for $Q$ (and TD3 also uses a target network for $\pi$). Second, we will use two critics and take the minimum across their estimations:
$$V(s') = \min_{i = 1,2} Q^{-}_i(s', a'),$$
where $a'$ is sampled from the target policy $\pi^{-}(a' \mid s')$ in TD3 and from the fresh policy $\pi(a' \mid s')$ in SAC.
And last but not least: in TD3, compute $a'$ as the policy mode plus clipped noise, which prevents the policy from exploiting narrow peaks in the critic approximation; in SAC, add an (estimated) entropy bonus in the next state $s'$:
$$V(s') = \min_{i = 1,2} Q^{-}_i(s', a') - \alpha \log \pi (a' \mid s')$$
End of explanation
"""

def compute_actor_loss(states):
    '''
    Returns actor loss on batch of states
    input:
        states - PyTorch tensor, (batch_size x features)
    output:
        actor loss - PyTorch tensor, (batch_size)
    '''
    # make sure you have gradients w.r.t. actor parameters
    actions = <YOUR CODE>
    assert actions.requires_grad, "actions must be differentiable with respect to policy parameters"

    # compute actor loss
    actor_loss = <YOUR CODE>
    return actor_loss

"""
Explanation: To train the actor we simply want to maximize
$$\mathbb{E}_{a \sim \pi(a \mid s)} Q(s, a) \to \max_{\pi}$$
In TD3, because the policy is deterministic, the expectation reduces to:
$$Q(s, \pi(s)) \to \max_{\pi}$$
In SAC, use the reparametrization trick to compute gradients, and do not forget to add the entropy regularizer that keeps the policy as stochastic as possible:
$$\mathbb{E}_{a \sim \pi(a \mid s)} Q(s, a) - \alpha \log \pi(a \mid s) \to \max_{\pi}$$
Note: we will use the (fresh) critic1 here as the Q-function to "exploit". You can also use both critics and again take the minimum across their estimations (this is done in the original implementation of SAC but not in TD3), but this seems to be of minor importance.
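End of explanation
"""

# Illustrative sketch only, not the reference solution. It assumes (an assumption, since
# the classes are defined elsewhere in this notebook) that the actor maps states to actions
# in [-1, 1] and that critic1 can be called as critic1(states, actions), returning Q-values
# of shape (batch_size, 1).
def sketch_td3_actor_loss(states, actor, critic1):
    # actions must stay differentiable w.r.t. the actor parameters, so no torch.no_grad() here
    actions = actor(states)
    q_values = critic1(states, actions).squeeze(-1)
    # gradient ascent on Q is gradient descent on -Q
    return -q_values

"""
Explanation: The helper above is a hypothetical sketch of the TD3 variant of this loss (maximize $Q(s, \pi(s))$ by minimizing its negative); in SAC one would instead draw a reparametrized action sample and minimize $\alpha \log \pi(a \mid s) - Q(s, a)$. The actual call signatures of the actor and critic in this notebook may differ, so treat this only as an illustration of the idea.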
End of explanation """ seed = <YOUR FAVOURITE RANDOM SEED> np.random.seed(seed) env.unwrapped.seed(seed) torch.manual_seed(seed); from tqdm.notebook import trange interaction_state = env.reset() random_actor = RandomActor() for n_iterations in trange(0, 1000000, timesteps_per_epoch): # if experience replay is small yet, no training happens # we also collect data using random policy to collect more diverse starting data if len(exp_replay) < start_timesteps: _, interaction_state = play_and_record(interaction_state, random_actor, env, exp_replay, timesteps_per_epoch) continue # perform a step in environment and store it in experience replay _, interaction_state = play_and_record(interaction_state, actor, env, exp_replay, timesteps_per_epoch) # sample a batch from experience replay states, actions, rewards, next_states, is_done = exp_replay.sample(batch_size) # move everything to PyTorch tensors states = torch.tensor(states, device=DEVICE, dtype=torch.float) actions = torch.tensor(actions, device=DEVICE, dtype=torch.float) rewards = torch.tensor(rewards, device=DEVICE, dtype=torch.float) next_states = torch.tensor(next_states, device=DEVICE, dtype=torch.float) is_done = torch.tensor( is_done.astype('float32'), device=DEVICE, dtype=torch.float ) # losses critic1_loss = <YOUR CODE> optimize("critic1", critic1, opt_critic1, critic1_loss) critic2_loss = <YOUR CODE> optimize("critic2", critic2, opt_critic2, critic2_loss) # actor update is less frequent in TD3 if n_iterations % policy_update_freq == 0: actor_loss = <YOUR CODE> optimize("actor", actor, opt_actor, actor_loss) # update target networks update_target_networks(critic1, target_critic1) update_target_networks(critic2, target_critic2) update_target_networks(actor, target_actor) # comment this line if you chose SAC """ Explanation: Pipeline Finally combining all together and launching our algorithm. Your goal is to reach at least 1000 average reward during evaluation after training in this ant environment (since this is a new hometask, this threshold might be updated, so at least just see if your ant learned to walk in the rendered simulation). rewards should rise more or less steadily in this environment. There can be some drops due to instabilities of algorithm, but it should eventually start rising after 100K-200K iterations. If no progress in reward is observed after these first 100K-200K iterations, there is a bug. gradient norm appears to be quite big for this task, it is ok if it reaches 100-200 (we handled it with clip_grad_norm). Consider everything exploded if it starts growing exponentially, then there is a bug. End of explanation """ def evaluate(env, actor, n_games=1, t_max=1000): ''' Plays n_games and returns rewards and rendered games ''' rewards = [] for _ in range(n_games): s = env.reset() R = 0 for _ in range(t_max): # select action for final evaluation of your policy action = <YOUR CODE> assert (action.max() <= 1).all() and (action.min() >= -1).all() s, r, done, _ = env.step(action) R += r if done: break rewards.append(R) return np.array(rewards) # evaluation will take some time! sessions = evaluate(env, actor, n_games=20) score = sessions.mean() print(f"Your score: {score}") assert score >= 1000, "Needs more training?" 
print("Well done!") env.close() """ Explanation: Evaluation End of explanation """ env = gym.make("AntPyBulletEnv-v0") # we want to look inside env.render(mode="human") # let's hope this will work # don't forget to pray env = gym.wrappers.Monitor(env, directory="videos", force=True) # record sessions # note that t_max is 300, so collected reward will be smaller than 1000 evaluate(env, actor, n_games=1, t_max=300) env.close() """ Explanation: Record End of explanation """
pdamodaran/yellowbrick
examples/jkeung/testing.ipynb
apache-2.0
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
"""
Explanation: ROC Curve Example
Inspired by: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
This is an example of how to create ROC curves with plain scikit-learn versus using the Yellowbrick library. The data used is the breast cancer dataset that is included in sklearn.
Import Libraries
End of explanation
"""
bc = datasets.load_breast_cancer()
X = bc.data
y = bc.target

random_state = np.random.RandomState(0)

# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=random_state)
"""
Explanation: Import some data to play with
End of explanation
"""
# Learn to predict each class against the other
classifier = svm.SVC(kernel='linear', probability=True,
                     random_state=random_state)
y_score = classifier.fit(X_train, y_train).decision_function(X_test)

# Compute ROC curve and ROC area for each class
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
"""
Explanation: Split the data and prepare data for ROC Curve
End of explanation
"""
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
"""
Explanation: Plot ROC Curve using Matplotlib
End of explanation
"""
import yellowbrick as yb
from yellowbrick.classifier import ROCAUC

visualizer = ROCAUC(classifier)
visualizer.fit(X_train, y_train)  # Fit the training data to the visualizer
visualizer.score(X_test, y_test)  # Evaluate the model on the test data
g = visualizer.poof()             # Draw/show/poof the data
"""
Explanation: Create ROCAUC using YellowBrick
End of explanation
"""
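# Optional follow-up (a sketch, reusing y_test and y_score from above): the area under the
# ROC curve can also be obtained directly from sklearn without building the curve by hand.
from sklearn.metrics import roc_auc_score

print("ROC AUC computed directly: %0.2f" % roc_auc_score(y_test, y_score))
"""
Explanation: As a quick cross-check of both plots above, sklearn's roc_auc_score computes the same area-under-curve value directly from the true labels and the decision scores, without constructing the curve explicitly.
End of explanation
"""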
google-research/google-research
aav/model_and_dataset_analysis/data_prep.ipynb
apache-2.0
import os import numpy import pandas from six.moves import zip from sklearn import mixture import gzip !pip install python-Levenshtein import Levenshtein """ Explanation: Copyright 2020 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. End of explanation """ R1_TILE21_WT_SEQ = 'DEEEIRTTNPVATEQYGSVSTNLQRGNR' # Covariance type to use in Gaussian Mixture Model. _COVAR_TYPE = 'full' # Number of components to use in Gaussian Mixture Model. _NUM_COMPONENTS = 2 class BinningLabeler(object): """Emits class labels from provided cutoff values. Input cutoffs are encoded as 1-D arrays. Given a cutoffs array of size n, creates n+1 labels for cutoffs, where the first bin is [-inf, cutoffs[0]], and last bin is (cutoffs[-1], inf]. """ def __init__(self, cutoffs): """Constructor. Args: cutoffs: (numpy.ndarray or list or numeric) values to bin data at. First bin is [-inf, cutoffs[0]], and last bin is (cutoffs[-1], inf]. Raises: ValueError: If no cutoff(s) (i.e. an empty list) is provided. """ cutoffs = numpy.atleast_1d(cutoffs) if cutoffs.size: self._cutoffs = numpy.sort(cutoffs) else: raise ValueError('Invalid cutoffs. At least one cutoff value required.') def predict(self, values): """Provides model labels for input value(s) using the cutoff bins. Args: values: (numpy.ndarray or numeric) Value(s) to infer a label on. Returns: A numpy array with length len(values) and labels corresponding to categories defined by the cutoffs array intervals. The labels are [0, 1, . . ., n], where n = len(cutoffs). Note, labels correspond to bins in sorted order from smallest to largest cutoff value. """ return numpy.digitize(values, self._cutoffs) class TwoGaussianMixtureModelLabeler(object): """Emits class labels from Gaussian Mixture given input data. Input data is encoded as 1-D arrays. Allows for an optional ambiguous label between the two modelled Gaussian distributions. Without the optional ambigouous category, the two labels are: 0 - For values more likely derived from the Gaussian with smaller mean 2 - For values more likely derived from the Gaussian with larger mean When allowing for an ambiguous category the three labels are: 0 - For values more likely derived from the Gaussian with smaller mean 1 - For values which fall within an ambiguous probability cutoff. 2 - For values more likely derived from the Gaussian with larger mean """ def __init__(self, data): """Constructor. Args: data: (numpy.ndarray or list) Input data to model with Gaussian Mixture. Input data is presumed to be in the form [x1, x2, ...., xn]. """ self._data = numpy.array([data]).T self._gmm = mixture.GaussianMixture( n_components=_NUM_COMPONENTS, covariance_type=_COVAR_TYPE).fit(self._data) # Re-map the gaussian with smaller mean to the "0" label. self._label_by_index = dict( list(zip([0, 1], numpy.argsort(self._gmm.means_[:, 0]).tolist()))) self._label_by_index_fn = numpy.vectorize(lambda x: self._label_by_index[x]) def predict(self, values, probability_cutoff=0.): """Provides model labels for input value(s) using the GMM. 
Args: values: (array or single float value) Value(s) to infer a label on. When values=None, predictions are run on self._data. probability_cutoff: (float) Proability between 0 and 1 to identify which values correspond to ambiguous labels. At probablity_cutoff=0 (default) it only returns the original two state predictions. Returns: A numpy array with length len(values) and labels corresponding to 0,1 if probability_cutoff = 0 and 0, 1, 2 otherwise. In the latter, 0 corresponds to the gaussian with smaller mean, 1 corresponds to the ambiguous label, and 2 corresponds to the gaussian with larger mean. """ values = numpy.atleast_1d(values) values = numpy.array([values]).T predictions = self._label_by_index_fn(self._gmm.predict(values)) # Re-map the initial 0,1 predictions to 0,2. predictions *= 2 if probability_cutoff > 0: probas = self._gmm.predict_proba(values) max_probas = numpy.max(probas, axis=1) ambiguous_values = max_probas < probability_cutoff # Set ambiguous label as 1. predictions[ambiguous_values] = 1 return predictions """ Explanation: Code to fit GMM End of explanation """ with gzip.open('GAS1_target_20190516.csv.gz', 'rb') as f: gas1 = pandas.read_csv(f, index_col=None) gas1 = gas1.rename({ 'aa': 'sequence', 'mask': 'mutation_sequence', 'mut': 'num_mutations', 'category': 'partition', }, axis=1) gas1_orig = gas1.copy() ## for comparison below if needed gas1.head() """ Explanation: Load validation experiment dataframe End of explanation """ numpy.testing.assert_allclose( gas1.GAS1_plasmid_F, gas1.GAS1_plasmid_N / gas1.GAS1_plasmid_N.sum()) numpy.testing.assert_allclose( gas1.GAS1_virus_F, gas1.GAS1_virus_N / gas1.GAS1_virus_N.sum()) """ Explanation: Validate that N->F columns computed as expected End of explanation """ zero_plasmids_mask = gas1.GAS1_plasmid_N == 0 zero_plasmids_mask.sum() """ Explanation: Filter sequences with insufficient plasmids Find zero-plasmid sequences End of explanation """ low_plasmids_mask = (gas1.GAS1_plasmid_N < 10) & ~zero_plasmids_mask low_plasmids_mask.sum() """ Explanation: Find low-plasmid count sequences These selection values are unreliable, more noisy End of explanation """ seqs_to_remove = (low_plasmids_mask | zero_plasmids_mask) seqs_to_remove.sum() num_seqs_before_plasmid_filter = len(gas1) num_seqs_before_plasmid_filter gas1 = gas1[~seqs_to_remove].copy() num_seqs_before_plasmid_filter - len(gas1) len(gas1) """ Explanation: Drop sequences that don't meet the plasmid count bars End of explanation """ PSEUDOCOUNT = 1 def counts_to_frequency(counts): return counts / counts.sum() gas1['virus_N'] = gas1.GAS1_virus_N + PSEUDOCOUNT gas1['plasmid_N'] = gas1.GAS1_plasmid_N + PSEUDOCOUNT gas1['virus_F'] = counts_to_frequency(gas1.virus_N) gas1['plasmid_F'] = counts_to_frequency(gas1.plasmid_N) """ Explanation: Add pseudocounts End of explanation """ gas1['viral_selection'] = numpy.log2(gas1.virus_F / gas1.plasmid_F) assert 0 == gas1.viral_selection.isna().sum() assert not numpy.any(numpy.isinf(gas1.viral_selection)) gas1.viral_selection.describe() """ Explanation: Compute viral selection End of explanation """ # Classify the selection coeff series after fitting to a GMM gmm_model = TwoGaussianMixtureModelLabeler( gas1[gas1.partition.isin(['stop', 'wild_type'])].viral_selection) gas1['viral_selection_gmm'] = gmm_model.predict(gas1.viral_selection) # Compute the threshold for the viable class from the GMM labels selection_coeff_threshold = gas1.loc[gas1.viral_selection_gmm == 2, 'viral_selection'].min() print('selection coeff cutoff = %.3f' % 
selection_coeff_threshold) # Add a label column def is_viable_mutant(mutant_data): return mutant_data['viral_selection'] > selection_coeff_threshold gas1['is_viable'] = gas1.apply(is_viable_mutant, axis=1) print(gas1.is_viable.mean()) """ Explanation: Compute GMM threshold End of explanation """ ml_generated_seqs = [ 'cnn_designed_plus_rand_train_seed', 'cnn_designed_plus_rand_train_walked', 'cnn_rand_doubles_plus_single_seed', 'cnn_rand_doubles_plus_single_walked', 'cnn_standard_seed', 'cnn_standard_walked', 'lr_designed_plus_rand_train_seed', 'lr_designed_plus_rand_train_walked', 'lr_rand_doubles_plus_single_seed', 'lr_rand_doubles_plus_single_walked', 'lr_standard_seed', 'lr_standard_walked', 'rnn_designed_plus_rand_train_seed', 'rnn_designed_plus_rand_train_walked', 'rnn_rand_doubles_plus_singles_seed', 'rnn_rand_doubles_plus_singles_walked', 'rnn_standard_seed', 'rnn_standard_walked', ] is_ml_generated_mask = gas1.partition.isin(ml_generated_seqs) ml_gen_df = gas1[is_ml_generated_mask].copy() non_ml_gen_df = gas1[~is_ml_generated_mask].copy() ml_gen_df.partition.value_counts() ml_gen_deduped = ml_gen_df.groupby('sequence').apply( lambda dupes: dupes.loc[dupes.plasmid_N.idxmax()]).copy() display(ml_gen_deduped.shape) ml_gen_deduped.head() """ Explanation: De-dupe model-designed sequences Partition the sequences that should not be de-deduped Split off the partitions for which we want to retain replicates, such as controls/etc. End of explanation """ gas1_deduped = pandas.concat([ml_gen_deduped, non_ml_gen_df], axis=0) print(gas1_deduped.shape) gas1_deduped.partition.value_counts() """ Explanation: Concatenate de-deduped ML-generated seqs with rest End of explanation """ gas1 = gas1_deduped gas1['num_edits'] = gas1.sequence.apply( lambda s: Levenshtein.distance(R1_TILE21_WT_SEQ, s)) gas1.num_edits.describe() COLUMN_SCHEMA = [ 'sequence', 'partition', 'mutation_sequence', 'num_mutations', 'num_edits', 'viral_selection', 'is_viable', ] gas1a = gas1[COLUMN_SCHEMA].copy() """ Explanation: Compute edit distance for chip End of explanation """ harvard = pandas.read_csv('r0r1_with_partitions_and_labels.csv', index_col=None) harvard = harvard.rename({ 'S': 'viral_selection', 'aa_seq': 'sequence', 'mask': 'mutation_sequence', 'mut': 'num_mutations', }, axis=1) designed_mask = harvard.partition.isin(['min_fit', 'thresh', 'temp']) harvard.loc[designed_mask, ['partition']] = 'designed' harvard['num_edits'] = harvard.sequence.apply( lambda s: Levenshtein.distance(R1_TILE21_WT_SEQ, s)) harvard.num_edits.describe() harvard1 = harvard[COLUMN_SCHEMA].copy() harvard1.head(3) harvard1['chip'] = 'harvard' gas1a['chip'] = 'gas1' combined = pandas.concat([ harvard1, gas1a, ], axis=0, sort=False) print(combined.shape) combined.partition.value_counts() combined.head() """ Explanation: Concat with training data chip End of explanation """
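# Quick self-contained check (illustrative only) of the two labeler classes defined at the
# top of this notebook, run on synthetic data drawn from two well-separated Gaussians. The
# cutoff of 0.0 and the 0.9 probability cutoff are arbitrary choices for illustration.
rng = numpy.random.RandomState(0)
synthetic = numpy.concatenate([rng.normal(-5, 1, 500), rng.normal(5, 1, 500)])

bin_labeler = BinningLabeler(cutoffs=[0.0])
gmm_labeler = TwoGaussianMixtureModelLabeler(synthetic)

print(bin_labeler.predict([-4.2, 0.3, 6.1]))
print(gmm_labeler.predict([-4.2, 0.3, 6.1], probability_cutoff=0.9))
"""
Explanation: A small sanity check of BinningLabeler and TwoGaussianMixtureModelLabeler on synthetic two-component data. The binning labeler returns 0/1 around the fixed cutoff, while the GMM labeler returns 0 or 2 for confidently assigned points and 1 for points that fall in the ambiguous region between the two fitted Gaussians.
End of explanation
"""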
adolfoguimaraes/machinelearning
Projects/01_Projeto_HillaryTrump_Twitter.ipynb
mit
import pandas as pd import nltk df = pd.read_csv("https://www.data2learning.com/machinelearning/datasets/tweets.csv") dataset = df[['text','handle']] dict_ = dataset.T.to_dict("list") """ Explanation: Projeto Hillary x Trump Nesse projeto vamos utilizar tweets relacionados a última eleição presidencial dos Estados Unidos, onde Hillary Clinton e Donald Trump dispuram o pleito final. A proposta é utilizar os métodos de aprendizados supervisionados estudados para classificar tweets entre duas categorias: Hillary e Trump. O primeiro passo foi obter um conjunto de tweets que foi publicado pelas contas oficiais do tweet dos dois candidatos. Para isso, vamos utilizar este dataset disponibilizado pelo Kaglle. Com este conjunto de dados, vamos construir um modelo capaz de aprender, a partir de um conjunto de palavras, se o texto foi digitado pela conta da Hillary ou do Trump. Uma vez que este modelo foi construído, vamos classificar um conjunto novo de dados relacionados às eleições americadas e classifica-los em um dos discursos. A proposta é tentar classificar tweets que tenham um direcionamento mais próximo do discurso da Hillary e aqueles que são mais próximos do discurso do Trump. Lê-se como discurso os tweets publicados. Para essa base de teste, vamos utilizar um subconjunto de tweets deste dataset que consta com tweets que foram postados no dia da eleição americana. Para exibir os resultados, vamos construir uma página html. Além da análise automática, esta página terá informações sobre os termos mais citados pelas contas dos candidatos. Sendo assim, o primeiro passo é gerar a base de tweets dos candidatos e extrair as informações mais relevantes. Vamos trabalhar primeiro aqui no Jupyter Notebook para testar os métodos. Ao final será gerado um JSON que será lido pela página HTML. Um exemplo da página já alimentada pode ser encontrada neste link. Vamos começar ;) Pré-Processamento da Base dos Candidatos End of explanation """ from unicodedata import normalize, category from nltk.tokenize import regexp_tokenize from collections import Counter, Set from nltk.corpus import stopwords import re def pre_process_text(text): # Expressão regular para extrair padrões do texto. 
São reconhecidos (na ordem, o símbolo | separa um padrão): # - links com https, links com http, links com www, palavras, nome de usuários (começa com @), hashtags (começa com #) pattern = r'(https://[^"\' ]+|www.[^"\' ]+|http://[^"\' ]+|[a-zA-Z]+|\@\w+|\#\w+)' #Cria a lista de stopwords english_stop = stopwords.words(['english']) users_cited = [] hash_tags = [] tokens = [] text = text.lower() patterns = regexp_tokenize(text, pattern) users_cited = [e for e in patterns if e[0] == '@'] hashtags = [e for e in patterns if e[0] == '#'] tokens = [e for e in patterns if e[:4] != 'http'] tokens = [e for e in tokens if e[:4] != 'www.'] tokens = [e for e in tokens if e[0] != '#'] tokens = [e for e in tokens if e[0] != '@'] tokens = [e for e in tokens if e not in english_stop] tokens = [e for e in tokens if len(e) > 3] return users_cited, hashtags, tokens users_cited_h = [] # armazena os usuários citatdos por hillary users_cited_t = [] # armazena os usuários citados por trump hashtags_h = [] # armazena as hashtags de hillary hashtags_t = [] # armazena as hashtags de trump words_h = [] # lista de palavras que apareceram no discurso de hillary words_t = [] # lista de palavras que apareceram no discurso de trump all_tokens = [] # armazena todos os tokens, para gerar o vocabulário final all_texts = [] # armazena todos for d in dict_: text_ = dict_[d][0] class_ = dict_[d][1] users_, hash_, tokens_ = pre_process_text(text_) if class_ == "HillaryClinton": class_ = "hillary" users_cited_h += users_ hashtags_h += hash_ words_h += tokens_ elif class_ == "realDonaldTrump": class_ = "trump" users_cited_t += users_ hashtags_t += hash_ words_t += tokens_ temp_dict = { 'text': " ".join(tokens_), 'class_': class_ } all_tokens += tokens_ all_texts.append(temp_dict) print("Termos mais frequentes ditos por Hillary:") print() hillary_frequent_terms = nltk.FreqDist(words_h).most_common(10) for word in hillary_frequent_terms: print(word[0]) print("Termos mais frequentes ditos por Trump:") print() trump_frequent_terms = nltk.FreqDist(words_t).most_common(10) for word in trump_frequent_terms: print(word[0]) """ Explanation: O objeto dict_ representa todos os textos associados a classe correspondente. Na etapa de pré-processamento vamos fazer algumas operações: retirar dos textos hashtags, usuários e links. Essas informações serão incluídas em listas separadas paara serem usadas posteriormente. serão eliminados stopwords, símbolos de pontuação, palavras curtas; numerais tambéms serão descartados, mantendo apenas palavras. Essas etapas de pré-processamento dependem do objetivo do trabalho. Pode ser de interessa, a depender da tarefa de classificação, manter tais símbolos. Para o nosso trabalho, só é de interesse as palavras em si. Para esta tarefa, vamos utilizar também o NLTK, conjunto de ferramentas voltadas para o processamento de linguagem natural. 
vamos criar um método para isso, já que iremos utiliza-lo mais adiante com a base de teste: End of explanation """ #Pegando os bigram e trigram mais frequentes from nltk.collocations import BigramCollocationFinder, TrigramCollocationFinder from nltk.metrics import BigramAssocMeasures, TrigramAssocMeasures bcf = BigramCollocationFinder.from_words(words_h) tcf = TrigramCollocationFinder.from_words(words_h) bcf.apply_freq_filter(3) tcf.apply_freq_filter(3) result_bi = bcf.nbest(BigramAssocMeasures.raw_freq, 5) result_tri = tcf.nbest(TrigramAssocMeasures.raw_freq, 5) hillary_frequent_bitrigram = [] for r in result_bi: w_ = " ".join(r) print(w_) hillary_frequent_bitrigram.append(w_) print for r in result_tri: w_ = " ".join(r) print(w_) hillary_frequent_bitrigram.append(w_) """ Explanation: Bigrams e Trigram mais frequentes da Hillary End of explanation """ bcf = BigramCollocationFinder.from_words(words_t) tcf = TrigramCollocationFinder.from_words(words_t) bcf.apply_freq_filter(3) tcf.apply_freq_filter(3) result_bi = bcf.nbest(BigramAssocMeasures.raw_freq, 5) result_tri = tcf.nbest(TrigramAssocMeasures.raw_freq, 5) trump_frequent_bitrigram = [] for r in result_bi: w_ = " ".join(r) print(w_) trump_frequent_bitrigram.append(w_) print for r in result_tri: w_ = " ".join(r) print(w_) trump_frequent_bitrigram.append(w_) """ Explanation: Bigrams e Trigram mais frequentes do Trump End of explanation """ # Cada token é concatenado em uma única string que representa um tweet # Cada classe é atribuída a um vetor (hillary, trump) # Instâncias: [t1, t2, t3, t4] # Classes: [c1, c2, c3, c4] all_tweets = [] all_class = [] for t in all_texts: all_tweets.append(t['text']) all_class.append(t['class_']) print("Criar o bag of words...\n") #Número de features, coluna da tabela max_features = 2000 from sklearn.feature_extraction.text import CountVectorizer # Initialize the "CountVectorizer" object, which is scikit-learn's # bag of words tool. vectorizer = CountVectorizer(analyzer = "word", \ tokenizer = None, \ preprocessor = None, \ stop_words = None, \ max_features = max_features) # fit_transform() does two functions: First, it fits the model # and learns the vocabulary; second, it transforms our training data # into feature vectors. The input to fit_transform should be a list of # strings. X = vectorizer.fit_transform(all_tweets) # Numpy arrays are easy to work with, so convert the result to an # array X = X.toarray() y = all_class print("Train data: OK!") X.shape """ Explanation: Construindo um bag of words End of explanation """ # Teste os modelos a partir daqui """ Explanation: Ao final deste processo já temos nossa base de dados dividido em duas variáveis: X e y. X corresponde ao bag of words, ou seja, cada linha consiste de um twitter e cada coluna de uma palavra presente no vocabulário da base dados. Para cada linha/coluna é atribuído um valor que corresponde a quantidade de vezes que aquela palavra aparece no respectivo tweet. Se a palavra não está presente, o valor de 0 é atribuído. y corresponde a classe de cada tweet: hillary, tweet do perfil @HillaryClinton e trump, tweet do perfil @realDonaldTrump. Testando diferentes modelos Vamos testar os diferentes modelos estudados com a base de dados criada e escolher aquele que melhor generaliza o conjunto de dados. Para testar os modelos, vamos utilizar validação cruzada de 10 folds. 
Aplique os modelos estudados: KNN (teste diferentes valores de K e escolha o melhor) Árvore de Decisão SVM (varie o valor de C e escolha o melhor) Além disso, teste dois outros modelos: RandomForest Naive Bayes Para estes dois, pesquise no Google como utiliza-los no Scikit-Learn. Para cada modelo imprima a acurácia no treino e na média dos 10 folds da validação cruzada. A escolha do melhor deve ser feita a partir do valor da média da validação cruzada. O melhor modelo será utilizado para classificar outros textos extraídos do twitter e na implementação da página web. Atenção: dada a quantidade de dados, alguns modelos pode demorar alguns minutos para executar End of explanation """ hillary_frequent_hashtags = nltk.FreqDist(hashtags_h).most_common(10) trump_frequent_hashtags = nltk.FreqDist(hashtags_t).most_common(10) dict_web = { 'hillary_information': { 'frequent_terms': hillary_frequent_terms, 'frequent_bitrigram': hillary_frequent_bitrigram, 'frequent_hashtags': hillary_frequent_hashtags }, 'trump_information': { 'frequent_terms': trump_frequent_terms, 'frequent_bitrigram': trump_frequent_bitrigram, 'frequent_hashtags': trump_frequent_hashtags }, 'classified_information': { 'hillary_terms': hillary_classified_frequent_terms, 'hillary_bigram': hillary_classified_bitrigram, 'trump_terms': trump_classified_frequent_terms, 'trump_bigram': trump_classified_bitrigram, 'texts_classified': all_classified } } with open('data.json', 'w') as outfile: json.dump(dict_web, outfile) """ Explanation: Atenção: as tarefas a seguir serão disponibilizadas após a entrega da primeira parte. Sendo assim, não precisa enviar o que se pede a seguir. Quando passar a data da entrega, disponibilizo o notebook completo. No entanto, fiquem a vontade de fazer a próxima tarefa como forma de aprendizado. É um bom exercício ;) Usando o melhor modelo em novos textos Vamos executar o melhor clasificador em um conjunto de textos novos. Esses textos não tem classificação. Eles foram postados durante o dia da eleição americana. A idéia é identificar de forma automática os tweets que estão mais próximos dos discursos da Hillary Clinton e de Donald Trump. Essa tarefa será realizada em sala após a entrega da atividade do melhor modelo Gerando o .json lido pela página web Esse será o JSON gerado após a etapa de teste do melhor modelo. Essa tarefa também será realizada em sala após a entrega do teste dos melhores modelos. End of explanation """
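# Reference sketch only (one possible approach, not the official solution) for the
# model-comparison step requested above: several classifiers evaluated with 10-fold
# cross-validation on the bag-of-words matrix X and labels y built earlier. The specific
# hyperparameter values used here are placeholder assumptions.
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB

candidate_models = {
    'KNN (k=5)': KNeighborsClassifier(n_neighbors=5),
    'Decision Tree': DecisionTreeClassifier(),
    'SVM (C=1, linear)': SVC(C=1.0, kernel='linear'),
    'Random Forest': RandomForestClassifier(n_estimators=100),
    'Naive Bayes': MultinomialNB(),
}

for name, model in candidate_models.items():
    cv_scores = cross_val_score(model, X, y, cv=10)
    train_acc = model.fit(X, y).score(X, y)
    print('%s: train accuracy %.3f, 10-fold CV accuracy %.3f' % (name, train_acc, cv_scores.mean()))
"""
Explanation: The cell above is only an illustrative sketch of the model-testing step described earlier (KNN, decision tree, SVM, random forest and naive Bayes compared with 10-fold cross-validation). The hyperparameters shown (k=5, C=1, 100 trees) are placeholder choices; in the assignment they should be varied, and the final model should be picked by the highest cross-validation mean.
End of explanation
"""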
rsignell-usgs/python-training
web-services/01-skill_score.ipynb
cc0-1.0
import os try: import cPickle as pickle except ImportError: import pickle run_name = '2015-08-17' fname = os.path.join(run_name, 'config.pkl') with open(fname, 'rb') as f: config = pickle.load(f) import numpy as np from pandas import DataFrame, read_csv from utilities import to_html, save_html, apply_skill fname = '{}-all_obs.csv'.format(run_name) all_obs = read_csv(os.path.join(run_name, fname), index_col='name') def rename_cols(df): columns = dict() for station in df.columns: mask = all_obs['station'].astype(str) == station name = all_obs['station'][mask].index[0] columns.update({station: name}) return df.rename(columns=columns) from glob import glob from pandas import Panel from utilities import nc2df def load_ncs(run_name): fname = '{}-{}.nc'.format ALL_OBS_DATA = nc2df(os.path.join(run_name, fname(run_name, 'OBS_DATA'))) index = ALL_OBS_DATA.index dfs = dict(OBS_DATA=ALL_OBS_DATA) for fname in glob(os.path.join(run_name, "*.nc")): if 'OBS_DATA' in fname: continue else: model = fname.split('.')[0].split('-')[-1] df = nc2df(fname) # FIXME: Horrible work around duplicate times. if len(df.index.values) != len(np.unique(df.index.values)): kw = dict(subset='index', take_last=True) df = df.reset_index().drop_duplicates(**kw).set_index('index') kw = dict(method='time', limit=30) df = df.reindex(index).interpolate(**kw).ix[index] dfs.update({model: df}) return Panel.fromDict(dfs).swapaxes(0, 2) """ Explanation: <img style='float: left' width="150px" src="http://bostonlightswim.org/wp/wp-content/uploads/2011/08/BLS-front_4-color.jpg"> <br><br> The Boston Light Swim Sea Surface Temperature time-series model skill Load configuration End of explanation """ from utilities import mean_bias dfs = load_ncs(run_name) df = apply_skill(dfs, mean_bias, remove_mean=False, filter_tides=False) df = rename_cols(df) skill_score = dict(mean_bias=df.copy()) # Filter out stations with no valid comparison. df.dropna(how='all', axis=1, inplace=True) df = df.applymap('{:.2f}'.format).replace('nan', '--') html = to_html(df.T) fname = os.path.join(run_name, 'mean_bias.html'.format(run_name)) save_html(fname, html) html """ Explanation: Skill 1: Model Bias (or Mean Bias) The bias skill compares the model mean temperature against the observations. It is possible to introduce a Mean Bias in the model due to a mismatch of the boundary forcing and the model interior. $$ \text{MB} = \mathbf{\overline{m}} - \mathbf{\overline{o}}$$ End of explanation """ from utilities import rmse dfs = load_ncs(run_name) df = apply_skill(dfs, rmse, remove_mean=True, filter_tides=False) df = rename_cols(df) skill_score['rmse'] = df.copy() # Filter out stations with no valid comparison. df.dropna(how='all', axis=1, inplace=True) df = df.applymap('{:.2f}'.format).replace('nan', '--') html = to_html(df.T) fname = os.path.join(run_name, 'rmse.html'.format(run_name)) save_html(fname, html) html """ Explanation: Skill 2: Central Root Mean Squared Error Root Mean Squared Error of the deviations from the mean. $$ \text{CRMS} = \sqrt{\left(\mathbf{m'} - \mathbf{o'}\right)^2}$$ where: $\mathbf{m'} = \mathbf{m} - \mathbf{\overline{m}}$ and $\mathbf{o'} = \mathbf{o} - \mathbf{\overline{o}}$ End of explanation """ from utilities import r2 dfs = load_ncs(run_name) df = apply_skill(dfs, r2, remove_mean=True, filter_tides=False) df = rename_cols(df) skill_score['r2'] = df.copy() # Filter out stations with no valid comparison. 
df.dropna(how='all', axis=1, inplace=True) df = df.applymap('{:.2f}'.format).replace('nan', '--') html = to_html(df.T) fname = os.path.join(run_name, 'r2.html'.format(run_name)) save_html(fname, html) html fname = os.path.join(run_name, 'skill_score.pkl') with open(fname,'wb') as f: pickle.dump(skill_score, f) """ Explanation: Skill 3: R$^2$ https://en.wikipedia.org/wiki/Coefficient_of_determination End of explanation """ %matplotlib inline import matplotlib.pyplot as plt from utilities.taylor_diagram import TaylorDiagram def make_taylor(samples): fig = plt.figure(figsize=(9, 9)) dia = TaylorDiagram(samples['std']['OBS_DATA'], fig=fig, label="Observation") colors = plt.matplotlib.cm.jet(np.linspace(0, 1, len(samples))) # Add samples to Taylor diagram. samples.drop('OBS_DATA', inplace=True) for model, row in samples.iterrows(): dia.add_sample(row['std'], row['corr'], marker='s', ls='', label=model) # Add RMS contours, and label them. contours = dia.add_contours(colors='0.5') plt.clabel(contours, inline=1, fontsize=10) # Add a figure legend. kw = dict(prop=dict(size='small'), loc='upper right') leg = fig.legend(dia.samplePoints, [p.get_label() for p in dia.samplePoints], numpoints=1, **kw) return fig dfs = load_ncs(run_name) # Bin and interpolate all series to 1 hour. freq = '30min' for station, df in list(dfs.iteritems()): df = df.resample(freq).interpolate().dropna(axis=1) if 'OBS_DATA' in df: samples = DataFrame.from_dict(dict(std=df.std(), corr=df.corr()['OBS_DATA'])) else: continue samples[samples < 0] = np.NaN samples.dropna(inplace=True) if len(samples) <= 2: # 1 obs 1 model. continue fig = make_taylor(samples) fig.savefig(os.path.join(run_name, '{}.png'.format(station))) plt.close(fig) """ Explanation: Normalized Taylor diagrams The radius is model standard deviation error divided by observations deviation, azimuth is arc-cosine of cross correlation (R), and distance to point (1, 0) on the abscissa is Centered RMS. End of explanation """
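# For reference only: minimal numpy versions of the skill metrics used above, written
# directly from the formulas in the text. The actual mean_bias, rmse and r2 functions
# imported from the local `utilities` module may differ in detail (e.g. NaN handling).
import numpy as np

def mean_bias_sketch(model, obs):
    # MB = mean(m) - mean(o)
    return np.nanmean(model) - np.nanmean(obs)

def centered_rmse_sketch(model, obs):
    # CRMS = sqrt(mean((m' - o')^2)) with anomalies m' = m - mean(m), o' = o - mean(o)
    m_anom = model - np.nanmean(model)
    o_anom = obs - np.nanmean(obs)
    return np.sqrt(np.nanmean((m_anom - o_anom) ** 2))
"""
Explanation: These two helpers are only a sketch of the Mean Bias and Central RMS formulas quoted above, included to make the definitions concrete; the notebook itself relies on the functions from the accompanying utilities module, whose exact implementations are not shown here.
End of explanation
"""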
Juanlu001/MOOC-Estadistica-Investigadores
P2P 1 - Datos de cigarrillos.ipynb
mit
import urllib.request urllib.request.urlretrieve("http://www.amstat.org/publications/jse/datasets/cigarettes.dat.txt", "cigarettes.dat") !wc -l cigarettes.dat cat cigarettes.dat """ Explanation: Parte 0: Preparar los datos Descargamos los datos de http://www.amstat.org/publications/jse/v2n1/datasets.mcintyre.html. End of explanation """ import pandas as pd df = pd.read_csv("cigarettes.dat", delim_whitespace=True, header=None, names=["Marca", "Alquitrán", "Nicotina", "Peso", "Monóxido"]) df.head() """ Explanation: Vamos a cargar los en Python con pandas. pandas es una biblioteca de Python para trabajar con tablas de datos (llamados DataFrames) de forma cómoda. En Pybonacci escribimos un tutorial de pandas desde lo más básico a usos un poco más intermedios. End of explanation """ df["Clases"] = ['Rubio', 'Negro', 'Negro', 'Rubio', 'Rubio', 'Negro', 'Rubio', 'Rubio', 'Negro', 'Rubio', 'Rubio', 'Rubio', 'Rubio', 'Rubio', 'Rubio', 'Rubio', 'Negro', 'Rubio', 'Negro', 'Rubio', 'Negro', 'Rubio', 'Negro', 'Negro', 'Rubio'] df[["Clases", "Alquitrán", "Nicotina", "Peso", "Monóxido"]] """ Explanation: Además, vamos a añadir el tipo de cigarro para que la tabla quede como la presentada en el curso. End of explanation """ df.describe().transpose() """ Explanation: Parte 1: Informe estadístico Los DataFrame de pandas tienen un método describe() que imprime algunas medidas estadísticas interesantes de nuestra tabla: End of explanation """ df.sem() df.var() """ Explanation: Podemos añadir también el error estándar de la media y la varianza: End of explanation """ (df.describe(percentiles=[.05, .10, .25, .50, .75, .90, .95]) [["Monóxido", "Alquitrán", "Nicotina", "Peso"]] .transpose() [["5%", "10%", "25%", "50%", "75%", "90%", "95%"]]) """ Explanation: Por tanto, contestando a las preguntas del informe: 1) El tamaño muestral es de 25. 2) La variable con mayor variabilidad es el alquitrán: se aprecia mejor en la tabla de la varianza. 3) La media más representativa de los datos es la del peso: el error estándar de la media es el menor de las cuatro variables. 4) La media más estable es la del peso, por lo mismo que se ha dicho en el apartado anterior. Parte 2: Analizar los percentiles Ahora volvemos a utilizar el método describe() pero esta vez especificamos manualmente los percentiles, seleccionamos solo las columnas que nos interesan y presentamos la tabla: End of explanation """ df.median() iqr = df.quantile(.75) - df.quantile(.25) iqr """ Explanation: Recuperamos además la mediana y el recorrido intercuartílico: End of explanation """ %matplotlib inline import matplotlib.pyplot as plt plt.style.use("fivethirtyeight") plt.figure(figsize=(8, 8)) plt.subplot(2, 2, 1) df.boxplot("Monóxido", return_type='axes') plt.subplot(2, 2, 2) df.boxplot("Alquitrán", return_type='axes') plt.ylim(0, 35) # Para ver una medida discordante plt.subplot(2, 2, 3) df.boxplot("Nicotina", return_type='axes') plt.subplot(2, 2, 4) df.boxplot("Peso", return_type='axes') plt.ylim(0, 1.20) # Importante """ Explanation: Observamos una gran variabilidad de los contenidos de alquitrán y monóxido de carbono, mientras que las cantidades de nicotina son más estables y el peso de los cigarrillos prácticamente no cambia. Los resultados son similares a los obtenidos estudiando la media y su dispersión. Parte 3: Box plots Por último, utilizamos la biblioteca matplotlib para representar los diagramas de cajas. 
Por defecto son de tipo Tukey, es decir: los bigotes llegan hasta 1.5 veces el recorrido intercuartílico por encima del percentil 75 y por debajo del percentil 25. End of explanation """ df[df["Alquitrán"] > df["Alquitrán"].quantile(.75) + 1.5 * iqr["Alquitrán"]] """ Explanation: Tanto el monóxido como el peso presentan distribuciones bastante simétricas, mientras que el alquitrán tiene un claro sesgo positivo. Especial atención merece el peso en este caso, pues una correcta escala vertical es esencial para no percibir una variabilidad errónea. Tanto en los datos de nicotina como en los de alquitrán se aprecian sendos valores discordantes, que invitarían a no comprar esa marca de cigarrillos. End of explanation """
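# Optional check (illustrative): the Tukey fences used by the box plots above, computed
# explicitly as Q1 - 1.5*IQR and Q3 + 1.5*IQR for every numeric column, to confirm which
# observations are flagged as discordant.
q1 = df.quantile(.25)
q3 = df.quantile(.75)
lower_fence = q1 - 1.5 * iqr
upper_fence = q3 + 1.5 * iqr
print(pd.DataFrame({'lower': lower_fence, 'upper': upper_fence}))
"""
Explanation: As a complement to the box plots, this cell computes the Tukey fences explicitly; any value outside [Q1 - 1.5·IQR, Q3 + 1.5·IQR] is the kind of discordant observation highlighted above for tar ("Alquitrán") and nicotine.
End of explanation
"""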
kobejean/tensorflow
tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb
apache-2.0
# to generate gifs !pip install imageio """ Explanation: Copyright 2018 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"). DCGAN: An example with tf.keras and eager <table class="tfo-notebook-buttons" align="left"><td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td><td> <a target="_blank" href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table> This notebook demonstrates how to generate images of handwritten digits using tf.keras and eager execution. To do so, we use Deep Convolutional Generative Adverserial Networks (DCGAN). This model takes about ~30 seconds per epoch (using tf.contrib.eager.defun to create graph functions) to train on a single Tesla K80 on Colab, as of July 2018. Below is the output generated after training the generator and discriminator models for 150 epochs. End of explanation """ from __future__ import absolute_import, division, print_function # Import TensorFlow >= 1.10 and enable eager execution import tensorflow as tf tf.enable_eager_execution() import os import time import numpy as np import glob import matplotlib.pyplot as plt import PIL import imageio from IPython import display """ Explanation: Import TensorFlow and enable eager execution End of explanation """ (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data() train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32') # We are normalizing the images to the range of [-1, 1] train_images = (train_images - 127.5) / 127.5 BUFFER_SIZE = 60000 BATCH_SIZE = 256 """ Explanation: Load the dataset We are going to use the MNIST dataset to train the generator and the discriminator. The generator will then generate handwritten digits. 
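End of explanation
"""

# Quick sanity check (optional): after the preprocessing cell above, pixel values should
# lie in [-1, 1], which matches the tanh activation used on the generator's output layer.
print(train_images.min(), train_images.max())
assert train_images.min() >= -1.0 and train_images.max() <= 1.0

"""
Explanation: The images are scaled to the [-1, 1] range so that real samples live in the same value range the generator can produce through its final tanh layer; this small check simply confirms the scaling.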
End of explanation """ train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) """ Explanation: Use tf.data to create batches and shuffle the dataset End of explanation """ class Generator(tf.keras.Model): def __init__(self): super(Generator, self).__init__() self.fc1 = tf.keras.layers.Dense(7*7*64, use_bias=False) self.batchnorm1 = tf.keras.layers.BatchNormalization() self.conv1 = tf.keras.layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same', use_bias=False) self.batchnorm2 = tf.keras.layers.BatchNormalization() self.conv2 = tf.keras.layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False) self.batchnorm3 = tf.keras.layers.BatchNormalization() self.conv3 = tf.keras.layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False) def call(self, x, training=True): x = self.fc1(x) x = self.batchnorm1(x, training=training) x = tf.nn.relu(x) x = tf.reshape(x, shape=(-1, 7, 7, 64)) x = self.conv1(x) x = self.batchnorm2(x, training=training) x = tf.nn.relu(x) x = self.conv2(x) x = self.batchnorm3(x, training=training) x = tf.nn.relu(x) x = tf.nn.tanh(self.conv3(x)) return x class Discriminator(tf.keras.Model): def __init__(self): super(Discriminator, self).__init__() self.conv1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same') self.conv2 = tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same') self.dropout = tf.keras.layers.Dropout(0.3) self.flatten = tf.keras.layers.Flatten() self.fc1 = tf.keras.layers.Dense(1) def call(self, x, training=True): x = tf.nn.leaky_relu(self.conv1(x)) x = self.dropout(x, training=training) x = tf.nn.leaky_relu(self.conv2(x)) x = self.dropout(x, training=training) x = self.flatten(x) x = self.fc1(x) return x generator = Generator() discriminator = Discriminator() # Defun gives 10 secs/epoch performance boost generator.call = tf.contrib.eager.defun(generator.call) discriminator.call = tf.contrib.eager.defun(discriminator.call) """ Explanation: Write the generator and discriminator models Generator It is responsible for creating convincing images that are good enough to fool the discriminator. It consists of Conv2DTranspose (Upsampling) layers. We start with a fully connected layer and upsample the image 2 times so as to reach the desired image size (mnist image size) which is (28, 28, 1). We use leaky relu activation except for the last layer which uses tanh activation. Discriminator The discriminator is responsible for classifying the fake images from the real images. In other words, the discriminator is given generated images (from the generator) and the real MNIST images. The job of the discriminator is to classify these images into fake (generated) and real (MNIST images). Basically the generator should be good enough to fool the discriminator that the generated images are real. 
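End of explanation
"""

# Optional shape check (illustrative): push one random noise vector through the untrained
# generator and feed the result to the discriminator. The noise size of 100 matches the
# noise_dim hyperparameter defined below.
sample_noise = tf.random_normal([1, 100])
sample_image = generator(sample_noise, training=False)
sample_logit = discriminator(sample_image, training=False)
print(sample_image.shape, sample_logit.shape)  # expected: (1, 28, 28, 1) and (1, 1)

"""
Explanation: A quick way to verify that the two models are wired correctly before training: the generator should turn a (1, 100) noise vector into a (1, 28, 28, 1) image, and the discriminator should map that image to a single logit of shape (1, 1).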
End of explanation """ def discriminator_loss(real_output, generated_output): # [1,1,...,1] with real output since it is true and we want # our generated examples to look like it real_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.ones_like(real_output), logits=real_output) # [0,0,...,0] with generated images since they are fake generated_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.zeros_like(generated_output), logits=generated_output) total_loss = real_loss + generated_loss return total_loss def generator_loss(generated_output): return tf.losses.sigmoid_cross_entropy(tf.ones_like(generated_output), generated_output) discriminator_optimizer = tf.train.AdamOptimizer(1e-4) generator_optimizer = tf.train.AdamOptimizer(1e-4) """ Explanation: Define the loss functions and the optimizer Discriminator loss The discriminator loss function takes 2 inputs; real images, generated images real_loss is a sigmoid cross entropy loss of the real images and an array of ones (since these are the real images) generated_loss is a sigmoid cross entropy loss of the generated images and an array of zeros (since these are the fake images) Then the total_loss is the sum of real_loss and the generated_loss Generator loss It is a sigmoid cross entropy loss of the generated images and an array of ones The discriminator and the generator optimizers are different since we will train them separately. End of explanation """ checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, discriminator_optimizer=discriminator_optimizer, generator=generator, discriminator=discriminator) """ Explanation: Checkpoints (Object-based saving) End of explanation """ EPOCHS = 150 noise_dim = 100 num_examples_to_generate = 16 # keeping the random vector constant for generation (prediction) so # it will be easier to see the improvement of the gan. random_vector_for_generation = tf.random_normal([num_examples_to_generate, noise_dim]) def generate_and_save_images(model, epoch, test_input): # make sure the training parameter is set to False because we # don't want to train the batchnorm layer when doing inference. 
predictions = model(test_input, training=False) fig = plt.figure(figsize=(4,4)) for i in range(predictions.shape[0]): plt.subplot(4, 4, i+1) plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray') plt.axis('off') plt.savefig('image_at_epoch_{:04d}.png'.format(epoch)) plt.show() def train(dataset, epochs, noise_dim): for epoch in range(epochs): start = time.time() for images in dataset: # generating noise from a uniform distribution noise = tf.random_normal([BATCH_SIZE, noise_dim]) with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: generated_images = generator(noise, training=True) real_output = discriminator(images, training=True) generated_output = discriminator(generated_images, training=True) gen_loss = generator_loss(generated_output) disc_loss = discriminator_loss(real_output, generated_output) gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables) gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables) generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.variables)) discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.variables)) if epoch % 1 == 0: display.clear_output(wait=True) generate_and_save_images(generator, epoch + 1, random_vector_for_generation) # saving (checkpoint) the model every 15 epochs if (epoch + 1) % 15 == 0: checkpoint.save(file_prefix = checkpoint_prefix) print ('Time taken for epoch {} is {} sec'.format(epoch + 1, time.time()-start)) # generating after the final epoch display.clear_output(wait=True) generate_and_save_images(generator, epochs, random_vector_for_generation) train(train_dataset, EPOCHS, noise_dim) """ Explanation: Training We start by iterating over the dataset The generator is given noise as an input which when passed through the generator model will output a image looking like a handwritten digit The discriminator is given the real MNIST images as well as the generated images (from the generator). Next, we calculate the generator and the discriminator loss. Then, we calculate the gradients of loss with respect to both the generator and the discriminator variables (inputs) and apply those to the optimizer. Generate Images After training, its time to generate some images! We start by creating noise array as an input to the generator The generator will then convert the noise into handwritten images. Last step is to plot the predictions and voila! End of explanation """ # restoring the latest checkpoint in checkpoint_dir checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)) """ Explanation: Restore the latest checkpoint End of explanation """ def display_image(epoch_no): return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no)) display_image(EPOCHS) """ Explanation: Display an image using the epoch number End of explanation """ with imageio.get_writer('dcgan.gif', mode='I') as writer: filenames = glob.glob('image*.png') filenames = sorted(filenames) last = -1 for i,filename in enumerate(filenames): frame = 2*(i**0.5) if round(frame) > round(last): last = frame else: continue image = imageio.imread(filename) writer.append_data(image) image = imageio.imread(filename) writer.append_data(image) # this is a hack to display the gif inside the notebook os.system('cp dcgan.gif dcgan.gif.png') display.Image(filename="dcgan.gif.png") """ Explanation: Generate a GIF of all the saved images. 
<!-- TODO(markdaoust): Remove the hack when the IPython version is updated -->
End of explanation
"""
#from google.colab import files
#files.download('dcgan.gif')
"""
Explanation: To download the animation from Colab, uncomment the code below:
End of explanation
"""
kit-cel/wt
nt2_ce2/vorlesung/basic_concepts_Python.ipynb
gpl-2.0
# defining lists sport_list = [ 'cycling', 'football', 'fitness' ] first_prime_numbers = [ 2, 3, 5, 7, 11, 13, 17, 19 ] # getting contents sport = sport_list[ 2 ] third_prime = first_prime_numbers[ 2 ] # printing print( 'All sports:', sport_list ) print( 'Sport to be done:', sport ) print( '\nFirst primes:', first_prime_numbers ) print( 'Third prime number:', third_prime ) # adapt entries and append new entries sport_list[ 1 ] = 'swimming' sport_list.append( 'running' ) first_prime_numbers.append( 23 ) # printing print( 'All sports:', sport_list ) print( 'First primes:', first_prime_numbers ) """ Explanation: Contents and Objective Describing several commands and methods that will be used throughout the simulations <b>Note:</b> Basic knowledge of programming languages and concepts is assumed. Only specific concepts that are different from, e.g., C++ or Matlab, are provided. <b>NOTE 2:</b> The following summary is by no means complete or exhaustive, but only provides a short and simplified overview of the commands used throughout the simulations in the lecture. For a detailed introduction please have a look at one of the numerous web-tutorials or books on Python, e.g., https://www.python-kurs.eu/ https://link.springer.com/book/10.1007%2F978-1-4842-4246-9 https://primo.bibliothek.kit.edu/primo_library/libweb/action/search.do?mode=Basic&vid=KIT&vl%28freeText0%29=python&vl%28freeText0%29=python&fn=search&tab=kit&srt=date Cell Types There are two types of cells: Text cells (called 'Markdown'): containing text, allowing use of LaTeX Math/code cells: where code is being executed As long as you are just reading the simulations, there is no need to be concerned about this fact. Data Structures In the following sections the basic data structures used in upcoming simulations will be introduced. Basic types as int, float, string are supposed to be well-known. Lists Container-type structure for collecting entities (which may even be of different type) Defined by key word list( ) or by square brackets with entities being separated by comma Referenced by index in square brackets; <b>Note</b>: indexing starting at 0 Entries may be changed, appended, sliced,... End of explanation """ # defining tuple sport_tuple = ( 'cycling', 'football', 'fitness' ) # getting contents sport = sport_tuple[ 2 ] # printing print( 'All sports:', sport_tuple ) print( 'Sport to be done:', sport ) # append new entries sport_tuple += ( 'running', ) # printing print( 'All sports:', sport_tuple ) print() # changing entries will fail # --> ERROR is being generated on purpose # --> NOTE: Error is handled by 'try: ... except: ...' 
statement try: sport_tuple[ 1 ] = 'swimming' except: print('ERROR: Entries within tuples cannot be adapted!') """ Explanation: Tuples Similar to lists but "immutable", i.e., entries can be appended, but not be changed Defined by tuple( ) or by brackets with entities being separated by comma Referenced by index in square brackets; <b>Note</b>: indexing starting at 0 End of explanation """ # defining dictionaries sports_days = { 'Monday': 'pause', 'Tuesday': 'fitness', 'Wednesday' : 'running', 'Thursday' : 'fitness', 'Friday' : 'swimming', 'Saturday' : 'cycling', 'Sunday' : 'cycling' } print( 'Sport by day:', sports_days ) print( '\nOn Tuesday:', sports_days[ 'Tuesday' ]) # Changes are made by using the key as identifier sports_days[ 'Tuesday' ] = 'running' print( 'Sport by day:', sports_days ) """ Explanation: Dictionaries Container in which entries are of type: ( key : value ) Defined by key word "dict" or by curly brackets with entities of shape "key : value" being separated by comma Referenced by key in square brackets --> <b>Note</b>: Indexing by keys instead of indices might be a major advantage (at least sometimes) End of explanation """ # defining sets sports_set = { 'fitness', 'running', 'swimming', 'cycling'} print( sports_set ) print() # indexing will fail # --> ERROR is being generated on purpose try: print( sports_set[0] ) except: print('ERROR: No indexing of sets!') # adding elements (or not) sports_set.add( 'pause' ) print(sports_set) sports_set.add( 'fitness' ) print(sports_set) # union of sets (also: intersection, complement, ...) all_stuff_set = set( sports_set ) union_of_sets = all_stuff_set.union( first_prime_numbers) print( union_of_sets ) """ Explanation: Sets As characterized by the naming, sets are representing mathematical sets; no double occurences of elements Defined by keyword "set" of by curly brackets with entities being separated by comma <b>Note</b>: As in maths, sets don't possess ordering, so there is no indexing of sets! End of explanation """ # looping in lists simply parsing along the list for s in sport_list: print( s ) print() # looping in dictionaries happens along keys for s in sports_days: print( '{}: \t{}'.format( s, sports_days[ s ] ) ) """ Explanation: Flow Control Standards commands as for, while, ... Functions for specific purposes <b>Note:</b> Since commands and their concept are quite self-explaining, only short description of syntax is provided For Loops for loops in Python allow looping along every so-called iterable as, e.g., list, tuple, dicts.... 
<b>Note</b>: the loop variable does not have to be an int Syntax: for i in iterable: <b>Note:</b> Blocks are structured by indentation; sub-commands (as, e.g., within a loop) are indented End of explanation """ # initialize variables sum_primes = 0 _n = 0 # sum up primes until the running sum reaches 20 while sum_primes < 20: # add prime at the current index sum_primes += first_prime_numbers[ _n ] # increase index _n += 1 print( 'Sum of first {} primes is {}.'.format( _n, sum_primes ) ) """ Explanation: While Loops while loops in Python are (as usual) constructed by checking a condition and exiting the loop once the condition becomes False <b>Note:</b> Blocks are structured by indentation; sub-commands (as, e.g., within a loop) are indented End of explanation """ def get_n_th_prime( n, first_prime_numbers ): ''' DOC String IN: index of prime number, list of prime numbers OUT: n-th prime number ''' # do something smart, e.g., check that the requested index really exists # here 'try: ... except: ...' does the job by catching the failing look-up and returning an error message instead try: val = first_prime_numbers[ n - 1 ] except: return '"ERROR: Index not feasible!"' # NOTE: since counting starts at 0, the (n-1)-th element is returned # Furthermore, there is no real need for a function here; a simple indexing expression would have done the job! return first_prime_numbers[ n - 1 ] # show doc string print( help( get_n_th_prime ) ) # apply functions N = 3 print( '{}. prime number is {}.'.format( N, get_n_th_prime( N, first_prime_numbers ) ) ) print() N = 30 print( '{}. prime number is {}.'.format( N, get_n_th_prime( N, first_prime_numbers ) ) ) """ Explanation: Functions Defined by the keyword "def" followed by the list of arguments in brackets Doc string is defined directly after "def" by ''' TEXT ''' Values are returned by the keyword "return"; <b>Note:</b> the returned "value" can be a scalar, list, dict, vector, matrix,... End of explanation """
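A small, optional add-on to the Functions section (not part of the original notebook): since the text above notes that returned values can be scalars, lists, dicts, etc., the following hedged sketch shows a function that returns several values at once and takes a keyword argument with a default. It reuses the first_prime_numbers list defined earlier; the helper name sum_and_count_primes is introduced here purely for illustration.
# returning multiple values and using a keyword argument with a default value
def sum_and_count_primes( primes, limit=20 ):
    '''
    IN: list of prime numbers, optional limit for the running sum
    OUT: tuple ( sum of the primes used, number of primes used )
    '''
    total = 0
    used = 0
    for p in primes:
        if total >= limit:
            break
        total += p
        used += 1
    # a comma-separated return packs the values into a tuple
    return total, used

# the returned tuple can be unpacked directly into two variables
prime_sum, n_used = sum_and_count_primes( first_prime_numbers, limit=20 )
print( 'Sum of first {} primes is {}.'.format( n_used, prime_sum ) )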
mdiaz236/DeepLearningFoundations
tv-script-generation/.ipynb_checkpoints/dlnd_tv_script_generation-checkpoint.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper data_dir = './data/simpsons/moes_tavern_lines.txt' text = helper.load_data(data_dir) # Ignore notice, since we don't use it for analysing the data text = text[81:] """ Explanation: TV Script Generation In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern. Get the Data The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc.. End of explanation """ view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) scenes = text.split('\n\n') print('Number of scenes: {}'.format(len(scenes))) sentence_count_scene = [scene.count('\n') for scene in scenes] print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene))) sentences = [sentence for scene in scenes for sentence in scene.split('\n')] print('Number of lines: {}'.format(len(sentences))) word_count_sentence = [len(sentence.split()) for sentence in sentences] print('Average number of words in each line: {}'.format(np.average(word_count_sentence))) print() print('The sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) """ Explanation: Explore the Data Play around with view_sentence_range to view different parts of the data. End of explanation """ words = list(set(text.split())) {i: word for (i, word) in enumerate(words)} import numpy as np import problem_unittests as tests def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ words = list(set(text)) vocab_to_int = {word: i for (i, word) in enumerate(words)} int_to_vocab = {i: word for (i, word) in enumerate(words)} return vocab_to_int, int_to_vocab """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) """ Explanation: Implement Preprocessing Functions The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below: - Lookup Table - Tokenize Punctuation Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call vocab_to_int - Dictionary to go from the id to word, we'll call int_to_vocab Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab) End of explanation """ def token_lookup(): """ Generate a dict to turn punctuation into a token. 
:return: Tokenize dictionary where the key is the punctuation and the value is the token """ return { '.': '||period||', ',': '||comma||', '"': '||quotation_mark||', ';': '||semicolon||', '!': '||exclamation_mark||', '?': '||question_mark||', '(': '||left_parentheses||', ')': '||right_parentheses||', '--': '||dash||', '\n': '||return||' } """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) """ Explanation: Tokenize Punctuation We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!". Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token: - Period ( . ) - Comma ( , ) - Quotation Mark ( " ) - Semicolon ( ; ) - Exclamation mark ( ! ) - Question mark ( ? ) - Left Parentheses ( ( ) - Right Parentheses ( ) ) - Dash ( -- ) - Return ( \n ) This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||". End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) """ Explanation: Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import numpy as np import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) """ Explanation: Build the Neural Network You'll build the components necessary to build a RNN by implementing the following functions below: - get_inputs - get_init_cell - get_embed - build_rnn - build_nn - get_batches Check the Version of TensorFlow and Access to GPU End of explanation """ def get_inputs(): """ Create TF Placeholders for input, targets, and learning rate. 
:return: Tuple (input, targets, learning rate) """ input_placeholder = tf.placeholder(tf.int32, [None, None], name = 'input') targets_placeholder = tf.placeholder(tf.int32, [None, None], name = 'targets') learning_rate_placeholder = tf.placeholder(tf.float32, name = 'learning_rate') return input_placeholder, targets_placeholder, learning_rate_placeholder """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_inputs(get_inputs) """ Explanation: Input Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: - Input text placeholder named "input" using the TF Placeholder name parameter. - Targets placeholder - Learning Rate placeholder Return the placeholders in the following tuple (Input, Targets, LearningRate) End of explanation """ test_batch_size_ph = tf.placeholder(tf.int32) test_batch_size_ph.shape def get_init_cell(batch_size, rnn_size): """ Create an RNN Cell and initialize it. :param batch_size: Size of batches :param rnn_size: Size of RNNs :return: Tuple (cell, initialize state) """ lstm_layers = 2 cell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size) # drop = tf.contrib.rnn.DropoutWrapper(cell) # NOTE: the same BasicLSTMCell object is reused for every layer here; some TF versions require one cell instance per layer multi = tf.contrib.rnn.MultiRNNCell([cell] * lstm_layers) initial_state = multi.zero_state(batch_size, tf.float32) initial_state = tf.identity(initial_state, "initial_state") return multi, initial_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_init_cell(get_init_cell) """ Explanation: Build RNN Cell and Initialize Stack one or more BasicLSTMCells in a MultiRNNCell. - The RNN size should be set using rnn_size - Initialize Cell State using the MultiRNNCell's zero_state() function - Apply the name "initial_state" to the initial state using tf.identity() Return the cell and initial state in the following tuple (Cell, InitialState) End of explanation """ def get_embed(input_data, vocab_size, embed_dim): """ Create embedding for <input_data>. :param input_data: TF placeholder for text input. :param vocab_size: Number of words in vocabulary. :param embed_dim: Number of embedding dimensions :return: Embedded input. """ embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) embed = tf.nn.embedding_lookup(embedding, input_data) return embed """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_embed(get_embed) """ Explanation: Word Embedding Apply embedding to input_data using TensorFlow. Return the embedded sequence. End of explanation """ def build_rnn(cell, inputs): """ Create a RNN using a RNN Cell :param cell: RNN Cell :param inputs: Input text data :return: Tuple (Outputs, Final State) """ outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, "final_state") return outputs, final_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_rnn(build_rnn) """ Explanation: Build RNN You created a RNN Cell in the get_init_cell() function. Time to use the cell to create an RNN.
- Build the RNN using the tf.nn.dynamic_rnn() - Apply the name "final_state" to the final state using tf.identity() Return the outputs and final_state state in the following tuple (Outputs, FinalState) End of explanation """ def build_nn(cell, rnn_size, input_data, vocab_size): """ Build part of the neural network :param cell: RNN cell :param rnn_size: Size of rnns :param input_data: Input data :param vocab_size: Vocabulary size :return: Tuple (Logits, FinalState) """ embed_data = get_embed(input_data, vocab_size, rnn_size) outputs, final_state = build_rnn(cell, embed_data) logits = tf.contrib.layers.fully_connected(outputs, num_outputs=vocab_size) return logits, final_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_nn(build_nn) """ Explanation: Build the Neural Network Apply the functions you implemented above to: - Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function. - Build RNN using cell and your build_rnn(cell, inputs) function. - Apply a fully connected layer with a linear activation and vocab_size as the number of outputs. Return the logits and final state in the following tuple (Logits, FinalState) End of explanation """ def get_batches(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: Batches as a Numpy array """ n_batches = int(len(int_text) / (batch_size * seq_length)) # Drop the last few characters to make only full batches xdata = np.array(int_text[: n_batches * batch_size * seq_length]) ydata = np.array(int_text[1: n_batches * batch_size * seq_length + 1]) x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1) y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1) return np.array(list(zip(x_batches, y_batches))) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_batches(get_batches) """ Explanation: Batches Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements: - The first element is a single batch of input with the shape [batch size, sequence length] - The second element is a single batch of targets with the shape [batch size, sequence length] If you can't fill the last batch with enough data, drop the last batch. For exmple, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following: ``` [ # First Batch [ # Batch of Input [[ 1 2 3], [ 7 8 9]], # Batch of targets [[ 2 3 4], [ 8 9 10]] ], # Second Batch [ # Batch of Input [[ 4 5 6], [10 11 12]], # Batch of targets [[ 5 6 7], [11 12 13]] ] ] ``` End of explanation """ # Number of Epochs num_epochs = 100 # Batch Size batch_size = 256 # RNN Size rnn_size = 1000 # Sequence Length seq_length = 10 # Learning Rate learning_rate = .01 # Show stats for every n number of batches show_every_n_batches = 13 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ save_dir = './save' """ Explanation: Neural Network Training Hyperparameters Tune the following parameters: Set num_epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set seq_length to the length of sequence. Set learning_rate to the learning rate. 
Set show_every_n_batches to the number of batches the neural network should print progress. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from tensorflow.contrib import seq2seq train_graph = tf.Graph() with train_graph.as_default(): vocab_size = len(int_to_vocab) input_text, targets, lr = get_inputs() input_data_shape = tf.shape(input_text) cell, initial_state = get_init_cell(input_data_shape[0], rnn_size) logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function cost = seq2seq.sequence_loss( logits, targets, tf.ones([input_data_shape[0], input_data_shape[1]])) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients] train_op = optimizer.apply_gradients(capped_gradients) """ Explanation: Build the Graph Build the graph using the neural network you implemented. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ batches = get_batches(int_text, batch_size, seq_length) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(num_epochs): state = sess.run(initial_state, {input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { input_text: x, targets: y, initial_state: state, lr: learning_rate} train_loss, state, _ = sess.run([cost, final_state, train_op], feed) # Show every <show_every_n_batches> batches if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0: print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format( epoch_i, batch_i, len(batches), train_loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_dir) print('Model Trained and Saved') """ Explanation: Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params((seq_length, save_dir)) """ Explanation: Save Parameters Save seq_length and save_dir for generating a new TV script. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() seq_length, load_dir = helper.load_params() """ Explanation: Checkpoint End of explanation """ def get_tensors(loaded_graph): """ Get input, initial state, final state, and probabilities tensor from <loaded_graph> :param loaded_graph: TensorFlow graph loaded from file :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) """ InputTensor = loaded_graph.get_tensor_by_name("input:0") InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0") FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0") ProbsTensor = loaded_graph.get_tensor_by_name("probs:0") return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_tensors(get_tensors) """ Explanation: Implement Generate Functions Get Tensors Get tensors from loaded_graph using the function get_tensor_by_name(). 
Get the tensors using the following names: - "input:0" - "initial_state:0" - "final_state:0" - "probs:0" Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) End of explanation """ def pick_word(probabilities, int_to_vocab): """ Pick the next word in the generated text :param probabilities: Probabilites of the next word :param int_to_vocab: Dictionary of word ids as the keys and words as the values :return: String of the predicted word """ max_pos = max(enumerate(probabilities),key=lambda x: x[1])[0] return int_to_vocab[max_pos] """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_pick_word(pick_word) """ Explanation: Choose Word Implement the pick_word() function to select the next word using probabilities. End of explanation """ gen_length = 200 # homer_simpson, moe_szyslak, or Barney_Gumble prime_word = 'moe_szyslak' """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_dir + '.meta') loader.restore(sess, load_dir) # Get Tensors from loaded model input_text, initial_state, final_state, probs = get_tensors(loaded_graph) # Sentences generation setup gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {input_text: np.array([[1]])}) # Generate sentences for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [probs, final_state], {input_text: dyn_input, initial_state: prev_state}) pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\n ', '\n') tv_script = tv_script.replace('( ', '(') print(tv_script) """ Explanation: Generate TV Script This will generate the TV script for you. Set gen_length to the length of TV script you want to generate. End of explanation """
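As an optional follow-up (not part of the original notebook): pick_word above always returns the most probable word, which tends to make the generated script repetitive. A common variation is to sample the next word from the predicted distribution instead. The sketch below is only a hedged suggestion; it assumes the same probabilities array and int_to_vocab dictionary used by pick_word, and the name pick_word_sampled is introduced here so the graded pick_word implementation stays untouched. Swapping the call inside the generation loop would be the only change needed to try it.
import numpy as np

def pick_word_sampled(probabilities, int_to_vocab):
    """
    Sample the next word from the predicted distribution instead of taking the argmax
    :param probabilities: Probabilities of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    probs = np.asarray(probabilities, dtype=np.float64)
    probs = probs / probs.sum()  # renormalize to guard against float32 rounding
    word_id = np.random.choice(len(probs), p=probs)
    return int_to_vocab[word_id]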
pombredanne/gensim
docs/notebooks/topic_coherence-movies.ipynb
lgpl-2.1
import re import os from scipy.stats import pearsonr from datetime import datetime from gensim.models import CoherenceModel from gensim.corpora.dictionary import Dictionary %load_ext line_profiler # This was used for finding out which line was taking maximum time for indirect confirmation measure """ Explanation: Benchmark testing of coherence pipeline on Movies dataset: How to find how well coherence measure matches your manual annotators Introduction: For the validation of any model adapted from a paper, it is of utmost importance that the results of benchmark testing on the datasets listed in the paper match between the actual implementation (palmetto) and gensim. This coherence pipeline has been implemented from the work done by Roeder et al. The paper can be found here. Approach : 1. We will use the Movies dataset first. This dataset along with the topics on which the coherence is calculated and the gold (human) ratings on these topics can be found here. 2. We will then calculate the coherence on these topics using the pipeline implemented in gensim. 3. Once we have got all our coherence values on these topics we will calculate the correlation with the human ratings using pearson's r. 4. We will compare this final correlation value with the values listed in the paper and see if the pipeline is working as expected. End of explanation """ prefix = "/home/devashish/datasets/Movies/movie/" start = datetime.now() texts = [] for fil in os.listdir(prefix): for line in open(prefix + fil): # lower case all words lowered = line.lower() #remove punctuation and split into seperate words words = re.findall(r'\w+', lowered, flags = re.UNICODE | re.LOCALE) texts.append(words) end = datetime.now() print "Time taken: %s" % (end - start) start = datetime.now() dictionary = Dictionary(texts) corpus = [dictionary.doc2bow(text) for text in texts] end = datetime.now() print "Time taken: %s" % (end - start) """ Explanation: Download the dataset from the link and plug in the location here End of explanation """ print len(corpus) print dictionary topics = [] # list of 100 topics for l in open('/home/devashish/datasets/Movies/topicsMovie.txt'): topics.append([l.split()]) topics.pop(100) human_scores = [] for l in open('/home/devashish/datasets/Movies/goldMovie.txt'): human_scores.append(float(l.strip())) """ Explanation: Cross validate the numbers According to the paper the number of documents should be 108952 with a vocabulary of 1625124. The difference is because of a difference in preprocessing. However the results obtained are still very similar. End of explanation """ start = datetime.now() u_mass = [] flags = [] for n, topic in enumerate(topics): try: cm = CoherenceModel(topics=topic, corpus=corpus, dictionary=dictionary, coherence='u_mass') u_mass.append(cm.get_coherence()) except KeyError: flags.append(n) end = datetime.now() print "Time taken: %s" % (end - start) """ Explanation: Start off with u_mass coherence measure. End of explanation """ start = datetime.now() c_v = [] for n, topic in enumerate(topics): try: cm = CoherenceModel(topics=topic, texts=texts, dictionary=dictionary, coherence='c_v') c_v.append(cm.get_coherence()) except KeyError: pass end = datetime.now() print "Time taken: %s" % (end - start) """ Explanation: Start c_v coherence measure This is expected to take much more time since c_v uses a sliding window to perform probability estimation and uses the cosine similarity indirect confirmation measure. 
End of explanation """ start = datetime.now() c_uci = [] flags = [] for n, topic in enumerate(topics): try: cm = CoherenceModel(topics=topic, texts=texts, dictionary=dictionary, coherence='c_uci') c_uci.append(cm.get_coherence()) except KeyError: flags.append(n) end = datetime.now() print "Time taken: %s" % (end - start) start = datetime.now() c_npmi = [] for n, topic in enumerate(topics): print n try: cm = CoherenceModel(topics=topic, texts=texts, dictionary=dictionary, coherence='c_npmi') c_npmi.append(cm.get_coherence()) except KeyError: pass end = datetime.now() print "Time taken: %s" % (end - start) final_scores = [] for n, score in enumerate(human_scores): if n not in flags: final_scores.append(score) """ Explanation: Start c_uci and c_npmi coherence measures They should be taking lesser time than c_v but should have a higher correlation than u_mass End of explanation """ print len(u_mass), len(c_v), len(c_uci), len(c_npmi), len(final_scores) # 1 topic has word(s) that is not in the dictionary. Probably some difference # in preprocessing """ Explanation: One topic encountered a KeyError. This was because of a difference in preprocessing due to which one topic word wasn't found in the dictionary End of explanation """ print pearsonr(u_mass, final_scores)[0] print pearsonr(c_v, final_scores)[0] print pearsonr(c_uci, final_scores)[0] print pearsonr(c_npmi, final_scores)[0] """ Explanation: The values in the paper were: u_mass correlation : 0.093 c_v correlation : 0.548 c_uci correlation : 0.473 c_npmi correlation : 0.438 Our values are also very similar to these values which is good. This validates the correctness of our pipeline. End of explanation """
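As an optional, purely cosmetic addition (not part of the original notebook), the comparison above can be printed as a small summary table; the paper values hard-coded below are simply the ones quoted in the preceding explanation, and the lists u_mass, c_v, c_uci, c_npmi and final_scores are the ones computed earlier in this notebook.
# summary table: gensim correlations next to the values reported in the paper
measure_names = ['u_mass', 'c_v', 'c_uci', 'c_npmi']
gensim_scores = [u_mass, c_v, c_uci, c_npmi]
paper_correlations = [0.093, 0.548, 0.473, 0.438]

print('%-8s  %10s  %9s' % ('measure', 'gensim r', 'paper r'))
for name, scores, paper_r in zip(measure_names, gensim_scores, paper_correlations):
    gensim_r = pearsonr(scores, final_scores)[0]
    print('%-8s  %10.3f  %9.3f' % (name, gensim_r, paper_r))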
QuantScientist/Deep-Learning-Boot-Camp
day03/additional materials/5.2 Multi-Modal Networks.ipynb
mit
# let's load MNIST data as we did in the exercise on MNIST with FC Nets # %load ../solutions/sol_52.py """ Explanation: Quick Intro to Keras Functional API Preamble: All models (layers) are callables ```python from keras.layers import Input, Dense from keras.models import Model this returns a tensor inputs = Input(shape=(784,)) a layer instance is callable on a tensor, and returns a tensor x = Dense(64, activation='relu')(inputs) x = Dense(64, activation='relu')(x) predictions = Dense(10, activation='softmax')(x) this creates a model that includes the Input layer and three Dense layers model = Model(input=inputs, output=predictions) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels) # starts training ``` Multi-Input Networks Keras Merge Layer Here's a good use case for the functional API: models with multiple inputs and outputs. The functional API makes it easy to manipulate a large number of intertwined datastreams. Let's consider the following model. ```python from keras.layers import Dense, Input from keras.models import Model from keras.layers.merge import concatenate left_input = Input(shape=(784, ), name='left_input') left_branch = Dense(32, input_dim=784, name='left_branch')(left_input) right_input = Input(shape=(784,), name='right_input') right_branch = Dense(32, input_dim=784, name='right_branch')(right_input) x = concatenate([left_branch, right_branch]) predictions = Dense(10, activation='softmax', name='main_output')(x) model = Model(inputs=[left_input, right_input], outputs=predictions) ``` Resulting Model will look like the following network: <img src="../imgs/multi_input_model.png" /> Such a two-branch model can then be trained via e.g.: python model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit([input_data_1, input_data_2], targets) # we pass one data array per model input Try yourself Step 1: Get Data - MNIST End of explanation """ ## try yourself ## `evaluate` the model on test data """ Explanation: Step 2: Create the Multi-Input Network End of explanation """
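Since the cell above leaves the multi-input network as an exercise, here is one possible sketch of the two-branch model described in the preamble, assembled and compiled so it can be inspected with model.summary(). It follows the snippet shown earlier; the data names X_train and Y_train (flattened 784-dim MNIST images and one-hot labels) are assumptions about what the Step 1 solution provides, so the commented fit call may need adapting.
# a possible sketch for Step 2 -- two input branches merged into one classifier
from keras.layers import Input, Dense
from keras.layers.merge import concatenate
from keras.models import Model

left_input = Input(shape=(784,), name='left_input')
left_branch = Dense(32, name='left_branch')(left_input)

right_input = Input(shape=(784,), name='right_input')
right_branch = Dense(32, name='right_branch')(right_input)

x = concatenate([left_branch, right_branch])
predictions = Dense(10, activation='softmax', name='main_output')(x)

model = Model(inputs=[left_input, right_input], outputs=predictions)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

# feeding the same images to both branches just exercises the two-input API:
# model.fit([X_train, X_train], Y_train, epochs=2, batch_size=128)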
ES-DOC/esdoc-jupyterhub
notebooks/messy-consortium/cmip6/models/emac-2-53-vol/aerosol.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'messy-consortium', 'emac-2-53-vol', 'aerosol') """ Explanation: ES-DOC CMIP6 Model Properties - Aerosol MIP Era: CMIP6 Institute: MESSY-CONSORTIUM Source ID: EMAC-2-53-VOL Topic: Aerosol Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. Properties: 69 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:10 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Meteorological Forcings 5. Key Properties --&gt; Resolution 6. Key Properties --&gt; Tuning Applied 7. Transport 8. Emissions 9. Concentrations 10. Optical Radiative Properties 11. Optical Radiative Properties --&gt; Absorption 12. Optical Radiative Properties --&gt; Mixtures 13. Optical Radiative Properties --&gt; Impact Of H2o 14. Optical Radiative Properties --&gt; Radiative Scheme 15. Optical Radiative Properties --&gt; Cloud Interactions 16. Model 1. Key Properties Key properties of the aerosol model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of aerosol model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/volume ratio for aerosols" # "3D number concenttration for aerosols" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Prognostic variables in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of tracers in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are aerosol calculations generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses atmospheric chemistry time stepping" # "Specific timestepping (operator splitting)" # "Specific timestepping (integrated)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Physical properties of seawater in ocean 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the time evolution of the prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. 
Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the aerosol model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.5. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Meteorological Forcings ** 4.1. Variables 3D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Variables 2D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Two dimensionsal forcing variables, e.g. land-sea mask definition End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Frequency Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Frequency with which meteological forcings are applied (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Resolution Resolution in the aersosol model grid 5.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for aerosol model 6.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Transport Aerosol transport 7.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of transport in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Specific transport scheme (eulerian)" # "Specific transport scheme (semi-lagrangian)" # "Specific transport scheme (eulerian and semi-lagrangian)" # "Specific transport scheme (lagrangian)" # TODO - please enter value(s) """ Explanation: 7.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for aerosol transport modeling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Mass adjustment" # "Concentrations positivity" # "Gradients monotonicity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.3. Mass Conservation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to ensure mass conservation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.convention') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Convective fluxes connected to tracers" # "Vertical velocities connected to tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.4. Convention Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Transport by convention End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Emissions Atmospheric aerosol emissions 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of emissions in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Prescribed (climatology)" # "Prescribed CMIP6" # "Prescribed above surface" # "Interactive" # "Interactive above surface" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to define aerosol species (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Volcanos" # "Bare ground" # "Sea surface" # "Lightning" # "Fires" # "Aircraft" # "Anthropogenic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the aerosol species are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Interannual" # "Annual" # "Monthly" # "Daily" # TODO - please enter value(s) """ Explanation: 8.4. Prescribed Climatology Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify the climatology type for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed via a climatology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Other Method Characteristics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Characteristics of the &quot;other method&quot; used for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Concentrations Atmospheric aerosol concentrations 9.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of concentrations in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.4. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as mass mixing ratios. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.5. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as AOD plus CCNs. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Optical Radiative Properties Aerosol optical and radiative properties 10.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of optical and radiative properties End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11. Optical Radiative Properties --&gt; Absorption Absortion properties in aerosol scheme 11.1. Black Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Dust Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.3. 
Organics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Optical Radiative Properties --&gt; Mixtures ** 12.1. External Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there external mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12.2. Internal Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there internal mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Mixing Rule Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If there is internal mixing with respect to chemical composition then indicate the mixinrg rule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13. Optical Radiative Properties --&gt; Impact Of H2o ** 13.1. Size Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact size? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.2. Internal Mixture Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact internal mixture? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Optical Radiative Properties --&gt; Radiative Scheme Radiative scheme for aerosol 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.2. Shortwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of shortwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. 
Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Optical Radiative Properties --&gt; Cloud Interactions Aerosol-cloud interactions 15.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol-cloud interactions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.2. Twomey Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the Twomey effect included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.3. Twomey Minimum Ccn Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the Twomey effect is included, then what is the minimum CCN number? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.4. Drizzle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect drizzle? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Cloud Lifetime Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect cloud lifetime? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.6. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Model Aerosol model 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dry deposition" # "Sedimentation" # "Wet deposition (impaction scavenging)" # "Wet deposition (nucleation scavenging)" # "Coagulation" # "Oxidation (gas phase)" # "Oxidation (in cloud)" # "Condensation" # "Ageing" # "Advection (horizontal)" # "Advection (vertical)" # "Heterogeneous chemistry" # "Nucleation" # TODO - please enter value(s) """ Explanation: 16.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the Aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Radiation" # "Land surface" # "Heterogeneous chemistry" # "Clouds" # "Ocean" # "Cryosphere" # "Gas phase chemistry" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other model components coupled to the Aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.gas_phase_precursors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "DMS" # "SO2" # "Ammonia" # "Iodine" # "Terpene" # "Isoprene" # "VOC" # "NOx" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.4. Gas Phase Precursors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of gas phase aerosol precursors. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Bulk" # "Modal" # "Bin" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.5. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.bulk_scheme_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon / soot" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.6. Bulk Scheme Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of species covered by the bulk scheme. End of explanation """
mne-tools/mne-tools.github.io
0.18/_downloads/62c7de5c3dadb3e4bb93d667d4af9010/plot_opm_rest_data.ipynb
bsd-3-clause
# sphinx_gallery_thumbnail_number = 14 # Authors: Denis Engemann <[email protected]> # Luke Bloy <[email protected]> # Eric Larson <[email protected]> # # License: BSD (3-clause) import os.path as op from mne.filter import next_fast_len from mayavi import mlab import mne print(__doc__) data_path = mne.datasets.opm.data_path() subject = 'OPM_sample' subjects_dir = op.join(data_path, 'subjects') bem_dir = op.join(subjects_dir, subject, 'bem') bem_fname = op.join(subjects_dir, subject, 'bem', subject + '-5120-5120-5120-bem-sol.fif') src_fname = op.join(bem_dir, '%s-oct6-src.fif' % subject) vv_fname = data_path + '/MEG/SQUID/SQUID_resting_state.fif' vv_erm_fname = data_path + '/MEG/SQUID/SQUID_empty_room.fif' vv_trans_fname = data_path + '/MEG/SQUID/SQUID-trans.fif' opm_fname = data_path + '/MEG/OPM/OPM_resting_state_raw.fif' opm_erm_fname = data_path + '/MEG/OPM/OPM_empty_room_raw.fif' opm_trans_fname = None opm_coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat') """ Explanation: VectorView and OPM resting state datasets Here we compute the resting state from raw for data recorded using a Neuromag VectorView system and a custom OPM system. The pipeline is meant to mostly follow the Brainstorm [1] OMEGA resting tutorial pipeline &lt;bst_omega_&gt;. The steps we use are: Filtering: downsample heavily. Artifact detection: use SSP for EOG and ECG. Source localization: dSPM, depth weighting, cortically constrained. Frequency: power spectrum density (Welch), 4 sec window, 50% overlap. Standardize: normalize by relative power for each source. :depth: 1 Preprocessing End of explanation """ raws = dict() raw_erms = dict() new_sfreq = 90. # Nyquist frequency (45 Hz) < line noise freq (50 Hz) raws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error') # ignore naming raws['vv'].load_data().resample(new_sfreq) raws['vv'].info['bads'] = ['MEG2233', 'MEG1842'] raw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error') raw_erms['vv'].load_data().resample(new_sfreq) raw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842'] raws['opm'] = mne.io.read_raw_fif(opm_fname) raws['opm'].load_data().resample(new_sfreq) raw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname) raw_erms['opm'].load_data().resample(new_sfreq) # Make sure our assumptions later hold assert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq'] """ Explanation: Load data, resample. We will store the raw objects in dicts with entries "vv" and "opm" to simplify housekeeping and simplify looping later. 
End of explanation """ titles = dict(vv='VectorView', opm='OPM') ssp_ecg, _ = mne.preprocessing.compute_proj_ecg( raws['vv'], tmin=-0.1, tmax=0.1, n_grad=1, n_mag=1) raws['vv'].add_proj(ssp_ecg, remove_existing=True) # due to how compute_proj_eog works, it keeps the old projectors, so # the output contains both projector types (and also the original empty-room # projectors) ssp_ecg_eog, _ = mne.preprocessing.compute_proj_eog( raws['vv'], n_grad=1, n_mag=1, ch_name='MEG0112') raws['vv'].add_proj(ssp_ecg_eog, remove_existing=True) raw_erms['vv'].add_proj(ssp_ecg_eog) fig = mne.viz.plot_projs_topomap(raws['vv'].info['projs'][-4:], info=raws['vv'].info) fig.suptitle(titles['vv']) fig.subplots_adjust(0.05, 0.05, 0.95, 0.85) """ Explanation: Do some minimal artifact rejection just for VectorView data End of explanation """ kinds = ('vv', 'opm') n_fft = next_fast_len(int(round(4 * new_sfreq))) print('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raws['vv'].info['sfreq'])) for kind in kinds: fig = raws[kind].plot_psd(n_fft=n_fft, proj=True) fig.suptitle(titles[kind]) fig.subplots_adjust(0.1, 0.1, 0.95, 0.85) """ Explanation: Explore data End of explanation """ src = mne.read_source_spaces(src_fname) bem = mne.read_bem_solution(bem_fname) fwd = dict() trans = dict(vv=vv_trans_fname, opm=opm_trans_fname) # check alignment and generate forward with mne.use_coil_def(opm_coil_def_fname): for kind in kinds: dig = True if kind == 'vv' else False fig = mne.viz.plot_alignment( raws[kind].info, trans=trans[kind], subject=subject, subjects_dir=subjects_dir, dig=dig, coord_frame='mri', surfaces=('head', 'white')) mlab.view(0, 90, focalpoint=(0., 0., 0.), distance=0.6, figure=fig) fwd[kind] = mne.make_forward_solution( raws[kind].info, trans[kind], src, bem, eeg=False, verbose=True) """ Explanation: Alignment and forward End of explanation """ freq_bands = dict( delta=(2, 4), theta=(5, 7), alpha=(8, 12), beta=(15, 29), gamma=(30, 45)) topos = dict(vv=dict(), opm=dict()) stcs = dict(vv=dict(), opm=dict()) snr = 3. lambda2 = 1. / snr ** 2 for kind in kinds: noise_cov = mne.compute_raw_covariance(raw_erms[kind]) inverse_operator = mne.minimum_norm.make_inverse_operator( raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True) stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd( raws[kind], inverse_operator, lambda2=lambda2, n_fft=n_fft, dB=False, return_sensor=True, verbose=True) topo_norm = sensor_psd.data.sum(axis=1, keepdims=True) stc_norm = stc_psd.sum() # same operation on MNE object, sum across freqs # Normalize each source point by the total power across freqs for band, limits in freq_bands.items(): data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True) topos[kind][band] = mne.EvokedArray( 100 * data / topo_norm, sensor_psd.info) stcs[kind][band] = \ 100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data """ Explanation: Compute and apply inverse to PSD estimated using multitaper + Welch. Group into frequency bands, then normalize each source point and sensor independently. This makes the value of each sensor point and source location in each frequency band the percentage of the PSD accounted for by that band. 
End of explanation """ def plot_band(kind, band): title = "%s %s\n(%d-%d Hz)" % ((titles[kind], band,) + freq_bands[band]) topos[kind][band].plot_topomap( times=0., scalings=1., cbar_fmt='%0.1f', vmin=0, cmap='inferno', time_format=title) brain = stcs[kind][band].plot( subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both', time_label=title, title=title, colormap='inferno', clim=dict(kind='percent', lims=(70, 85, 99))) brain.show_view(dict(azimuth=0, elevation=0), roll=0) return fig, brain fig_theta, brain_theta = plot_band('vv', 'theta') """ Explanation: Now we can make some plots of each frequency band. Note that the OPM head coverage is only over right motor cortex, so only localization of beta is likely to be worthwhile. Theta End of explanation """ fig_alpha, brain_alpha = plot_band('vv', 'alpha') """ Explanation: Alpha End of explanation """ fig_beta, brain_beta = plot_band('vv', 'beta') fig_beta_opm, brain_beta_opm = plot_band('opm', 'beta') """ Explanation: Beta Here we also show OPM data, which shows a profile similar to the VectorView data beneath the sensors. End of explanation """ fig_gamma, brain_gamma = plot_band('vv', 'gamma') """ Explanation: Gamma End of explanation """
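# A small optional sanity check, added as an illustrative sketch (not part of
# the original pipeline): each band map was expressed as a percentage of the
# total sensor PSD, so summing the five band maps per sensor should stay below
# 100%, because the bands (2-45 Hz, with gaps) do not cover the whole spectrum.
import numpy as np

for kind in kinds:
    band_sum = np.sum([topos[kind][band].data for band in freq_bands], axis=0)
    print('%s: summed band power spans %0.1f%% to %0.1f%% per sensor'
          % (titles[kind], band_sum.min(), band_sum.max()))
"""
Explanation: Optional sanity check (added sketch)
Since each band topography was normalized by the total PSD, the per-sensor sum over bands gives the fraction of power captured by the five bands. It is expected to stay below 100% because the bands do not tile the full spectrum; treat this as a quick consistency check rather than part of the original analysis.
End of explanation
"""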
keras-team/keras-io
examples/nlp/ipynb/text_classification_with_switch_transformer.ipynb
apache-2.0
import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers """ Explanation: Text classification with Switch Transformer Author: Khalid Salama<br> Date created: 2020/05/10<br> Last modified: 2021/02/15<br> Description: Implement a Switch Transformer for text classification. Introduction This example demonstrates the implementation of the Switch Transformer model for text classification. The Switch Transformer replaces the feedforward network (FFN) layer in the standard Transformer with a Mixture of Expert (MoE) routing layer, where each expert operates independently on the tokens in the sequence. This allows increasing the model size without increasing the computation needed to process each example. Note that, for training the Switch Transformer efficiently, data and model parallelism need to be applied, so that expert modules can run simultaneously, each on its own accelerator. While the implementation described in the paper uses the TensorFlow Mesh framework for distributed training, this example presents a simple, non-distributed implementation of the Switch Transformer model for demonstration purposes. Setup End of explanation """ vocab_size = 20000 # Only consider the top 20k words num_tokens_per_example = 200 # Only consider the first 200 words of each movie review (x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size) print(len(x_train), "Training sequences") print(len(x_val), "Validation sequences") x_train = keras.preprocessing.sequence.pad_sequences( x_train, maxlen=num_tokens_per_example ) x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=num_tokens_per_example) """ Explanation: Download and prepare dataset End of explanation """ embed_dim = 32 # Embedding size for each token. num_heads = 2 # Number of attention heads ff_dim = 32 # Hidden layer size in feedforward network. num_experts = 10 # Number of experts used in the Switch Transformer. batch_size = 50 # Batch size. learning_rate = 0.001 # Learning rate. dropout_rate = 0.25 # Dropout rate. num_epochs = 3 # Number of epochs. num_tokens_per_batch = ( batch_size * num_tokens_per_example ) # Total number of tokens per batch. print(f"Number of tokens per batch: {num_tokens_per_batch}") """ Explanation: Define hyperparameters End of explanation """ class TokenAndPositionEmbedding(layers.Layer): def __init__(self, maxlen, vocab_size, embed_dim): super(TokenAndPositionEmbedding, self).__init__() self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim) self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim) def call(self, x): maxlen = tf.shape(x)[-1] positions = tf.range(start=0, limit=maxlen, delta=1) positions = self.pos_emb(positions) x = self.token_emb(x) return x + positions """ Explanation: Implement token & position embedding layer It consists of two seperate embedding layers, one for tokens, one for token index (positions). End of explanation """ def create_feedforward_network(ff_dim, name=None): return keras.Sequential( [layers.Dense(ff_dim, activation="relu"), layers.Dense(ff_dim)], name=name ) """ Explanation: Implement the feedforward network This is used as the Mixture of Experts in the Switch Transformer. End of explanation """ def load_balanced_loss(router_probs, expert_mask): # router_probs [tokens_per_batch, num_experts] is the probability assigned for # each expert per token. expert_mask [tokens_per_batch, num_experts] contains # the expert with the highest router probability in one−hot format. 
num_experts = tf.shape(expert_mask)[-1] # Get the fraction of tokens routed to each expert. # density is a vector of length num experts that sums to 1. density = tf.reduce_mean(expert_mask, axis=0) # Get fraction of probability mass assigned to each expert from the router # across all tokens. density_proxy is a vector of length num experts that sums to 1. density_proxy = tf.reduce_mean(router_probs, axis=0) # Want both vectors to have uniform allocation (1/num experts) across all # num_expert elements. The two vectors will be pushed towards uniform allocation # when the dot product is minimized. loss = tf.reduce_mean(density_proxy * density) * tf.cast( (num_experts ** 2), tf.dtypes.float32 ) return loss """ Explanation: Implement the load-balanced loss This is an auxiliary loss to encourage a balanced load across experts. End of explanation """ class Router(layers.Layer): def __init__(self, num_experts, expert_capacity): self.num_experts = num_experts self.route = layers.Dense(units=num_experts) self.expert_capacity = expert_capacity super(Router, self).__init__() def call(self, inputs, training=False): # inputs shape: [tokens_per_batch, embed_dim] # router_logits shape: [tokens_per_batch, num_experts] router_logits = self.route(inputs) if training: # Add noise for exploration across experts. router_logits += tf.random.uniform( shape=router_logits.shape, minval=0.9, maxval=1.1 ) # Probabilities for each token of what expert it should be sent to. router_probs = keras.activations.softmax(router_logits, axis=-1) # Get the top−1 expert for each token. expert_gate is the top−1 probability # from the router for each token. expert_index is what expert each token # is going to be routed to. expert_gate, expert_index = tf.math.top_k(router_probs, k=1) # expert_mask shape: [tokens_per_batch, num_experts] expert_mask = tf.one_hot(expert_index, depth=self.num_experts) # Compute load balancing loss. aux_loss = load_balanced_loss(router_probs, expert_mask) self.add_loss(aux_loss) # Experts have a fixed capacity, ensure we do not exceed it. Construct # the batch indices, to each expert, with position in expert make sure that # not more that expert capacity examples can be routed to each expert. position_in_expert = tf.cast( tf.math.cumsum(expert_mask, axis=0) * expert_mask, tf.dtypes.int32 ) # Keep only tokens that fit within expert capacity. expert_mask *= tf.cast( tf.math.less( tf.cast(position_in_expert, tf.dtypes.int32), self.expert_capacity ), tf.dtypes.float32, ) expert_mask_flat = tf.reduce_sum(expert_mask, axis=-1) # Mask out the experts that have overflowed the expert capacity. expert_gate *= expert_mask_flat # Combine expert outputs and scaling with router probability. # combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity] combined_tensor = tf.expand_dims( expert_gate * expert_mask_flat * tf.squeeze(tf.one_hot(expert_index, depth=self.num_experts), 1), -1, ) * tf.squeeze(tf.one_hot(position_in_expert, depth=self.expert_capacity), 1) # Create binary dispatch_tensor [tokens_per_batch, num_experts, expert_capacity] # that is 1 if the token gets routed to the corresponding expert. 
dispatch_tensor = tf.cast(combined_tensor, tf.dtypes.float32) return dispatch_tensor, combined_tensor """ Explanation: Implement the router as a layer End of explanation """ class Switch(layers.Layer): def __init__(self, num_experts, embed_dim, num_tokens_per_batch, capacity_factor=1): self.num_experts = num_experts self.embed_dim = embed_dim self.experts = [ create_feedforward_network(embed_dim) for _ in range(num_experts) ] self.expert_capacity = num_tokens_per_batch // self.num_experts self.router = Router(self.num_experts, self.expert_capacity) super(Switch, self).__init__() def call(self, inputs): batch_size = tf.shape(inputs)[0] num_tokens_per_example = tf.shape(inputs)[1] # inputs shape: [num_tokens_per_batch, embed_dim] inputs = tf.reshape(inputs, [num_tokens_per_batch, self.embed_dim]) # dispatch_tensor shape: [expert_capacity, num_experts, tokens_per_batch] # combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity] dispatch_tensor, combine_tensor = self.router(inputs) # expert_inputs shape: [num_experts, expert_capacity, embed_dim] expert_inputs = tf.einsum("ab,acd->cdb", inputs, dispatch_tensor) expert_inputs = tf.reshape( expert_inputs, [self.num_experts, self.expert_capacity, self.embed_dim] ) # Dispatch to experts expert_input_list = tf.unstack(expert_inputs, axis=0) expert_output_list = [ self.experts[idx](expert_input) for idx, expert_input in enumerate(expert_input_list) ] # expert_outputs shape: [expert_capacity, num_experts, embed_dim] expert_outputs = tf.stack(expert_output_list, axis=1) # expert_outputs_combined shape: [tokens_per_batch, embed_dim] expert_outputs_combined = tf.einsum( "abc,xba->xc", expert_outputs, combine_tensor ) # output shape: [batch_size, num_tokens_per_example, embed_dim] outputs = tf.reshape( expert_outputs_combined, [batch_size, num_tokens_per_example, self.embed_dim], ) return outputs """ Explanation: Implement a Switch layer End of explanation """ class TransformerBlock(layers.Layer): def __init__(self, embed_dim, num_heads, ffn, dropout_rate=0.1): super(TransformerBlock, self).__init__() self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim) # The ffn can be either a standard feedforward network or a switch # layer with a Mixture of Experts. 
self.ffn = ffn self.layernorm1 = layers.LayerNormalization(epsilon=1e-6) self.layernorm2 = layers.LayerNormalization(epsilon=1e-6) self.dropout1 = layers.Dropout(dropout_rate) self.dropout2 = layers.Dropout(dropout_rate) def call(self, inputs, training): attn_output = self.att(inputs, inputs) attn_output = self.dropout1(attn_output, training=training) out1 = self.layernorm1(inputs + attn_output) ffn_output = self.ffn(out1) ffn_output = self.dropout2(ffn_output, training=training) return self.layernorm2(out1 + ffn_output) """ Explanation: Implement a Transformer block layer End of explanation """ def create_classifier(): switch = Switch(num_experts, embed_dim, num_tokens_per_batch) transformer_block = TransformerBlock(ff_dim, num_heads, switch) inputs = layers.Input(shape=(num_tokens_per_example,)) embedding_layer = TokenAndPositionEmbedding( num_tokens_per_example, vocab_size, embed_dim ) x = embedding_layer(inputs) x = transformer_block(x) x = layers.GlobalAveragePooling1D()(x) x = layers.Dropout(dropout_rate)(x) x = layers.Dense(ff_dim, activation="relu")(x) x = layers.Dropout(dropout_rate)(x) outputs = layers.Dense(2, activation="softmax")(x) classifier = keras.Model(inputs=inputs, outputs=outputs) return classifier """ Explanation: Implement the classifier The TransformerBlock layer outputs one vector for each time step of our input sequence. Here, we take the mean across all time steps and use a feedforward network on top of it to classify text. End of explanation """ def run_experiment(classifier): classifier.compile( optimizer=keras.optimizers.Adam(learning_rate), loss="sparse_categorical_crossentropy", metrics=["accuracy"], ) history = classifier.fit( x_train, y_train, batch_size=batch_size, epochs=num_epochs, validation_data=(x_val, y_val), ) return history classifier = create_classifier() run_experiment(classifier) """ Explanation: Train and evaluate the model End of explanation """
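# An illustrative follow-up, not part of the original example: evaluate the
# trained classifier on the validation split. The Switch layer reshapes every
# batch to exactly `num_tokens_per_batch` tokens, so evaluation must use the
# same batch size that was used for training (25,000 examples / 50 = 500 batches).
loss, accuracy = classifier.evaluate(x_val, y_val, batch_size=batch_size)
print(f"Validation loss: {loss:.3f} - accuracy: {accuracy:.3f}")
"""
Explanation: Evaluate the trained model (added sketch)
Because the Switch layer hard-codes the number of tokens per batch, inference has to run with the same batch_size as training; the validation set size happens to divide evenly by that batch size, so evaluate works as-is.
End of explanation
"""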
fwenzel/github-org-scripts
User Search.ipynb
bsd-3-clause
print(github3.__version__) print(github3.__file__) """ Explanation: User Search For use to: 1. Try to find an account based on random knowledge 2. List all orgs they belong to (from a subset) - You will need org owner permissions to perform these searches Boiler plate Skip/hide this. Common usage is below. End of explanation """ # set values here - you can also override below # get api key from environment, fall back to file import os api_key = os.environ.get("GITHUB_PAT", "") if not api_key: api_key = open(".credentials", "r").readlines()[1].strip() if not api_key: raise OSError("no GitHub PAT found") orgs_to_check = [ "mozilla" , "mozilla-services" , "mozilla-l10n" , "mozilla-mobile" , "mozilla-partners" , "taskcluster" , "mozilla-conduit" , "mozilla-lockwise" , "mozilla-platform-ops" , "nss-dev" , "mozilla-releng" , "mozilla-private" , "mozilla-frontend-infra" , "mozilla-bteam" , "iodide-project" , "mozilla-games" , "mozillaReality" , "mozilla-standards" , "mozilla-tw" , "mozilla-extensions" ] import github3 def print_limits(): print("reset at: {}, remaining {}".format(gh.rate_limit()["rate"]["reset"], gh.rate_limit()["rate"]["remaining"])) try: gh = github3.login(token=api_key) print("You are authenticated as {}".format(gh.me().login)) except ConnectionError: print_limits() try: from functools import lru_cache except ImportError: from backports.functools_lru_cache import lru_cache """ Explanation: If you see this text, you may want to enable the nbextension "Collapsable Headings", so you can hide this in common usage. End of explanation """ @lru_cache(maxsize=32) def _search_for_user(user): l = list(gh.search_users(query="type:user "+user)) print("found {} potentials for {}".format(len(l), user)) return l def get_user_counts(user): l = _search_for_user(user) for u in l: yield u displayed_users = set() # cache to avoid duplicate output def show_users(user_list, search_term): global displayed_users unique_users = set(user_list) count = len(unique_users) if count >10: # Even if there are too many, we still want to check the 'root' term print("... 
too many to be useful, still trying '{}' ...".format(search_term)) displayed_users.add(search_term) else: for u in [x for x in unique_users if not x in displayed_users]: displayed_users.add(u) user = u.user.refresh() print(user.login, user.name, user.location, user.email) if 0 < count <= 10: return [u.login for u in unique_users] else: return [] def gather_possibles(seed): found = set() maybes = show_users(get_user_counts(seed), seed) found.update(maybes) # if it was an email addr, try again with the mailbox name if '@' in seed: seed2 = seed.split('@')[0] maybes = show_users(get_user_counts(seed2), seed2) found.update(maybes) return found class OutsideCollaboratorIterator(github3.structs.GitHubIterator): def __init__(self, org): super(OutsideCollaboratorIterator, self).__init__( count=-1, #get all url=org.url + "/outside_collaborators", cls=github3.users.ShortUser, session=org.session, ) @lru_cache(maxsize=32) def get_collaborators(org): collabs = [x.login.lower() for x in OutsideCollaboratorIterator(org)] return collabs def is_collaborator(org, login): return bool(login.lower() in get_collaborators(org)) # provide same interface for members -- but the iterator is free :D @lru_cache(maxsize=32) def get_members(org): collabs = [x.login.lower() for x in org.members()] return collabs def is_member(org, login): return bool(login.lower() in get_members(org)) def check_login_perms(logins): any_perms = False for login in logins: is_collab = False for org in orgs_to_check: o = gh.organization(org) if is_member(o, login): url = "https://github.com/orgs/{}/people?utf8=%E2%9C%93&query={}".format(o.login, login) print("{} has {} as a member: {}".format(o.login, login, url)) is_collab = True if is_collaborator(o, login): url = "https://github.com/orgs/{}/outside-collaborators?utf8=%E2%9C%93&query={}".format(o.login, login) print("{} has {} as a collaborator: {}".format(o.login, login, url)) is_collab = True if is_collab: any_perms = True else: print("No permissions found for {}".format(login)) return any_perms import re import os re_flags = re.MULTILINE | re.IGNORECASE byte_wrapper = re.compile(r"""^b'(?P<real_text>.*)'""") def process_from_email(email_body): # get rid of white space email_body = os.linesep.join( [s.strip() for s in email_body.splitlines() if s.strip()] ) user = set() # Extract data from internal email format match = re.search(r'^Full Name: (?P<full_name>\S.*)$', email_body, re_flags) if match: # add base and some variations full_name = match.group("full_name") user.add(full_name) # remove spaces user.add(full_name.replace(' ', '')) # reversed no spaces user.add(''.join(full_name.split()[::-1])) match = re.search(r'^Email: (?P<primary_email>.*)$', email_body, re_flags) primary_email = match.group("primary_email") if match else None user.add(primary_email) print("Check these URLs for Heroku activity:") print(" Mozillians: https://mozillians.org/en-US/search/?q={}".format(primary_email.replace('@', '%40'))) print(" Heroku: https://dashboard.heroku.com/teams/mozillacorporation/access?filter={}".format(primary_email.replace('@', '%40'))) print(email_body) match = re.search(r'^Github Profile: (?P<github_profile>.*)$', email_body, re_flags) declared_github = match.group("github_profile") if match else None user.add(declared_github) match = re.search(r'^Zimbra Alias: (?P<other_email>.*)$', email_body, re_flags) user.add(match.group("other_email") if match else None) # we consider each token in the IM line as a possible GitHub login match = re.search(r'^IM:\s*(.*)$', email_body, re_flags) if 
match: im_line = match.groups()[0] matches = re.finditer(r'\W*((\w+)(?:\s+\w+)*)', im_line) user.update([x.group(1) for x in matches] if matches else None) match = re.search(r'^Bugzilla Email: (?P<bz_email>.*)$', email_body, re_flags) user.add(match.group("bz_email") if match else None) # grab the department name, for a heuristic on whether we expect to find perms expect_github_login = False match = re.search(r'^\s*Dept Name: (?P<dept_name>\S.*)$', email_body, re_flags) if match: department_name = match.groups()[0].lower() dept_keys_infering_github = ["firefox", "engineering", "qa", "operations"] for key in dept_keys_infering_github: if key in department_name: expect_github_login = True break # clean up some noise, case insensitively # the tokens to ignore are added based on discovery, # they tend to cause the searches to get rate limited. user = {x.lower() for x in user if x and (len(x) > 2)} user = user - {None, "irc", "slack", "skype", "b", 'hotmail', 'mozilla', 'ro', 'com', 'softvision', 'mail', 'twitter', 'blog', 'https', 'jabber', 'net', 'github', 'gmail', 'facebook', 'guy', 'pdx', 'yahoo', 'aim', 'whatsapp' } global displayed_users displayed_users = set() try: print("Trying '{}'".format("', '".join(user))) guesses = set() for term in user: # some text strings are displayed as "b'<real_text>'" # strip to just "<real_text>" match = byte_wrapper.search(term) if match: term = match.group('real_text') possibles = gather_possibles(term) guesses.update({x.lower() for x in possibles}) # include declared_github if it exists if declared_github: guesses.add(declared_github.lower()) print("Checking logins {}".format(guesses)) found_perms = False if len(guesses): found_perms = check_login_perms(guesses) elif expect_github_login: print("\nWARNING: expected GitHub login for dept '{}'".format(department_name)) print("Finished all reporting.") if declared_github and not found_perms: # print some text to copy/paste into email print(", even for declared login '{}'.".format(declared_github)) if expect_github_login and not found_perms: print("WARNING: expected GitHub permissions for dept '{}'".format(department_name)) except github3.exceptions.ForbiddenError as e: print("API limit reached, try again in 5 minutes.\n") print(str(e)) print(gh.rate_limit()) """ Explanation: From here on, use gh to access all data End of explanation """ process_from_email(r""" """) """ Explanation: Start of common usage Currently, there a two common use cases: - processing an offboarding email, and - adhoc lookup of GitHub login For anything else, you're on your own! All usage requires the following setup: 1. Fill in a way to load your PAT token in the first code cell 2. Fill in the list of orgs to check in the second code cell Process offboarding email Usage steps - for each user: 1. Copy entire text of email 2. Paste between the """ marks in the cell below. 3. Execute that cell The cell below should have the following text: python process_from_email(r""" # paste email body here """) Or if you're not processing an email, fake the two fields 'email:' and 'im:': ```python process_from_email(r""" comma separated list im: various possible names comma Only 1 email email: [email protected] """) ``` End of explanation """ check_login_perms([ ]) """ Explanation: Adhoc Lookup Fill in list of the desired logins in the cell below End of explanation """
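# Illustrative usage only: these login names are hypothetical placeholders, not
# real accounts to audit. Checking the remaining API quota first (print_limits
# is defined above) helps avoid hitting GitHub's rate limit mid-run.
print_limits()
check_login_perms(["octocat", "some-departed-user"])
"""
Explanation: Example adhoc lookup (hypothetical logins)
A hedged usage sketch: replace the placeholder logins with the accounts you actually need to audit, and call print_limits() beforehand to see how much API quota remains before the per-org membership and collaborator lookups run.
End of explanation
"""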
mne-tools/mne-tools.github.io
0.16/_downloads/plot_background_statistics.ipynb
bsd-3-clause
# Authors: Eric Larson <[email protected]> # License: BSD (3-clause) from functools import partial import numpy as np from scipy import stats import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # noqa, analysis:ignore import mne from mne.stats import (ttest_1samp_no_p, bonferroni_correction, fdr_correction, permutation_t_test, permutation_cluster_1samp_test) print(__doc__) """ Explanation: Statistical inference Here we will briefly cover multiple concepts of inferential statistics in an introductory manner, and demonstrate how to use some MNE statistical functions. :depth: 3 End of explanation """ width = 40 n_subjects = 10 signal_mean = 100 signal_sd = 100 noise_sd = 0.01 gaussian_sd = 5 sigma = 1e-3 # sigma for the "hat" method threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1) n_permutations = 'all' # run an exact test n_src = width * width # For each "subject", make a smoothed noisy signal with a centered peak rng = np.random.RandomState(42) X = noise_sd * rng.randn(n_subjects, width, width) # Add a signal at the dead center X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd # Spatially smooth with a 2D Gaussian kernel size = width // 2 - 1 gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2))) for si in range(X.shape[0]): for ri in range(X.shape[1]): X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same') for ci in range(X.shape[2]): X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same') """ Explanation: Hypothesis testing Null hypothesis ^^^^^^^^^^^^^^^ From Wikipedia &lt;https://en.wikipedia.org/wiki/Null_hypothesis&gt;_: In inferential statistics, a general statement or default position that there is no relationship between two measured phenomena, or no association among groups. We typically want to reject a null hypothesis with some probability level (e.g., p < 0.05). To think about what this means, let's follow the illustrative example from [1]_ and construct a toy dataset consisting of a 40 x 40 square with a "signal" present in the center (at pixel [20, 20]) with white noise added and a 5-pixel-SD normal smoothing kernel applied. 
End of explanation """ titles = ['t-statistic'] out = stats.ttest_1samp(X, 0, axis=0) ts = [out[0]] ps = [out[1]] mccs = [False] # these are not multiple-comparisons corrected # let's make a plotting function def plot_t_p(t, p, title, mcc, axes=None): if axes is None: fig = plt.figure(figsize=(6, 3)) axes = [fig.add_subplot(121, projection='3d'), fig.add_subplot(122)] show = True else: fig = axes[0].figure show = False p_lims = [0.05, 0.001] t_lims = -stats.distributions.t.ppf(p_lims, n_subjects - 1) p_lims = [-np.log10(0.05), -np.log10(0.001)] # t plot x, y = np.mgrid[0:width, 0:width] surf = axes[0].plot_surface(x, y, np.reshape(t, (width, width)), rstride=1, cstride=1, linewidth=0, vmin=t_lims[0], vmax=t_lims[1], cmap='viridis') axes[0].set(xticks=[], yticks=[], zticks=[], xlim=[0, width - 1], ylim=[0, width - 1]) axes[0].view_init(30, 15) cbar = plt.colorbar(ax=axes[0], shrink=0.75, orientation='horizontal', fraction=0.1, pad=0.025, mappable=surf) cbar.set_ticks(t_lims) cbar.set_ticklabels(['%0.1f' % t_lim for t_lim in t_lims]) cbar.set_label('t-value') cbar.ax.get_xaxis().set_label_coords(0.5, -0.3) if not show: axes[0].set(title=title) if mcc: axes[0].title.set_weight('bold') # p plot use_p = -np.log10(np.reshape(np.maximum(p, 1e-5), (width, width))) img = axes[1].imshow(use_p, cmap='inferno', vmin=p_lims[0], vmax=p_lims[1], interpolation='nearest') axes[1].set(xticks=[], yticks=[]) cbar = plt.colorbar(ax=axes[1], shrink=0.75, orientation='horizontal', fraction=0.1, pad=0.025, mappable=img) cbar.set_ticks(p_lims) cbar.set_ticklabels(['%0.1f' % p_lim for p_lim in p_lims]) cbar.set_label('$-\log_{10}(p)$') cbar.ax.get_xaxis().set_label_coords(0.5, -0.3) if show: text = fig.suptitle(title) if mcc: text.set_weight('bold') plt.subplots_adjust(0, 0.05, 1, 0.9, wspace=0, hspace=0) mne.viz.utils.plt_show() plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: In this case, a null hypothesis we could test for each voxel is: There is no difference between the mean value and zero ($H_0: \mu = 0$). The alternative hypothesis, then, is that the voxel has a non-zero mean. This is a two-tailed test because the mean could be less than or greater than zero (whereas a one-tailed test would test only one of these possibilities, i.e. $H_0: \mu \geq 0$ or $H_0: \mu \leq 0$). <div class="alert alert-info"><h4>Note</h4><p>Here we will refer to each spatial location as a "voxel". In general, though, it could be any sort of data value (e.g., cortical vertex at a specific time, pixel in a time-frequency decomposition, etc.).</p></div> Parametric tests ^^^^^^^^^^^^^^^^ Let's start with a 1-sample t-test, which is a standard test for differences in paired sample means. This test is parametric, as it assumes that the underlying sample distribution is Gaussian, and is only valid in this case. (This happens to be satisfied by our toy dataset, but is not always satisfied for neuroimaging data.) In the context of our toy dataset, which has many voxels, applying the 1-sample t-test is called a mass-univariate approach as it treats each voxel independently. End of explanation """ ts.append(ttest_1samp_no_p(X, sigma=sigma)) ps.append(stats.distributions.t.sf(np.abs(ts[-1]), len(X) - 1) * 2) titles.append('$\mathrm{t_{hat}}$') mccs.append(False) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: "hat" variance adjustment ~~~~~~~~~~~~~~~~~~~~~~~~~ The "hat" technique regularizes the variance values used in the t-test calculation [1]_ to compensate for implausibly small variances. 
End of explanation """ # Here we have to do a bit of gymnastics to get our function to do # a permutation test without correcting for multiple comparisons: # Let's flatten the array for simplicity X.shape = (n_subjects, n_src) titles.append('Permutation') ts.append(np.zeros(width * width)) ps.append(np.zeros(width * width)) mccs.append(False) for ii in range(n_src): ts[-1][ii], ps[-1][ii] = \ permutation_t_test(X[:, [ii]], verbose=True if ii == 0 else False)[:2] plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: Non-parametric tests ^^^^^^^^^^^^^^^^^^^^ Instead of assuming an underlying Gaussian distribution, we could instead use a non-parametric resampling method. Under the null hypothesis, we have the principle of exchangeability, which means that, if the null is true, we should be able to exchange conditions and not change the distribution of the test statistic. In the case of a 2-tailed paired t-test against 0 (or between two conditions where you have already subtracted them), exchangeability means that we can flip the signs of our data. Therefore, we can construct the null distribution values by taking random subsets of samples (subjects), flipping the sign of their data, and recording the resulting statistic absolute value. The absolute value of the statistic evaluated on the veridical data can then be compared to this distribution, and the p-value is simply the proportion of null distribution values that were smaller. <div class="alert alert-info"><h4>Note</h4><p>In the case where ``n_permutations`` is large enough (or "all") so that the complete set of unique resampling exchanges can be done (which is $2^{N_{samp}}-1=1023$ for the one-tailed paired test here, not counting the veridical distribution), instead of randomly exchanging conditions the null is formed from using all possible exchanges. This is known as a permutation test (or exact test) form of a non-parametric resampling test.</p></div> End of explanation """ N = np.arange(1, 80) alpha = 0.05 p_type_I = 1 - (1 - alpha) ** N fig, ax = plt.subplots(figsize=(4, 3)) ax.scatter(N, p_type_I, 3) ax.set(xlim=N[[0, -1]], ylim=[0, 1], xlabel='$N_{\mathrm{test}}$', ylabel=u'Probability of ≥ 1\ntype I error') ax.grid(True) fig.tight_layout() mne.viz.utils.plt_show() """ Explanation: Multiple comparisons So far, we have done no correction for multiple comparisons. This is potentially problematic for these data because there are $40 \times 40 = 1600$ tests being performed. If we just use a threshold p &lt; 0.05 for all of our tests, we would expect many voxels to be declared significant even if there were no true effect. In other words, we would make many type I errors (adapted from here &lt;https://en.wikipedia.org/wiki/Type_I_and_type_II_errors&gt;_): .. rst-class:: skinnytable +----------+--------+------------------+------------------+ | | Null hypothesis | | +------------------+------------------+ | | True | False | +==========+========+==================+==================+ | | | Type I error | Correct | | | Yes | False positive | True positive | + Reject +--------+------------------+------------------+ | | | Correct | Type II error | | | No | True Negative | False negative | +----------+--------+------------------+------------------+ To see why, consider a standard $\alpha = 0.05$. For a single test, our probability of making a type I error is 0.05. 
The probability of making at least one type I error in $N_{\mathrm{test}}$ independent tests is then given by $1 - (1 - \alpha)^{N_{\mathrm{test}}}$: End of explanation """ titles.append('Bonferroni') ts.append(ts[-1]) ps.append(bonferroni_correction(ps[0])[1]) mccs.append(True) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: To combat this problem, multiple methods exist. Typically these provide control over either the: Familywise error rate (FWER) &lt;fwer&gt;_ The probability of making one or more type I errors: .. math:: \mathrm{P}(N_{\mathrm{type\ I}} >= 1 | H_0) False discovery rate (FDR) &lt;fdr&gt;_ The expected proportion of rejected null hypotheses that are actually true: .. math:: \mathrm{E}(N_{\mathrm{type\ I}} / N_{\mathrm{reject}} | N_{\mathrm{reject}} > 0) \mathrm{P}(N_{\mathrm{reject}} > 0 | H_0) We cover some techniques that control FWER and FDR below. Bonferroni correction ^^^^^^^^^^^^^^^^^^^^^ Perhaps the simplest way to deal with multiple comparisons, Bonferroni correction &lt;https://en.wikipedia.org/wiki/Bonferroni_correction&gt;_ conservatively multiplies the p-values by the number of comparisons to control the FWER. End of explanation """ titles.append('FDR') ts.append(ts[-1]) ps.append(fdr_correction(ps[0])[1]) mccs.append(True) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: False discovery rate (FDR) correction ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Typically FDR is performed with the Benjamini/Hochberg procedure, which is less restrictive than Bonferroni correction for large numbers of comparisons (fewer type II errors) but provides less strict control of errors (more type I errors). End of explanation """ titles.append('$\mathbf{Perm_{max}}$') out = permutation_t_test(X)[:2] ts.append(out[0]) ps.append(out[1]) mccs.append(True) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: Non-parametric resampling test with a maximum statistic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Non-parametric resampling tests can also be used to correct for multiple comparisons. In its simplest form, we again do permutations using exchangeability under the null hypothesis, but this time we take the maximum statistic across all tests in each permutation to form the null distribution. The p-value for each voxel from the veridical data is then given by the proportion of null distribtion values that were smaller. This method has two important features: It controls FWER. It is non-parametric. Even though our initial test statistic (here a 1-sample t-test) for clustering is parametric, the null distribution for the null hypothesis rejection (cluster size distribution is indistinguishable from zero) is obtained by permutations. This means that it makes no assumptions of Gaussianity (which do hold for this example but do not in general for some types of processed neuroimaging data). End of explanation """ from sklearn.feature_extraction.image import grid_to_graph # noqa: E402 mini_connectivity = grid_to_graph(3, 3).toarray() assert mini_connectivity.shape == (9, 9) print(mini_connectivity[0]) del mini_connectivity """ Explanation: Clustering ^^^^^^^^^^ Each of the aforementioned multiple comparisons corrections have the disadvantage of not fully incorporating the correlation structure of the data, i.e., that points close to one another (e.g., in space or time) tend to be correlated. However, by defining the connectivity/adjacency/neighbor structure in our data, we can use clustering to compensate. 
To use this, we need to rethink our null hypothesis. Instead of thinking about a null hypothesis about means per voxel (with one independent test per voxel), we consider a null hypothesis about sizes of clusters in our data, which could be stated like: The distribution of spatial cluster sizes observed in two experimental conditions are drawn from the same probability distribution. Here we only have a single condition and we contrast to zero, which can be thought of as: The distribution of spatial cluster sizes is independent of the sign of the data. In this case, we again do permutations with a maximum statistic, but, under each permutation, we: Threshold the computed statistic with some initial threshold value. Cluster points that exceed this threshold (with the same sign) based on adjacency. Record the size of each cluster (measured, e.g., by a simple voxel count, or by the sum of voxel t-values within the cluster). After doing these permutations, the cluster sizes in our veridical data are compared to this null distribution. The p-value associated with each cluster is again given by the proportion of smaller null distribution values. This can then be subjected to a standard p-value threshold (e.g., p &lt; 0.05) to reject the null hypothesis (i.e., find an effect of interest). This reframing to consider cluster sizes rather than individual means maintains the advantages of the standard non-parametric permutation test -- namely controlling FWER and making no assumptions of parametric data distribution. Cricitally, though, it also accounts for the correlation structure in the data -- which in this toy case is spatial but in general can be multidimensional (e.g., spatio-temporal) -- because the null distribution will be derived from data in a way that preserves these correlations. However, there is a drawback. If a cluster significantly deviates from the null, no further inference on the cluster (e.g., peak location) can be made, as the entire cluster as a whole is used to reject the null. Moreover, because the test statistic concerns the full data, the null hypothesis (and our rejection of it) refers to the structure of the full data. For more information, see also the excellent FieldTrip cluster interpretation tutorial &lt;ft_cluster&gt;_. Defining the connectivity/neighbor/adjacency matrix ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ First we need to define our connectivity/neighbor/adjacency matrix. This is a square array (or sparse matrix) of shape (n_src, n_src) that contains zeros and ones to define which spatial points are connected, i.e., which voxels are adjacent to each other. In our case this is quite simple, as our data are aligned on a rectangular grid. Let's pretend that our data were smaller -- a 3x3 grid. Thinking about each voxel as being connected to the other voxels it touches, we would need a 9x9 connectivity matrix. The first row should contain the elements in the .ravel()'ed data that it touches. 
Since it touches the second element in the first row and the first element in the second row (and is also a neighbor to itself), this would be:: [1, 1, 0, 1, 0, 0, 0, 0, 0] :mod:sklearn.feature_extraction provides a convenient function for this: End of explanation """ # Reshape data to what is equivalent to (n_samples, n_space, n_time) titles.append('Clustering') X.shape = (n_subjects, width, width) t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test( X, n_jobs=1, threshold=threshold, connectivity=None, tail=1, n_permutations=n_permutations) # Put the cluster data in a viewable format p_clust = np.ones((width, width)) for cl, p in zip(clusters, p_values): p_clust[cl] = p ts.append(t_clust) ps.append(p_clust) mccs.append(True) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: In general the connectivity between voxels can be more complex, such as those between sensors in 3D space, or time-varying activation at brain vertices on a cortical surface. MNE provides several convenience functions for computing connectivity/neighbor/adjacency matrices, see the Statistics API &lt;api_reference_statistics&gt;. Standard clustering ~~~~~~~~~~~~~~~~~~~ Here, since our data are on a grid, we can use connectivity=None to trigger optimized grid-based code, and run the clustering algorithm. End of explanation """ titles.append(r'$\mathbf{C_{hat}}$') stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma) t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test( X, n_jobs=1, threshold=threshold, connectivity=None, tail=1, n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None) p_hat = np.ones((width, width)) for cl, p in zip(clusters, p_values): p_hat[cl] = p ts.append(t_hat) ps.append(p_hat) mccs.append(True) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: "hat" variance adjustment ~~~~~~~~~~~~~~~~~~~~~~~~~ This method can also be used in this context to correct for small variances [1]_: End of explanation """ titles.append(r'$\mathbf{C_{TFCE}}$') threshold_tfce = dict(start=0, step=0.2) t_tfce, _, p_tfce, H0 = permutation_cluster_1samp_test( X, n_jobs=1, threshold=threshold_tfce, connectivity=None, tail=1, n_permutations=n_permutations) ts.append(t_tfce) ps.append(p_tfce) mccs.append(True) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: Threshold-free cluster enhancement (TFCE) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TFCE eliminates the free parameter initial threshold value that determines which points are included in clustering by approximating a continuous integration across possible threshold values with a standard Riemann sum &lt;https://en.wikipedia.org/wiki/Riemann_sum&gt; [2]. This requires giving a starting threshold 'start' and a step size 'step', which in MNE is supplied as a dict. The smaller the 'step' and closer to 0 the 'start' value, the better the approximation, but the longer it takes). A significant advantage of TFCE is that, rather than modifying the statistical null hypothesis under test (from one about individual voxels to one about the distribution of clusters in the data), it modifies the data under test while still controlling for multiple comparisons. The statistical test is then done at the level of individual voxels rather than clusters. This allows for evaluation of each point independently for significance rather than only as cluster groups. 
End of explanation """ titles.append(r'$\mathbf{C_{hat,TFCE}}$') t_tfce_hat, _, p_tfce_hat, H0 = permutation_cluster_1samp_test( X, n_jobs=1, threshold=threshold_tfce, connectivity=None, tail=1, n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None) ts.append(t_tfce_hat) ps.append(p_tfce_hat) mccs.append(True) plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) """ Explanation: We can also combine TFCE and the "hat" correction: End of explanation """ fig = plt.figure(facecolor='w', figsize=(14, 3)) assert len(ts) == len(titles) == len(ps) for ii in range(len(ts)): ax = [fig.add_subplot(2, 10, ii + 1, projection='3d'), fig.add_subplot(2, 10, 11 + ii)] plot_t_p(ts[ii], ps[ii], titles[ii], mccs[ii], ax) fig.tight_layout(pad=0, w_pad=0.05, h_pad=0.1) plt.show() """ Explanation: Visualize and compare methods Let's take a look at these statistics. The top row shows each test statistic, and the bottom shows p-values for various statistical tests, with the ones with proper control over FWER or FDR with bold titles. End of explanation """
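# A rough numerical companion to the figure, added as a sketch (not part of the
# original tutorial): count how many of the 1600 voxels each method declares
# significant at p <= 0.05. Exact counts depend on the random seed and the
# thresholds chosen above.
for title, p, mcc in zip(titles, ps, mccs):
    n_sig = (np.asarray(p) <= 0.05).sum()
    print('%-22s corrected=%s: %4d voxels with p <= 0.05' % (title, mcc, n_sig))
"""
Explanation: Count significant voxels per method (added sketch)
This tallies voxels below a nominal p <= 0.05 for every method plotted above, giving a numeric complement to the figure; the uncorrected methods are expected to flag far more voxels than the corrected ones.
End of explanation
"""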
tpin3694/tpin3694.github.io
python/simple_unit_test.ipynb
mit
import unittest import sys """ Explanation: Title: Simple Unit Test Slug: simple_unit_test Summary: A simple unit test in Python. Date: 2016-01-23 12:00 Category: Python Tags: Testing Authors: Chris Albon Interested in learning more? Here are some good books on unit testing in Python: Python Testing: Beginner's Guide and Python Testing Cookbook. Preliminaries End of explanation """ def multiply(x, y): return x * y """ Explanation: Create Function To Be Tested End of explanation """ # Create a test case class TestMultiply(unittest.TestCase): # Create the unit test def test_multiply_two_integers_together(self): # Test if 4 equals the output of multiply(2,2) self.assertEqual(4, multiply(2,2)) """ Explanation: Create Test Note: It is standard practice to name a unit test test_ + &lt;function being tested&gt;. This naming standard allows for automated test discovery by some libraries. End of explanation """ # Run the unit test (and don't shut down the Jupyter Notebook) unittest.main(argv=['ignored', '-v'], exit=False) """ Explanation: Run Test End of explanation """
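# An additional illustrative test, not from the original post: unittest can
# also check that a call raises an exception. Multiplying two strings is not
# defined in Python, so multiply('a', 'b') should raise a TypeError.
class TestMultiplyErrors(unittest.TestCase):
    # Expect a TypeError when both arguments are strings
    def test_multiply_two_strings_raises_type_error(self):
        with self.assertRaises(TypeError):
            multiply('a', 'b')

# Run the tests again; both test cases are picked up this time
unittest.main(argv=['ignored', '-v'], exit=False)
"""
Explanation: Test For An Expected Error
An added example (not from the original post): assertRaises verifies that invalid inputs fail loudly. Note that multiply('a', 3) would not raise, because Python repeats the string instead, so the test deliberately passes two strings.
End of explanation
"""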
PrairieLearn/PrairieLearn
exampleCourse/questions/demo/annotated/MarkovChainGroupActivity/MarkovChains-PageRank/serverFilesQuestion/Markov-Chains-3.ipynb
agpl-3.0
A = np.array([[0, 2, 0, 5], [1, 0, 5, 6], [2, 4, 0, 3], [1, 0, 10, 2]]) labels = ['Google', 'Twitter', 'Facebook', 'Reddit'] graph.draw_matrix(A, labels) """ Explanation: Google PageRank Google's dominance as a search engine came from their PageRank algorithm, named after co-founder Larry Page. By assigning each page a relative rank, web searches can give more relevant results. The idea here is to model a user surfing different web pages by randomly clicking on links. Pages with more incoming links (they are cited more often) are presumed to be higher quality and therefore get a higher PageRank value. We can model this as a graph, where each webpage has a chance of moving to another one: <img src="PageRank Example.svg" width="300px"></img> This probability of moving from one page to another is estimated from the number of outgoing links, more formally the probability of moving from page $j$ to page $i$ is given by: $$ p\left(i \vert j\right) = \frac{\text{number of links from }j\text{ to }i}{\text{total links going out of } j}$$ For example, if Google has 4 outgoing links: 1 to Twitter 1 to Reddit 2 to Facebook Then it would have a $0\%$ probability of linking to itself, $25\%$ to Twitter, $25\%$ to Reddit, and $50\%$ to Facebook. Let's first start with a small example. You are given an adjacency matrix ${\bf A}$ such that each entry $A_{ij}$ contains the number of links going into page $i$ from $j$. End of explanation """ #grade_clear #clear n = len(A) M2 = np.zeros((n, n)) # Convert entries in M2 below #clear for i in range(len(A[0])): M2[:,i] = A[:,i]/ la.norm(A[:,i],1) """ Explanation: Check your answers! First, convert this to a Markov matrix M2 by converting each entry to a probability. Recall you can retrieve the column of a NumPy matrix with the syntax A[:,i]. End of explanation """ #clear x = np.random.rand(4) x /= la.norm(x, 1) eigvec = hf.power_iteration(M2, x) """ Explanation: Try this! Now, use power iteration as you have done before to find the steady-state of the Markov matrix. You can use any starting vector you like, as long as it is normalized. This steady-state is the relative PageRank of each webpage. Store your result in eigvec. End of explanation """ print(eigvec) """ Explanation: Now you can print it out: End of explanation """ print(labels[np.argmax(eigvec)]) """ Explanation: What is the highest ranking site here? You can use labels to get a name from a node index. End of explanation """ num_pages = 20 # Array with the edges edges = np.loadtxt("pagerank_large.txt").astype(np.int64) # these are random, don't look too deeply into this... labels = ['Google', 'Twitter', 'Facebook', 'Reddit', 'WordPress', 'ArXiv', 'Amazon', 'UIUC', 'Wikipedia', 'IMDb', 'GitHub', 'Yahoo!', 'Flickr', 'Apple', 'Baidu', 'VKontakte', 'Mozilla', 'LinkedIn', 'YouTube', 'NASA'] """ Explanation: Larger Example Lets try a larger example with more websites. We will have a slightly different format to represent our links. End of explanation """ edges.shape """ Explanation: The link information is given in the edges 2d numpy array, that has shape (total number of links, 2) End of explanation """ A2 = np.zeros((num_pages, num_pages)) for edge in edges: A2[edge[1], edge[0]] = 1 A2 """ Explanation: Each row of edges has two entries, [a,b], representing an edge (outgoing link) from website with index a to website with index b. For example, if the row is [1, 8], then there is an edge/link going out of node 1 into node 8. 
From the edges array, first create the adjacency matrix such that ${\bf A}_{i,j}$ is equal to $1$ if webpage $i$ can be reached from webpage $j$, and $0$ otherwise. You can assume that there are $n=20$ websites in total, and thus you will have a $20\times 20$ adjacency matrix. End of explanation """ graph.draw_matrix(A2.T, labels, show_weights=False) """ Explanation: We can draw the adjacency matrix for a visual depiction of what is going on: End of explanation """ M = A2 / la.norm(A2, 1, axis=0) M """ Explanation: Try this! Now, create the Markov matrix ${\bf M}$ from the adjacency matrix as you have done before. Recall that in order to satisfy the Markov property that the column sum is equal to 1, we need to normalize columns by dividing its values by the column sum. End of explanation """ #grade_clear #clear M3 = A2.copy() #clear M3[:,la.norm(A2, 1, axis=0) == 0] = 1/num_pages M3 /= la.norm(M3, 1, axis=0) M3 """ Explanation: What do you observe? Looks like you may have tried to compute divisions by zero! What happens when there is no outgoing link from a website? The column corresponding to that website will only have zero entries, and if we apply the above normalization, we will have a division by zero. How would you instead model the behavior of a web-surfer that is browsing a website without outgoing links? Discuss this with your group. Come up with ideas first, before continuing with the rest of the notebook. The PageRank algorithm proposes the following: once the web surfer reaches a page without outgoing links, we can assume that he will probably not stay on that webpage forever. Instead it assumes that the web surfer will move to any of the webpages with equal probability $1/n$, where $n$ (defined as num_pages) is the number of pages. Check your answers! Using the matrix adjacency matrix ${\bf A}_2$, construct the Markov matrix ${\bf M}_3$ following this proposed model and store your result in variable M3. End of explanation """ #clear x = np.ones(num_pages) / num_pages pr = hf.power_iteration(M3, x) print(pr) """ Explanation: Try this! Use your defined function power_iteration to find the PageRank steady-state vector and save this as pr. End of explanation """ names = np.array(labels) names[np.argsort(pr)[::-1]] """ Explanation: You can see the ranking of all the websites using the PageRank algorithm: End of explanation """
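# A quick verification sketch, added for illustration (not part of the original
# activity): every column of M3 should sum to 1, which confirms the Markov
# property holds even for pages without outgoing links, and the top-ranked
# pages can be printed together with their PageRank scores.
print("All columns of M3 sum to 1:", np.allclose(M3.sum(axis=0), 1.0))

order = np.argsort(pr)[::-1]
for idx in order[:5]:
    print("%-10s %.4f" % (labels[idx], pr[idx]))
"""
Explanation: Verify the construction (added sketch)
Checking the column sums confirms that M3 is a valid Markov matrix after the fix for dangling pages, and listing the top entries of pr next to their scores shows how strongly the leading pages dominate the steady-state vector.
End of explanation
"""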
miykael/nipype_tutorial
notebooks/basic_error_and_crashes.ipynb
bsd-3-clause
%%bash rm $(pwd)/crash-* """ Explanation: Errors and Crashes Probably the most important chapter in this section is about how to handle error and crashes. Because at the beginning you will run into a few. For example: You specified filenames or paths that don't exist. You try to give an interface a string as input, where a float value is expected or you try to specify a parameter that doesn't exist. Be sure to use the right input type and input name. You wanted to give a list of inputs [func1.nii, func2.nii, func3.nii] to a node that only expects one input file. MapNode is your solution. You wanted to run SPM's motion correction on compressed NIfTI files, i.e. *.nii.gz? SPM cannot handle that. Nipype's Gunzip interface can help. You haven't set up all necessary environment variables. Nipype, for example, doesn't find your MATLAB or SPM version. You forget to specify a mandatory input field. You try to connect a node to an input field that another node is already connected to. Important note about crashfiles. Crashfiles are only created when you run a workflow, not during building a workflow. If you have a typo in a folder path, because they didn't happen during runtime, but still during workflow building. We will start by removing old crashfiles: End of explanation """ from nipype import SelectFiles, Node, Workflow from os.path import abspath as opap from nipype.interfaces.fsl import MCFLIRT, IsotropicSmooth # Create SelectFiles node templates={'func': '{subject_id}/ses-test/func/{subject_id}_ses-test_task-fingerfootlips_bold.nii.gz'} sf = Node(SelectFiles(templates), name='selectfiles') sf.inputs.base_directory = opap('/data/ds000114') sf.inputs.subject_id = 'sub-11' # Create Motion Correction Node mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True), name='mcflirt') # Create Smoothing node smooth = Node(IsotropicSmooth(fwhm=4), name='smooth') # Create a preprocessing workflow wf = Workflow(name="preprocWF") wf.base_dir = 'working_dir' # Connect the three nodes to each other wf.connect([(sf, mcflirt, [("func", "in_file")]), (mcflirt, smooth, [("out_file", "in_file")])]) # Let's run the workflow try: wf.run() except(RuntimeError) as err: print("RuntimeError:", err) else: raise """ Explanation: Example Crash 1: File doesn't exist When creating a new workflow, very often the initial errors are OSError, meaning Nipype cannot find the right files. For example, let's try to run a workflow on sub-11, that in our dataset doesn't exist. Creating the crash End of explanation """ !nipypecli crash $(pwd)/crash-*selectfiles-*.pklz """ Explanation: Investigating the crash Hidden, in the log file you can find the relevant information: OSError: No files were found matching func template: /data/ds000114/sub-11/ses-test/func/sub-11_ses-test_task-fingerfootlips_bold.nii.gz Interface SelectFiles failed to run. 170904-05:48:13,727 workflow INFO: *********************************** 170904-05:48:13,728 workflow ERROR: could not run node: preprocWF.selectfiles 170904-05:48:13,730 workflow INFO: crashfile: /repos/nipype_tutorial/notebooks/crash-20170904-054813-neuro-selectfiles-15f5400a-452e-4e0c-ae99-fc0d4b9a44f3.pklz 170904-05:48:13,731 workflow INFO: *********************************** This part tells you that it's an OSError and that it looked for the file /data/ds000114/sub-11/ses-test/func/sub-11_ses-test_task-fingerfootlips_bold.nii.gz. 
After the line ***********************************, you can additionally see that it's the node preprocWF.selectfiles that crashed and that you can find a crashfile for this crash under /opt/tutorial/notebooks. Reading the crashfile To get the full picture of the error, we can read the content of the crashfile (that has pklz format by default) with the bash command nipypecli crash. We will get the same information as above, but additionally, we can also see directly the input values of the Node that crashed. End of explanation """ !nipypecli crash $(pwd)/crash-*selectfiles-*.pklz """ Explanation: nipypecli allows you to rerun the crashed node using an additional option -r. End of explanation """ !nipypecli crash -r $(pwd)/crash-*selectfiles-*.pklz """ Explanation: When running in the terminal you can also try options that enable the Python or IPython debugger when re-executing: -d or -i. If you don't want to have an option to rerun the crashed workflow, you can change the format of the crashfile to a text format. You can either change this in a configuration file (you can read more here), or you can directly change the wf.config dictionary before running the workflow. End of explanation """ wf.config['execution']['crashfile_format'] = 'txt' try: wf.run() except(RuntimeError) as err: print("RuntimeError:", err) else: raise """ Explanation: Now you should have a new text file with your crash report. End of explanation """ !cat $(pwd)/crash-*selectfiles-*.txt """ Explanation: Example Crash 2: Wrong Input Type or Typo in the parameter Very simply, if an interface expects a float as input, but you give it a string, it will crash: End of explanation """ from nipype.interfaces.fsl import IsotropicSmooth try: smooth = IsotropicSmooth(fwhm='4') except(Exception) as err: if "TraitError" in str(err.__class__): print("TraitError:", err) else: raise else: raise """ Explanation: This will give you the error: TraitError: The 'fwhm' trait of an IsotropicSmoothInput instance must be a float, but a value of '4' <type 'str'> was specified. To make sure that you are using the right input types, just check the help section of a given interface. There you can see fwhm: (a float). End of explanation """ IsotropicSmooth.help() """ Explanation: In a similar way, you will also get an error message if the input type is correct but you have a typo in the name: TraitError: The 'output_type' trait of an IsotropicSmoothInput instance must be u'NIFTI_PAIR' or u'NIFTI_PAIR_GZ' or u'NIFTI_GZ' or u'NIFTI', but a value of 'NIFTIiii' <type 'str'> was specified. End of explanation """ from nipype.interfaces.fsl import IsotropicSmooth try: smooth = IsotropicSmooth(output_type='NIFTIiii') except(Exception) as err: if "TraitError" in str(err.__class__): print("TraitError:", err) else: raise else: raise """ Explanation: Example Crash 3: Giving an array as input where a single file is expected As you can see in the MapNode example, if you try to feed an array as an input into a field that only expects a single file, you will get a TraitError.
End of explanation """ from nipype.algorithms.misc import Gunzip from nipype import Node files = ['/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz', '/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz'] gunzip = Node(Gunzip(), name='gunzip',) try: gunzip.inputs.in_file = files except(Exception) as err: if "TraitError" in str(err.__class__): print("TraitError:", err) else: raise else: raise """ Explanation: This can be solved by using a MapNode: End of explanation """ from nipype import MapNode gunzip = MapNode(Gunzip(), name='gunzip', iterfield=['in_file']) gunzip.inputs.in_file = files """ Explanation: Now, make sure that you specify files that actually exist, otherwise you will have a TraitError again: End of explanation """ files = ['/data/ds000114/sub-01/func/sub-01_task-fingerfootlips_bold.nii.gz', '/data/ds000114/sub-03/func/sub-03_task-fingerfootlips_bold.nii.gz'] try: gunzip.inputs.in_file = files except(Exception) as err: if "TraitError" in str(err.__class__): print("TraitError:", err) else: raise else: raise """ Explanation: By the way, note that those crashes don't create a crashfile, because they happen during workflow building, not at runtime. Example Crash 4: SPM doesn't like *.nii.gz files SPM12 cannot handle compressed NIfTI files (*.nii.gz). If you try to run the node nonetheless, it can give you different kinds of problems: SPM Problem 1 with *.nii.gz files SPM12 has a problem handling *.nii.gz files. To SPM, a compressed functional image appears to have no temporal dimension and therefore looks like just a 3D file. So if we try to run the Realign interface on a compressed file, we will get a TraitError. End of explanation """ from nipype.interfaces.spm import Smooth try: smooth = Smooth(in_files='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz') except(Exception) as err: if "TraitError" in str(err.__class__): print("TraitError:", err) else: raise else: raise """ Explanation: SPM Problem 2 with *.nii.gz files Sometimes the TraitError can be more misleading. End of explanation """ from nipype.interfaces.spm import Realign try: realign = Realign(in_files='/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz') except(Exception) as err: if "TraitError" in str(err.__class__): print("TraitError:", err) else: raise else: raise """ Explanation: This issue can be solved by unzipping the compressed NIfTI file before giving it as an input to an SPM node. This can either be done by using the Gunzip interface from Nipype or, even better, if the input is coming from an FSL interface, most of them have an output_type input field that you can set to 'NIFTI'. Example Crash 5: Nipype cannot find the right software Especially at the beginning, just after installation, you sometimes forget to specify some environment variables. If you try to use an interface where the environment variables of the software are not specified, e.g. if you try to run: python from nipype.interfaces.freesurfer import MRIConvert convert = MRIConvert(in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz', out_type='nii') you might get an error such as: IOError: command 'mri_convert' could not be found on host mnotter Interface MRIConvert failed to run. The same goes if you try to use SPM but forgot to tell Nipype where to find it. If you forgot to tell the system where to find MATLAB (or MCR), then you will get the same kind of error as above.
But if you forgot to specify which SPM you want to use, you'll get the following RuntimeError: Standard error: MATLAB code threw an exception: SPM not in matlab path You can solve this issue by specifying the path to your SPM version: python from nipype.interfaces.matlab import MatlabCommand MatlabCommand.set_default_paths('/opt/spm12-r7219/spm12_mcr/spm12') Example Crash 6: You forget mandatory inputs or use input fields that don't exist One of the simpler errors are the ones connected to input and output fields. Forgetting mandatory input fields Let's see what happens if you forget a [Mandatory] input field. End of explanation """ realign.help() """ Explanation: This gives you the error: ValueError: Realign requires a value for input 'in_files'. For a list of required inputs, see Realign.help() As described by the error text, if we use the help() function, we can actually see, which inputs are mandatory and which are optional. End of explanation """ from nipype.interfaces.afni import Despike try: despike = Despike(in_file='/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz', output_type='NIFTI') except(Exception) as err: if "TraitError" in str(err.__class__): print("TraitError:", err) else: raise else: raise """ Explanation: Using input fields that don't exist Let's see what happens if we try to specify a parameter that doesn't exist as an input field: End of explanation """ from nipype import SelectFiles, Node, Workflow from os.path import abspath as opap from nipype.interfaces.fsl import MCFLIRT, IsotropicSmooth # Create SelectFiles node templates={'func': '{subject_id}/func/{subject_id}_task-fingerfootlips_bold.nii.gz'} sf = Node(SelectFiles(templates), name='selectfiles') sf.inputs.base_directory = opap('/data/ds000114') sf.inputs.subject_id = 'sub-01' # Create Motion Correction Node mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True), name='mcflirt') # Create Smoothing node smooth = Node(IsotropicSmooth(fwhm=4), name='smooth') # Create a preprocessing workflow wf = Workflow(name="preprocWF") wf.base_dir = 'working_dir' # Connect the three nodes to each other wf.connect([(sf, mcflirt, [("func", "in_file")]), (mcflirt, smooth, [("out_file", "in_file")])]) """ Explanation: This results in the TraitError: TraitError: Cannot set the undefined 'output_type' attribute of a 'DespikeInputSpec' object. So what went wrong? If you use the help() function, you will see that the correct input filed is called outputtype and not output_type. Example Crash 7: Trying to connect a node to an input field that is already occupied Sometimes when you build a new workflow, you might forget that an output field was already connected and you try to connect a new node to the already occupied field. First, let's create a simple workflow: End of explanation """ # Create a new node mcflirt_NEW = Node(MCFLIRT(mean_vol=True), name='mcflirt_NEW') # Connect it to an already connected input field try: wf.connect([(mcflirt_NEW, smooth, [("out_file", "in_file")])]) except(Exception) as err: print("Exception:", err) else: raise """ Explanation: Now, let's create a new node and connect it to the already occupied input field in_file of the smooth node: End of explanation """
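To round off Example Crash 4 above: one way to feed compressed NIfTI files into an SPM node is to put Nipype's Gunzip interface in front of it. The snippet below is only a sketch of that wiring (the node names, the working directory and the fwhm value are made up for illustration); the relevant part is connecting Gunzip's out_file output to the SPM node's in_files input.
from nipype import Node, Workflow
from nipype.algorithms.misc import Gunzip
from nipype.interfaces.spm import Smooth

# Decompress the *.nii.gz file first, then hand the uncompressed *.nii to SPM
gunzip_node = Node(Gunzip(in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'),
                   name='gunzip_node')
spm_smooth = Node(Smooth(fwhm=[8, 8, 8]), name='spm_smooth')

unzipWF = Workflow(name='unzipWF')
unzipWF.base_dir = 'working_dir'
unzipWF.connect([(gunzip_node, spm_smooth, [('out_file', 'in_files')])])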
methylDragon/news-anaCrawler
newspaper_plotting.ipynb
gpl-3.0
firebase = pyrebase.initialize_app(config) auth = firebase.auth() uid = "" password = "" user = auth.sign_in_with_email_and_password(uid, password) db = firebase.database() # reference to the database service def firebaseRefresh(): global user user = auth.refresh(user['refreshToken']) """ Explanation: Connect to the database Log in to Firebase with our credentials. The fake-looking credentials are working credentials. Non-authenticated users cannot read or write data. This function must be executed before firebasePush(). End of explanation """ import unidecode import numpy as np import matplotlib.pyplot as plt def plot_polarity_subjectivity(listed_name_on_database): pol = [] sub = [] articles_of_a_newspaper = db.child(str("articles/" + listed_name_on_database)).get() articles = articles_of_a_newspaper.val() for article_no in range(len(articles)): data = list(articles.items())[article_no][1] pol.append(abs(float(data["polarity"]))) sub.append(float(data["subjectivity"])) plt.scatter(pol,sub,[80/np.sqrt(len(pol))]*len(sub), alpha=0.7, label = listed_name_on_database) return np.column_stack((pol, sub)) plt.clf() plt.figure(figsize=(12, 10)) plt.title("Scatter Plot (Articles)") websites = ["wwwchannelnewsasiacom","wwwstraitstimescom","wwwtnpsg","wwwtodayonlinecom", "sgnewsyahoocom","sgfinanceyahoocom","stompstraitstimescom","mothershipsg", "thehearttruthscom","wwwtremerituscom","yawningbreadwordpresscom", "wwwtheonlinecitizencom","wwwallsingaporestuffcom","alvinologycom","berthahensonwordpresscom"] centroid ={} for website in websites: data = plot_polarity_subjectivity(website) time.sleep(0.2) centroid[website] = np.mean(data, axis=0) plt.legend(loc=4) plt.xlabel("Polarity") plt.ylabel("Subjectivity") plt.show() plt.clf() plt.figure(figsize=(12, 10)) plt.title("Centroids (Sources)") mothershipsg = centroid["wwwchannelnewsasiacom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwchannelnewsasiacom") #plt.annotate("wwwchannelnewsasiacom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["wwwstraitstimescom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwstraitstimescom") #plt.annotate("wwwstraitstimescom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["wwwtnpsg"] plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtnpsg") #plt.annotate("wwwtnpsg",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["wwwtodayonlinecom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtodayonlinecom") #plt.annotate("wwwtodayonlinecom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["mothershipsg"] plt.scatter(mothershipsg[0],mothershipsg[1],label="mothership") #plt.annotate("mothership",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["sgnewsyahoocom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="sgnewsyahoocom") #plt.annotate("sgnewsyahoocom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["sgfinanceyahoocom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="sgfinanceyahoocom") #plt.annotate("sgfinanceyahoocom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["stompstraitstimescom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="stompstraitstimescom") #plt.annotate("stompstraitstimescom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["alvinologycom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="alvinologycom") #plt.annotate("alvinologycom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["wwwallsingaporestuffcom"] 
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwallsingaporestuffcom") #plt.annotate("wwwallsingaporestuffcom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["wwwtheonlinecitizencom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtheonlinecitizencom") #plt.annotate("wwwtheonlinecitizencom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["wwwtremerituscom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtremerituscom") #plt.annotate("wwwtremerituscom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["thehearttruthscom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="thehearttruthscom") #plt.annotate("thehearttruthscom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["berthahensonwordpresscom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="berthahensonwordpresscom") #plt.annotate("berthahensonwordpresscom",(mothershipsg[0],mothershipsg[1])) mothershipsg = centroid["yawningbreadwordpresscom"] plt.scatter(mothershipsg[0],mothershipsg[1],label="yawningbreadwordpresscom") #plt.annotate("yawningbreadwordpresscom",(mothershipsg[0],mothershipsg[1])) plt.xlabel("Polarity") plt.ylabel("Subjectivity") plt.legend(loc=4) plt.show() """ Explanation: Analyse already evaluated components End of explanation """
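The centroid figure above repeats the same scatter call once per news source. Since all of the centroids are already stored in the centroid dictionary built from the first plot, the same figure can be produced with a single loop; the sketch below is equivalent up to the legend ordering and the shortened "mothership" label used above.
plt.clf()
plt.figure(figsize=(12, 10))
plt.title("Centroids (Sources)")

# One scatter point per source, driven by the centroid dictionary
for website, (pol_mean, sub_mean) in centroid.items():
    plt.scatter(pol_mean, sub_mean, label=website)

plt.xlabel("Polarity")
plt.ylabel("Subjectivity")
plt.legend(loc=4)
plt.show()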
adamamiller/NUREU17
LSST/VariableStarClassification/scripts/ptf_query/byOid/url_by_oid.ipynb
mit
import numpy as np from astropy.table import Table as tbl import urllib.request import urllib.parse import subprocess import matplotlib.pyplot as plt from cesium import featurize %matplotlib inline import sqlite3 """ Explanation: Written by Nick Easton for the Zooniverse LSST Project. <br> July, 2017 Create a script to query the Ptf database for sources of given constraints. Then compile lightcurves and phase folded lightcurves for the returned sources. End of explanation """ url = "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?" values = {'catalog':'ptf_objects', 'spatial':'None', 'outfmt':'1', 'selcols':'ra,dec,oid', 'constraints':'(bestchisq>100)and(ngoodobs>500)'} subprocess.call('curl -F catalog=ptf_objects -F spatial=None -F outfmt=1 -F selcols=ra,dec,oid -F constriants="(bestchisq>100)and(ngoodobs>500)" "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?" -o objects.tbl', shell = True) """ Explanation: Query for the given objects End of explanation """ %%timeit data = urllib.parse.urlencode(values) data = data.encode('utf-8') req = urllib.request.Request(url, data) resp = urllib.request.urlopen(req) respdata = resp.read() saveFile = open('objects.tbl', 'wb') saveFile.write(respdata) saveFile.close() """ Explanation: Im not sure why subprocess.call doesnt seem to work for this specific case. However, the urllib work below does the job. End of explanation """ objects = tbl.read('/home/nick/Desktop/NUREU17/LSST/VariableStarClassification/scripts/ptf_query/objects.tbl', format = 'ipac') conn = sqlite3.connect('/home/nick/Desktop/NUREU17/LSST/VariableStarClassification/features.db') cur = conn.cursor() def saveFeat (lc, tName, cur, conn): #pass in lightcurve table and cursor feats_to_use = [ 'amplitude', 'flux_percentile_ratio_mid20', 'flux_percentile_ratio_mid35', 'flux_percentile_ratio_mid50', 'flux_percentile_ratio_mid65', 'flux_percentile_ratio_mid80', 'max_slope', 'maximum', 'median', 'median_absolute_deviation', 'minimum', 'percent_amplitude', 'percent_beyond_1_std', 'percent_close_to_median', 'percent_difference_flux_percentile', 'period_fast', 'qso_log_chi2_qsonu', 'qso_log_chi2nuNULL_chi2nu', 'skew', 'std', 'stetson_j', 'stetson_k', 'weighted_average', 'fold2P_slope_10percentile', 'fold2P_slope_90percentile', 'freq1_amplitude1', 'freq1_amplitude2', 'freq1_amplitude3', 'freq1_amplitude4', 'freq1_freq', 'freq1_lambda', 'freq1_rel_phase2', 'freq1_rel_phase3', 'freq1_rel_phase4', 'freq1_signif', 'freq2_amplitude1', 'freq2_amplitude2', 'freq2_amplitude3', 'freq2_amplitude4', 'freq2_freq', 'freq2_rel_phase2', 'freq2_rel_phase3', 'freq2_rel_phase4', 'freq3_amplitude1', 'freq3_amplitude2', 'freq3_amplitude3', 'freq3_amplitude4', 'freq3_freq', 'freq3_rel_phase2', 'freq3_rel_phase3', 'freq3_rel_phase4', 'freq_amplitude_ratio_21', 'freq_amplitude_ratio_31', 'freq_frequency_ratio_21', 'freq_frequency_ratio_31', 'freq_model_max_delta_mags', 'freq_model_min_delta_mags', 'freq_model_phi1_phi2', 'freq_n_alias', 'freq_signif_ratio_21', 'freq_signif_ratio_31', 'freq_varrat', 'freq_y_offset', 'linear_trend', 'medperc90_2p_p', 'p2p_scatter_2praw', 'p2p_scatter_over_mad', 'p2p_scatter_pfold_over_mad', 'p2p_ssqr_diff_over_var', 'scatter_res_raw' ] string = "insert into " + tName + """ values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""" cur.execute("""select oid from {:}""".format(tName)) check = cur.fetchall() for oid in 
np.unique(lc['oid']): if (oid not in check): mask = np.logical_and(lc['oid'] == oid, lc['mag_autocorr'] > 0) fset = featurize.featurize_time_series(lc[mask]['obsmjd'], lc[mask]['mag_autocorr'], lc[mask]['magerr_auto'], meta_features = {'oid': str(oid)}, features_to_use = feats_to_use) cur.execute(string, fset.get_values()[0]) else: print('Database already contains a ',oid) conn.commit() cur.execute("""delete from feats2""") cur.fetchall() """ Explanation: Read that data file in End of explanation """ #curves = {} for i in range(0,18849): loc = 'curves_oid{:_>17}.tbl'.format(objects['oid'][i]) cmd = 'curl -F catalog=ptf_lightcurves -F spatial=None -F constraints=' + '"(oid={:})"'.format(objects['oid'][i]) + ' -F outfmt=1 -F selcols=oid,obsmjd,mag_autocorr,magerr_auto,fid,ra,dec "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?" -o curves_oid{:_>17}.tbl'.format(objects['oid'][i]) subprocess.call(cmd, shell = True) print(i) #curves = tbl.read(loc, format = 'ipac') #saveFeat(curves, 'feats2', cur, conn) #%run '/home/nke2/NUREU17/LSST/VariableStarClassification/saveFeat.py' (curves, 'feats2', cur, conn) #curves[i] = tbl.read('curves_oid{:_>17}.tbl'.format(objects['oid'][i]), format = 'ipac') """ Explanation: If reading in for the first times then use this cell. This will loop over each oid within the objects queried above and excute queries for their source lightcurves. Additionally, reads that returned data file into a dict to reference later. End of explanation """ #curves = {} cur.execute("""select oid from feats2""") where = cur.fetchall() for i in range(0, 18849): if (objects['oid'][i] not in where): try: curves = tbl.read('curves_oid{:_>17}.tbl'.format(objects['oid'][i]), format = 'ipac') print(objects['oid'][i], i) saveFeat(curves, 'feats2', cur, conn) except: cmd = 'curl -F catalog=ptf_lightcurves -F spatial=None -F constraints=' + '"(oid={:})"'.format(objects['oid'][i]) + ' -F outfmt=1 -F selcols=oid,obsmjd,mag_autocorr,magerr_auto,fid,ra,dec "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?" -o curves_oid{:_>17}.tbl'.format(objects['oid'][i]) subprocess.call(cmd, shell = True) curves = tbl.read('curves_oid{:_>17}.tbl'.format(objects['oid'][i]), format = 'ipac') print('queried, ', objects['oid'][i], i) saveFeat(curves, 'feats2', cur, conn) 130122070001802 np.where(objects['oid']==35782020008798) cur.execute("""select * from feats2 where oid=35782020008798""") cur.fetchall() """ Explanation: Same as above. However, if the sources have already been queried just reads in the files to save some time. End of explanation """ fig = plt.figure(figsize = (12, 20)) for i in range(0,3): ax = fig.add_subplot(3,1,i+1) masked = np.where(curves[i]['mag_autocorr'] > 0 ) ax.errorbar(curves[i]['obsmjd'][masked], curves[i]['mag_autocorr'][masked], yerr = curves[i]['magerr_auto'][masked], fmt = 'bo') ax.set_xlabel('Time(days)') ax.set_ylabel('Observed magnitude') plt.tight_layout() """ Explanation: In both of the above cases, the loop has been shortened to so it the script doesnt exceed memory or storage limits (and so I could interact with it much quicker). Plots the raw lightcurves together in a table. Some data has been masked, there seem to be a series of observations whose magnitudes where substantiantally inaccurate. 
End of explanation """ feats_to_use = ["freq1_freq"] want = np.where(curves[0]['mag_autocorr'] > 0) fset = featurize.featurize_time_series(times = curves[0]['obsmjd'][want], values = curves[0]['mag_autocorr'][want], errors = curves[0]['magerr_auto'][want], features_to_use = feats_to_use) per1 = fset['freq1_freq'][0][0] print(per1) plt.errorbar((curves[0]['obsmjd'][want]%per1)/per1, curves[0]['mag_autocorr'][want], yerr = curves[0]['magerr_auto'][want], fmt = 'bo') plt.xlabel('Phase') plt.ylabel('Observed magnitude') feats_to_use = ["freq1_freq"] want = np.where(curves[1]['mag_autocorr'] > 0) fset = featurize.featurize_time_series(times = curves[1]['obsmjd'][want], values = curves[1]['mag_autocorr'][want], errors = curves[1]['magerr_auto'][want], features_to_use = feats_to_use) per1 = fset['freq1_freq'][0][0] print(per1) plt.errorbar((curves[1]['obsmjd'][want]%per1)/per1, curves[1]['mag_autocorr'][want], yerr = curves[1]['magerr_auto'][want], fmt = 'bo') plt.xlabel('Phase') plt.ylabel('Observed magnitude') feats_to_use = ["freq1_freq"] want = np.where(curves[2]['mag_autocorr'] > 0) fset = featurize.featurize_time_series(times = curves[2]['obsmjd'][want], values = curves[2]['mag_autocorr'][want], errors = curves[2]['magerr_auto'][want], features_to_use = feats_to_use) per1 = fset['freq1_freq'][0][0] print(per1) plt.errorbar((curves[2]['obsmjd'][want]%per1)/per1, curves[2]['mag_autocorr'][want], yerr = curves[2]['magerr_auto'][want], fmt = 'bo') plt.xlabel('Phase') plt.ylabel('Observed magnitude') """ Explanation: Phase folded curves Each lightcurve has been phase folded with the first frequency Lomb-Scargle detects. It is no surprise that the first 3 sources are not periodic. End of explanation """
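Since the same fold-and-plot pattern is repeated for each source above, a small helper keeps that logic in one place. The sketch below follows the cells above in using the value stored in per1 directly as the fold period; note that if cesium reports freq1_freq as a frequency rather than a period, the period to fold on would instead be its reciprocal (1.0 / fset['freq1_freq'][0][0]).
def plot_phase_folded(curve, period):
    # Keep only the rows with a valid (positive) magnitude, fold the
    # observation times by the chosen period, and plot magnitude vs. phase.
    good = np.where(curve['mag_autocorr'] > 0)
    phase = (curve['obsmjd'][good] % period) / period
    plt.errorbar(phase, curve['mag_autocorr'][good],
                 yerr=curve['magerr_auto'][good], fmt='bo')
    plt.xlabel('Phase')
    plt.ylabel('Observed magnitude')
    plt.show()

# For example: plot_phase_folded(curves[0], per1)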
NeuroDataDesign/pan-synapse
pipeline_1/background/Thresholding.ipynb
apache-2.0
import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import pickle import sys sys.path.insert(0,'../code/functions/') import connectLib as cLib import plosLib as pLib import mouseVis as mv import tiffIO as tIO data0 = tIO.unzipChannels(tIO.loadTiff('../data/SEP-GluA1-KI_tp1.tif'))[0][5:10] plt.imshow(data0[0], cmap='gray') plt.show() """ Explanation: Raw Data Thresholding Exploration A slice at z=0 for reference End of explanation """ data0OtsuThresh = cLib.otsuVox(data0) plt.imshow(data0OtsuThresh[0]) plt.title("Visualization of Slice 0 After Otsu's Binarization") plt.show() clusters = cLib.connectedComponents(data0OtsuThresh) volumeList = np.zeros((len(clusters))) print 'Analysis of Otsu-Thresholded Clusters' for cluster in range(len(clusters)): volumeList[cluster] = clusters[cluster].getVolume() print '\tnumber of clusters: ' + str(len(volumeList)) print '\taverage volume: ' + str(np.average(volumeList)) """ Explanation: Our Previous Method: Otsu's Binarization With our previous pipeline, we found that we were only yielding 2 clusters in total accross the entire volume. After some investigation, we found that the error was occuring in our thresholding method for the raw data. We were using Otsu's Binarization, and it was grouping a large portion of our data together into a couple massive clusters. This was an issue. This is what resulted from Otsu's Binarization. End of explanation """ naiveThreshClusters = cLib.thresholdByVolumeNaive(clusters, limit=200) displayIm = np.zeros_like(data0) clusterMembersList =[] for cluster in range(len(naiveThreshClusters)): clusterMembersList.extend(naiveThreshClusters[cluster].members) for index in range(len(clusterMembersList)): x, y, z = clusterMembersList[index] displayIm[x][y][z] = 100 plt.imshow(displayIm[0], cmap = 'gray') plt.show() volumeList = np.zeros((len(naiveThreshClusters))) print "Analysis of Naively Thresholded Clusters Using Otsu's Binarization" for cluster in range(len(naiveThreshClusters)): volumeList[cluster] = naiveThreshClusters[cluster].getVolume() print '\tnumber of clusters below 200-volume: ' + str(len(volumeList)) print '\taverage volume of clusters below 200-volume: ' + str(np.average(volumeList)) """ Explanation: This result initially seemed reasonable. But, as a sanity check, we naively thresholded these clusters by volume (throwing out all clusters with volume above 200) to understand how clustering was occuring. 
End of explanation """ from connectLib import adaptiveThreshold for i in range(9): print 'blocksize: ' + str(10*(i + 1) + 1) data0AdaptiveThresh = adaptiveThreshold(data0, 10*(i + 1) + 1, 5) clusters = cLib.connectedComponents(data0AdaptiveThresh) naiveThreshClusters = cLib.thresholdByVolumeNaive(clusters, limit=200) displayIm = np.zeros_like(data0) clusterMembersList =[] for cluster in range(len(naiveThreshClusters)): clusterMembersList.extend(naiveThreshClusters[cluster].members) for index in range(len(clusterMembersList)): x, y, z = clusterMembersList[index] displayIm[x][y][z] = 100 plt.imshow(displayIm[0], cmap = 'gray') plt.show() volumeList = np.zeros((len(naiveThreshClusters))) for cluster in range(len(naiveThreshClusters)): volumeList[cluster] = naiveThreshClusters[cluster].getVolume() print '\tnumber of clusters below 200-volume: ' + str(len(volumeList)) print '\taverage volume of clusters below 200-volume: ' + str(np.average(volumeList)) """ Explanation: What we found was that, after thresholding by volume naively, there were very few clusters in the most concentrated areas. Further investigation showed that, before naive thresholding, there were 33210 clusters. After naive thresholding, there were 33177. This means that using Otsu's + Connected Components yielded 33 clusters that were massive. Most notably, the majority of the most concentrated strip (where most synapses are likely to be found) was grouped into a couple big clusters. Thus, we needed to find a more appropiate method for thresholding the raw data so we could also evaluate the clusters along that concentrated strip. Adaptive Thresholding We first attempted adaptive thresholding, as it allows localized thresholding - this is good because average intensity greatly varies accross the z axis. Adaptive thresholding works by calculating the mean of a blockSize x blockSize x blockSize neighborhood, subtracting a C-value from such mean, and thresholding all voxels in the neighborhood below that value. This seemed like it would work. However, we found that the results weren't as promising as we'd hoped. Such results are shown below. End of explanation """ for i in range(4): print 'C-value: ' + str(i) data0AdaptiveThresh = adaptiveThreshold(data0, 81, i) clusters = cLib.connectedComponents(data0AdaptiveThresh) naiveThreshClusters = cLib.thresholdByVolumeNaive(clusters, limit=200) displayIm = np.zeros_like(data0) clusterMembersList =[] for cluster in range(len(naiveThreshClusters)): clusterMembersList.extend(naiveThreshClusters[cluster].members) for index in range(len(clusterMembersList)): x, y, z = clusterMembersList[index] displayIm[x][y][z] = 100 plt.imshow(displayIm[0], cmap = 'gray') plt.show() volumeList = np.zeros((len(naiveThreshClusters))) for cluster in range(len(naiveThreshClusters)): volumeList[cluster] = naiveThreshClusters[cluster].getVolume() print '\tnumber of clusters below 200-volume: ' + str(len(volumeList)) print '\taverage volume of clusters below 200-volume: ' + str(np.average(volumeList)) """ Explanation: We found that a blocksize of 81 optimized the number of clusters below 200-volume. Thus, we also tried varying the subtracted value (called "C") from the voxels in each window. 
End of explanation """ def binaryThreshold(img, perc): img = (img/256).astype('uint8') threshImg = np.zeros_like(img) percentile = np.percentile(img, perc) for i in range(len(img)): threshImg[i] = cv2.threshold(img[i], percentile, 255, cv2.THRESH_BINARY)[1] return threshImg """ Explanation: Thus, we found that the best combination of hyperparameters was blockSize = 81, C=0. But even with these hyperparameters, the number of clusters below 200-volume was too low (2300 vs expected ~tens of thousands). Thus, we decided to explore binary thresholding. Binary Thresholding The initial concern with using binary thresholding is that the average intensity of the base slices (around z=0) is 4x that of the top slices (around z=280). An implimentation of binary thresholding that uses a single value for the entire volume would either throw out almost the entire top half of the 3D image (if we used a very restrictive, high value for the hyperparameter) or wouldn't threshold enough of the entire bottom half of the 3D image (if we used a low value for the hyperparameter) and would result in most of the bottom half being grouped together into one cluster. To fix this issue, we decided to impliment our own 3-dimensional binary thresholding method that locally thresholds within each slice based off of percentile. Such implimentation is shown below: End of explanation """ for i in range(5): print 'percentile: ' + str(75 + 5*i) data0AdaptiveThresh = binaryThreshold(data0, 75 + 5*i) clusters = cLib.connectedComponents(data0AdaptiveThresh) naiveThreshClusters = cLib.thresholdByVolumeNaive(clusters, limit=200) displayIm = np.zeros_like(data0) clusterMembersList =[] for cluster in range(len(naiveThreshClusters)): clusterMembersList.extend(naiveThreshClusters[cluster].members) for index in range(len(clusterMembersList)): x, y, z = clusterMembersList[index] displayIm[x][y][z] = 100 plt.imshow(displayIm[0], cmap = 'gray') plt.show() volumeList = np.zeros((len(naiveThreshClusters))) for cluster in range(len(naiveThreshClusters)): volumeList[cluster] = naiveThreshClusters[cluster].getVolume() print '\tnumber of clusters below 200-volume: ' + str(len(volumeList)) print '\taverage volume of clusters below 200-volume: ' + str(np.average(volumeList)) """ Explanation: We decided to try out many different hyperparameters for the percentile value to find which one gave the most number of clusters and the average volume closest to ~54. 
The results are shown below: End of explanation """ for i in range(3): percentile = 89 + i print 'percentile: ' + str(percentile) data0AdaptiveThresh = binaryThreshold(data0, percentile) clusters = cLib.connectedComponents(data0AdaptiveThresh) naiveThreshClusters = cLib.thresholdByVolumeNaive(clusters, limit=200) displayIm = np.zeros_like(data0) clusterMembersList =[] for cluster in range(len(naiveThreshClusters)): clusterMembersList.extend(naiveThreshClusters[cluster].members) for index in range(len(clusterMembersList)): x, y, z = clusterMembersList[index] displayIm[x][y][z] = 100 plt.imshow(displayIm[0], cmap = 'gray') plt.show() volumeList = np.zeros((len(naiveThreshClusters))) for cluster in range(len(naiveThreshClusters)): volumeList[cluster] = naiveThreshClusters[cluster].getVolume() print '\tnumber of clusters below 200-volume: ' + str(len(volumeList)) print '\taverage volume of clusters below 200-volume: ' + str(np.average(volumeList)) """ Explanation: Analysis of Binary Thresholding Results Our implimentation of binary thresholding at the 90th percentile yielded the most desirable results. It produced the most clusters below 200-volume, contained a significant amount of clusters along the concentrated strip, and yielded clusters with relatively high volume. Further investigation with percentiles neighboring 90 showed that the 90th percentile yielded the best results. End of explanation """
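One caveat on the binaryThreshold function above: it relies on cv2, which is not imported in the cells shown here, so import cv2 is needed before running it. As a dependency-free illustration of the per-slice idea described in the text, a NumPy-only sketch that computes the cutoff separately for each z-slice is given below; this is an illustrative variant, not the pipeline's official implementation (the cv2 version above applies one percentile computed over the whole volume).
def percentileThresholdSketch(volume, perc):
    # Binarize each z-slice at its own intensity percentile so that the
    # intensity falloff along the z axis does not bias the cutoff.
    threshImg = np.zeros_like(volume)
    for z in range(len(volume)):
        cutoff = np.percentile(volume[z], perc)
        threshImg[z] = (volume[z] > cutoff) * 255
    return threshImg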
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/recommendation_systems/labs/featurization.ipynb
apache-2.0
!pip install -q --upgrade tensorflow-datasets """ Explanation: Using side features: feature preprocessing Learning Objectives Turning categorical features into embeddings. Normalizing continuous features. Processing text features. Build a User and Movie model. Introduction One of the great advantages of using a deep learning framework to build recommender models is the freedom to build rich, flexible feature representations. The first step in doing so is preparing the features, as raw features will usually not be immediately usable in a model. For example: User and item ids may be strings (titles, usernames) or large, noncontiguous integers (database IDs). Item descriptions could be raw text. Interaction timestamps could be raw Unix timestamps. These need to be appropriately transformed in order to be useful in building models: User and item ids have to be translated into embedding vectors: high-dimensional numerical representations that are adjusted during training to help the model predict its objective better. Raw text needs to be tokenized (split into smaller parts such as individual words) and translated into embeddings. Numerical features need to be normalized so that their values lie in a small interval around 0. Fortunately, by using TensorFlow we can make such preprocessing part of our model rather than a separate preprocessing step. This is not only convenient, but also ensures that our pre-processing is exactly the same during training and during serving. This makes it safe and easy to deploy models that include even very sophisticated pre-processing. In this notebook, we are going to focus on recommenders and the preprocessing we need to do on the MovieLens dataset. If you're interested in a larger tutorial without a recommender system focus, have a look at the full Keras preprocessing guide. Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook The MovieLens dataset Let's first have a look at what features we can use from the MovieLens dataset: End of explanation """ import pprint import tensorflow_datasets as tfds ratings = tfds.load("movielens/100k-ratings", split="train") for x in ratings.take(1).as_numpy_iterator(): pprint.pprint(x) """ Explanation: Please re-run the above cell if you are getting any incompatible warnings and errors. End of explanation """ import numpy as np import tensorflow as tf movie_title_lookup = tf.keras.layers.experimental.preprocessing.StringLookup() """ Explanation: There are a couple of key features here: Movie title is useful as a movie identifier. User id is useful as a user identifier. Timestamps will allow us to model the effect of time. The first two are categorical features; timestamps are a continuous feature. Turning categorical features into embeddings A categorical feature is a feature that does not express a continuous quantity, but rather takes on one of a set of fixed values. Most deep learning models express these feature by turning them into high-dimensional vectors. During model training, the value of that vector is adjusted to help the model predict its objective better. For example, suppose that our goal is to predict which user is going to watch which movie. To do that, we represent each user and each movie by an embedding vector. Initially, these embeddings will take on random values - but during training, we will adjust them so that embeddings of users and the movies they watch end up closer together. 
Taking raw categorical features and turning them into embeddings is normally a two-step process: Firstly, we need to translate the raw values into a range of contiguous integers, normally by building a mapping (called a "vocabulary") that maps raw values ("Star Wars") to integers (say, 15). Secondly, we need to take these integers and turn them into embeddings. Defining the vocabulary The first step is to define a vocabulary. We can do this easily using Keras preprocessing layers. End of explanation """ movie_title_lookup.adapt(ratings.map(lambda x: x["movie_title"])) print(f"Vocabulary: {movie_title_lookup.get_vocabulary()[:3]}") """ Explanation: The layer itself does not have a vocabulary yet, but we can build it using our data. End of explanation """ movie_title_lookup(["Star Wars (1977)", "One Flew Over the Cuckoo's Nest (1975)"]) """ Explanation: Once we have this we can use the layer to translate raw tokens to embedding ids: End of explanation """ # We set up a large number of bins to reduce the chance of hash collisions. num_hashing_bins = 200_000 movie_title_hashing = tf.keras.layers.experimental.preprocessing.Hashing( num_bins=num_hashing_bins ) """ Explanation: Note that the layer's vocabulary includes one (or more!) unknown (or "out of vocabulary", OOV) tokens. This is really handy: it means that the layer can handle categorical values that are not in the vocabulary. In practical terms, this means that the model can continue to learn about and make recommendations even using features that have not been seen during vocabulary construction. Using feature hashing In fact, the StringLookup layer allows us to configure multiple OOV indices. If we do that, any raw value that is not in the vocabulary will be deterministically hashed to one of the OOV indices. The more such indices we have, the less likley it is that two different raw feature values will hash to the same OOV index. Consequently, if we have enough such indices the model should be able to train about as well as a model with an explicit vocabulary without the disadvantage of having to maintain the token list. We can take this to its logical extreme and rely entirely on feature hashing, with no vocabulary at all. This is implemented in the tf.keras.layers.experimental.preprocessing.Hashing layer. End of explanation """ movie_title_hashing(["Star Wars (1977)", "One Flew Over the Cuckoo's Nest (1975)"]) """ Explanation: We can do the lookup as before without the need to build vocabularies: End of explanation """ # Turns positive integers (indexes) into dense vectors of fixed size. movie_title_embedding = # TODO: Your code goes here # Let's use the explicit vocabulary lookup. input_dim=movie_title_lookup.vocab_size(), output_dim=32 ) """ Explanation: Defining the embeddings Now that we have integer ids, we can use the Embedding layer to turn those into embeddings. An embedding layer has two dimensions: the first dimension tells us how many distinct categories we can embed; the second tells us how large the vector representing each of them can be. When creating the embedding layer for movie titles, we are going to set the first value to the size of our title vocabulary (or the number of hashing bins). The second is up to us: the larger it is, the higher the capacity of the model, but the slower it is to fit and serve. 
End of explanation """ movie_title_model = tf.keras.Sequential([movie_title_lookup, movie_title_embedding]) """ Explanation: We can put the two together into a single layer which takes raw text in and yields embeddings. End of explanation """ movie_title_model(["Star Wars (1977)"]) """ Explanation: Just like that, we can directly get the embeddings for our movie titles: End of explanation """ user_id_lookup = tf.keras.layers.experimental.preprocessing.StringLookup() user_id_lookup.adapt(ratings.map(lambda x: x["user_id"])) user_id_embedding = tf.keras.layers.Embedding(user_id_lookup.vocab_size(), 32) user_id_model = tf.keras.Sequential([user_id_lookup, user_id_embedding]) """ Explanation: We can do the same with user embeddings: End of explanation """ for x in ratings.take(3).as_numpy_iterator(): print(f"Timestamp: {x['timestamp']}.") """ Explanation: Normalizing continuous features Continuous features also need normalization. For example, the timestamp feature is far too large to be used directly in a deep model: End of explanation """ # Feature-wise normalization of the data. timestamp_normalization = # TODO: Your code goes here timestamp_normalization.adapt(ratings.map(lambda x: x["timestamp"]).batch(1024)) for x in ratings.take(3).as_numpy_iterator(): print(f"Normalized timestamp: {timestamp_normalization(x['timestamp'])}.") """ Explanation: We need to process it before we can use it. While there are many ways in which we can do this, discretization and standardization are two common ones. Standardization Standardization rescales features to normalize their range by subtracting the feature's mean and dividing by its standard deviation. It is a common preprocessing transformation. This can be easily accomplished using the tf.keras.layers.experimental.preprocessing.Normalization layer: End of explanation """ max_timestamp = ratings.map(lambda x: x["timestamp"]).reduce( tf.cast(0, tf.int64), tf.maximum).numpy().max() min_timestamp = ratings.map(lambda x: x["timestamp"]).reduce( np.int64(1e9), tf.minimum).numpy().min() timestamp_buckets = np.linspace( min_timestamp, max_timestamp, num=1000) print(f"Buckets: {timestamp_buckets[:3]}") """ Explanation: Discretization Another common transformation is to turn a continuous feature into a number of categorical features. This makes good sense if we have reasons to suspect that a feature's effect is non-continuous. To do this, we first need to establish the boundaries of the buckets we will use for discretization. The easiest way is to identify the minimum and maximum value of the feature, and divide the resulting interval equally: End of explanation """ timestamp_embedding_model = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.Discretization(timestamp_buckets.tolist()), tf.keras.layers.Embedding(len(timestamp_buckets) + 1, 32) ]) for timestamp in ratings.take(1).map(lambda x: x["timestamp"]).batch(1).as_numpy_iterator(): print(f"Timestamp embedding: {timestamp_embedding_model(timestamp)}.") """ Explanation: Given the bucket boundaries we can transform timestamps into embeddings: End of explanation """ # Text vectorization layer. title_text = # TODO: Your code goes here title_text.adapt(ratings.map(lambda x: x["movie_title"])) """ Explanation: Processing text features We may also want to add text features to our model. 
Usually, things like product descriptions are free form text, and we can hope that our model can learn to use the information they contain to make better recommendations, especially in a cold-start or long tail scenario. While the MovieLens dataset does not give us rich textual features, we can still use movie titles. This may help us capture the fact that movies with very similar titles are likely to belong to the same series. The first transformation we need to apply to text is tokenization (splitting into constituent words or word-pieces), followed by vocabulary learning, followed by an embedding. The Keras tf.keras.layers.experimental.preprocessing.TextVectorization layer can do the first two steps for us: End of explanation """ for row in ratings.batch(1).map(lambda x: x["movie_title"]).take(1): print(title_text(row)) """ Explanation: Let's try it out: End of explanation """ title_text.get_vocabulary()[40:45] """ Explanation: Each title is translated into a sequence of tokens, one for each piece we've tokenized. We can check the learned vocabulary to verify that the layer is using the correct tokenization: End of explanation """ class UserModel(tf.keras.Model): def __init__(self): super().__init__() self.user_embedding = tf.keras.Sequential([ user_id_lookup, tf.keras.layers.Embedding(user_id_lookup.vocab_size(), 32), ]) self.timestamp_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.Discretization(timestamp_buckets.tolist()), tf.keras.layers.Embedding(len(timestamp_buckets) + 2, 32) ]) self.normalized_timestamp = tf.keras.layers.experimental.preprocessing.Normalization() def call(self, inputs): # Take the input dictionary, pass it through each input layer, # and concatenate the result. return tf.concat([ self.user_embedding(inputs["user_id"]), self.timestamp_embedding(inputs["timestamp"]), self.normalized_timestamp(inputs["timestamp"]) ], axis=1) """ Explanation: This looks correct: the layer is tokenizing titles into individual words. To finish the processing, we now need to embed the text. Because each title contains multiple words, we will get multiple embeddings for each title. For use in a downstream model these are usually compressed into a single embedding. Models like RNNs or Transformers are useful here, but averaging all the words' embeddings together is a good starting point. Putting it all together With these components in place, we can build a model that does all the preprocessing together. User model The full user model may look like the following: End of explanation """ user_model = # TODO: Your code goes here user_model.normalized_timestamp.adapt( ratings.map(lambda x: x["timestamp"]).batch(128)) for row in ratings.batch(1).take(1): print(f"Computed representations: {user_model(row)[0, :3]}") """ Explanation: Let's try it out: End of explanation """ class MovieModel(tf.keras.Model): def __init__(self): super().__init__() max_tokens = 10_000 self.title_embedding = tf.keras.Sequential([ movie_title_lookup, tf.keras.layers.Embedding(movie_title_lookup.vocab_size(), 32) ]) self.title_text_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.TextVectorization(max_tokens=max_tokens), tf.keras.layers.Embedding(max_tokens, 32, mask_zero=True), # We average the embedding of individual words to get one embedding vector # per title. 
tf.keras.layers.GlobalAveragePooling1D(), ]) def call(self, inputs): return tf.concat([ self.title_embedding(inputs["movie_title"]), self.title_text_embedding(inputs["movie_title"]), ], axis=1) """ Explanation: Movie model We can do the same for the movie model: End of explanation """ movie_model = # TODO: Your code goes here movie_model.title_text_embedding.layers[0].adapt( ratings.map(lambda x: x["movie_title"])) for row in ratings.batch(1).take(1): print(f"Computed representations: {movie_model(row)[0, :3]}") """ Explanation: Let's try it out: End of explanation """
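Once the UserModel and MovieModel TODOs above are filled in and both towers produce fixed-width vectors, a common next step (beyond the scope of this lab) is to score a user/movie pair, for example with a dot product after projecting both representations to a common width. The sketch below assumes user_model and movie_model have been instantiated and adapted as described above; the 32-unit projection width is an arbitrary choice.
# Project both concatenated representations to the same width, then score a
# (user, movie) pair with a dot product.
user_projection = tf.keras.layers.Dense(32)
movie_projection = tf.keras.layers.Dense(32)

for row in ratings.batch(1).take(1):
    user_vec = user_projection(user_model(row))
    movie_vec = movie_projection(movie_model(row))
    affinity = tf.reduce_sum(user_vec * movie_vec, axis=1)
    print(f"Affinity score: {affinity}")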
tensorflow/fairness-indicators
g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ !pip install -q -U pip==20.2 !pip install -q -U \ tensorflow-model-analysis==0.39.0 \ tensorflow-data-validation==1.8.0 \ tfx-bsl==1.8.0 """ Explanation: Pandas DataFrame to Fairness Indicators Case Study <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/responsible_ai/fairness_indicators/tutorials/Fairness_Indicators_Pandas_Case_Study"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/fairness-indicators/blob/master/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/fairness-indicators/tree/master/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/fairness-indicators/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Case Study Overview In this case study we will apply TensorFlow Model Analysis and Fairness Indicators to evaluate data stored as a Pandas DataFrame, where each row contains ground truth labels, various features, and a model prediction. We will show how this workflow can be used to spot potential fairness concerns, independent of the framework one used to construct and train the model. As in this case study, we can analyze the results from any machine learning framework (e.g. TensorFlow, JAX, etc) once they are converted to a Pandas DataFrame. For this exercise, we will leverage the Deep Neural Network (DNN) model that was developed in the Shape Constraints for Ethics with Tensorflow Lattice case study using the Law School Admissions dataset from the Law School Admissions Council (LSAC). This classifier attempts to predict whether or not a student will pass the bar, based on their Law School Admission Test (LSAT) score and undergraduate GPA. LSAC Dataset The dataset used within this case study was originally collected for a study called 'LSAC National Longitudinal Bar Passage Study. LSAC Research Report Series' by Linda Wightman in 1998. The dataset is currently hosted here. dnn_bar_pass_prediction: The LSAT prediction from the DNN model. gender: Gender of the student. lsat: LSAT score received by the student. pass_bar: Ground truth label indicating whether or not the student eventually passed the bar. race: Race of the student. ugpa: A student's undergraduate GPA. 
End of explanation """ import os import tempfile import pandas as pd import six.moves.urllib as urllib import pprint import tensorflow_model_analysis as tfma from google.protobuf import text_format import tensorflow as tf tf.compat.v1.enable_v2_behavior() """ Explanation: Importing required packages: End of explanation """ # Download the LSAT dataset and setup the required filepaths. _DATA_ROOT = tempfile.mkdtemp(prefix='lsat-data') _DATA_PATH = 'https://storage.googleapis.com/lawschool_dataset/bar_pass_prediction.csv' _DATA_FILEPATH = os.path.join(_DATA_ROOT, 'bar_pass_prediction.csv') data = urllib.request.urlopen(_DATA_PATH) _LSAT_DF = pd.read_csv(data) # To simpliy the case study, we will only use the columns that will be used for # our model. _COLUMN_NAMES = [ 'dnn_bar_pass_prediction', 'gender', 'lsat', 'pass_bar', 'race1', 'ugpa', ] _LSAT_DF.dropna() _LSAT_DF['gender'] = _LSAT_DF['gender'].astype(str) _LSAT_DF['race1'] = _LSAT_DF['race1'].astype(str) _LSAT_DF = _LSAT_DF[_COLUMN_NAMES] _LSAT_DF.head() """ Explanation: Download the data and explore the initial dataset. End of explanation """ # Specify Fairness Indicators in eval_config. eval_config = text_format.Parse(""" model_specs { prediction_key: 'dnn_bar_pass_prediction', label_key: 'pass_bar' } metrics_specs { metrics {class_name: "AUC"} metrics { class_name: "FairnessIndicators" config: '{"thresholds": [0.50, 0.90]}' } } slicing_specs { feature_keys: 'race1' } slicing_specs {} """, tfma.EvalConfig()) # Run TensorFlow Model Analysis. eval_result = tfma.analyze_raw_data( data=_LSAT_DF, eval_config=eval_config, output_path=_DATA_ROOT) """ Explanation: Configure Fairness Indicators. There are several parameters that you’ll need to take into account when using Fairness Indicators with a DataFrame Your input DataFrame must contain a prediction column and label column from your model. By default Fairness Indicators will look for a prediction column called prediction and a label column called label within your DataFrame. If either of these values are not found a KeyError will be raised. In addition to a DataFrame, you’ll also need to include an eval_config that should include the metrics to compute, slices to compute the metrics on, and the column names for example labels and predictions. metrics_specs will set the metrics to compute. The FairnessIndicators metric will be required to render the fairness metrics and you can see a list of additional optional metrics here. slicing_specs is an optional slicing parameter to specify what feature you’re interested in investigating. Within this case study race1 is used, however you can also set this value to another feature (for example gender in the context of this DataFrame). If slicing_specs is not provided all features will be included. If your DataFrame includes a label or prediction column that is different from the default prediction or label, you can configure the label_key and prediction_key to a new value. If output_path is not specified a temporary directory will be created. End of explanation """ # Render Fairness Indicators. tfma.addons.fairness.view.widget_view.render_fairness_indicator(eval_result) """ Explanation: Explore model performance with Fairness Indicators. After running Fairness Indicators, we can visualize different metrics that we selected to analyze our models performance. Within this case study we’ve included Fairness Indicators and arbitrarily picked AUC. 
When we first look at the overall AUC for each race slice we can see a slight discrepancy in model performance, but nothing that is arguably alarming.

Asian: 0.58
Black: 0.58
Hispanic: 0.58
Other: 0.64
White: 0.6

However, when we look at the false negative rates split by race, our model again incorrectly predicts the likelihood of a user passing the bar at different rates, and this time the differences are substantial.

Asian: 0.01
Black: 0.05
Hispanic: 0.02
Other: 0.01
White: 0.01

Most notably, the difference between Black and White students is about 380%, meaning that our model is nearly 4x more likely to incorrectly predict that a Black student will not pass the bar than a White student. If we were to continue with this effort, a practitioner could use these results as a signal that they should spend more time ensuring that their model works well for people from all backgrounds.
End of explanation
"""

pp = pprint.PrettyPrinter()

print("Slices:")
pp.pprint(eval_result.get_slice_names())

print("\nMetrics:")
pp.pprint(eval_result.get_metric_names())
"""
Explanation: tfma.EvalResult
The eval_result object, rendered above in render_fairness_indicator(), has its own API that can be used to read TFMA results into your programs.
get_slice_names() and get_metric_names()
To get the evaluated slices and metrics, you can use the respective functions.
End of explanation
"""

baseline_slice = ()
black_slice = (('race1', 'black'),)

print("Baseline metric values:")
pp.pprint(eval_result.get_metrics_for_slice(baseline_slice))
print("Black metric values:")
pp.pprint(eval_result.get_metrics_for_slice(black_slice))
"""
Explanation: get_metrics_for_slice() and get_metrics_for_all_slices()
If you want to get the metrics for a particular slice, you can use get_metrics_for_slice(). It returns a dictionary mapping metric names to metric values.
End of explanation
"""

pp.pprint(eval_result.get_metrics_for_all_slices())
"""
Explanation: If you want to get the metrics for all slices, get_metrics_for_all_slices() returns a dictionary mapping each slice to the corresponding get_metrics_for_slice(slice).
End of explanation
"""

# TensorFlow Estimator to Pandas DataFrame:
# _X_VALUE = # X value of binary estimator.
# _Y_VALUE = # Y value of binary estimator.
# _GROUND_TRUTH_LABEL = # Ground truth value of binary estimator.
import numpy as np  # used below but not imported earlier in this notebook

def _get_predicted_probabilities(estimator, input_df, get_input_fn):
    # Run the estimator and keep the probability of the positive class.
    predictions = estimator.predict(
        input_fn=get_input_fn(input_df=input_df, num_epochs=1))
    return [prediction['probabilities'][1] for prediction in predictions]

def _get_input_fn_law(input_df, num_epochs, batch_size=None):
    return tf.compat.v1.estimator.inputs.pandas_input_fn(
        x=input_df[[_X_VALUE, _Y_VALUE]],
        y=input_df[_GROUND_TRUTH_LABEL],
        num_epochs=num_epochs,
        batch_size=batch_size or len(input_df),
        shuffle=False)

def estimator_to_dataframe(estimator, input_df, num_keypoints=20):
    # Build a grid of (x, y) keypoints covering the observed feature ranges.
    x = np.linspace(min(input_df[_X_VALUE]), max(input_df[_X_VALUE]), num_keypoints)
    y = np.linspace(min(input_df[_Y_VALUE]), max(input_df[_Y_VALUE]), num_keypoints)

    x_grid, y_grid = np.meshgrid(x, y)
    positions = np.vstack([x_grid.ravel(), y_grid.ravel()])
    plot_df = pd.DataFrame(positions.T, columns=[_X_VALUE, _Y_VALUE])
    plot_df[_GROUND_TRUTH_LABEL] = np.ones(len(plot_df))
    predictions = _get_predicted_probabilities(
        estimator=estimator, input_df=plot_df, get_input_fn=_get_input_fn_law)
    return pd.DataFrame(
        data=np.array(np.reshape(predictions, x_grid.shape)).flatten())
"""
Explanation: Conclusion
Within this case study we imported a dataset into a Pandas DataFrame that we then analyzed with Fairness Indicators. Understanding the results of your model and underlying data is an important step in ensuring your model doesn't reflect harmful bias. In the context of this case study we examined the LSAC dataset and how predictions from this data could be impacted by a student's race. The concept of “what is unfair and what is fair have been introduced in multiple disciplines for well over 50 years, including in education, hiring, and machine learning.”<sup>1</sup> Fairness Indicators is a tool to help mitigate fairness concerns in your machine learning model. For more information on using Fairness Indicators and resources to learn more about fairness concerns see here.
Hutchinson, B., Mitchell, M. (2018). 50 Years of Test (Un)fairness: Lessons for Machine Learning. https://arxiv.org/abs/1811.10104
Appendix
Below are a few helper functions for converting a TensorFlow Estimator's predictions into a Pandas DataFrame.
End of explanation
"""
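# --- Illustrative addition (not part of the original tutorial) ---------------
# A minimal, pandas-only sketch of the per-slice metric discussed above: the
# false negative rate for each `race1` group at a fixed decision threshold.
# TFMA computes this (and much more) internally; this stand-alone version just
# makes the arithmetic explicit. The tiny DataFrame below is synthetic, and the
# column names simply mirror the ones used in the case study.
import pandas as pd

toy_df = pd.DataFrame({
    'race1': ['white', 'white', 'black', 'black', 'asian', 'asian'],
    'pass_bar': [1, 1, 1, 0, 1, 1],                              # ground truth
    'dnn_bar_pass_prediction': [0.9, 0.4, 0.3, 0.2, 0.8, 0.6],   # model score
})

def false_negative_rate(group, threshold=0.5):
    """FNR = true positives predicted negative / all true positives."""
    positives = group[group['pass_bar'] == 1]
    if len(positives) == 0:
        return float('nan')
    missed = (positives['dnn_bar_pass_prediction'] < threshold).sum()
    return missed / len(positives)

print(toy_df.groupby('race1').apply(false_negative_rate))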
chezou/tabula-py
examples/tabula_example.ipynb
mit
!java -version """ Explanation: <a href="https://colab.research.google.com/github/chezou/tabula-py/blob/master/examples/tabula_example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> tabula-py example notebook tabula-py is a tool for convert PDF tables to pandas DataFrame. tabula-py is a wrapper of tabula-java, which requires java on your machine. tabula-py also enables you to convert tables in a PDF into CSV/TSV files. tabula-py's PDF extraction accuracy is same as tabula-java or tabula app; GUI tool of tabula, so if you want to know the performance of tabula-py, I highly recommend you to try tabula app. tabula-py is good for: - automation with Python script - advanced analytics after converting pandas DataFrame - casual analytics with Jupyter notebook or Google Colabolatory Check Java environment and install tabula-py tabula-py requires a java environment, so let's check the java environment on your machine. End of explanation """ # To be more precisely, it's better to use `{sys.executable} -m pip install tabula-py` !pip install -q tabula-py """ Explanation: After confirming the java environment, install tabula-py by using pip. End of explanation """ import tabula tabula.environment_info() """ Explanation: Before trying tabula-py, check your environment via tabula-py environment_info() function, which shows Python version, Java version, and your OS environment. End of explanation """ import tabula pdf_path = "https://github.com/chezou/tabula-py/raw/master/tests/resources/data.pdf" dfs = tabula.read_pdf(pdf_path, stream=True) # read_pdf returns list of DataFrames print(len(dfs)) dfs[0] """ Explanation: Read a PDF with read_pdf() function Let's read a PDF from GitHub. tabula-py can load a PDF or file like object on both local or internet by using read_pdf() function. End of explanation """ help(tabula.read_pdf) help(tabula.io.build_options) """ Explanation: Options for read_pdf() Note that read_pdf() function reads only page 1 by default. For more details, use ?read_pdf and ?tabula.wrapper.build_options. End of explanation """ # set pages option dfs = tabula.read_pdf(pdf_path, pages=3, stream=True) dfs[0] # pass pages as string tabula.read_pdf(pdf_path, pages="1-2,3", stream=True) """ Explanation: Let's set pages option. Here is the extraction result of page 3: End of explanation """ # extract all pages tabula.read_pdf(pdf_path, pages="all", stream=True) """ Explanation: You can set pages="all" for extration all pages. If you hit OOM error with Java, you should set appropriate -Xmx option for java_options. End of explanation """ # set area option dfs = tabula.read_pdf(pdf_path, area=[126,149,212,462], pages=2) dfs[0] """ Explanation: Read partial area of PDF If you want to set a certain part of page, you can use area option. Note that as of tabula-py 2.0.0, multiple_tables option became True so if you want to use multiple area options like [[0, 0, 100, 50], [0, 50, 100, 100]], you need to set multiple_tables=False. 
End of explanation
"""

pdf_path2 = "https://github.com/chezou/tabula-py/raw/master/tests/resources/campaign_donors.pdf"
dfs = tabula.read_pdf(pdf_path2, columns=[47, 147, 256, 310, 375, 431, 504], guess=False, pages=1)
df = dfs[0].drop(["Unnamed: 0"], axis=1)
df
"""
Explanation: Read by giving column information
End of explanation
"""

# read pdf as JSON
tabula.read_pdf(pdf_path, output_format="json")
"""
Explanation: Extract to JSON, TSV, or CSV
tabula-py can produce not only a DataFrame but also JSON, TSV, or CSV. You can set the output format with the output_format option.
End of explanation
"""

# You can convert from pdf into JSON, CSV, TSV
tabula.convert_into(pdf_path, "test.json", output_format="json")
!cat test.json

tabula.convert_into(pdf_path, "test.tsv", output_format="tsv")
!cat test.tsv

tabula.convert_into(pdf_path, "test.csv", output_format="csv", stream=True)
!cat test.csv
"""
Explanation: Convert PDF tables into CSV, TSV, or JSON files
You can convert files directly, rather than creating Python objects, with the convert_into() function.
End of explanation
"""

pdf_path3 = "https://github.com/tabulapdf/tabula-java/raw/master/src/test/resources/technology/tabula/spanning_cells.pdf"
dfs = tabula.read_pdf(
    pdf_path3,
    pages="1",
    lattice=True,
    pandas_options={"header": [0, 1]},
    area=[0, 0, 50, 100],
    relative_area=True,
    multiple_tables=False,
)
dfs[0]
"""
Explanation: Use lattice mode for more accurate extraction of spreadsheet-style tables
If your tables have lines separating cells, you can use the lattice option. By default, tabula-py sets guess=True, which matches the default behavior of the tabula app. If your tables don't have separation lines, you can try the stream option. As mentioned above, try the tabula app before struggling with tabula-py options. Alternatively, PDFplumber can be an option, since it uses a different extraction strategy.
End of explanation
"""

template_path = "https://github.com/chezou/tabula-py/raw/master/tests/resources/data.tabula-template.json"
tabula.read_pdf_with_template(pdf_path, template_path)
"""
Explanation: Use a tabula app template
tabula-py can use a tabula app template, which stores area options set in the GUI app so they can be reused.
End of explanation
"""
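# --- Illustrative addition (not part of the original notebook) ---------------
# read_pdf() returns a list of DataFrames (one per detected table), so a common
# follow-up step is to combine them and write a single file with plain pandas.
# This sketch only reuses calls already shown above (tabula.read_pdf) plus
# standard pandas; "combined_tables.csv" is just an arbitrary example filename.
import pandas as pd
import tabula

pdf_url = "https://github.com/chezou/tabula-py/raw/master/tests/resources/data.pdf"
tables = tabula.read_pdf(pdf_url, pages="all", stream=True)

combined = pd.concat(tables, ignore_index=True)
combined.to_csv("combined_tables.csv", index=False)
print("Saved %d tables (%d rows) to combined_tables.csv" % (len(tables), len(combined)))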
rvperry/phys202-2015-work
assignments/assignment07/AlgorithmsEx01.ipynb
mit
%matplotlib inline from matplotlib import pyplot as plt import numpy as np """ Explanation: Algorithms Exercise 1 Imports End of explanation """ filter? def tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t'): """Split a string into a list of words, removing punctuation and stop words.""" t=[] S=s.splitlines() for i in S: w=i.split(' ') t.append(w) #filter(lambda x: x not in punctuation, t) T=filter(lambda x: x not in stop_words, t) return T tokenize("This, is the way; that things will end", stop_words=['the', 'is']) tokenize(""" APRIL is the cruellest month, breeding Lilacs out of the dead land, mixing Memory and desire, stirring Dull roots with spring rain. """, stop_words=['the', 'is']) assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \ ['this', 'way', 'that', 'things', 'will', 'end'] wasteland = """ APRIL is the cruellest month, breeding Lilacs out of the dead land, mixing Memory and desire, stirring Dull roots with spring rain. """ assert tokenize(wasteland, stop_words='is the of and') == \ ['april','cruellest','month','breeding','lilacs','out','dead','land', 'mixing','memory','desire','stirring','dull','roots','with','spring', 'rain'] tokenize(wasteland, stop_words='is the of and') """ Explanation: Word counting Write a function tokenize that takes a string of English text returns a list of words. It should also remove stop words, which are common short words that are often removed before natural language processing. Your function should have the following logic: Split the string into lines using splitlines. Split each line into a list of words and merge the lists for each line. Use Python's builtin filter function to remove all punctuation. If stop_words is a list, remove all occurences of the words in the list. If stop_words is a space delimeted string of words, split them and remove them. Remove any remaining empty words. Make all words lowercase. End of explanation """ def count_words(data): """Return a word count dictionary from the list of words in data.""" # YOUR CODE HERE raise NotImplementedError() assert count_words(tokenize('this and the this from and a a a')) == \ {'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2} """ Explanation: Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts. End of explanation """ def sort_word_counts(wc): """Return a list of 2-tuples of (word, count), sorted by count descending.""" # YOUR CODE HERE raise NotImplementedError() assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \ [('a', 4), ('this', 3), ('and', 2), ('the', 1)] """ Explanation: Write a function sort_word_counts that return a list of sorted word counts: Each element of the list should be a (word, count) tuple. The list should be sorted by the word counts, with the higest counts coming first. To perform this sort, look at using the sorted function with a custom key and reverse argument. End of explanation """ # YOUR CODE HERE raise NotImplementedError() assert swc[0]==('i',43) assert len(swc)==848 """ Explanation: Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt: Read the file into a string. Tokenize with stop words of 'the of and a to in is it that as'. Perform a word count, the sort and save the result in a variable named swc. 
End of explanation """ # YOUR CODE HERE raise NotImplementedError() assert True # use this for grading the dotplot """ Explanation: Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research... End of explanation """
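# --- Illustrative sketch (not the assignment's official solution) ------------
# One possible way to implement the word-count, sorting, and Cleveland-style
# dotplot steps described above, for a list of already-tokenized words. The
# helper names carry an "_example" suffix to make clear they are not the
# graded functions from this notebook.
import matplotlib.pyplot as plt

def count_words_example(words):
    """Return a {word: count} dictionary."""
    counts = {}
    for w in words:
        counts[w] = counts.get(w, 0) + 1
    return counts

def sort_word_counts_example(wc):
    """Return (word, count) tuples sorted by count, descending."""
    return sorted(wc.items(), key=lambda pair: pair[1], reverse=True)

example_words = 'this and a the this this and a a a'.split()
sorted_counts = sort_word_counts_example(count_words_example(example_words))
print(sorted_counts)

# Cleveland-style dotplot: one dot per word, placed at its count.
labels = [w for w, _ in sorted_counts]
values = [c for _, c in sorted_counts]
plt.plot(values, range(len(values)), 'o')
plt.yticks(range(len(labels)), labels)
plt.xlabel('count')
plt.show()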
mlund/openmm-examples
yukawa/yukawa.ipynb
mit
%matplotlib inline import numpy as np from __future__ import print_function from simtk.openmm import app import simtk.openmm as mm from simtk import unit from sys import stdout, exit import math import mdtraj as mdtraj from itertools import combinations """ Explanation: Custom Nonbonded Potential: Yukawa on rigid bodies Here we define a custom force class where particles interact through a Yukawa potential and a soft repulsion, \begin{equation} w(r) / k_BT = \frac{\lambda_Bz_iz_j}{r}e^{-r/\lambda_D} + 4\beta\epsilon_{ij} \left ( \frac{\sigma_{ij}}{r}\right )^{12} \end{equation} where $\lambda_B=e^2/4\pi\epsilon_0\epsilon_rk_BT$ and $\lambda_D=(4\pi\lambda_B\sum \rho_iz_i^2)^{-1/2}$ are the Bjerrum and Debye lengths, respectively. $\rho_i$ is the number density of the $i$th ion. In this example we also create two rigid bodies using harmonic bonds to constrain the positions. Some comments: The potential is defined in CustomNonbonded is defined in cg.zml and must return energy in kJ/mol. The Bjerrum and Debye lengths are set via global parameters End of explanation """ cutoff = 50*unit.angstrom useMinimize = True epsilon_r = 80. temperature = 300*unit.kelvin kT = unit.BOLTZMANN_CONSTANT_kB*temperature timestep = 10*unit.femtoseconds; steps_eq = 5000 steps_production = 2e4 steps_total = steps_eq + steps_production """ Explanation: Simulation setup End of explanation """ def findForce(system, forcetype, add=True): """ Finds a specific force in the system force list - added if not found.""" for force in system.getForces(): if isinstance(force, forcetype): return force if add==True: system.addForce(forcetype()) return findForce(system, forcetype) return None def setGlobalForceParameter(force, key, value): for i in range(force.getNumGlobalParameters()): if force.getGlobalParameterName(i)==key: print('setting force parameter', key, '=', value) force.setGlobalParameterDefaultValue(i, value); def atomIndexInResidue(residue): """ list of atom index in residue """ index=[] for a in list(residue.atoms()): index.append(a.index) return index def getResiduePositions(residue, positions): """ Returns array w. 
atomic positions of residue """ ndx = atomIndexInResidue(residue) return np.array(positions)[ndx] def uniquePairs(index): """ list of unique, internal pairs """ return list(combinations( range(index[0],index[-1]+1),2 ) ) def addHarmonicConstraint(harmonicforce, pairlist, positions, threshold, k): """ add harmonic bonds between pairs if distance is smaller than threshold """ print('Constraint force constant =', k) for i,j in pairlist: distance = unit.norm( positions[i]-positions[j] ) if distance<threshold: harmonicforce.addBond( i,j, distance.value_in_unit(unit.nanometer), k.value_in_unit( unit.kilojoule/unit.nanometer**2/unit.mole )) print("added harmonic bond between", i, j, 'with distance',distance) def addExclusions(nonbondedforce, pairlist): """ add nonbonded exclusions between pairs """ for i,j in pairlist: nonbondedforce.addExclusion(i,j) def rigidifyResidue(residue, harmonicforce, positions, nonbondedforce=None, threshold=6.0*unit.angstrom, k=2500*unit.kilojoule/unit.nanometer**2/unit.mole): """ make residue rigid by adding constraints and nonbonded exclusions """ index = atomIndexInResidue(residue) pairlist = uniquePairs(index) addHarmonicConstraint(harmonic, pairlist, pdb.positions, threshold, k) if nonbondedforce is not None: for i,j in pairlist: print('added nonbonded exclusion between', i, j) nonbonded.addExclusion(i,j) def centerOfMass(positions, box): """ Calculates the geometric center taking into account periodic boundaries More here: https://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions """ theta=np.divide(positions, box).astype(np.float) * 2*np.pi x1=np.array( [np.cos(theta[:,0]).mean(), np.cos(theta[:,1]).mean(), np.cos(theta[:,2]).mean()] ) x2=np.array( [np.sin(theta[:,0]).mean(), np.sin(theta[:,1]).mean(), np.sin(theta[:,2]).mean()] ) return box * (np.arctan2(-x1,-x2)+np.pi) / (2*np.pi) """ Explanation: Convenience functions A set of independent functions, useful for setting up OpenMM. 
End of explanation """ pdb = app.PDBFile('squares.pdb') forcefield = app.ForceField('yukawa.xml') system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.CutoffPeriodic, nonbondedCutoff=cutoff ) box = np.array(pdb.topology.getPeriodicBoxVectors()).diagonal() harmonic = findForce(system, mm.HarmonicBondForce) nonbonded = findForce(system, mm.CustomNonbondedForce) setGlobalForceParameter(nonbonded, 'lB', 0.7*unit.nanometer) setGlobalForceParameter(nonbonded, 'kappa', 0.0) for residue in pdb.topology.residues(): p = getResiduePositions(residue, pdb.positions) print(centerOfMass(p, box)) rigidifyResidue(residue, harmonicforce=harmonic, nonbondedforce=nonbonded, positions=pdb.positions) integrator = mm.LangevinIntegrator(temperature, 1.0/unit.picoseconds, timestep) integrator.setConstraintTolerance(0.0001) """ Explanation: Setup simulation End of explanation """ simulation = app.Simulation(pdb.topology, system, integrator) simulation.context.setPositions(pdb.positions) if useMinimize: print('Minimizing...') simulation.minimizeEnergy() print('Equilibrating...') simulation.context.setVelocitiesToTemperature(300*unit.kelvin) simulation.step(steps_eq) simulation.reporters.append(mdtraj.reporters.HDF5Reporter('trajectory.h5', 100)) simulation.reporters.append( app.StateDataReporter(stdout, int(steps_total/10), step=True, potentialEnergy=True, temperature=True, progress=True, remainingTime=False, speed=True, totalSteps=steps_total, volume=True, separator='\t')) print('Production...') simulation.step(steps_production) print('Done!') """ Explanation: Run simulation End of explanation """
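# --- Illustrative addition (not part of the original notebook) ---------------
# Numerically evaluating the Bjerrum and Debye lengths from the formulas in the
# introduction, using scipy.constants (an extra dependency not used above). The
# 0.1 mol/l 1:1 salt below is only an example value; the simulation above
# actually sets kappa = 0, i.e. no electrostatic screening.
import numpy as np
from scipy import constants

eps_r = 80.0   # relative permittivity, as above
T = 300.0      # kelvin, as above

# Bjerrum length: lB = e^2 / (4 pi eps0 eps_r kB T)
lB = constants.e**2 / (4 * np.pi * constants.epsilon_0 * eps_r * constants.k * T)
print('Bjerrum length = %.3f nm' % (lB * 1e9))   # ~0.70 nm, matching 'lB' above

# Debye length: lD = (4 pi lB sum_i rho_i z_i^2)^(-1/2)
c_molar = 0.1                               # example 1:1 salt concentration
rho = c_molar * 1000 * constants.N_A        # number density per m^3, per ion species
sum_rho_z2 = 2 * rho * 1.0**2               # two monovalent species
lD = 1.0 / np.sqrt(4 * np.pi * lB * sum_rho_z2)
print('Debye length  = %.2f nm' % (lD * 1e9))    # ~0.96 nm at 0.1 mol/l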
mne-tools/mne-tools.github.io
0.23/_downloads/bdc8ac519d8f54d70a73a5e0de598566/50_background_freesurfer_mne.ipynb
bsd-3-clause
import os import numpy as np import nibabel import matplotlib.pyplot as plt import matplotlib.patheffects as path_effects import mne from mne.transforms import apply_trans from mne.io.constants import FIFF """ Explanation: How MNE uses FreeSurfer's outputs This tutorial explains how MRI coordinate frames are handled in MNE-Python, and how MNE-Python integrates with FreeSurfer for handling MRI data and source space data in general. As usual we'll start by importing the necessary packages; for this tutorial that includes :mod:nibabel to handle loading the MRI images (MNE-Python also uses :mod:nibabel under the hood). We'll also use a special :mod:Matplotlib &lt;matplotlib.patheffects&gt; function for adding outlines to text, so that text is readable on top of an MRI image. End of explanation """ data_path = mne.datasets.sample.data_path() subjects_dir = os.path.join(data_path, 'subjects') subject = 'sample' t1_fname = os.path.join(subjects_dir, subject, 'mri', 'T1.mgz') t1 = nibabel.load(t1_fname) t1.orthoview() """ Explanation: MRI coordinate frames Let's start out by looking at the sample subject MRI. Following standard FreeSurfer convention, we look at :file:T1.mgz, which gets created from the original MRI :file:sample/mri/orig/001.mgz when you run the FreeSurfer command recon-all &lt;https://surfer.nmr.mgh.harvard.edu/fswiki/recon-all&gt;_. Here we use :mod:nibabel to load the T1 image, and the resulting object's :meth:~nibabel.spatialimages.SpatialImage.orthoview method to view it. End of explanation """ data = np.asarray(t1.dataobj) print(data.shape) """ Explanation: Notice that the axes in the :meth:~nibabel.spatialimages.SpatialImage.orthoview figure are labeled L-R, S-I, and P-A. These reflect the standard RAS (right-anterior-superior) coordinate system that is widely used in MRI imaging. If you are unfamiliar with RAS coordinates, see the excellent nibabel tutorial :doc:nibabel:coordinate_systems. Nibabel already takes care of some coordinate frame transformations under the hood, so let's do it manually so we understand what is happening. First let's get our data as a 3D array and note that it's already a standard size: End of explanation """ print(t1.affine) vox = np.array([122, 119, 102]) xyz_ras = apply_trans(t1.affine, vox) print('Our voxel has real-world coordinates {}, {}, {} (mm)' .format(*np.round(xyz_ras, 3))) """ Explanation: These data are voxel intensity values. Here they are unsigned integers in the range 0-255, though in general they can be floating point values. A value data[i, j, k] at a given index triplet (i, j, k) corresponds to some real-world physical location (x, y, z) in space. To get its physical location, first we have to choose what coordinate frame we're going to use. For example, we could choose a geographical coordinate frame, with origin is at the center of the earth, Z axis through the north pole, X axis through the prime meridian (zero degrees longitude), and Y axis orthogonal to these forming a right-handed coordinate system. This would not be a very useful choice for defining the physical locations of the voxels during the MRI acquisition for analysis, but you could nonetheless figure out the transformation that related the (i, j, k) to this coordinate frame. Instead, each scanner defines a more practical, native coordinate system that it uses during acquisition, usually related to the physical orientation of the scanner itself and/or the subject within it. 
During acquisition the relationship between the voxel indices (i, j, k) and the physical location (x, y, z) in the scanner's native coordinate frame is saved in the image's affine transformation. .. sidebar:: Under the hood ``mne.transforms.apply_trans`` effectively does a matrix multiplication (i.e., :func:`numpy.dot`), with a little extra work to handle the shape mismatch (the affine has shape ``(4, 4)`` because it includes a *translation*, which is applied separately). We can use :mod:nibabel to examine this transformation, keeping in mind that it processes everything in units of millimeters, unlike MNE where things are always in SI units (meters). This allows us to take an arbitrary voxel or slice of data and know where it is in the scanner's native physical space (x, y, z) (in mm) by applying the affine transformation to the voxel coordinates. End of explanation """ ras_coords_mm = np.array([1, -17, -18]) inv_affine = np.linalg.inv(t1.affine) i_, j_, k_ = np.round(apply_trans(inv_affine, ras_coords_mm)).astype(int) print('Our real-world coordinates correspond to voxel ({}, {}, {})' .format(i_, j_, k_)) """ Explanation: If you have a point (x, y, z) in scanner-native RAS space and you want the corresponding voxel number, you can get it using the inverse of the affine. This involves some rounding, so it's possible to end up off by one voxel if you're not careful: End of explanation """ def imshow_mri(data, img, vox, xyz, suptitle): """Show an MRI slice with a voxel annotated.""" i, j, k = vox fig, ax = plt.subplots(1, figsize=(6, 6)) codes = nibabel.orientations.aff2axcodes(img.affine) # Figure out the title based on the code of this axis ori_slice = dict(P='Coronal', A='Coronal', I='Axial', S='Axial', L='Sagittal', R='Saggital') ori_names = dict(P='posterior', A='anterior', I='inferior', S='superior', L='left', R='right') title = ori_slice[codes[0]] ax.imshow(data[i], vmin=10, vmax=120, cmap='gray', origin='lower') ax.axvline(k, color='y') ax.axhline(j, color='y') for kind, coords in xyz.items(): annotation = ('{}: {}, {}, {} mm' .format(kind, *np.round(coords).astype(int))) text = ax.text(k, j, annotation, va='baseline', ha='right', color=(1, 1, 0.7)) text.set_path_effects([ path_effects.Stroke(linewidth=2, foreground='black'), path_effects.Normal()]) # reorient view so that RAS is always rightward and upward x_order = -1 if codes[2] in 'LIP' else 1 y_order = -1 if codes[1] in 'LIP' else 1 ax.set(xlim=[0, data.shape[2] - 1][::x_order], ylim=[0, data.shape[1] - 1][::y_order], xlabel=f'k ({ori_names[codes[2]]}+)', ylabel=f'j ({ori_names[codes[1]]}+)', title=f'{title} view: i={i} ({ori_names[codes[0]]}+)') fig.suptitle(suptitle) fig.subplots_adjust(0.1, 0.1, 0.95, 0.85) return fig imshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice') """ Explanation: Let's write a short function to visualize where our voxel lies in an image, and annotate it in RAS space (rounded to the nearest millimeter): End of explanation """ Torig = t1.header.get_vox2ras_tkr() print(t1.affine) print(Torig) xyz_mri = apply_trans(Torig, vox) imshow_mri(data, t1, vox, dict(MRI=xyz_mri), 'MRI slice') """ Explanation: Notice that the axis scales (i, j, and k) are still in voxels (ranging from 0-255); it's only the annotation text that we've translated into real-world RAS in millimeters. "MRI coordinates" in MNE-Python: FreeSurfer surface RAS While :mod:nibabel uses scanner RAS (x, y, z) coordinates, FreeSurfer uses a slightly different coordinate frame: MRI surface RAS. 
The transform from voxels to the FreeSurfer MRI surface RAS coordinate frame is known in the FreeSurfer documentation &lt;https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems&gt;_ as Torig, and in nibabel as :meth:vox2ras_tkr &lt;nibabel.freesurfer.mghformat.MGHHeader.get_vox2ras_tkr&gt;. This transformation sets the center of its coordinate frame in the middle of the conformed volume dimensions (N / 2.) with the axes oriented along the axes of the volume itself. For more information, see coordinate_systems. <div class="alert alert-info"><h4>Note</h4><p>In general, you should assume that the MRI coordinate system for a given subject is specific to that subject, i.e., it is not the same coordinate MRI coordinate system that is used for any other FreeSurfer subject. Even though during processing FreeSurfer will align each subject's MRI to ``fsaverage`` to do reconstruction, all data (surfaces, MRIs, etc.) get stored in the coordinate frame specific to that subject. This is why it's important for group analyses to transform data to a common coordinate frame for example by `surface <ex-morph-surface>` or `volumetric <ex-morph-volume>` morphing, or even by just applying `mni-affine-transformation` to points.</p></div> Since MNE-Python uses FreeSurfer extensively for surface computations (e.g., white matter, inner/outer skull meshes), internally MNE-Python uses the Freeurfer surface RAS coordinate system (not the :mod:nibabel scanner RAS system) for as many computations as possible, such as all source space and BEM mesh vertex definitions. Whenever you see "MRI coordinates" or "MRI coords" in MNE-Python's documentation, you should assume that we are talking about the "FreeSurfer MRI surface RAS" coordinate frame! We can do similar computations as before to convert the given voxel indices into FreeSurfer MRI coordinates (i.e., what we call "MRI coordinates" or "surface RAS" everywhere else in MNE), just like we did above to convert voxel indices to scanner RAS: End of explanation """ fiducials = mne.coreg.get_mni_fiducials(subject, subjects_dir=subjects_dir) nasion_mri = [d for d in fiducials if d['ident'] == FIFF.FIFFV_POINT_NASION][0] print(nasion_mri) # note it's in Freesurfer MRI coords """ Explanation: Knowing these relationships and being mindful about transformations, we can get from a point in any given space to any other space. Let's start out by plotting the Nasion on a saggital MRI slice: End of explanation """ nasion_mri = nasion_mri['r'] * 1000 # meters → millimeters nasion_vox = np.round( apply_trans(np.linalg.inv(Torig), nasion_mri)).astype(int) imshow_mri(data, t1, nasion_vox, dict(MRI=nasion_mri), 'Nasion estimated from MRI transform') """ Explanation: When we print the nasion, it displays as a DigPoint and shows its coordinates in millimeters, but beware that the underlying data is actually stored in meters &lt;units&gt;, so before transforming and plotting we'll convert to millimeters: End of explanation """ info = mne.io.read_info( os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')) nasion_head = [d for d in info['dig'] if d['kind'] == FIFF.FIFFV_POINT_CARDINAL and d['ident'] == FIFF.FIFFV_POINT_NASION][0] print(nasion_head) # note it's in "head" coordinates """ Explanation: We can also take the digitization point from the MEG data, which is in the "head" coordinate frame. 
Let's look at the nasion in the head coordinate frame: End of explanation """ trans = mne.read_trans( os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')) # first we transform from head to MRI, and *then* convert to millimeters nasion_dig_mri = apply_trans(trans, nasion_head['r']) * 1000 # ...then we can use Torig to convert MRI to voxels: nasion_dig_vox = np.round( apply_trans(np.linalg.inv(Torig), nasion_dig_mri)).astype(int) imshow_mri(data, t1, nasion_dig_vox, dict(MRI=nasion_dig_mri), 'Nasion transformed from digitization') """ Explanation: .. sidebar:: Head coordinate frame The head coordinate frame in MNE is the "Neuromag" head coordinate frame. The origin is given by the intersection between a line connecting the LPA and RPA and the line orthogonal to it that runs through the nasion. It is also in RAS orientation, meaning that +X runs through the RPA, +Y goes through the nasion, and +Z is orthogonal to these pointing upward. See `coordinate_systems` for more information. Notice that in "head" coordinate frame the nasion has values of 0 for the x and z directions (which makes sense given that the nasion is used to define the y axis in that system). To convert from head coordinate frame to voxels, we first apply the head → MRI (surface RAS) transform from a :file:trans file (typically created with the MNE-Python coregistration GUI), then convert meters → millimeters, and finally apply the inverse of Torig to get to voxels. Under the hood, functions like :func:mne.setup_source_space, :func:mne.setup_volume_source_space, and :func:mne.compute_source_morph make extensive use of these coordinate frames. End of explanation """ fname = os.path.join(subjects_dir, subject, 'surf', 'rh.white') rr_mm, tris = mne.read_surface(fname) print(f'rr_mm.shape == {rr_mm.shape}') print(f'tris.shape == {tris.shape}') print(f'rr_mm.max() = {rr_mm.max()}') # just to show that we are in mm """ Explanation: Using FreeSurfer's surface reconstructions An important part of what FreeSurfer does is provide cortical surface reconstructions. For example, let's load and view the white surface of the brain. This is a 3D mesh defined by a set of vertices (conventionally called rr) with shape (n_vertices, 3) and a set of triangles (tris) with shape (n_tris, 3) defining which vertices in rr form each triangular facet of the mesh. End of explanation """ renderer = mne.viz.backends.renderer.create_3d_figure( size=(600, 600), bgcolor='w', scene=False) gray = (0.5, 0.5, 0.5) renderer.mesh(*rr_mm.T, triangles=tris, color=gray) view_kwargs = dict(elevation=90, azimuth=0) mne.viz.set_3d_view( figure=renderer.figure, distance=350, focalpoint=(0., 0., 40.), **view_kwargs) renderer.show() """ Explanation: Let's actually plot it: End of explanation """ rr_vox = apply_trans(np.linalg.inv(Torig), rr_mm) fig = imshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice') # Based on how imshow_mri works, the "X" here is the last dim of the MRI vol, # the "Y" is the middle dim, and the "Z" is the first dim, so now that our # points are in the correct coordinate frame, we need to ask matplotlib to # do a tricontour slice like: fig.axes[0].tricontour(rr_vox[:, 2], rr_vox[:, 1], tris, rr_vox[:, 0], levels=[vox[0]], colors='r', linewidths=1.0, zorder=1) """ Explanation: We can also plot the mesh on top of an MRI slice. 
The mesh surfaces are defined in millimeters in the MRI (FreeSurfer surface RAS) coordinate frame, so we can convert them to voxels by applying the inverse of the Torig transform: End of explanation """ renderer_kwargs = dict(bgcolor='w', smooth_shading=False) renderer = mne.viz.backends.renderer.create_3d_figure( size=(800, 400), scene=False, **renderer_kwargs) curvs = [ (mne.surface.read_curvature(os.path.join( subjects_dir, subj, 'surf', 'rh.curv'), binary=False) > 0).astype(float) for subj in ('sample', 'fsaverage') for _ in range(2)] fnames = [os.path.join(subjects_dir, subj, 'surf', surf) for subj in ('sample', 'fsaverage') for surf in ('rh.white', 'rh.sphere')] y_shifts = [-450, -150, 450, 150] z_shifts = [-40, 0, -30, 0] for name, y_shift, z_shift, curv in zip(fnames, y_shifts, z_shifts, curvs): this_rr, this_tri = mne.read_surface(name) this_rr += [0, y_shift, z_shift] renderer.mesh(*this_rr.T, triangles=this_tri, color=None, scalars=curv, colormap='copper_r', vmin=-0.2, vmax=1.2) zero = [0., 0., 0.] width = 50. y = np.sort(y_shifts) y = (y[1:] + y[:-1]) / 2. - width / 2. renderer.quiver3d(zero, y, zero, zero, [1] * 3, zero, 'k', width, 'arrow') view_kwargs['focalpoint'] = (0., 0., 0.) mne.viz.set_3d_view(figure=renderer.figure, distance=1000, **view_kwargs) renderer.show() """ Explanation: This is the method used by :func:mne.viz.plot_bem to show the BEM surfaces. Cortical alignment (spherical) A critical function provided by FreeSurfer is spherical surface alignment of cortical surfaces, maximizing sulcal-gyral alignment. FreeSurfer first expands the cortical surface to a sphere, then aligns it optimally with fsaverage. Because the vertex ordering is preserved when expanding to a sphere, a given vertex in the source (sample) mesh can be mapped easily to the same location in the destination (fsaverage) mesh, and vice-versa. End of explanation """ cyan = '#66CCEE' purple = '#AA3377' renderer = mne.viz.backends.renderer.create_3d_figure( size=(800, 800), scene=False, **renderer_kwargs) fnames = [os.path.join(subjects_dir, subj, 'surf', 'rh.sphere') for subj in ('sample', 'fsaverage')] colors = [cyan, purple] for name, color in zip(fnames, colors): this_rr, this_tri = mne.read_surface(name) renderer.mesh(*this_rr.T, triangles=this_tri, color=color, representation='wireframe') mne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs) renderer.show() """ Explanation: Let's look a bit more closely at the spherical alignment by overlaying the two spherical meshes as wireframes and zooming way in (the purple points are separated by about 1 mm): End of explanation """ src = mne.read_source_spaces(os.path.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')) print(src) blue = '#4477AA' renderer = mne.viz.backends.renderer.create_3d_figure( size=(800, 800), scene=False, **renderer_kwargs) rr_sph, _ = mne.read_surface(fnames[0]) for tris, color in [(src[1]['tris'], cyan), (src[1]['use_tris'], blue)]: renderer.mesh(*rr_sph.T, triangles=tris, color=color, representation='wireframe') mne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs) renderer.show() """ Explanation: You can see that the fsaverage (purple) mesh is uniformly spaced, and the mesh for subject "sample" (in cyan) has been deformed along the spherical surface by FreeSurfer. This deformation is designed to optimize the sulcal-gyral alignment. Surface decimation These surfaces have a lot of vertices, and in general we only need to use a subset of these vertices for creating source spaces. 
A uniform sampling can easily be achieved by subsampling in the spherical space. To do this, we use a recursively subdivided icosahedron or octahedron. For example, let's load a standard oct-6 source space, and at the same zoom level as before visualize how it subsampled the dense mesh: End of explanation """ renderer = mne.viz.backends.renderer.create_3d_figure( size=(800, 400), scene=False, **renderer_kwargs) y_shifts = [-125, 125] tris = [src[1]['tris'], src[1]['use_tris']] for y_shift, tris in zip(y_shifts, tris): this_rr = src[1]['rr'] * 1000. + [0, y_shift, -40] renderer.mesh(*this_rr.T, triangles=tris, color=None, scalars=curvs[0], colormap='copper_r', vmin=-0.2, vmax=1.2) renderer.quiver3d([0], [-width / 2.], [0], [0], [1], [0], 'k', width, 'arrow') mne.viz.set_3d_view(figure=renderer.figure, distance=400, **view_kwargs) renderer.show() """ Explanation: We can also then look at how these two meshes compare by plotting the original, high-density mesh as well as our decimated mesh white surfaces. End of explanation """ brain = mne.viz.Brain('sample', 'lh', 'white', subjects_dir=subjects_dir, background='w') xyz = np.array([[-55, -10, 35]]) brain.add_foci(xyz, hemi='lh', color='k') brain.show_view('lat') """ Explanation: <div class="alert alert-danger"><h4>Warning</h4><p>Some source space vertices can be removed during forward computation. See `tut-forward` for more information.</p></div> FreeSurfer's MNI affine transformation In addition to surface-based approaches, FreeSurfer also provides a simple affine coregistration of each subject's data to the fsaverage subject. Let's pick a point for sample and plot it on the brain: End of explanation """ mri_mni_trans = mne.read_talxfm(subject, subjects_dir) print(mri_mni_trans) xyz_mni = apply_trans(mri_mni_trans, xyz / 1000.) * 1000. print(np.round(xyz_mni, 1)) """ Explanation: We can take this point and transform it to MNI space: End of explanation """ brain = mne.viz.Brain('fsaverage', 'lh', 'white', subjects_dir=subjects_dir, background='w') brain.add_foci(xyz_mni, hemi='lh', color='k') brain.show_view('lat') """ Explanation: And because fsaverage is special in that it's already in MNI space (its MRI-to-MNI transform is identity), it should land in the equivalent anatomical location: End of explanation """
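# --- Illustrative addition (not part of the original tutorial) ---------------
# A plain-NumPy sketch of what mne.transforms.apply_trans does for a 4x4
# affine: append a homogeneous 1, multiply, and drop it again. The affine used
# here is a toy example (1 mm isotropic voxels with a translation), not the
# actual T1 affine of the sample subject.
import numpy as np

toy_affine = np.array([[1., 0., 0., -128.],
                       [0., 1., 0., -128.],
                       [0., 0., 1., -128.],
                       [0., 0., 0.,    1.]])

def apply_affine(affine, points):
    points = np.atleast_2d(points).astype(float)
    homogeneous = np.hstack([points, np.ones((len(points), 1))])
    return (homogeneous @ affine.T)[:, :3]

vox_example = np.array([122, 119, 102])
xyz = apply_affine(toy_affine, vox_example)            # voxel -> toy "RAS" (mm)
back = apply_affine(np.linalg.inv(toy_affine), xyz)    # and back to voxels
print(xyz)
print(back)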
sbussmann/buda-rank
notebooks/Summer Club and Hat League 2016.ipynb
mit
# Load the "autoreload" extension %load_ext autoreload # always reload modules marked with "%aimport" %autoreload 1 import os import sys # add the 'src' directory as one where we can import modules src_dir = os.path.join(os.getcwd(), os.pardir, 'src', 'data') sys.path.append(src_dir) %aimport scrape_buda import pandas as pd import numpy as np import matplotlib.pyplot as plt import scrape_buda ratings = scrape_buda.BudaRating() data_dir = os.path.join(os.getcwd(), os.pardir, 'data', 'interim') prefix = os.path.join(data_dir, 'data20160521') ratings.scrape_buda(prefix) indx = (ratings.allteams['season'] == 'Summer') & \ (ratings.allteams['type'] == 'Club') & \ (ratings.allteams['year'] == '2016') len(ratings.allteams[indx]) ratings.observed_rating() ratings.predicted_rating() pd.set_option('display.max_rows', 500) ratings.allteams[indx].sort('observed_ratings') sns.set_context('poster') indx = (ratings.allteams['season'] == 'Summer') & \ (ratings.allteams['type'] == 'Club') & \ (ratings.allteams['year'] == '2016') plt.plot(ratings.allteams.ix[indx, 'predicted_rating'], ratings.allteams.ix[indx, 'observed_ratings'], '.', label='Summer Club 2016') plt.xlabel('Predicted Rating') plt.ylabel('Observed Rating') plt.plot([600, 2000], [600, 2000], label='1:1 Relationship') plt.title('Summer Club League 2016') plt.legend(loc='upper left') """ Explanation: Summary How well does historical experience rating predict average plus/minus differential in summer hat league 2016? End of explanation """ ratings.allteams[indx] """ Explanation: That green line above is not a fit to the data -- it's a straight up 1:1 line. So just based on what teams someone has played on in the past, we can predict how well that person's new club team will do pretty well. In general, the observed ratings are a bit higher than the predicted ratings. This could be related to the way I handle players with no experience in the database. Would be interesting to see if amount of departure from the 1:1 line corresponds to number of players with no experience. End of explanation """ sns.set_context('poster') indx = (ratings.allteams['season'] == 'Summer') & \ (ratings.allteams['type'] == 'Hat') & \ (ratings.allteams['year'] == '2016') divnames = ratings.allteams.ix[indx, 'divname'].unique() for divname in divnames: subindx = indx = (ratings.allteams['season'] == 'Summer') & \ (ratings.allteams['type'] == 'Hat') & \ (ratings.allteams['year'] == '2016') & \ (ratings.allteams['divname'] == divname) plt.plot(ratings.allteams.ix[indx, 'predicted_rating'], ratings.allteams.ix[indx, 'observed_ratings'], 'o', label=divname) plt.xlabel('Predicted Rating') plt.ylabel('Observed Rating') # plt.plot([600, 2000], [600, 2000], label='1:1 Relationship') plt.title('Summer Hat League 2016') plt.legend(loc='best') """ Explanation: Now let's take a look at Summer Hat League data. End of explanation """
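# --- Illustrative addition (not part of the original notebook) ---------------
# The plots above compare predicted and observed ratings against a 1:1 line by
# eye; this sketch quantifies that agreement with a Pearson correlation and an
# RMSE about the 1:1 line. The two arrays below are synthetic placeholders; in
# the notebook they would come from ratings.allteams[indx].
import numpy as np

predicted = np.array([900., 1100., 1300., 1500., 1700.])
observed = np.array([950., 1180., 1340., 1560., 1750.])

corr = np.corrcoef(predicted, observed)[0, 1]
rmse_about_identity = np.sqrt(np.mean((observed - predicted) ** 2))
mean_offset = np.mean(observed - predicted)

print('Pearson r (predicted vs observed): %.3f' % corr)
print('RMSE about the 1:1 line:           %.1f rating points' % rmse_about_identity)
print('Mean observed minus predicted:     %.1f rating points' % mean_offset)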
neurodata/ndreg
ndreg_demo_real_data.ipynb
apache-2.0
%matplotlib inline import matplotlib.pyplot as plt import matplotlib import ndreg from ndreg import preprocessor, util, plotter import SimpleITK as sitk matplotlib.rcParams['figure.figsize'] = (10.0, 8.0) def myshow(img, cmap='gray', colorbar=False): plt.imshow(sitk.GetArrayViewFromImage(img), cmap=cmap) if colorbar: plt.colorbar() plt.axis('off') plt.show() """ Explanation: Import necessary libraries End of explanation """ params = { # input image path 'image_path': './Thy1eYFP_Control_9.tiff', # voxel spacing is in mm and corresponds to (x, y, z) spacing 'image_spacing': (0.04128, 0.04128, 0.04128), 'image_orientation': 'rpi', # the modality can be 'lavision' or 'colm' 'image_modality': 'lavision', 'atlas_spacing': (0.05, 0.05, 0.05), 'atlas_path': './ARA_50um.tiff', } """ Explanation: Some metadata is required before registration End of explanation """ img = util.imgRead(params['image_path']) img.SetSpacing(params['image_spacing']) atlas = util.imgRead(params['atlas_path']) atlas.SetSpacing(params['atlas_spacing']) plotter.imgShow(img, vmax=2000) plotter.imgShow(atlas, vmax=400) """ Explanation: Load the sample data End of explanation """ img_p = preprocessor.preprocess_brain(img, params['atlas_spacing'], params['image_modality'], params['image_orientation']) """ Explanation: Preprocessing This step preprocesses the input CLARITY images by resampling them to match the resolution of the atlas, bias correcting the images, and normalizing them by subtracting the mean and dividing by the standard deviation of the image intensities. End of explanation """ atlas_registered = ndreg.register_brain(atlas, img_p) """ Explanation: Registration We want to obtain the parameters to transform the original image to the new image. The transformation from the original image to the new image can be described as a composition of an affine transformation which can perform a combination of translation, scaling, rotation, and shear and deformable registration called LDDMM. The output of this method is the atlas registered to the raw data End of explanation """ plotter.imgShow(atlas_registered) plotter.imgShow(plotter.imgChecker(atlas_registered, img_p), vmax=2) """ Explanation: Visualize registered image The two images below should match if the registration worked successfully! End of explanation """ ndreg.imgMSE(sitk.Normalize(atlas), sitk.Normalize(img)) ndreg.imgMSE(sitk.Normalize(atlas), sitk.Normalize(img_p)) ndreg.imgMSE(sitk.Normalize(atlas_registered), sitk.Normalize(img_p)) """ Explanation: Quantitative evaluation Here, we print out the Mean Squared Error between both the atlas and the observed data. As we can see, this metric decreases from the unprocessed data (first cell below this one) to the final atlas registered to our data (3rd cell below this one) End of explanation """
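# --- Illustrative addition (not part of the original notebook) ---------------
# A stand-alone sketch of the evaluation idea above: normalize two images and
# compare them with a mean squared error. This uses SimpleITK and NumPy on tiny
# synthetic volumes; it is *not* ndreg's own imgMSE implementation, just the
# same arithmetic made explicit.
import numpy as np
import SimpleITK as sitk

rng = np.random.RandomState(0)
fixed_arr = rng.rand(8, 8, 8).astype(np.float32)
moving_arr = fixed_arr + 0.1 * rng.rand(8, 8, 8).astype(np.float32)

fixed = sitk.Normalize(sitk.GetImageFromArray(fixed_arr))
moving = sitk.Normalize(sitk.GetImageFromArray(moving_arr))

def mse(img_a, img_b):
    a = sitk.GetArrayViewFromImage(img_a)
    b = sitk.GetArrayViewFromImage(img_b)
    return float(np.mean((a - b) ** 2))

print('MSE between normalized volumes:', mse(fixed, moving))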
mnschmit/LMU-Syntax-nat-rlicher-Sprachen
11-notebook.ipynb
apache-2.0
test_sentences = [ "the men saw a car .", "the woman gave the man a book .", "she gave a book to the man .", "yesterday , all my trouble seemed so far away ." ] import nltk from nltk.corpus import treebank from nltk.grammar import ProbabilisticProduction, PCFG # Production count: the number of times a given production occurs pcount = {} # LHS-count: counts the number of times a given lhs occurs lcount = {} for tree in []: pass productions = [ ProbabilisticProduction( p.lhs(), p.rhs(), prob=None ) for p in pcount ] start = nltk.Nonterminal('S') # grammar = PCFG(start, productions) # parser = nltk.ViterbiParser(grammar) """ Explanation: Übungsblatt 11 Präsenzaufgaben Aufgabe 1 &nbsp;&nbsp;&nbsp; Grammatikinduktion In dieser Aufgabe soll vollautomatisch aus Daten (Syntaxbäumen) eine probabilistische, kontextfreie Grammatik erzeugt werden. Füllen Sie die Lücken und versuchen Sie mithilfe Ihrer automatisch erstellten Grammatik die folgenden Sätze zu parsen: End of explanation """ from nltk.parse.stanford import StanfordDependencyParser PATH_TO_CORE = "/pfad/zu/stanford-corenlp-full-2017-06-09/" jar = PATH_TO_CORE + "stanford-corenlp-3.8.0.jar" model = PATH_TO_CORE + "stanford-corenlp-3.8.0-models.jar" dep_parser = StanfordDependencyParser( jar, model, model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz" ) def generate_predicates_for_sentence(sentence): for result in dep_parser.raw_parse(sentence): for triple in result.triples(): print(*triple) return [] for pred in generate_predicates_for_sentence( "I shot an elephant in my pajamas." ): print(pred) def generate_predicates_for_text(text): predicates = [] for sent in nltk.tokenize.sent_tokenize(text): predicates.extend(generate_predicates_for_sentence(sent)) return predicates text = """ I shot an elephant in my pajamas. The elephant was seen by a giraffe. The bird I need is a raven. The man who saw the raven laughed out loud. """ for pred in generate_predicates_for_text(text): print(pred) """ Explanation: Aufgabe 2 &nbsp;&nbsp;&nbsp; Informationsextraktion per Syntaxanalyse Gegenstand dieser Aufgabe ist eine anwendungsnahe Möglichkeit, Ergebnisse einer Syntaxanalyse weiterzuverarbeiten. Aus den syntaktischen Abhängigkeiten eines Textes soll (unter Zuhilfenahme einiger Normalisierungsschritte) eine semantische Repräsentation der im Text enthaltenen Informationen gewonnen werden. Für die syntaktische Analyse soll der DependencyParser der Stanford CoreNLP Suite verwendet werden. Die semantische Repräsentation eines Satzes sei ein zweistelliges, logisches Prädikat, dessen Argumente durch Subjekt und Objekt gefüllt sind. (Bei Fehlen eines der beiden Elemente soll None geschrieben werden.) Folgendes Beispiel illustriert das gewünschte Ergebnis: Eingabe: I shot an elephant in my pajamas. The elephant was seen by a giraffe in the desert. The bird I need is a raven. The man who saw the raven laughed out loud. Ausgabe: shot(I, elephant) seen(giraffe, elephant) need(I, bird) raven(bird, None) saw(man, raven) laughed(man, None) Beachten Sie, dass PATH_TO_CORE in folgender Code-Zelle Ihrem System entsprechend angepasst werden muss! 
End of explanation """ def parent_annotation(tree, parentHistory=0, parentChar="^"): pass test_tree = nltk.Tree( "S", [ nltk.Tree("NP", [ nltk.Tree("DET", []), nltk.Tree("N", []) ]), nltk.Tree("VP", [ nltk.Tree("V", []), nltk.Tree("NP", [ nltk.Tree("DET", []), nltk.Tree("N", []) ]) ]) ] ) parent_annotation( test_tree ) """ Explanation: Hausaufgaben Aufgabe 3 &nbsp;&nbsp;&nbsp; Parent Annotation Parent Annotation kann die Performanz einer CFG wesentlich verbessern. Schreiben Sie eine Funktion, die einen gegebenen Syntaxbaum dieser Optimierung unterzieht. Auf diese Art und Weise transformierte Bäume können dann wiederum zur Grammatikinduktion verwendet werden. parentHistory soll dabei die Anzahl der Vorgänger sein, die zusätzlich zum direkten Elternknoten berücksichtigt werden. (Kann bei der Lösung der Aufgabe auch ignoriert werden.) parentChar soll ein Trennzeichen sein, das bei den neuen Knotenlabels zwischen dem ursprünglichen Knotenlabel und der Liste von Vorgängern eingefügt wird. End of explanation """ def generate_predicates_for_sentence(sentence): pass def generate_predicates_for_text(text): pass text = """ I see an elephant. You didn't see the elephant. Peter saw the elephant and drank wine. """ """ Explanation: Aufgabe 4 &nbsp;&nbsp;&nbsp; Mehr Semantik für IE Zusätzlich zu den in Aufgabe 2 behandelten Konstruktionen sollen jetzt auch negierte und komplexe Sätze mit Konjunktionen sinnvoll verarbeitet werden. Eingabe: I see an elephant. You didn't see the elephant. Peter saw the elephant and drank wine. Gewünschte Ausgabe: see(I, elephant) not_see(You, elephant) saw(Peter, elephant) drank(Peter, wine) Kopieren Sie am besten Ihren aktuellen Stand von oben herunter und fügen Sie Ihre Erweiterungen dann hier ein. End of explanation """
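# --- Illustrative sketch (not the official solution to the exercise above) ---
# One possible way to implement the parent annotation described in Aufgabe 3:
# every non-leaf label gets its parent's original label appended after
# parentChar. The parentHistory argument is ignored here, which the exercise
# explicitly allows. The function name is suffixed with "_example" to keep it
# distinct from the graded stub above.
import nltk

def parent_annotation_example(tree, parent_label=None, parentChar="^"):
    if not isinstance(tree, nltk.Tree):
        return tree  # leaves stay unchanged
    original_label = tree.label()
    new_label = (original_label if parent_label is None
                 else original_label + parentChar + parent_label)
    children = [parent_annotation_example(child, original_label, parentChar)
                for child in tree]
    return nltk.Tree(new_label, children)

small_tree = nltk.Tree("S", [
    nltk.Tree("NP", [nltk.Tree("DET", []), nltk.Tree("N", [])]),
    nltk.Tree("VP", [nltk.Tree("V", [])]),
])
# Each child label now carries its parent, e.g. NP^S, DET^NP, V^VP.
print(parent_annotation_example(small_tree))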
tmm/DS501
2/CaseStudy2.ipynb
mit
from IPython.lib.display import YouTubeVideo YouTubeVideo('6O43gOxtaWo', start=14) """ Explanation: Case Study 2 : Analyzing data from MovieLens Due Date: March 5, 2016 5:59PM *------------ The MovieLens data sets <img src="https://pbs.twimg.com/profile_images/378800000380161537/b6fa868dce43807d4e67462587d0b0d2_400x400.png"> http://grouplens.org/datasets/movielens/ End of explanation """ %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt # Import Users Data unames = ['user_id','gender','age','occupation','zip'] users = pd.read_table('data/users.dat', sep = '::', header = None, names = unames, engine='python') users[:5] # Import Ratings Data rnames = ['user_id','movie_id','rating','timestamp'] ratings = pd.read_table('data/ratings.dat', sep = '::', header = None, names = rnames,engine='python') ratings[:5] # Import movies Data mnames = ['movie_id','title','genres'] movies = pd.read_table('data/movies.dat', sep = '::', header = None, names = mnames,engine='python') movies[:5] # Merge the data into a single data frame data = pd.merge(pd.merge(ratings,users),movies) data[:5] #Store the data into an HDF5 file data_hdf = pd.HDFStore('data/movies.h5') data_hdf['data1'] = data data_hdf.close() """ Explanation: TEAM Members: Please EDIT this cell and add the names of all the team members in your team Helen Hong Haley Huang Tom Meagher Tyler Reese Desired outcome of the case study. * In this case study we will look at the MovieLens 1M Data Set. * It contains data about users and how the rate movies. * The idea is to analyze the data set, make conjectures, support or refute those conjectures with data, and tell a story about the data! Required Readings: * Chapter 2 (only the section on the MovieLens 1M Data Set), Chapter 5, Chapter 6 Pg 171-172, and Chapter 8 of the book Python for Data Analysis (available from the WPI library as an e-book). * If you want your code to be really compact then you might want to also look into the pivot_table method of Panda's DataFrame, though there are many other ways to complete the case study! Case study assumptions: * You have access to a python installation Required Python libraries: * Pandas (pandas.pydata.org) * Matplotlib (matplotlib.org) * If you need help installing them then you can refer to Chapter 1 of Python for Data Analysis book above. NOTE * Please don't forget to save the notebook frequently when working in IPython Notebook, otherwise the changes you made can be lost. *---------------------- Problem 1 (20 points total): (10 points) Importing the MovieLens data set and merging it into a single Pandas DataFrame Download the 1 million ratings data set from http://grouplens.org/datasets/movielens/ (though if you are feeling adventerous you can download the 10 million ratings file instead) Merge all of the data into a single Pandas DataFrame Store the data into an HDF5 file. (10 points) Report some basic details of the data you collected. For example: How many movies have an average rating over 4.5 overall? How many movies have an average rating over 4.5 among men? How about women? How many movies have an median rating over 4.5 among men over age 30? How about women over age 30? What are the ten most popular movies? Choose what you consider to be a reasonable defintion of "popular". Be perpared to defend this choice. Make some conjectures about how easy various groups are to please? Support your answers with data! 
For example, one might conjecture that people between the ages of 1 and 10 are the easiest to please since they are all young children. This conjecture may or may not be true, but how would you support or disprove either conclusion with with data? Be sure to come up with your own conjectures and support them with data! End of explanation """ #check statistics of data data[['rating','age']].describe() """ Explanation: Compute some Summary Statistics for the data End of explanation """ # Use a pivot table to compute mean ratings by title mean_ratings = data.pivot_table('rating',index = 'title',aggfunc = 'mean') # Determine titles with high mean ratings top_overall_titles = mean_ratings.index[mean_ratings >= 4.5] #Extract those titles top_overall_movies = mean_ratings.ix[top_overall_titles] print 'Total movies with an average ranking of (at least) 4.5 overall:' print len(top_overall_movies) print print 'Examples:' print top_overall_movies[:5] """ Explanation: How many movies have an average rating over 4.5 overall? End of explanation """ # Use a pivot table to compute mean ratings per title, stratified by gender. mean_ratings = data.pivot_table('rating',index = 'title',columns = 'gender',aggfunc = 'mean') #Determine those title ranked high among females. top_female_titles = mean_ratings.index[mean_ratings['F'] >= 4.5] # Extract those titles top_female_movies = mean_ratings.ix[top_female_titles] print 'Total movies with an average ranking of (at least) 4.5 among women:' print len(top_female_movies) print print 'Examples (average rankings):' print top_female_movies[:5] mean_ratings = data.pivot_table('rating',index = 'title',columns = 'gender',aggfunc = 'mean') # Determine which titles had high average ratings among men top_male_titles = mean_ratings.index[mean_ratings['M'] >= 4.5] # Extract those titles top_male_movies = mean_ratings.ix[top_male_titles] print 'Total movies with an average ranking of (at least) 4.5 among men:' print len(top_male_movies) print print 'Examples (average rankings):' print top_male_movies[:5] """ Explanation: How many movies have an average rating over 4.5 among men? How about women? End of explanation """ # Restrict data to those with raters aged over 30 data_over30 = data.ix[data['age']>30] # Use a pivot table to compute the median ratings by title on this restricted data median_ratings = data_over30.pivot_table('rating',index = 'title',columns = ['gender'],aggfunc = 'median') # Determine which movies had a high median among men and extract those titles top_male_median_titles = median_ratings.index[median_ratings['M'] >= 4.5] top_male_median_movies = median_ratings.ix[top_male_median_titles] print 'Total movies with an median ranking of (at least) 4.5 among men over 30:' print len(top_male_median_movies) print print 'Examples, median scores among people over 30:' print top_male_median_movies[:5] # Determine which movies had a high median among men and extract those titles top_female_median_titles = median_ratings.index[median_ratings['F'] >= 4.5] top_female_median_movies = median_ratings.ix[top_female_median_titles] print 'Total movies with an median ranking of (at least) 4.5 among women over 30:' print len(top_female_median_movies) print print 'Examples, median scores among people over 30:' print top_female_median_movies[:5] """ Explanation: How many movies have an median rating over 4.5 among men over age 30? How about women over age 30? 
End of explanation """ # Determine the overall total ratings and mean ratings per title popularity_test = data.pivot_table('rating',index = 'title', aggfunc = [len, np.mean]) # Determine the mean ratings per title by gender gender_popularity_test = data.pivot_table('rating',index = 'title', columns = 'gender', aggfunc = np.mean) popularity_test[:5] gender_popularity_test[:5] # Calculate total number of ratings for each title ratings_by_title = data.groupby('title').size() # Determine the average number of total ratings per title average_total_ratings = sum(ratings_by_title)/len(ratings_by_title) # Determine which titles had above average total ratings and isolate those titles. high_total_titles = popularity_test.index[popularity_test['len'] >= average_total_ratings] high_total = popularity_test.ix[high_total_titles] high_total[:5] # Determine the average of ALL ratings given by men and by women. gender_average_ratings = data.pivot_table('rating', index = 'gender',aggfunc = np.mean) gender_average_ratings # Determine the titles with above average female ratings and isolate those titles among the movies with above average total ratings. high_female_titles = gender_popularity_test.index[gender_popularity_test['F'] >= gender_average_ratings['F']] high_total_female = high_total.ix[high_female_titles] # Among the above isolated titles, determine those with above average male ratings and isolate those titles. high_male_titles = gender_popularity_test.index[gender_popularity_test['M'] >= gender_average_ratings['M']] high_total_female_male = high_total_female.ix[high_male_titles] # Determine the popular movies, given the definition above. from numpy import nan as NA popular_movies = high_total_female_male.dropna(how = 'all') popular_movies[:5] # Given the popluar movies, determine the 10 most popular. most_popular_movies = popular_movies.sort_values(by='mean',ascending = False) most_popular_movies[:10] """ Explanation: What are the ten most popular movies? * Choose what you consider to be a reasonable defintion of "popular". * Be perpared to defend this choice. We propose the following definition of a "Popular" movie: * Above-average total number of ratings * Above-average rating among women (i.e. the movie's average rating among women is above the average of ALL ratings given by women) * Above-average rating among men ((i.e. the movie's average rating among men is above the average of ALL ratings given by men) Among these "popular" movies we determine the top 10 MOST popular by using highest average rating overall. 
End of explanation """ # Compute average rating by age group age_avg_ratings = data.pivot_table('rating', index = 'age',aggfunc = np.mean) age_avg_ratings # Compute weighted average by weighting each rating by the total number of ratings that individual submits avg_by_user = data.pivot_table('rating',index = ['age','user_id'], aggfunc = [ len , np.mean]) avg_by_user[:10] avg_ratings = np.mean(avg_by_user['len']) avg_by_user['weight'] = avg_by_user['len']/avg_ratings avg_by_user['weighted_mean'] = avg_by_user['mean']*avg_by_user['weight'] age_avg_weighted_ratings = avg_by_user.pivot_table('weighted_mean', index = avg_by_user.index.droplevel(1), aggfunc = np.mean) age_avg_weighted_ratings # Compute average age per rating avg_age_ratings = data.pivot_table('age', index = 'rating',aggfunc = np.mean) avg_age_ratings age_counts = data.pivot_table('title', index='age', columns='rating', aggfunc='count') age_counts.rename(index={1: 'Under 18', 18: '18-24', 25: '25-34', 35: '35-44', 45: '45-49', 50: '50-55', 56: '56+'}, inplace=True) print 'Frequency of Age Groups Ratings' print age_counts #normalize age_counts_norm = age_counts.div(age_counts.sum(1).astype(float), axis=0) age_counts_norm # plot percentage of each rate from each age group age_counts_norm.plot(ylim=[0,0.4],kind='bar', color=['yellow','#E50E14','#ec971f','#00b27f','#5898f1'],title = "Percent of Ratings By Age").legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) """ Explanation: Make some conjectures about how easy various groups are to please? Support your answers with data! Conjecture 1.) The older a person gets, the more difficult they are to please. End of explanation """ import time timestamps = data['timestamp'] # Time stamps are reported in seconds since epoch. Convert these values to local time, and extract the hour. hour = [time.localtime(stamp).tm_hour for stamp in timestamps.values] hour_series = pd.DataFrame(hour, index=data.index) # Append the hour each rating was reported to the data set. data['hour'] = hour_series # Use a pivot table to determine the average overall rating by hour. avg_by_hour = data.pivot_table('rating',index = 'hour', aggfunc = np.mean) avg_by_hour wee_hours_data = data[np.logical_or(data['hour']>= 22,data['hour']<=5)] wee_hours_5 = wee_hours_data[wee_hours_data['rating']==5] wee_hours_1 = wee_hours_data[wee_hours_data['rating']==1] total_5 = data[data['rating']==5] total_1 = data[data['rating']==1] wee_hours_5_percent = float(len(wee_hours_5))/len(wee_hours_data) wee_hours_1_percent = float(len(wee_hours_1))/len(wee_hours_data) total_5_percent = float(len(total_5))/len(data) total_1_percent = float(len(total_1))/len(data) compdat = {'Percent Ratings 5':[wee_hours_5_percent , total_5_percent], 'Percent Raings 1':[wee_hours_1_percent, total_1_percent]} comp = pd.DataFrame(compdat, columns=['Percent Ratings 5','Percent Raings 1'], index=['Wee Hours','Total']) comp """ Explanation: Conclusion: False. In fact, an older person is more likely than a younger person to give a rating of 5, and less likely to give a rating of 1. More details presented in the report. Conjecture 2.) Tired people are more easy to please End of explanation """ #Plot a histogram of the ratings of all movies. Rating_all=data.pivot_table('title',index='rating',aggfunc='count') Rating_all.plot(kind='bar', color='#FA5744') plt.title('Histogram of all ratings') plt.ylabel('Total number') """ Explanation: Conclusion: False. 
If the conjecture were true, we would expect to see noticeably higher average ratings at very large and very small hours. This is clearly not the case. Problem 2 (20 points total): Expand our investigation to histograms An obvious issue with any inferences drawn from Problem 1 is that we did not consider how many times a movie was rated. * (2 points) Plot a histogram of the ratings of all movies. * (2 points) Plot a histogram of the number of ratings each movie recieved. * (2 points) Plot a histogram of the average rating for each movie. * (6 points) Plot a histogram of the average rating for movies which are rated more than 100 times. * What do you observe about the tails of the histogram where you use all the movies versus the one where you only use movies rated more than 100 times? * Which highly rated movies would you trust are actually good? Those rated more than 100 times or those rated less than 100 times? * (8 points) Make some conjectures about the distribution of ratings? Support your answers with data! * For example, what age range do you think has more extreme ratings? Do you think children are more or less likely to rate a movie 1 or 5? * Be sure to come up with your own conjectures and support them with data! Plot a histogram of the ratings of all movies. End of explanation """ #Plot a histogram of the number of ratings each movie recieved. Rating_each=data.pivot_table('rating',index='title',aggfunc='count') Rating_each.hist() plt.title('Histogram of Number of ratings each movie received') plt.ylabel('Number of Movies') plt.xlabel('Number of Ratings') """ Explanation: Plot a histogram of the number of ratings each movie recieved. End of explanation """ #Plot a histogram of the average rating for each movie. Avg_rating_each=data.pivot_table('rating',index='title',aggfunc='mean') Avg_rating_each.hist(color='orange') plt.title('Histogram of Average rating for each movie') plt.ylabel('Number of Movies') plt.xlabel('Average Rating') """ Explanation: Plot a histogram of the average rating for each movie. End of explanation """ #Plot a histogram of the average rating for movies which are rated more than 100 times. rating_by_title = data.groupby('title').size() active_titles = rating_by_title.index[rating_by_title > 100] avg_ratings_each_active = Avg_rating_each.ix[active_titles] avg_ratings_each_active.hist(color='red') plt.title('average rating for movies rated more than 100 times') plt.ylabel('Number of Movies') plt.xlabel('Average rating') """ Explanation: Plot a histogram of the average rating for movies which are rated more than 100 times. End of explanation """ # Select the movies with less than half the average number of total ratings. rating_by_title = data.groupby('title').size() inactive_titles = rating_by_title.index[rating_by_title <= average_total_ratings/2] inactive = [title in inactive_titles.values for title in data['title']] inactive_series = pd.DataFrame(inactive, index = data.index) data['Inactive'] = inactive_series inactive_data = data[data['Inactive']] inactive_rating_all=inactive_data.pivot_table('title',index='rating',aggfunc='count') inactive_rating_all.plot(kind='bar', color='blue') plt.title('Histogram of ratings of movies \n with less than half the average number of ratings') plt.ylabel('Total number') # Select the movies with less more than twice the average number of total ratings. 
rating_by_title = data.groupby('title').size() wayactive_titles = rating_by_title.index[rating_by_title >= average_total_ratings*2] wayactive = [title in wayactive_titles.values for title in data['title']] wayactive_series = pd.DataFrame(wayactive, index = data.index) data['wayactive'] = wayactive_series wayactive_data = data[data['wayactive']] wayactive_rating_all=wayactive_data.pivot_table('title',index='rating',aggfunc='count') wayactive_rating_all.plot(kind='bar', color='blue') plt.title('Histogram of ratings of movies with \n more than twice the average number of ratings') plt.ylabel('Total number') """ Explanation: Make some conjectures about the distribution of ratings? Support your answers with data! Conjecture 1: Movies with fewer total ratings have a distribution of all ratings which is closer to a uniform distribution due to a larger percent of low-end extreme ratings. End of explanation """ # Extract the year from the title def extract(string, start='(', stop=')'): while string.index(stop) - (string.index(start)+1)!= 4: string = string[:string.index(start)] + string[string.index(stop)+1:] return string[string.index(start)+1:string.index(stop)] titles = data['title'] year = [int(extract(title)) for title in titles] year_series = pd.DataFrame(year, index=data.index) data['year'] = year_series data[:5] year_array = list(set(data['year'].values)) average_year = int(float(sum(year_array))/len(year_array)) average_year old_data = data[data['year']<= average_year] old_rating_all=old_data.pivot_table('title',index='rating',aggfunc='count') old_rating_all.plot(kind='bar', color='#FA5744') plt.title('Histogram of all of early movies') plt.ylabel('Total number') newer_data = data[data['year']>= average_year] new_rating_all=newer_data.pivot_table('title',index='rating',aggfunc='count') new_rating_all.plot(kind='bar', color='#00b27f') plt.title('Histogram of all of newer movies') plt.ylabel('Total number') """ Explanation: Conjecture 2: The distribution of all ratings of older movies is less normally-distributed than that of newer movies. Newer movies are likely being watched by audiences looking to be entertained. Those watching older movies (i.e. movies made before 1960) likely have some nostalgia tied up in these older films. End of explanation """ %matplotlib inline import matplotlib import matplotlib.pyplot as plt # among total6040 users, how many female? how may male? users.groupby('gender').size() #among total 100209 rating records, how many was made by female? how many was made by male? data.groupby('gender').size() """ Explanation: *------------------------ Problem 3: (20 points total) Correlation: Men versus women Let look more closely at the relationship between the pieces of data we have. (2 points) Make a scatter plot of men versus women and their mean rating for every movie. (2 points) Make a scatter plot of men versus women and their mean rating for movies rated more than 200 times. (6 points) Compute the correlation coefficent between the ratings of men and women. What do you observe? Are the ratings similiar or not? Support your answer with data! (10 points) Conjecture under what circumstances the rating given by one gender can be used to predict the rating given by the other gender. For example, are men and women more similar when they are younger or older? Be sure to come up with your own conjectures and support them with data! 
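For reference, the correlation coefficient used throughout this problem is the ordinary Pearson correlation, r = cov(x, y) / (sd(x) sd(y)), which pandas computes directly for two aligned columns, for example

    mean_ratings['M'].corr(mean_ratings['F'])

(a sketch of the call pattern used in the cells that follow).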
End of explanation """ # Use a pivot table to compute mean ratings per title by gender mean_ratings = data.pivot_table('rating',index = 'title',columns = 'gender',aggfunc = 'mean') # Scatter this data. plt.scatter(mean_ratings['M'], mean_ratings['F']) plt.title('Average Ratings by Movie') plt.ylabel('Average female rating') plt.xlabel('Average male rating') """ Explanation: Make a scatter plot of men versus women and their mean rating for every movie. End of explanation """ # Determine titles with more than 200 total ratings. ratings_by_title = data.groupby('title').size() active_titles = ratings_by_title.index[ratings_by_title > 200] # Extract these titles over_200_mean_ratings = mean_ratings.ix[active_titles] #Produce scatter plot plt.scatter(over_200_mean_ratings['M'], over_200_mean_ratings['F']) plt.title('Average Ratings by Movie, \n Among movies rated more than 200 times') plt.ylabel('Average female rating') plt.xlabel('Average male rating') """ Explanation: Make a scatter plot of men versus women and their mean rating for movies rated more than 200 times. End of explanation """ # Compute the correlation coefficient print 'correlation coefficient between averege male and female ratings: {0}'.format(mean_ratings.M.corr(mean_ratings.F)) # Based on scatter plots above, it is clear that men and women tend to agree more when the movies have a higher total number # of ratings. Calculate the correlation coeffcient in this case print 'correlation coefficient between averege male and female ratings among movies with over 200 ratings: {0}'.format( over_200_mean_ratings.M.corr(over_200_mean_ratings.F)) # Given this observed in crease in correlation coefficient, we now compute the correlation coefficient based on the number of # total ratings: ratings_by_title = data.groupby('title').size() mean_ratings = data.pivot_table('rating',index = 'title',columns = 'gender',aggfunc = 'mean') i = 1 IND = ['0'] RAT = [0] while i < max(ratings_by_title): titles = ratings_by_title.index[np.logical_and(ratings_by_title >= i, ratings_by_title < 2*i)] subset_mean_ratings = mean_ratings.ix[titles] correl = subset_mean_ratings.M.corr(subset_mean_ratings.F) IND.append('Total ratings between {0} and {1}'.format(i, 2*i)) RAT.append(correl) j = i i = 2*j correl_comp = pd.Series(RAT, index=IND) correl_comp.index.name = 'Total number of Ratings' correl_comp.name = 'Correlation coefficient between average Male and Female Ratings per Movie' correl_comp """ Explanation: Compute the correlation coefficent between the ratings of men and women. * What do you observe? * Are the ratings similiar or not? Support your answer with data! 
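A useful robustness check, not required by the problem: pandas can also compute a rank (Spearman) correlation, which is less sensitive to a handful of extreme per-title averages, for example

    mean_ratings.M.corr(mean_ratings.F, method='spearman')

(a sketch; the analysis here otherwise uses the default Pearson correlation).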
End of explanation """ fives_data = data[data['rating']==5] five_ratings = fives_data.pivot_table('rating', index = 'title', columns = 'gender', aggfunc = 'count') total_ratings = data.pivot_table('rating',index ='title',columns = 'gender',aggfunc = 'count') fives_percent = pd.DataFrame(index = five_ratings.index) fives_percent['M'] = five_ratings['M']/total_ratings['M'] fives_percent['F'] = five_ratings['F']/total_ratings['F'] print 'correlation coefficient between percent ratings of 5 by male and female per title: {0}'.format(fives_percent.M.corr(fives_percent.F)) over_200_fives_percent = fives_percent.ix[active_titles] print 'correlation coefficient between percent ratings of 5 by male and female among titles with more than 200 ratings: {0}'.format(over_200_fives_percent.M.corr(over_200_fives_percent.F)) plt.scatter(over_200_fives_percent['M'], over_200_fives_percent['F']) plt.title('Percent ratings 5 by Movie, \n Among movies rated more than 200 times') plt.ylabel('Percent 5, female') plt.xlabel('Percent 5, male') """ Explanation: This data seems to be somewhat misleading. Based on the high correlation values, it seems that the ratings between men and women are similar, especially among movies watched more than 200 times. However, this is the correlation between the MEAN rating per title between men and women. What this is saying is that ON AVERAGE, men and women rate movies similarly. This doesn't indicate that the ratings themselves are actually similar! For example, there could be a movie in which both men and women have an average rating of 3, but women rate it as either a 1 or a 5 and all men rate it as 3. We need to explore the data more to understand if the ratings between men and women are actually similar. For example, rather than consider mean rating, let's consider the percentage of ratings that are a 5. That is, for each title we compute the total number of 5 ratings given by men and divide by the total number of ratings given by men. We then determine the correlation in this data. End of explanation """ low_data = data[data['rating']<= 2] low_ratings = low_data.pivot_table('rating', index = 'title', columns = 'gender', aggfunc = 'count') total_ratings = data.pivot_table('rating',index ='title',columns = 'gender',aggfunc = 'count') low_percent = pd.DataFrame(index = low_ratings.index) low_percent['M'] = low_ratings['M']/total_ratings['M'] low_percent['F'] = low_ratings['F']/total_ratings['F'] print 'correlation coefficient between percent ratings of 1 or 2 by male and female per title: {0}'.format(low_percent.M.corr(low_percent.F)) over_200_low_percent = low_percent.ix[active_titles] print 'correlation coefficient between percent ratings of 5 by male and female among titles with more than 200 ratings: {0}'.format(over_200_low_percent.M.corr(over_200_low_percent.F)) plt.scatter(over_200_low_percent['M'], over_200_low_percent['F']) plt.title('Percent low ratings by Movie, \n Among movies rated more than 200 times') plt.ylabel('Low percent, female') plt.xlabel('Low percent, male') """ Explanation: Similarly, we perform the same analysis for number of ratings of 1 or 2 End of explanation """ #freqency of men vs. wen ratings for each age group gender_counts = data.pivot_table('title', index='gender', columns='rating', aggfunc='count') print 'Frequency of men vs. wemen Ratings' print gender_counts #normalize to sum to 1, giving us the percent of each rating given by men and women. 
gender_counts_norm = gender_counts.div(gender_counts.sum(1).astype(float), axis=0) gender_counts_norm gender_counts_norm.plot(kind='bar') print print 'Percent of each Rating, men vs women' print gender_counts_norm # Calculate the correlation coefficient among these average ratings. gender_counts_norm.ix['M'].corr(gender_counts_norm.ix['F']) """ Explanation: This indicates that male and females tend to agree on average and in distribution (especially on movies rated more than 200 times). This does not, however, indicate we can predict a single male rating given female ratings! The average behavior of the two is similar, but not single instances. Conjecture under what circumstances the rating given by one gender can be used to predict the rating given by the other gender. * For example, are men and women more similar when they are younger or older? * Be sure to come up with your own conjectures and support them with data! Observation 1.) The percent of each rating given by men and women overall is nearly identical. End of explanation """ import time # Convert time stamps to local time and extract the hour. timestamps = data['timestamp'] hour = [time.localtime(stamp).tm_hour for stamp in timestamps.values] hour_series = pd.DataFrame(hour, index=data.index) data['hour'] = hour_series # Isolate data for ratings submitted between 10PM and 5AM local time wee_hours_data = data[np.logical_or(data['hour']>= 22,data['hour']<=5)] # Determine the average ratings per title by gender during these late-night hours. wee_hours_mean_ratings = wee_hours_data.pivot_table('rating', index = 'title', columns = 'gender', aggfunc = np.mean) wee_hours_mean_ratings[:5] #Calculate the correlation coefficient. print 'Correlation coefficient between averege male and female ratings between 10PM and 5AM: {0}'.format( wee_hours_mean_ratings.M.corr(wee_hours_mean_ratings.F)) # We already know that men and women tend to disagree on movies with lower total ratings. Segment from the late-night data those with # high total numbers of ratings. wee_hours_over_200_mean_ratings = wee_hours_mean_ratings.ix[active_titles] wee_hours_over_200_mean_ratings[:5] #Compute Correlation Coefficient 'Correlation coefficient between averege male and female ratings between 10PM and 5AM, among movies with at least 200 total ratings: {0}'.format(wee_hours_over_200_mean_ratings.M.corr(wee_hours_over_200_mean_ratings.F)) """ Explanation: Conjecture 1.) People rate more similarly when they are tired. End of explanation """ # Determine which movies have "Comedy" listed within its genres. genres = data['genres'] all_genres = [string.split('|') for string in genres] comedy_truth = [ 'Comedy' in genres for genres in all_genres] comedy_series = pd.DataFrame(comedy_truth, index=data.index) data['comedy'] = comedy_series comedy_data = data.ix[data['comedy'] == True] # Determine comedies with at least 100 ratings comedy_ratings_by_title = comedy_data.groupby('title').size() comedy_active_titles = comedy_ratings_by_title.index[comedy_ratings_by_title > 100] # Extract these titles comedy_mean_ratings = comedy_data.pivot_table('rating',index = 'title',columns = 'gender',aggfunc = 'mean') active_comedy_mean_ratings = comedy_mean_ratings.ix[comedy_active_titles] active_comedy_mean_ratings[:10] # Compute correlation between average men's and women's ratings. 
active_comedy_mean_ratings.M.corr(active_comedy_mean_ratings.F) from sklearn import cross_validation, linear_model, feature_selection, metrics # Train a linear model to examine predictability # Can't have any NaN values for linear regression. active_comedy_mean_ratings = active_comedy_mean_ratings.dropna() # Select out our predictor columns and our response columns X = active_comedy_mean_ratings.ix[:,['M']] y = active_comedy_mean_ratings.ix[:,['F']] # Split the data into training data and testing data X_train,X_test,y_train,y_test = cross_validation.train_test_split(X, y, test_size=0.8) # Run the solver reg = linear_model.LinearRegression(fit_intercept=True) reg.fit(X_train,y_train) # Plot the data and the model plotX = np.linspace(0,5,100) plotY = reg.predict(np.matrix(plotX).T) plt.plot(X_train,y_train,'o', color='#FA5744') plt.plot(X_test,y_test,'o', color='#00b27f') plt.plot(plotX,plotY,'-', color='#5898f1') plt.title('Average Rating of Comedies') plt.ylabel('Female Average') plt.xlabel('Male Average') # Compute the slope and intercept of the linear model print reg.intercept_ # Beta_1 print reg.coef_ # Compute testing and training error. print 'training error' print metrics.mean_squared_error(y_train,reg.predict(X_train)) print 'testing error' print metrics.mean_squared_error(y_test,reg.predict(X_test)) """ Explanation: Conclusion: False. Both correlation coefficients actually went down by about 0.1: this change in the largest significant digit should be meaningful. That is, during late night/early morning hours, even the average behavior between the two genders is less correlated, and thus they are not behaving similarly Conjecture 2.) Genders agree on what is "funny"! End of explanation """ comedy_fives_data = comedy_data[comedy_data['rating']==5] comedy_gender_fives = comedy_fives_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') comedy_gender_totals = comedy_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') comedy_gender_percents = comedy_gender_fives / comedy_gender_totals comedy_gender_percents = comedy_gender_percents.ix[active_titles] comedy_gender_percents.M.corr(comedy_gender_percents.F) # Train a linear model to examine predictability # Can't have any NaN values for linear regression. comedy_gender_percents = comedy_gender_percents.dropna() # Select out our predictor columns and our response columns X = comedy_gender_percents.ix[:,['M']] y = comedy_gender_percents.ix[:,['F']] # Split the data into training data and testing data X_train,X_test,y_train,y_test = cross_validation.train_test_split(X, y, test_size=0.8) # Run the solver reg = linear_model.LinearRegression(fit_intercept=True) reg.fit(X_train,y_train) # Plot the data and the model plotX = np.linspace(0,1,100) plotY = reg.predict(np.matrix(plotX).T) plt.plot(X_train,y_train,'o', color='#FA5744') plt.plot(X_test,y_test,'o', color='#00b27f') plt.plot(plotX,plotY,'-', color='#5898f1') plt.title('Percent 5 Rating of Comedies') plt.ylabel('Female Average') plt.xlabel('Male Average') # Compute the slope and intercept of the linear model print reg.intercept_ # Beta_1 print reg.coef_ # Compute testing and training error. print 'training error' print metrics.mean_squared_error(y_train,reg.predict(X_train)) print 'testing error' print metrics.mean_squared_error(y_test,reg.predict(X_test)) """ Explanation: As before, we consider the percent ratings of 5 (per title) given by each age group. 
End of explanation """ comedy_low_data = comedy_data[comedy_data['rating']<=2] comedy_low = comedy_low_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') comedy_totals = comedy_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') comedy_low_percents = comedy_low / comedy_totals comedy_low_percents = comedy_low_percents.ix[active_titles] comedy_low_percents.M.corr(comedy_low_percents.F) # Train a linear model to determine predictability # Can't have any NaN values for linear regression. comedy_low_percents = comedy_low_percents.dropna() # Select out our predictor columns and our response columns X = comedy_low_percents.ix[:,['M']] y = comedy_low_percents.ix[:,['F']] # Split the data into training data and testing data X_train,X_test,y_train,y_test = cross_validation.train_test_split(X, y, test_size=0.8) # Run the solver reg = linear_model.LinearRegression(fit_intercept=True) reg.fit(X_train,y_train) # Plot the data and the model plotX = np.linspace(0,1,100) plotY = reg.predict(np.matrix(plotX).T) plt.plot(X_train,y_train,'o', color='#FA5744') plt.plot(X_test,y_test,'o', color='#00b27f') plt.plot(plotX,plotY,'-', color='#5898f1') plt.title('Percent 1 or 2 Rating of Comedies') plt.ylabel('Female Average') plt.xlabel('Male Average') # Compute the slope and intercept of the linear model print reg.intercept_ # Beta_1 print reg.coef_ # Compute testing and training error. print 'training error' print metrics.mean_squared_error(y_train,reg.predict(X_train)) print 'testing error' print metrics.mean_squared_error(y_test,reg.predict(X_test)) """ Explanation: And the percent of low (1 or 2) ratings. End of explanation """ # Extract those movies made in the last 10 years of those available. newest_data = data[data['year']>= 1990] # Use a pivot table to compute mean ratings per title by gender newest_mean_ratings = newest_data.pivot_table('rating',index = 'title',columns = 'gender',aggfunc = 'mean') over_200_newest_mean_ratings = newest_mean_ratings.ix[active_titles] # Scatter this data. plt.scatter(over_200_newest_mean_ratings['M'], over_200_newest_mean_ratings['F']) plt.title('Average Ratings by Movie, after 1990') plt.ylabel('Average female rating') plt.xlabel('Average male rating') print 'correlation coefficient between percent average males and females per title: {0}'.format( over_200_newest_mean_ratings.M.corr(over_200_newest_mean_ratings.F)) # Compute the percent 5 ratings by males and females for movies made in each year of this 10 year window. new_fives_data = newest_data[newest_data['rating']==5] year_gender_fives = new_fives_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') year_gender_totals = newest_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') year_gender_percents = year_gender_fives / year_gender_totals year_gender_percents = year_gender_percents.ix[active_titles] # Scatter this data. plt.scatter(year_gender_percents['M'], year_gender_percents['F']) plt.title('Percent ratings 5 by Movie, after 1990') plt.ylabel('Female percent 5') plt.xlabel('Male percent 5') print 'Correlation coefficient between percent ratings of 5 between males and females by year of movie release: {0}'.format( year_gender_percents.M.corr(year_gender_percents.F)) """ Explanation: Conjecture 3.) Men and Women rate similarly on highly-watched movies made most recently. 
End of explanation """ new_low_data = newest_data[newest_data['rating']<=2] year_low = new_low_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') year_totals = newest_data.pivot_table('rating', index='title', columns='gender', aggfunc='count') year_low_percents = year_low / year_totals year_low_percents = year_low_percents.ix[active_titles] plt.scatter(year_low_percents['M'], year_low_percents['F']) plt.title('Percent ratings 1 or 2 by Movie, after 1990') plt.ylabel('Female percent 1 or 2') plt.xlabel('Male percent 1 or 2') print 'Correlation between percent ratings of 1 or 2 between males and females on movies with the same release year: {0}'.format( year_low_percents.M.corr(year_low_percents.F)) # Train a linear model to predict average ratings between genders. from sklearn import cross_validation, linear_model, feature_selection, metrics # Can't have any NaN values for linear regression. over_200_newest_mean_ratings = over_200_newest_mean_ratings.dropna() # Select out our predictor columns and our response columns X = over_200_newest_mean_ratings.ix[:,['M']] y = over_200_newest_mean_ratings.ix[:,['F']] # Split the data into training data and testing data X_train,X_test,y_train,y_test = cross_validation.train_test_split(X, y, test_size=0.8) # Run the solver reg = linear_model.LinearRegression(fit_intercept=True) reg.fit(X_train,y_train) # Compute the slope and intercept of the linear model print reg.intercept_ # Beta_1 print reg.coef_ # Plot the data and the model plotX = np.linspace(0,5,100) plotY = reg.predict(np.matrix(plotX).T) plt.plot(X_train,y_train,'ro') plt.plot(X_test,y_test,'go') plt.plot(plotX,plotY,'b-') plt.title('Average Ratings by Movie, after 1990') plt.ylabel('Average female rating') plt.xlabel('Average male rating') # Compute testing and training error. print 'training error' print metrics.mean_squared_error(y_train,reg.predict(X_train)) print 'testing error' print metrics.mean_squared_error(y_test,reg.predict(X_test)) # Train a linear model to predict percent of ratings given as 5 per-movie between genders. # Can't have any NaN values for linear regression. year_gender_percents = year_gender_percents.dropna() # Select out our predictor columns and our response columns X = year_gender_percents.ix[:,['M']] y = year_gender_percents.ix[:,['F']] # Split the data into training data and testing data X_train,X_test,y_train,y_test = cross_validation.train_test_split(X, y, test_size=0.8) # Run the solver reg = linear_model.LinearRegression(fit_intercept=True) reg.fit(X_train,y_train) # Plot the data and the model plotX = np.linspace(0,1,100) plotY = reg.predict(np.matrix(plotX).T) plt.plot(X_train,y_train,'ro') plt.plot(X_test,y_test,'go') plt.plot(plotX,plotY,'b-') plt.title('Percent 5 Ratings by Movie, after 1990') plt.ylabel('Female % 5') plt.xlabel('Male % 5') # Compute the slope and intercept of the linear model print reg.intercept_ # Beta_1 print reg.coef_ # Compute testing and training error. print 'training error' print metrics.mean_squared_error(y_train,reg.predict(X_train)) print 'testing error' print metrics.mean_squared_error(y_test,reg.predict(X_test)) # Train a linear model to predict percent of ratings given as 1 or 2 per-movie between genders. # Can't have any NaN values for linear regression. 
year_low_percents = year_low_percents.dropna() # Select out our predictor columns and our response columns X = year_low_percents.ix[:,['M']] y = year_low_percents.ix[:,['F']] # Split the data into training data and testing data X_train,X_test,y_train,y_test = cross_validation.train_test_split(X, y, test_size=0.8) # Run the solver reg = linear_model.LinearRegression(fit_intercept=True) reg.fit(X_train,y_train) # Plot the data and the model plotX = np.linspace(0,1,100) plotY = reg.predict(np.matrix(plotX).T) plt.plot(X_train,y_train,'ro') plt.plot(X_test,y_test,'go') plt.plot(plotX,plotY,'b-') plt.title('Percent 1 or 2 Ratings by Movie, after 1990') plt.ylabel('Female % 1 or 2') plt.xlabel('Male % 1 or 2') # Compute the slope and intercept of the linear model print reg.intercept_ # Beta_1 print reg.coef_ # Compute testing and training error. print 'training error' print metrics.mean_squared_error(y_train,reg.predict(X_train)) print 'testing error' print metrics.mean_squared_error(y_test,reg.predict(X_test)) """ Explanation: We see that, for movies made in the 90's, the correlation coefficient for the percent of ratings given as 5 between males and females is 0.9023! And the correlation coefficient for the percent of ratings given as one or two (based on the year the movie was released in the 90's) is 0.905 End of explanation """ #Convert genres into 18 dummies. New dataset has total 100209 rows*28 columns #generate 18 dummies variables for movie genres genre_iter=(set(x.split('|')) for x in movies.genres) genres=sorted(set.union(*genre_iter)) dummies=pd.DataFrame(np.zeros((len(movies), len(genres))), columns=genres) for i, gen in enumerate(movies.genres): dummies.ix[i,gen.split('|')]=1 movies_windic=movies.join(dummies) movies_windic.ix[0] # newdata has total 100209 rows 28 columns newdata = pd.merge(pd.merge(pd.merge(ratings,users),movies), movies_windic) newdata.columns #How many movies for each genre are in this dataset? 
moviegenre=movies_windic
moviegenre.drop(moviegenre.columns[[0,1,2]],axis=1,inplace=True)
moviegenre.sum().plot(kind='bar',color='g')
plt.title('Number of Movies in Each Genre')

#Total number of ratings received for each type of movie
genres_rating_received=newdata
genres_rating_received.drop(genres_rating_received.columns[[0,1,2,3,4,5,6,7,8,9]], axis=1, inplace=True)
genres_rating_received.sum().plot(kind='bar')
plt.title('Number of total ratings for Movies in each genre')

average_ratings_genre = (genres_rating_received.sum())*moviegenre.sum()/sum(moviegenre.sum())
average_ratings_genre

# Percent of movies watched by each gender classified as comedies
genres = data['genres']
all_genres = [string.split('|') for string in genres]
comedy_truth = [ 'Comedy' in genres for genres in all_genres]
comedy_series = pd.DataFrame(comedy_truth, index=data.index)
data['comedy'] = comedy_series
comedy_data = data.ix[data['comedy'] == True]

comedy_gender_counts = comedy_data.pivot_table('rating', index = 'gender', aggfunc = 'count')
total_gender_counts = data.pivot_table('rating', index = 'gender', aggfunc = 'count')
gender_counts = pd.concat([total_gender_counts,comedy_gender_counts],axis = 1)
gender_counts.columns = ['total_gender_counts', 'comedy_gender_counts']
gender_counts['comedy_gender_percent'] = gender_counts['comedy_gender_counts']/gender_counts['total_gender_counts']
gender_counts

"""
Explanation: *------------------------
Problem 4: (20 points total) Open Ended Question: Business Intelligence
Do any of your conjectures in Problems 1, 2, and 3 provide insights that a movie company might be interested in?
Propose a business question that you think this data can answer.
Suppose you are a Data Scientist at a movie company. Convince your boss that your conjecture is correct!
Online movie services such as Netflix acquire new customers every day. Without any previous movie ratings to analyze, Netflix must recommend movies to new customers based solely upon their registration information. This initial recommendation is extremely important: Netflix wants its new customers to have a very positive first experience. While there are many facets to this question, we ask the following: what genre of movie should Netflix recommend to a first-time user?
This question is extremely broad, so we ask the following more specific questions:
- What is the best time of day to recommend a drama?
- Which occupation is most likely to enjoy a comedy?
- What age group watches the most adventure movies?
A good way to attack this question is to use total numbers of ratings - for example, for the first question above, we can calculate the number of dramas rated per hour and divide that by the total number of ratings per hour. So, for example, we would know the percentage of movies watched at 3pm that are dramas.
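In pandas terms each of these shares is an element-wise ratio of two count pivot tables indexed by hour (or occupation, or age group). For the drama question, for example, the cells below build exactly this:

    percent_dramas_per_hours = drama_ratings_per_hours / movie_ratings_per_hours

where the numerator counts drama ratings per hour and the denominator counts all ratings per hour.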
Preliminary Analysis End of explanation """ import time # convert timestamps to localized hours timestamps = data['timestamp'] hours = [time.localtime(timestamp).tm_hour for timestamp in timestamps.values] hour_series = pd.DataFrame(hours, index=data.index) data['hour'] = hour_series genres = data['genres'] all_genres = [string.split('|') for string in genres] drama_truth = [ 'Drama' in genres for genres in all_genres] drama_series = pd.DataFrame(drama_truth, index=data.index) data['drama'] = drama_series drama_data = data.ix[data['drama'] == True] drama_ratings_per_hours=drama_data.pivot_table('title', index='hour', columns = 'gender', aggfunc='count') drama_ratings_per_hours.plot(kind='bar', color=['#E50E14','#5898f1']).legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('Dramas Rated per Hour') plt.ylabel('Count') plt.xlabel('Hour') movie_ratings_per_hours=data.pivot_table('title', index='hour', columns = 'gender', aggfunc='count') movie_ratings_per_hours.plot(kind='bar', color=['#E50E14','#5898f1']).legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('Total Movies Rated per Hour') plt.ylabel('Count') plt.xlabel('Hour') percent_dramas_per_hours = drama_ratings_per_hours/movie_ratings_per_hours percent_dramas_per_hours.plot(kind='bar', color=['#E50E14','#5898f1']).legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('Percent Dramas Rated Per Hour') plt.ylabel('Percent') plt.xlabel('Hour') """ Explanation: What is the best time of day to recommend a drama for each gender? End of explanation """ # histogram of occupation vs count of comedy ratings # Percent of movies watched by each gender classified as comedies genres = data['genres'] all_genres = [string.split('|') for string in genres] comedy_truth = [ 'Comedy' in genres for genres in all_genres] comedy_series = pd.DataFrame(comedy_truth, index=data.index) data['comedy'] = comedy_series comedy_data = data.ix[data['comedy'] == True] job_avg_ratings = comedy_data.pivot_table('rating', index='occupation', aggfunc=np.mean) job_avg_ratings.rename(index={0:'other', 1:'academic/educator',2: 'artist',3: 'clerical/admin',4: 'college/grad student', 5: 'customer service',6: 'doctor/health care',7:'executive/managerial',8:'farmer', 9: 'homemaker',10: 'K-12 student',11: 'lawyer',12 :'programmer',13: 'retired', 14:'sales/marketing',15:'scientist',16: 'self-employed',17: 'technician/engineer', 18: 'tradesman/craftsman',19 :'unemployed',20: 'writer'}, inplace=True) print job_avg_ratings print "RANGE: %s" % (job_avg_ratings.max() - job_avg_ratings.min()) job_avg_ratings.plot(kind='bar', color='#00b27f') plt.title('Average Comedy Rating vs. Occupation') plt.xlabel('Occupation') plt.ylabel('Average Rating') """ Explanation: The most dramas are rated at 4pm (16th hour) during the day, therefore the best time to recommend a drama is likely before 4pm. To make a more precise determination, instead of our current answer: 'before 4pm,' more data about movie lengths is needed. Since we assume movies are rated after they are viewed, we could use these movie lengths to determine the average start time - which is also the best time to recommend a movie. Which occupation is most likely to enjoy a comedy? 
End of explanation
"""

# histogram of occupation vs count of comedy ratings
# Percent of movies watched by each gender classified as comedies
genres = data['genres']
all_genres = [string.split('|') for string in genres]
comedy_truth = [ 'Comedy' in genres for genres in all_genres]
comedy_series = pd.DataFrame(comedy_truth, index=data.index)
data['comedy'] = comedy_series
comedy_data = data.ix[data['comedy'] == True]

job_avg_ratings = comedy_data.pivot_table('rating', index='occupation', aggfunc=np.mean)
job_avg_ratings.rename(index={0:'other', 1:'academic/educator',2: 'artist',3: 'clerical/admin',4: 'college/grad student',
                              5: 'customer service',6: 'doctor/health care',7:'executive/managerial',8:'farmer',
                              9: 'homemaker',10: 'K-12 student',11: 'lawyer',12 :'programmer',13: 'retired',
                              14:'sales/marketing',15:'scientist',16: 'self-employed',17: 'technician/engineer',
                              18: 'tradesman/craftsman',19 :'unemployed',20: 'writer'}, inplace=True)
print job_avg_ratings
print "RANGE: %s" % (job_avg_ratings.max() - job_avg_ratings.min())

job_avg_ratings.plot(kind='bar', color='#00b27f')
plt.title('Average Comedy Rating vs. Occupation')
plt.xlabel('Occupation')
plt.ylabel('Average Rating')

"""
Explanation: When comparing the average rating versus occupation for comedy movies, scientists have the highest average rating (3.687170) followed by retired (3.663825) and clerical/admin (3.601516). Does this mean that those occupations are most likely to enjoy comedies? Possibly, but since we are using the mean as our comparison metric and the range of the data is 0.285, we also looked at the number of comedy ratings per occupation.
End of explanation
"""

job_total_comedy_ratings = comedy_data.pivot_table('title', index='occupation', aggfunc='count')
job_total_comedy_ratings.rename(index={0:'other', 1:'academic/educator',2: 'artist',3: 'clerical/admin',4: 'college/grad student',
                              5: 'customer service',6: 'doctor/health care',7:'executive/managerial',8:'farmer',
                              9: 'homemaker',10: 'K-12 student',11: 'lawyer',12 :'programmer',13: 'retired',
                              14:'sales/marketing',15:'scientist',16: 'self-employed',17: 'technician/engineer',
                              18: 'tradesman/craftsman',19 :'unemployed',20: 'writer'}, inplace=True)
print job_total_comedy_ratings

job_total_comedy_ratings.plot(kind='bar', color='#ec971f')
plt.title('Comedies Rated vs. Occupation')
plt.xlabel('Occupation')
plt.ylabel('Number of Ratings')

"""
Explanation: From the chart, we can see that even though scientist, retired, and clerical/admin have the highest average rating for comedies, they also have low numbers of ratings, 7771, 4340, and 11870 respectively. In contrast, college/grad student, other, and executive/managerial have significantly more ratings (at least triple): 48672, 46500, and 35784 respectively. Although scientist has the highest average rating, it might be better to recommend comedies to students because, while they have a lower average rating, they have almost seven times more ratings than scientists.
End of explanation
"""

job_total_ratings = data.pivot_table('title', index='occupation', aggfunc='count')
job_total_ratings.rename(index={0:'other', 1:'academic/educator',2: 'artist',3: 'clerical/admin',4: 'college/grad student',
                              5: 'customer service',6: 'doctor/health care',7:'executive/managerial',8:'farmer',
                              9: 'homemaker',10: 'K-12 student',11: 'lawyer',12 :'programmer',13: 'retired',
                              14:'sales/marketing',15:'scientist',16: 'self-employed',17: 'technician/engineer',
                              18: 'tradesman/craftsman',19 :'unemployed',20: 'writer'}, inplace=True)

job_percent_comedy_ratings = job_total_comedy_ratings / job_total_ratings
job_percent_comedy_ratings.plot(kind='bar', color='#E50E14')
plt.title('Percent Comedies vs. Occupation')
plt.xlabel('Occupation')
plt.ylabel('Percent of all movies watched')

"""
Explanation: What age group watches the most adventure movies?
End of explanation
"""

# histogram of age group vs adventure genre
genres = data['genres']
all_genres = [string.split('|') for string in genres]
adventure_truth = [ 'Adventure' in genres for genres in all_genres]
adventure_series = pd.DataFrame(adventure_truth, index=data.index)
data['adventure'] = adventure_series
adventure_data = data.ix[data['adventure'] == True]

adventure_ratings_per_age = adventure_data.pivot_table('title', index='age', columns='gender', aggfunc='count')
adventure_ratings_per_age.rename(index={1: 'Under 18', 18: '18-24', 25: '25-34', 35: '35-44', 45: '45-49', 50: '50-55', 56: '56+'}, inplace=True)

adventure_ratings_per_age.plot(kind='bar', color=['#E50E14','#5898f1'])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title('Adventure Movies Rated per Age Group') plt.xlabel('Age Group') plt.ylabel('Count') total_per_age = data.pivot_table('title', index='age', columns='gender', aggfunc='count') total_per_age.rename(index={1: 'Under 18', 18: '18-24', 25: '25-34', 35: '35-44', 45: '45-49', 50: '50-55', 56: '56+'}, inplace=True) adventure_percent = adventure_ratings_per_age / total_per_age adventure_percent.plot(kind='bar', color=['#E50E14','#5898f1']) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('Percent Adventure Movies per Age Group') plt.xlabel('Age Group') plt.ylabel('Percent') print adventure_ratings_per_age print total_per_age print adventure_percent """ Explanation: What age group watches the most adventure movies? End of explanation """ # avg rating vs. gender gender_avg_ratings = data.pivot_table('rating', index = 'gender',aggfunc = np.mean) gender_avg_ratings gender_avg_ratings.plot(kind='barh', color='yellow') plt.title('avg rating vs. gender') # avg rating vs. occupation job_avg_ratings = data.pivot_table('rating', index = 'occupation',aggfunc = np.mean) job_avg_ratings job=job_avg_ratings.rename(index={0:'other',1:'academic/educator',2: 'artist',3: 'clerical/admin',4: 'college/grad student', 5 :'customer service',6: 'doctor/health care',7:'executive/managerial',8:'farmer', 9: 'homemaker',10: 'K-12 student',11: 'lawyer',12 :'programmer',13: 'retired', 14:'sales/marketing',15:'scientist',16: 'self-employed',17: 'technician/engineer', 18: 'tradesman/craftsman',19 :'unemployed',20: 'writer'}) job.plot(kind='bar') plt.title('avg rating vs. occupation') plt.ylabel('average rating') """ Explanation: Additional Data Exploration (not used in Problems 1-4) End of explanation """
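# One more cut in the same spirit (a sketch, reusing columns already in `data`):
# average rating by age group and gender together, with the same pivot_table
# pattern and age labels used earlier in the notebook.
age_gender_avg = data.pivot_table('rating', index='age', columns='gender', aggfunc=np.mean)
age_gender_avg.rename(index={1: 'Under 18', 18: '18-24', 25: '25-34', 35: '35-44',
                             45: '45-49', 50: '50-55', 56: '56+'}, inplace=True)
age_gender_avg.plot(kind='bar', color=['#E50E14','#5898f1']).legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title('Average rating by age group and gender')
plt.ylabel('average rating')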
turbomanage/training-data-analyst
courses/machine_learning/deepdive/06_structured/labs/2_sample.ipynb
apache-2.0
# change these to try this notebook out BUCKET = 'cloud-training-demos-ml' PROJECT = 'cloud-training-demos' REGION = 'us-central1' import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION %%bash if ! gsutil ls | grep -q gs://${BUCKET}/; then gsutil mb -l ${REGION} gs://${BUCKET} fi """ Explanation: <h1> 2. Creating a sampled dataset </h1> This notebook illustrates: <ol> <li> Sampling a BigQuery dataset to create datasets for ML <li> Preprocessing with Pandas </ol> End of explanation """ # Create SQL query using natality data after the year 2000 from google.cloud import bigquery query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 """ """ Explanation: <h2> Create ML dataset by sampling using BigQuery </h2> <p> Let's sample the BigQuery data to create smaller datasets. </p> End of explanation """ traindf.to_csv('train.csv', index=False, header=False) evaldf.to_csv('eval.csv', index=False, header=False) %%bash wc -l *.csv head *.csv tail *.csv """ Explanation: Lab Task #1 Sample the BigQuery resultset (above) so that you have approximately 12,000 training examples and 3000 evaluation examples. The training and evaluation datasets have to be well-distributed (not all the babies are born in Jan 2005, for example) and should not overlap (no baby is part of both training and evaluation datasets). Hint (highlight to see): <p style='color:white'>You will use ABS(MOD()) on the hashmonth to divide the dataset into non-overlapping training and evaluation datasets, and RAND() to sample these to the desired size.</p> Lab Task #2 Use Pandas to: * Clean up the data to remove rows that are missing any of the fields. * Simulate the lack of ultrasound. * Change the plurality column to be a string. Hint (highlight to see): <p> Filtering: <pre style='color:white'> df = df[df.weight_pounds > 0] </pre> Lack of ultrasound: <pre style='color:white'> nous = df.copy(deep=True) nous['is_male'] = 'Unknown' </pre> Modify plurality to be a string: <pre style='color:white'> twins_etc = dict(zip([1,2,3,4,5], ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)'])) df['plurality'].replace(twins_etc, inplace=True) </pre> </p> Lab Task #3 Write the cleaned out data into CSV files. Change the name of the Pandas dataframes (traindf, evaldf) appropriately. End of explanation """
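# One possible approach to Lab Tasks #1 and #2 (a sketch, not the official solution;
# the modulo-4 split keeps training and evaluation months disjoint, and the RAND()
# fraction is an assumption that may need tuning to land near ~12,000 / ~3,000 rows).
import pandas as pd

trainQuery = "SELECT * FROM (" + query + ") WHERE ABS(MOD(hashmonth, 4)) < 3 AND RAND() < 0.0005"
evalQuery = "SELECT * FROM (" + query + ") WHERE ABS(MOD(hashmonth, 4)) = 3 AND RAND() < 0.0005"
traindf = bigquery.Client().query(trainQuery).to_dataframe()
evaldf = bigquery.Client().query(evalQuery).to_dataframe()
print("train: {} rows, eval: {} rows".format(len(traindf), len(evaldf)))

def preprocess(df):
  # Drop rows with missing/invalid fields, make plurality a string, and append a
  # copy that simulates the lack of ultrasound (is_male unknown), per the hints above.
  df = df[df.weight_pounds > 0]
  df = df[df.mother_age > 0]
  df = df[df.gestation_weeks > 0]
  df = df[df.plurality > 0]
  df = df.copy()
  twins_etc = dict(zip([1, 2, 3, 4, 5],
                       ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)']))
  df['plurality'].replace(twins_etc, inplace=True)
  nous = df.copy(deep=True)
  nous['is_male'] = 'Unknown'
  return pd.concat([df, nous])

traindf = preprocess(traindf)
evaldf = preprocess(evaldf)
traindf.head()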
DaveBackus/Data_Bootcamp
Code/IPython/bootcamp_pandas-clean.ipynb
mit
import sys # system module import pandas as pd # data package import matplotlib.pyplot as plt # graphics module import datetime as dt # date and time module import numpy as np # foundation for Pandas %matplotlib inline # check versions (overkill, but why not?) print('Python version: ', sys.version) print('Pandas version: ', pd.__version__) print('Today: ', dt.date.today()) """ Explanation: Pandas 2: Cleaning data Probably the best thing about Pandas is its extensive toolset for managing data. Here we describe features of Pandas that allow us to clean data that, for reasons beyond our control, comes in a form that's not immediately amendable to analysis. This is the first of several such notebooks. Outline: Want operator. Start with what we want to end up, then figure out how to get there. String methods. Fixing string variables, especially strings that should really be numbers. Missing values. Marking, dropping, counting missing values. Selecting variables and observations. Choose the variables and observations we want by their labels. Boolean selection. This is mostly what we do: choose observations from conditions. We use comparisons to produce Boolean variables and then use the Boolean variables to select observations that are True. The next two methods extend this capability. The isin method. Choose observations whose values are in lists you specify. The contains method. Flag observations that contain a specific piece of text. Another string method, operates through Booleans. <!-- * [The `query` method](#query). Similar capability using database syntax. This is one of many examples in which **SQL database** tools have been built into Pandas. --> <!-- * [Indexing](#index). Setting and resetting the index. Multi-indexes. * [Switching rows and columns](#pivot). Transpose. Pivot and melt. * [Stack and unstack](#stack). Managing column structure and labels. Melt. --> Note: requires internet access to run. <!-- internal links http://sebastianraschka.com/Articles/2014_ipython_internal_links.html --> This IPython notebook was created by Dave Backus, Chase Coleman, and Spencer Lyon for the NYU Stern course Data Bootcamp. <a id=prelims></a> Preliminaries End of explanation """ url = 'https://raw.githubusercontent.com/TheUpshot/chipotle/master/orders.tsv' chp = pd.read_csv(url, sep='\t') # tab (\t) delimited print('Variable dtypes:\n', chp.dtypes, sep='') chp.head() """ Explanation: <a id=want></a> The want operator We need to know what we're trying to do -- what we want the data to look like. We say we apply the want operator. Some problems we've run across that ask to be solved: Numerical data is contaminated by commas (marking thousands) or dollar signs. Row and column labels are contaminated. Missing values are marked erratically. We have too much data, would prefer to choose a subset. Variables run across rows rather than down columns. What we want in each case is the opposite of what we have: we want nicely formatted numbers, clean row and column labels, and so on. We'll solve the first four problems here, the last one in the next notebook. Example: Chipotle data This data comes from a New York Times story End of explanation """ pd.set_option("display.width", 80) import pandas as pd url1 = 'http://pages.stern.nyu.edu/~dbackus/Data/' url2 = 'Data-Bootcamp-entry-poll_s16.csv' url = url1 + url2 ep = pd.read_csv(url, header=0) print('Dimensions:', ep.shape) print('\nData types:\n', ep.dtypes, sep='') ep.head(2) """ Explanation: Comment. Note that the variable item_price has dtype object. 
The reason is evidently the dollar sign. We'd prefer to have it as a number, specifically a float. Example: Data Bootcamp entry poll This is the poll we did at the start of the course. Responses were collected in a Google spreadsheet, which we converted to a csv and uploaded to our website. End of explanation """ # rename variables newnames = ['time', 'program', 'career', 'programming', 'stats', 'media', 'other', 'major', 'data', 'why', 'topics'] newnames = [name.title() for name in newnames] ep.columns = newnames ep.head() # check multi-response question to see what we're dealing with ep['Media'].head(20) """ Explanation: Comments. This is mostly text data, which means it's assigned the dtype object. Which is fine. But there are two things that would make the data easier to work with: The column names are excessively verbose. This one's easy: We replace them with single words. Which we do below. The second one is harder. Two of the questions -- social media and special topics -- say "mark all that apply." In the spreadsheet, we have a list of every choice the person checked. Our want is to count the number of each type of response. For example, we might want a bar chart that gives us the number of each response. The question is how we get there. End of explanation """ url1 = 'http://www.oecd.org/health/health-systems/' url2 = 'OECD-Health-Statistics-2015-Frequently-Requested-Data.xls' docs = pd.read_excel(url1+url2, skiprows=3, usecols=[0, 51, 52, 53, 54, 55, 57], sheetname='Physicians', # na_values=['..'], skip_footer=21) print('Dimensions:', docs.shape) print('\nIndex', docs.index.tolist(), sep='') print('\nVariable dtypes:\n', docs.dtypes.tail(8), sep='') docs.head() """ Explanation: Comment. Note the commas separating answers with more than one choice. We want to unpack them somehow. Example: OECD healthcare statistics The OECD collects healthcare data on lots of (mostly rich) countries, which is helpful in producing comparisons. Here we use a spreadsheet linked in one of their documents. End of explanation """ names = list(docs) docs = docs.rename(columns={names[0]: 'Country'}) docs.head(2) """ Explanation: Comments. Here we have a couple issues. The first column includes a space and a number: Australia 1, Chile 3, etc. We care about this because when we plot the data across countries, the country labels are going to be country names, so we want them in a better form than this. The ..'s in the sheet lead us to label any column that includes them as dtype object. Here we want to label them as missing values. If we want to plot each country against time, then we'll need to switch the rows and columns somehow, so that the x axis in the plot (the year) is the index and not the column label. One more thing before we proceeed: change the name of the country variable. End of explanation """ url1 = 'http://www.imf.org/external/pubs/ft/weo/2015/02/weodata/' url2 = 'WEOOct2015all.xls' url = url1 + url2 weo = pd.read_csv(url, sep='\t', usecols=[1,2,3,4,6,40,41,42,43,44], thousands=',', na_values=['n/a', '--'] ) print('Variable dtypes:\n', weo.dtypes, sep='') weo.head() """ Explanation: Example: World Economic Outlook The IMF's World Economic Outlook database contains a broad range of macroeconomic data for a large number of countries. It's updated twice a year and is a go-to source for things like current account balances (roughly, the trade balance) and government debt and deficits. It also has a few quirks, as we'll see. Example. 
Run the following code as is, and with the thousands and na_values parameters commented out. How do the dtypes differ? End of explanation """ weo.T.head(10) """ Explanation: Comment. This has several issues: The variables run across rows with observations labeled 1980, 1981, etc across the top. We saw the same problem in the previous example. If we run the first version of the read_csv statement, the data columns (1980, 1981, etc) have dtype object. A little work suggests that this is because they include commas marking thousands. The entries labeled n/a need to be marked as missing values. We can solve the last two in the read_csv function by deleting the hash -- which is what we see in the second read_csv statement. The other one takes some work. Question. Can we transpose the whole thing to get the data running down columns? End of explanation """ dollars = '$123.45' print('Type of variable dollars:', type(dollars)) num = dollars.replace('$', '') num = float(num) print('Type of variable num:', type(num)) """ Explanation: <a id='strings'></a> String methods We can treat variables as strings in Pandas in much the same way we dealt with strings in core Python. Run the code below to remind yourself how this works. End of explanation """ chp.head() chpnum = chp.copy() print('Original dtype:', chpnum['item_price'].dtype) # create a copy of the df to play with # delete dollar signs chpnum['item_price'].str.replace('$', '').head() # delete dollar signs, convert to float, and assign back to chpnum chpnum['item_price'] = chpnum['item_price'].str.replace('$', '').astype(float) print('New dtype:', chpnum['item_price'].dtype) # assign back to chp for future use chp = chpnum """ Explanation: Pandas string methods. We can do the same thing to all the observations of a variable with so-called string methods. We append .str to a variable in a dataframe and then apply the string method of our choice. If this is part of converting a number-like entry that has mistakenly been given dtype object, we then convert its dtype with the astype method. Example. Let's use a string method to fix the item_price variable in the Chipotle dataframe. This has three parts: Use the method str to identify this as a string method. Apply the string method of our choice (here replace) to fix the string. Use the astype method to convert the fixed-up string to a float. We start by making a copy of the chp dataframe that we can experiment with. End of explanation """ # try this with an example first country = 'United States 1' # get documentation for the rsplit method #country.rsplit? # an example country.rsplit() """ Explanation: Comment. We did everything here in one line: replace the dollar sign with a string method, then converted to float using astype. If you think this is too dense, you might break it into two steps. Example. Here we strip off the numbers at the end of the indexes in the OECD docs dataframe. This involves some experimentation: Play with the rsplit method to see how it works. Apply rsplit to the example country = 'United States 1'. Use a string method to do this to all the entries of the variable Country. End of explanation """ # what about this? 
country.rsplit(maxsplit=1) # one more step, we want the first component of the list country.rsplit(maxsplit=1)[0] docs["Country"].head() # now do this for the variable Country #docs['Country'].str.rsplit(maxsplit=1).str[0].head() # explain why this doesn't work docs['Country'].str.rsplit(n=1).str[0].head() # Spencer prefers the get method to slicing docs['Country'].str.rsplit(n=1).str.get(0).head() # now assign it to newdocs and see what we have newdocs = docs.copy() newdocs['Country'] = newdocs['Country'].str.rsplit(n=1).str.get(0) newdocs.head() # assign it back to docs for future use docs = newdocs """ Explanation: Comment. Not quite, we only want to split once. End of explanation """ docs = newdocs docs.head() """ Explanation: Comments. Note that we need two str's here: one to do the split, the other to extract the first element. For reasons that mystify us, we ran into problems when we used maxsplit=1, but it works with n=1. This is probably more than you want to know, but file away the possibilities in case you need them. <a id='missing'></a> Missing values It's important to label missing values, so that Pandas doesn't interpret entries as strings. Pandas is also smart enough to ignore things labeled missing when it does calculations or graphs. If we compute, for example, the mean of a variable, the default is to ignore missing values. We've seen that we can label certain entries as missing values in read statements: read_csv, read_excel, and so on. Here we do it directly, mostly to remind ourselves what's involved. Marking missing values Example. The docs dataframe contains a number of instances of .. (double period). How can we mark them as missing values? End of explanation """ docs.replace(to_replace=['..'], value=[None]).head() """ Explanation: What to do. We use the replace method on the whole dataframe. To mark something as missing, we replace it as None, which Pandas interprets as missing and labels NaN. End of explanation """ docs.dtypes.head() docsna = docs.replace(to_replace=['..'], value=[None]) docsna.dtypes.head() """ Explanation: Comment. Replace automatically updates the dtypes. Here the double dots led us to label the variables as objects. After the replace, they're now floats, as they should be. End of explanation """ docs.replace(to_replace=['..'], value=[np.nan]).head() # assign back to docs docs = docs.replace(to_replace=['..'], value=[np.nan]) """ Explanation: Comment. Some people prefer to use the numpy nan. Here's an example. The only advantage is that we avoid possible conflicts with other uses of the value None. End of explanation """ docs.replace(to_replace=['.'], value=['*']).head() """ Explanation: Comment. Unlike the string methods we described earlier, this use of replace affects complete entries, not elements of string entries. For example, suppose we tried to replace the periods in decimal numbers with an asterisk. We could try the following, but it doesn't work: the decimal numbers don't change. End of explanation """ # grab a variable to play with var = docsna[2013].head(10) var # which ones are missing ("null")? var.isnull() # which ones are not missing ("not null")? var.notnull() # drop the missing var.dropna() """ Explanation: Working with missing values End of explanation """ docs[2013].plot.barh(figsize=(4, 12)) """ Explanation: Comment. We usually don't have to worry about this, Pandas takes care of missing values automatically. Comment. Let's try a picture to give us a feeling of accomplishment. What else would you say we need? 
How would we get it? End of explanation """ # we create a small dataframe to experiment with small = weo.head() small """ Explanation: <a id='selection'></a> Selecting variables and observations The word selection refers to choosing a subset of variables or observations using their labels or index. Similar methods are sometimes referred to as slicing, subsetting, indexing, or filtering. We'll treat the terms as synonymous. There are lots of ways to do this. Mostly we do "Boolean" selection, which we address in the next section. We review more direct options here, mostly at high speed because they're not things we use much. In the outline below, df is a dataframe, var and varn are variable names, vlist = ['var1', 'var2'] is a list of variable names, and nlist = [0, 3, 4] is a list of numerical variable or observation indexes, n1 and n2 are integers, and bools ia a list or pandas Series of booleans (True and False). Some of the basic selection/indexing/slicing methods have the form: df[var] extracts a variable -- a series, in other words. df[vlist] extracts a new dataframe consisting of the variables in vlist. df[nlist] does the same thing. df[bools]: extracts each row where the corresponding element in bools is true. len(bools) must be equal to df.size[0] df[n1:n2] extracts observations n1 to n2-1, the traditional slicing syntax. We find the last one confusing: it extracts rows, not columns. Pandas guru Wes McKinney notes: "This might seem inconsistent to some readers." Yup! We don't do it much, partly for that reason. <!-- page 127 top --> The Pandas docs push the loc and iloc methods. We'll ignore them -- we don't use them much -- but if you're interested, see the docs. End of explanation """ small[['ISO', 'Units']] small[[0, 4]] small['2011'] small['2011'][3] small[1:3] small[[False, True, True, False, False]] """ Explanation: Example. Let's try each of these in a different cell and see what they do: small[['ISO', 'Units']] small[[0, 4]] small['2011'] small[1:3] Can you explain the results? End of explanation """ s1 = pd.Series([5, 6, 7, 8], index=["a", "b", "c", "d"]) s1 s2 = pd.Series([50, 60, 70, 80], index=[0, 4, 2, 999]) s2 # index has dtype object, so using an int returns the value in that row (starting at 0) s1[1] # index has dtype int, so using an integer tries to find the that int in the # index and return the corresponding value and throws an error if it can't find it s2[1] s2[0] # no error, 0 is in the index # index has dtype object, so a list of ints extracts those rows s1[[0, 3]] # index has dtype int, so a list of ints tries to match each int to the index # it returns NaN where it can't find the index. Notice it **did not** return # `80` for 3 s2[[0, 3, 999]] # index has type object, so a string finds row with matching index s1["c"] # index has dtype int, so using a string causes an error s2["c"] # similar behavior for lists of strings s1[["a", "b", "penguin"]] # index has dtype int, so list of strings returns NaN's everywhere s2[["a", "b"]] # lists of True/False work the same for any dtype of index bools = [True, False, False, True] s1[bools] s2[bools] """ Explanation: Series indexing Indexing a Series is a little different because we only have one column, so all indexing operations interact with rows. 
The rules here are a little subtle, so we'll show examples and add comments that explain what each example does In the list below s is a Series, n is an integer, nlist = [0, 3] is a list of integers, and i is a string, and is is a list of strings s[n]: if the index has dtype int, this extracts the row with index n. Otherwise extracts the nth row (starting at zero) s[nlist]: if the index has dtype int, this extracts rows with indices in nlist returning NaN if they don't appear. Otherwise extracts the rows at positions in nlist, filling with NaN for invalid positions s[i]: if the index has dtype object, this extracts the row with index i, otherwise it is an error s[is]: End of explanation """ weo.head() """ Explanation: <a id='boolean'></a> Boolean selection This is mostly what we do: we choose observations that satisfy one or more conditions. Boolean selection consists of two steps that we typically combine in one statement: Use a comparison to construct a Boolean variable consisting of True and False. Compute df[comparison], where df is a dataframe and comparison is a comparison. This will select the observations (rows) for which comparison is true and throw away the others. We work through this one step at a time: Example: apply the want operator Comparisons for dataframes Boolean selection: select observations for which the comparison is True The isin method This is easier to describe with an example. Example: Apply the want operator to WEO Our want here is to take the weo dataframe and extract government debt and deficits for a given set of countries. Putting this to work involves several steps. Here's the head of the dataframe to remind us what we're dealing with. End of explanation """ variable_list = weo[['WEO Subject Code', 'Subject Descriptor', 'Units']].drop_duplicates() print('Number of variables: ', variable_list.shape[0]) variable_list.head() country_list = weo[['ISO', 'Country']].drop_duplicates() print('Number of countries: ', country_list.shape[0]) country_list.head() """ Explanation: Find variable and country codes. Which ones do we want? Let's start by seeing that's available. Here we create special dataframes that include all the variables and their definitions and all the countries. Note the use of the drop_duplicates method, which does what it sounds like. End of explanation """ small['Units'] == 'National currency' small['2011'] >= 100 (small['Units'] == 'National currency') & (small['2011'] >= 100) (small['Units'] == 'National currency') | (small['2011'] >= 100) """ Explanation: Exercise. Construct a list of countries with countries = weo[['ISO', 'Country']]; that is, without applying the drop_duplicates method. How large is it? How many duplicates have we dropped? What are the country codes (ISO) for Argentina and the United States? What are the variable codes (WEO Subject Code) for government debt (gross debt, percent of GDP) and net lending/borrowing (also percent of GDP)? Comment. Now that we have the country and variable codes, we can be more explicit about what we want. We want observations with those country and variable codes. We work up to the solution one step at a time. Comparisons for series We can construct comparisons for series (dataframe columns) much as we did with simple variables. The difference is that we get a complete column of True/False responses, not just one. Mutiple comparisons have a different syntax than we saw earlier: and is replaced by &amp;, and or is replaced by |. 
And when we have more than one comparison, we need to enclose them in parentheses. Examples. Consider the comparisons: small['Units'] == 'National currency' small['2011'] &gt;= 100 (small['Units'] == 'National currency') &amp; (small['2011'] &gt;= 100) (small['Units'] == 'National currency') | (small['2011'] &gt;= 100) Remind yourself what the &amp; and | do. End of explanation """ # remind ourslves what we're starting with small # two steps: comparison, then selection ncunits = small['Units'] == 'National currency' # comparison print(ncunits) small[ncunits] # selection # put the steps together in one line small[small['Units'] == 'National currency'] """ Explanation: Boolean selection Boolean selection simply chooses those observations for which a condition is True. Some people refer to this as filtering. The syntax is python df[comparison] The result is a new dataframe of observations in which comparison is true. Example. We choose obervations for which the units are 'National currency'. We do this first in two steps, then in one. End of explanation """ vlist = ['GGXWDG_NGDP', 'GGXCNL_NGDP'] weo['WEO Subject Code'].isin(vlist).head(45) """ Explanation: Exercise. Construct dataframes for which small['Units'] does not equal 'National currency'. small['Units'] equals 'National currency' and small['2011'] is greater than 100. <a id='isin'></a> The isin method Pay attention now, this is really useful. Suppose we want to extract the data for which weo['ISO'] == 'ARG' (Argentina) or weo['ISO'] == 'GRC' (Greece). We could do that by combining the comparisons: python (weo['ISO'] == 'ARG') | (weo['ISO'] == 'GRC') Remind youself that | stands for "or." (What do we use for "and"?) A simpler approach is to apply the isin method to a variable. This sets the comparison equal to True if the value of the observation is of weo['ISO'] equals any element in a list. We could do the same thing using mulitple comparisons, but this is a lot easier. Let's see how this works. Example. Let's apply the same logic to variable codes. If we want to extract the observations with codes vlist = ['GGXWDG_NGDP', 'GGXCNL_NGDP'] we would use End of explanation """ # this time let's use the result of isin for selection vlist = ['GGXWDG_NGDP', 'GGXCNL_NGDP'] weo[weo['WEO Subject Code'].isin(vlist)].head(6) # we've combined several things in one line comparison = weo['WEO Subject Code'].isin(vlist) selection = weo[comparison] selection.head(6) """ Explanation: Comment. We're choosing 2 variables from 45, so there are lots of Falses. End of explanation """ variables = ['GGXWDG_NGDP', 'GGXCNL_NGDP'] countries = ['ARG', 'DEU', 'GRC'] weo_sub = weo[weo['WEO Subject Code'].isin(variables) & weo['ISO'].isin(countries)] weo_sub """ Explanation: Comment. We can do the same thing with countries. If we want to choose two variables and three countries, the code looks like: End of explanation """ # recall ep['Media'].head(10) # the contains method ep['Media'].str.contains('Twitter').head(10) """ Explanation: Comments. We've now done what we described when we applied the want operator. This is a go-to method. Circle it for later reference. This is a go-to method. Circle it for later reference. Exercise. Use the isin method to extract Gross domestic product in US dollars for China, India, and the United States. Assign the result to the dataframe gdp. Hint: You can adapt the code we just ran. The variable code is NGDPD. The country codes are CHN, IND, and USA. Exercise (challenging). Plot the variable gdp['2015'] as a bar chart. 
What would you say it needs? <a id='contains'></a> The contains method Another useful one. The contains string method for series identifies observations that contain a specific string. If yes, the observation is labelled True, if no, False. A little trick converts the True/False outcomes to ones and zeros. We apply it to the Media variable of the Entry Poll dataframe ep. You may recall that this variable could have more than one response. We tease them apart with the contains method. Our want is to have a yes/no variable for each response. End of explanation """ ep['Media'].str.contains('Twitter').head(10)*1 """ Explanation: Comment. That's pretty good, we now know which students mentioned Twitter and which did not. It's more useful, though, to convert this to zeros (False) and ones (True), which we do with this trick: we multiply by 1. End of explanation """ media = ['None', 'Twitter', 'Facebook', 'Blog'] oldep = ep.copy() vnames = [] for x in media: newname = 'Media' + ':' + x vnames.append(newname) ep[newname] = ep['Media'].str.contains(x)*1 vnames """ Explanation: Comment. Now let's do the same for some of the other entries and save them in new variables. End of explanation """ # create new df of just these variables media = ep[vnames] media.head() # count them with the sum method media_counts = media.sum() media_counts """ Explanation: Comment. You might want to think about this a minute. Or two. End of explanation """ media_counts.plot.barh() """ Explanation: Comment. Just for fun, here's a bar graph of the result. End of explanation """ data = {'Size': ['a) 1 to 4', 'b) 5 to 9', 'c) 10 to 19', 'd) 20 to 49', 'e) 50 to 99', 'f) 100 to 249', 'g) 250 to 499', 'h) 500 to 999', 'i) 1000 to 2499', 'j) 2500 to 4999', 'k) 5000 to 9999', 'l) 10000+'], 'Firms': [2846416, 1020772, 598153, 373345, 115544, 63845, 19389, 9588, 6088, 2287, 1250, 1357], 'Emp': [5998912, 6714924, 8151891, 11425545, 8055535, 9788341, 6611734, 6340775, 8321486, 6738218, 6559020, 32556671]} bds = pd.DataFrame(data) bds .head(3) """ Explanation: Exercise. What would you change in this graph? How would you do it? (Words are enough.) Review Let's remind ourselves what we've learned. Exercise. We explore the Census's Business Dynamics Statistics, a huge collection of data about firms. We've extracted a small piece of one of their databases that includes these variables for 2013: Size: size category of firms based on number of employees Firms: number of firms in each size category Emp: number of employees in each size category Run the code cell below to load the data. End of explanation """
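# average number of employees per firm in each size category (EmpPerFirm is a new, made-up column name)
bds['EmpPerFirm'] = bds['Emp'] / bds['Firms']
bds[['Size', 'Firms', 'Emp', 'EmpPerFirm']]
"""
Explanation: As a first look at these data, and only as a sketch (the exercise may well ask for more), we can measure how large the average firm is in each size category by dividing employment by the number of firms. The column name EmpPerFirm is our own choice.
End of explanation
"""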
ES-DOC/esdoc-jupyterhub
notebooks/csiro-bom/cmip6/models/sandbox-1/landice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'csiro-bom', 'sandbox-1', 'landice') """ Explanation: ES-DOC CMIP6 Model Properties - Landice MIP Era: CMIP6 Institute: CSIRO-BOM Source ID: SANDBOX-1 Topic: Landice Sub-Topics: Glaciers, Ice. Properties: 30 (21 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:55 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Grid 4. Glaciers 5. Ice 6. Ice --&gt; Mass Balance 7. Ice --&gt; Mass Balance --&gt; Basal 8. Ice --&gt; Mass Balance --&gt; Frontal 9. Ice --&gt; Dynamics 1. Key Properties Land ice key properties 1.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of land surface model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of land surface model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.ice_albedo') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "prescribed" # "function of ice age" # "function of ice density" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Ice Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Specify how ice albedo is modelled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Atmospheric Coupling Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which variables are passed between the atmosphere and ice (e.g. orography, ice mass) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.5. Oceanic Coupling Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which variables are passed between the ocean and ice End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.landice.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "ice velocity" # "ice thickness" # "ice temperature" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.6. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which variables are prognostically calculated in the ice model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of land ice code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Grid Land ice grid 3.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the grid in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.2. Adaptive Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is an adative grid being used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.base_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Base Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The base resolution (in metres), before any adaption End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.resolution_limit') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Resolution Limit Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If an adaptive grid is being used, what is the limit of the resolution (in metres) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.projection') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.5. 
Projection Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The projection of the land ice grid (e.g. albers_equal_area) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.glaciers.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Glaciers Land ice glaciers 4.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of glaciers in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.glaciers.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the treatment of glaciers, if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 4.3. Dynamic Areal Extent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Does the model include a dynamic glacial extent? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Ice Ice sheet and ice shelf 5.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the ice sheet and ice shelf in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.grounding_line_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "grounding line prescribed" # "flux prescribed (Schoof)" # "fixed grid size" # "moving grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.2. Grounding Line Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.ice_sheet') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.3. Ice Sheet Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are ice sheets simulated? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.ice_shelf') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Ice Shelf Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are ice shelves simulated? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Ice --&gt; Mass Balance Description of the surface mass balance treatment 6.1. Surface Mass Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how and where the surface mass balance (SMB) is calulated. 
Include the temporal coupling frequeny from the atmosphere, whether or not a seperate SMB model is used, and if so details of this model, such as its resolution End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Ice --&gt; Mass Balance --&gt; Basal Description of basal melting 7.1. Bedrock Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of basal melting over bedrock End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Ocean Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of basal melting over the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Ice --&gt; Mass Balance --&gt; Frontal Description of claving/melting from the ice shelf front 8.1. Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of calving from the front of the ice shelf End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Melting Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of melting from the front of the ice shelf End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Ice --&gt; Dynamics ** 9.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description if ice sheet and ice shelf dynamics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.approximation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "SIA" # "SAA" # "full stokes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Approximation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Approximation type used in modelling ice dynamics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 9.3. Adaptive Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there an adaptive time scheme for the ice scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep (in seconds) of the ice scheme. 
If the timestep is adaptive, then state a representative timestep. End of explanation """
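# Illustrative only: each modelling group enters its own number.
# A model with a 30-minute ice-dynamics step would record the integer
# timestep of section 9.4 with a call of this form:
#     DOC.set_value(1800)
"""
Explanation: The cells above only mark where values belong; the numbers themselves come from each modelling group. Purely to illustrate the pattern, the commented sketch below shows how a half-hour ice dynamics step could be recorded for the integer timestep of section 9.4. The value 1800 is made up and not a recommendation.
End of explanation
"""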
achave11/bioapi-examples
python_notebooks/1kg_read_service.ipynb
apache-2.0
import ga4gh_client.client as client c = client.HttpClient("http://1kgenomes.ga4gh.org") """ Explanation: GA4GH 1000 Genomes Reads Protocol Example This example illustrates how to access alignment data made available using a GA4GH interface. Initialize the client In this step we create a client object which will be used to communicate with the server. It is initialized using the URL. End of explanation """ counter = 0 for read_group_set in c.search_read_group_sets(dataset_id="WyIxa2dlbm9tZXMiXQ"): counter += 1 if counter < 4: print "Read Group Set: {}".format(read_group_set.name) print "id: {}".format(read_group_set.id) print "dataset_id: {}".format(read_group_set.dataset_id) print "Aligned Read Count: {}".format(read_group_set.stats.aligned_read_count) print "Unaligned Read Count: {}\n".format(read_group_set.stats.unaligned_read_count) for read_group in read_group_set.read_groups: print " Read group:" print " id: {}".format(read_group.id) print " Name: {}".format(read_group.name) print " Description: {}".format(read_group.description) print " Biosample Id: {}\n".format(read_group.bio_sample_id) else: break """ Explanation: Search read group sets Read group sets are logical containers for read groups similar to BAM. We can obtain read group sets via a search_read_group_sets request. Observe that this request takes as it's main parameter dataset_id, which was obtained using the example in 1kg_metadata_service using a search_datasets request. End of explanation """ read_group_set = c.get_read_group_set(read_group_set_id="WyIxa2dlbm9tZXMiLCJyZ3MiLCJOQTE5Njc4Il0") print "Read Group Set: {}".format(read_group_set.name) print "id: {}".format(read_group_set.id) print "dataset_id: {}".format(read_group_set.dataset_id) print "Aligned Read Count: {}".format(read_group_set.stats.aligned_read_count) print "Unaligned Read Count: {}\n".format(read_group_set.stats.unaligned_read_count) for read_group in read_group_set.read_groups: print " Read Group: {}".format(read_group.name) print " id: {}".format(read_group.bio_sample_id) print " bio_sample_id: {}\n".format(read_group.bio_sample_id) """ Explanation: Note: only a small subset of elements is being illustrated, the data returned by the servers is richer, that is, it contains other informational fields which may be of interest. Get read group set Similarly, we can obtain a specific Read Group Set by providing a specific identifier. End of explanation """ for read_group in read_group_set.read_groups: print "Alignment from {}\n".format(read_group.name) alignment = c.search_reads(read_group_ids=[read_group.id], start=0, end=1000000, reference_id="WyJOQ0JJMzciLCIxIl0").next() print " id: {}".format(alignment.id) print " fragment_name: {}".format(alignment.fragment_name) print " aligned_sequence: {}\n".format(alignment.aligned_sequence) """ Explanation: Note, like in the previous example. Only a selected amount of parameters are selected for illustration, the data returned by the server is far richer, this format is only to have a more aesthetic presentation. Search reads This request returns reads were the read group set names we obtained above. The reference ID provided corresponds to chromosome 1 as obtained from the 1kg_reference_service examples. A search_reads request searches for read alignments in a region using start and end coordinates. End of explanation """
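# Count a handful of reads for the first read group in the same region (sketch only;
# the region and the cap of 100 reads are arbitrary choices for illustration).
read_group = read_group_set.read_groups[0]
n_reads = 0
for alignment in c.search_reads(read_group_ids=[read_group.id], start=0, end=1000000,
                                reference_id="WyJOQ0JJMzciLCIxIl0"):
    n_reads += 1
    if n_reads >= 100:  # stop early, this is only a sketch
        break
print "Fetched {} reads for read group {}".format(n_reads, read_group.name)
"""
Explanation: The same search can also feed simple summaries. Below is a minimal sketch that counts a few of the alignments returned for the first read group of this set; the region, the cap of 100 reads and the printed fields are arbitrary choices for illustration, not part of the protocol.
End of explanation
"""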
darkomen/TFG
medidas/20072015/BQ/Untitled.ipynb
cc0-1.0
# Import the libraries we use
import numpy as np
import pandas as pd
import seaborn as sns

# Show the version of each library used
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))

# Open the csv file with the sample data
datos = pd.read_csv('BQ.CSV')
%pylab inline

# Show a summary of the data obtained
datos.describe()
#datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']]

# Store in a list the file columns we will work with
columns = ['Diametro X [mm]', 'Diametro Y [mm]', 'RPM']

# Plot the information obtained from the run in several graphs
datos[columns].plot(subplots=True, figsize=(20,20))
"""
Explanation: Using IPython to analyse and display the data collected during production. The data analysed here are from the bq filament produced on 20 July 2015.
End of explanation
"""

datos.ix[:, "Diametro X [mm]":"Diametro Y [mm]"].plot(figsize=(16,3))
datos.ix[:, "Diametro X [mm]":"Diametro Y [mm]"].boxplot(return_type='axes')
"""
Explanation: We plot both diameters in the same graph.
End of explanation
"""

pd.rolling_mean(datos[columns], 50).plot(subplots=True, figsize=(12,12))
"""
Explanation: We plot the rolling mean of the samples.
End of explanation
"""

plt.scatter(x=datos['Diametro X [mm]'], y=datos['Diametro Y [mm]'], marker='.')
"""
Explanation: Diameter X plotted against Diameter Y, to see the aspect ratio of the filament.
End of explanation
"""

datos_filtrados = datos[(datos['Diametro X [mm]'] >= 0.9) & (datos['Diametro Y [mm]'] >= 0.9)]
"""
Explanation: Filtering the data
Samples with $d_x < 0.9$ or $d_y < 0.9$ are assumed to be sensor errors, so we filter them out and keep only the readings where both diameters are at least 0.9 mm.
End of explanation
"""

plt.scatter(x=datos_filtrados['Diametro X [mm]'], y=datos_filtrados['Diametro Y [mm]'], marker='.')
"""
Explanation: X/Y plot of the filtered data.
End of explanation
"""

ratio = datos_filtrados['Diametro X [mm]']/datos_filtrados['Diametro Y [mm]']
ratio.describe()

rolling_mean = pd.rolling_mean(ratio, 50)
rolling_std = pd.rolling_std(ratio, 50)
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
"""
Explanation: We analyse the ratio data.
End of explanation
"""

Th_u = 1.85
Th_d = 1.65

data_violations = datos[(datos['Diametro X [mm]'] > Th_u) | (datos['Diametro X [mm]'] < Th_d) | (datos['Diametro Y [mm]'] > Th_u) | (datos['Diametro Y [mm]'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
"""
Explanation: Quality limits
We count how many times the measurements cross the quality limits $Th^+ = 1.85$ and $Th^- = 1.65$.
End of explanation
"""
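# Share of samples outside the quality limits (sketch; reuses datos and data_violations from above)
n_total = len(datos)
n_bad = len(data_violations)
print("Samples outside [{}, {}] mm: {} of {} ({:.2f}%)".format(Th_d, Th_u, n_bad, n_total, 100.0 * n_bad / n_total))
"""
Explanation: As a quick follow-up, and only as a sketch reusing the objects defined above, we can express the violations as a share of all samples.
End of explanation
"""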
pastas/pasta
concepts/hantush_response.ipynb
mit
import numpy as np import pandas as pd import pastas as ps ps.show_versions() """ Explanation: Hantush response functions This notebook compares the two Hantush response function implementations in Pastas. Developed by D.A. Brakenhoff (Artesia, 2021) Contents Hantush versus HantushWellModel Which Hantush should I use? Synthetic example End of explanation """ # A defined so that 100 m3/day results in 5 m drawdown A = -5 / 100.0 a = 200 b = 0.5 d = 0.0 # reference level # auto-correlated residuals AR(1) sigma_n = 0.05 alpha = 50 sigma_r = sigma_n / np.sqrt(1 - np.exp(-2 * 14 / alpha)) print(f'sigma_r = {sigma_r:.2f} m') """ Explanation: Hantush versus HantushWellModel The reason there are two implementations in Pastas is that each implementation currently has advantages and disadvantages. We will discuss those soon, but first let's introduce the two implementations. The two Hantush response functions are very similar, but differ in the definition of the parameters. The table below shows the formulas for both implementations. | Name | Parameters | Formula | Description | |------------------|-------------|:------------------------------------------------------------------------|--------------------------------------------------------------------------------| | Hantush | 3 - A, a, b | $$ \theta(t) = At^{-1} e^{-t/a - ab/t} $$ | Response function commonly used for groundwater abstraction wells. | | HantushWellModel | 3 - A, a, b | $$ \theta(t) = A K_0 \left( \sqrt{4b} \right) t^{-1} e^{-t/a - ab/t} $$ | Implementation of the Hantush well function that allows scaling with distance. | In the first implementation the parameters $A$, $a$, and $b$ can be written as: $$ \begin{align} A &= \frac{1}{2 \pi T} K_0 \left( \sqrt{4b} \right) \ a &= cS \ b &= \frac{r^2}{4 \lambda^2} \end{align} $$ In this case parameter $A$ is also known as the "gain", which is equal to the steady-state contribution of a stress with unit 1. For example, the drawdown caused by a well with a continuous extraction rate of 1.0 (the units don't really matter here and are determined by what units the user puts in). In the second implementation, the definition of the parameters $A$ is different, which allows the distance $r$ between an extraction well and an observation well to be passed as a variable. This allows multiple wells to have the same response function, which can be useful to e.g. reduce the number of parameters in a model with multiple extraction wells. When $r$ is passed as a parameter, the formula for $b$ below is simplified by substituting in $1$ for $r$. Note that $r$ is never optimized, but has to be provided by the user. $$ \begin{align} A &= \frac{1}{2 \pi T} \ a &= cS \ b &= \frac{r^2}{4 \lambda^2} \end{align} $$ Which Hantush should I use? So why two implementations? Well, there are advantages and disadvantages to both implementations, which are listed below. 
Hantush
Pro:
- Parameter A is the gain, which makes it easier to interpret the results.
- Estimates the uncertainty of the gain directly.

Con:
- Cannot be used to simulate multiple wells.
- More challenging to relate to aquifer characteristics.

HantushWellModel
Pro:
- Can be used with WellModel to simulate multiple wells with one response function.
- Easier to relate parameters to aquifer characteristics.

Con:
- Does not directly estimate the uncertainty of the gain but this can be calculated using special methods.
- More sensitive to the initial value of parameters, in rare cases the initial parameter values have to be tweaked to get a good fit result.

So which one should you use? It depends on your use-case:

Use Hantush if you are considering a single extraction well and you're interested in calculating the gain and the uncertainty of the gain.
Use HantushWellModel if you are simulating multiple extraction wells or want to pass the distance between extraction and observation well as a known parameter.

Of course these aren't strict rules and it is encouraged to explore different model structures when building your timeseries models. But as a first general guiding principle this should help in selecting which approach is appropriate to your specific problem.
Synthetic example
A synthetic example is used to show both Hantush implementations. First, we create a synthetic timeseries generated with the Hantush response function to which we add autocorrelated residuals.
We set the parameter values for the Hantush response function:
End of explanation
"""

# head observations between 2000 and 2010
idx = pd.date_range("2000", "2010", freq="D")
ho = pd.Series(index=idx, data=0)

# extraction of 100 m3/day between 2002 and 2006
well = pd.Series(index=idx, data=0.0)
well.loc["2002":"2006"] = 100.0
"""
Explanation: Create a head observations timeseries and a timeseries with the well extraction rate.
End of explanation """ delt = (ho.index[1:] - ho.index[:-1]).values / pd.Timedelta("1d") np.random.seed(1) noise = sigma_n * np.random.randn(len(ho)) residuals = np.zeros_like(noise) residuals[0] = noise[0] for i in range(1, len(ho)): residuals[i] = np.exp(-delt[i - 1] / alpha) * residuals[i - 1] + noise[i] hsynthetic = hsynthetic_no_error + residuals """ Explanation: Add the auto-correlated residuals. End of explanation """ ax = hsynthetic_no_error.plot(label='synthetic heads (no error)', figsize=(10, 5)) hsynthetic.plot(ax=ax, color="C1", label="synthetic heads (with error)") ax.legend(loc='best') ax.set_ylabel("head (m+ref)") ax.grid(b=True) """ Explanation: Plot the timeseries. End of explanation """ # Hantush ml_h1 = ps.Model(hsynthetic, name="gain") wm_h1 = ps.StressModel(well, ps.Hantush, name='well', up=False) ml_h1.add_stressmodel(wm_h1) ml_h1.solve(report=False, noise=True) """ Explanation: Create three models: Model with Hantush response function. Model with HantushWellModel response function, but $r$ is not passed as a known parameter. Model with WellModel, which uses HantushWellModel and where $r$ is set to 1.0 m. All three models should yield the similar results and be able to estimate the true values of the parameters reasonably well. End of explanation """ # HantushWellModel ml_h2 = ps.Model(hsynthetic, name="scaled") wm_h2 = ps.StressModel(well, ps.HantushWellModel, name='well', up=False) ml_h2.add_stressmodel(wm_h2) ml_h2.solve(report=False, noise=True) # WellModel r = np.array([1.0]) # parameter r well.name = "well" ml_h3 = ps.Model(hsynthetic, name="wellmodel") wm_h3 = ps.WellModel([well], ps.HantushWellModel, "well", r, up=False) ml_h3.add_stressmodel(wm_h3) ml_h3.solve(report=False, noise=True, solver=ps.LmfitSolve) """ Explanation: Solve with noise model and Hantush_scaled End of explanation """ axes = ps.plots.compare([ml_h1, ml_h2, ml_h3], adjust_height=True, figsize=(10, 8)); """ Explanation: Plot a comparison of all three models. The three models all yield similar results (all the lines overlap). End of explanation """ df = pd.DataFrame(index=["well_gain", "well_a", "well_b"], columns=["True value", "Hantush", "HantushWellModel", "WellModel"]) df["True value"] = A, a, b df["Hantush"] = ( # gain (same as A in this case) wm_h1.rfunc.gain(ml_h1.get_parameters("well")), # a ml_h1.parameters.loc["well_a", "optimal"], # b ml_h1.parameters.loc["well_b", "optimal"] ) df["HantushWellModel"] = ( # gain (not same as A) wm_h2.rfunc.gain(ml_h2.get_parameters("well")), # a ml_h2.parameters.loc["well_a", "optimal"], # b ml_h2.parameters.loc["well_b", "optimal"] ) df["WellModel"] = ( # gain, use WellModel.get_parameters() to get params: A, a, b and r wm_h3.rfunc.gain(wm_h3.get_parameters(model=ml_h3, istress=0)), # a ml_h3.parameters.loc["well_a", "optimal"], # b (multiply parameter value by r^2 for comparison) ml_h3.parameters.loc["well_b", "optimal"] * r[0]**2 ) df """ Explanation: Compare the optimized parameters for each model with the true values we defined at the beginning of this example. Note that we're comparing the value of the gain (not parameter $A$) and that each model has its own method for calculating the gain. As expected, the parameter estimates are reasonably close to the true values defined above. End of explanation """ def variance_gain(ml, wm_name, istress=None): """Calculate variance of the gain for WellModel. 
Variance of the gain is calculated based on propagation of uncertainty using optimal values and the variances of A and b and the covariance between A and b. Parameters ---------- ml : pastas.Model optimized model wm_name : str name of the WellModel istress : int or list of int, optional index of stress to calculate variance of gain for Returns ------- var_gain : float variance of the gain calculated from model results for parameters A and b See Also -------- pastas.HantushWellModel.variance_gain """ wm = ml.stressmodels[wm_name] if ml.fit is None: raise AttributeError("Model not optimized! Run solve() first!") if wm.rfunc._name != "HantushWellModel": raise ValueError("Response function must be HantushWellModel!") # get parameters and (co)variances A = ml.parameters.loc[wm_name + "_A", "optimal"] b = ml.parameters.loc[wm_name + "_b", "optimal"] var_A = ml.fit.pcov.loc[wm_name + "_A", wm_name + "_A"] var_b = ml.fit.pcov.loc[wm_name + "_b", wm_name + "_b"] cov_Ab = ml.fit.pcov.loc[wm_name + "_A", wm_name + "_b"] if istress is None: r = np.asarray(wm.distances) elif isinstance(istress, int) or isinstance(istress, list): r = wm.distances[istress] else: raise ValueError("Parameter 'istress' must be None, list or int!") return wm.rfunc.variance_gain(A, b, var_A, var_b, cov_Ab, r=r) # create dataframe var_gain = pd.DataFrame(index=df.columns[1:]) # add calculated gain var_gain["gain"] = df.iloc[0, 1:].values # Hantush: variance gain is computed directly var_gain.loc["Hantush", "var gain"] = ml_h1.fit.pcov.loc["well_A", "well_A"] # HantushWellModel: calculate variance gain var_gain.loc["HantushWellModel", "var gain"] = wm_h2.rfunc.variance_gain( ml_h2.parameters.loc["well_A", "optimal"], # A ml_h2.parameters.loc["well_b", "optimal"], # b ml_h2.fit.pcov.loc["well_A", "well_A"], # var_A ml_h2.fit.pcov.loc["well_b", "well_b"], # var_b ml_h2.fit.pcov.loc["well_A", "well_b"] # cov_Ab ) # WellModel: calculate variance gain using helper function var_gain.loc["WellModel", "var gain"] = variance_gain(ml_h3, "well", istress=0) # calculate std dev gain var_gain["std gain"] = np.sqrt(var_gain["var gain"]) # show table var_gain.style.format("{:.5e}") """ Explanation: Recall from earlier that when using ps.Hantush the gain and uncertainty of the gain are calculated directly. This is not the case for ps.HantushWellModel, so to obtain the uncertainty of the gain when using that response function there is a method called ps.HantushWellModel.variance_gain() that computes the variance based on the optimal values and (co)variance of parameters $A$ and $b$. The code below shows the calculated gain for each model, and how to calculate the variance and standard deviation of the gain for each model. The results show that the calculated values are all very close, as was expected. End of explanation """
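# Approximate 95% intervals for the gain (sketch only; assumes roughly normal estimates)
gain_ci = pd.DataFrame(index=var_gain.index)
gain_ci["gain"] = var_gain["gain"]
gain_ci["lower"] = var_gain["gain"] - 1.96 * var_gain["std gain"]
gain_ci["upper"] = var_gain["gain"] + 1.96 * var_gain["std gain"]
gain_ci
"""
Explanation: As a rough illustration of how these uncertainties might be used, the standard deviations can be turned into simple 95% intervals around each estimated gain. This sketch assumes the parameter estimates are approximately normal, which is not checked here, and gain_ci is just a convenience name, not part of the Pastas API.
End of explanation
"""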
GoogleCloudPlatform/training-data-analyst
blogs/nexrad2/visualize/radardata.ipynb
apache-2.0
%bash rm -rf data mkdir data cd data RADAR=KIWA YEAR=2013 MONTH=07 DAY=23 HOUR=23 gsutil cp gs://gcp-public-data-nexrad-l2/$YEAR/$MONTH/$DAY/$RADAR/*_$RADAR_${YEAR}${MONTH}${DAY}${HOUR}0000_${YEAR}${MONTH}${DAY}${HOUR}5959.tar temp.tar tar xvf temp.tar rm *.tar ls """ Explanation: <h1> Reading NEXRAD Level II data from Google Cloud public datasets </h1> This notebook demonstrates how to use PyART to visualize data from the Google Cloud public dataset. End of explanation """ # Based on # http://arm-doe.github.io/pyart/dev/auto_examples/plotting/plot_nexrad_multiple_moments.html # by Jonathan J. Helmus ([email protected]) import matplotlib.pyplot as plt import pyart def plot_data(filename): radar = pyart.io.read_nexrad_archive(infilename) display = pyart.graph.RadarDisplay(radar) fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(221) display.plot('velocity', 1, ax=ax, title='Doppler Velocity', colorbar_label='', axislabels=('', 'North South distance from radar (km)')) display.set_limits((-300, 300), (-300, 300), ax=ax) ax = fig.add_subplot(222) display.plot('reflectivity', 0, ax=ax, title='Reflectivity lowest', colorbar_label='', axislabels=('', '')) display.set_limits((-300, 300), (-300, 300), ax=ax) ax = fig.add_subplot(223) display.plot('reflectivity', 1, ax=ax, title='Reflectivity second', colorbar_label='') display.set_limits((-300, 300), (-300, 300), ax=ax) ax = fig.add_subplot(224) display.plot('cross_correlation_ratio', 0, ax=ax, title='Correlation Coefficient', colorbar_label='', axislabels=('East West distance from radar (km)', '')) display.set_limits((-300, 300), (-300, 300), ax=ax) plt.show() """ Explanation: <h3> Install Py-ART </h3> See https://github.com/ARM-DOE/pyart/wiki/Simple-Install-of-Py-ART-using-Anaconda <h3> Plot volume scans using Py-ART within Jupyter </h3> End of explanation """ %writefile plot_pngs.py import matplotlib.pyplot as plt import pyart def plot_data(infilename, outpng): radar = pyart.io.read_nexrad_archive(infilename) display = pyart.graph.RadarDisplay(radar) fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(221) display.plot('velocity', 1, ax=ax, title='Doppler Velocity', colorbar_label='', axislabels=('', 'North South distance from radar (km)')) display.set_limits((-300, 300), (-300, 300), ax=ax) ax = fig.add_subplot(222) display.plot('reflectivity', 0, ax=ax, title='Reflectivity lowest', colorbar_label='', axislabels=('', '')) display.set_limits((-300, 300), (-300, 300), ax=ax) ax = fig.add_subplot(223) display.plot('reflectivity', 1, ax=ax, title='Reflectivity second', colorbar_label='') display.set_limits((-300, 300), (-300, 300), ax=ax) ax = fig.add_subplot(224) display.plot('cross_correlation_ratio', 0, ax=ax, title='Correlation Coefficient', colorbar_label='', axislabels=('East West distance from radar (km)', '')) display.set_limits((-300, 300), (-300, 300), ax=ax) fig.savefig(outpng) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='plot some radar data') parser.add_argument('nexrad', help="volume scan filename") parser.add_argument('png', help="output png filename") args = parser.parse_args() print "Plotting {} into {}".format(args.nexrad, args.png) plot_data(args.nexrad, args.png) %bash python plot_pngs.py data/KIWA20130723_235451_V06.gz radarplot.png """ Explanation: <h2> Plot into png </h2> End of explanation """ %bash rm -rf images mkdir images for volumefile in $(ls data); do base=$(basename $volumefile) python plot_pngs.py data/$volumefile images/$base.png done """ Explanation: <h2> 
Create an animation from the PNGs </h2>
End of explanation
"""
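import glob
from PIL import Image  # Pillow is assumed to be available

# Stitch the PNGs into an animated GIF (sketch only; output name and frame duration are arbitrary)
png_files = sorted(glob.glob('images/*.png'))
frames = [Image.open(f) for f in png_files]
if frames:
    frames[0].save('volume_scans.gif', save_all=True, append_images=frames[1:],
                   duration=500, loop=0)
"""
Explanation: The loop above only writes individual PNG files. One way to stitch them into an animation is sketched below; it assumes the Pillow library is installed, and the output name, the 500 ms frame duration and the choice of GIF are arbitrary.
End of explanation
"""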
palrogg/foundations-homework
extra/join_lines.ipynb
mit
import re filename = 'tabula-Actelion_transparency-report-2015' file = open(filename+'.csv', 'r') content = file.readlines() content[:10] """ Explanation: Change the filename here The csv file without extension. Its new name will be [filename]-corrected.csv End of explanation """ twodigits = re.compile('\.?\d{2}"?$') #re.match(twodigits, '14"') re.match(twodigits, '.14') """ Explanation: Regexp to match the two last digits of each line Change it if your doc has another format End of explanation """ c = 0 def join_broken_lines(line_list): global c full_lines = [] full_line = '' for line in line_list: full_line += line[:-1] if len(line) > 1: if line[-2] == '"' or re.match(twodigits, line[-4:-1]): # full_lines.append(str(c)+") " + full_line) full_lines.append(str(c)+") " + full_line) full_line = '' c += 1 return full_lines result = join_broken_lines(content) print(*result[:5], sep="\n") # we print the 5 first lines """ Explanation: Function to join the broken lines End of explanation """ newcontent = str.join("\n", result) fp = open(filename + '-corrected.csv', 'w') fp.write(newcontent) fp.close() """ Explanation: Save to the new file End of explanation """
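# Quick sanity check (sketch only): re-read the corrected file and compare line counts
with open(filename + '-corrected.csv', 'r') as check:
    corrected = check.readlines()
print(len(content), 'raw lines were joined into', len(corrected), 'lines')
print(*corrected[:3], sep='\n')
"""
Explanation: As a quick sanity check, and only as a sketch, we can re-read the file we just wrote, compare the number of raw lines with the number of joined lines, and eyeball the first few results.
End of explanation
"""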
phoebe-project/phoebe2-docs
2.3/tutorials/features.ipynb
gpl-3.0
#!pip install -I "phoebe>=2.3,<2.4" import phoebe from phoebe import u # units logger = phoebe.logger() b = phoebe.default_binary() """ Explanation: Features Features within PHOEBE are anything that can be "attached" to a component or a dataset to inform how to compute the forward-model. These currently include spots and gaussian processes - but the framework is flexible enough to handle future development to support pulsations, rings, disks, etc. Although features are entirely optional and may not be used for most systems, let's get familiar with the basics before moving on to computing the forward model. Setup Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab). End of explanation """ phoebe.list_available_features() """ Explanation: Available Features As you may expect by now, adding a feature will be done through a call to b.add_feature where the first argument is the "kind" of the feature - a list of available options which can be accessed via phoebe.list_available_features. End of explanation """ help(phoebe.parameters.feature.spot) """ Explanation: The API docs for each of these can be found in phoebe.parameters.feature. Each entry will list the allowable component and/or dataset-types that that kind of feature can be attached to. For example: End of explanation """ b.add_feature('spot', component='primary', feature='spot01') b.get_feature('spot01') """ Explanation: Adding a Feature If we look at the API docs for a spot, we can see that it can be attached to any star component, but not attached to a dataset. So when calling b.add_feature, we need to send a valid tag for component that points to a star (i.e. 'primary' or 'secondary') End of explanation """
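# Inspect the spot's parameters and adjust a couple of them.
# The qualifier names and values below are placeholders; check the output of
# get_feature above for the names your PHOEBE version actually exposes.
print(b.filter(feature='spot01'))
b.set_value('relteff', feature='spot01', value=0.9)   # spot cooler than the surrounding surface
b.set_value('radius', feature='spot01', value=25)     # angular radius, assumed to be in degrees
"""
Explanation: Once the spot exists we can inspect and adjust its parameters through the usual bundle methods. This is only a sketch: the qualifier names used below ('relteff', 'radius') are what we expect a spot to expose, but they may differ between versions, so check the output of get_feature above before relying on them, and treat the numbers as placeholders.
End of explanation
"""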
liganega/Gongsu-DataSci
previous/y2017/GongSu06_Errors_and_Exception_Handling.ipynb
gpl-3.0
input_number = input("A number please: ") number = int(input_number) print("제곱의 결과는", number**2, "입니다.") input_number = input("A number please: ") number = int(input_number) print("제곱의 결과는", number**2, "입니다.") """ Explanation: 오류 및 예외 처리 수정 사항 좀 더 실용적인 수학함수 활용 가능 개요 코딩할 때 발생할 수 있는 다양한 오류 살펴 보기 오류 메시지 정보 확인 방법 예외 처리, 즉 오류가 발생할 수 있는 예외적인 상황을 미리 고려하는 방법 소개 오늘의 주요 예제 아래 코드는 input() 함수를 이용하여 사용자로부터 숫자를 입력받아 그 숫자의 제곱을 리턴하는 내용을 담고 있다. 코드를 실행하면 숫자를 입력하라는 창이 나오며, 여기에 숫자 3을 입력하면 정상적으로 작동한다. 하지만, 예를 들어, 3.2를 입력하면 값 오류(value error)가 발생한다. End of explanation """ sentence = 'I am a sentence """ Explanation: 위 코드는 정수들의 제곱을 계산하는 프로그램이다. 하지만 사용자가 경우에 따라 정수 이외의 값을 입력하면 시스템이 다운된다. 이에 대한 해결책을 다루고자 한다. 오류 예제 먼저 오류의 다양한 예제를 살펴보자. 다음 코드들은 모두 오류를 발생시킨다. 예제: 0으로 나누기 오류 python 4.6/0 오류 설명: 0으로 나눌 수 없다. 예제: 문법 오류 python sentence = 'I am a sentence 오류 설명: 문자열 양 끝의 따옴표가 짝이 맞아야 한다. * 작은 따옴표끼리 또는 큰 따옴표끼리 예제: 들여쓰기 문법 오류 python for i in range(3): j = i * 2 print(i, j) 오류 설명: 2번 줄과 3번 줄의 들여쓰기 정도가 동일해야 한다. 예제: 자료형 오류 아래 연산은 모두 오류를 발생시킨다. ```python new_string = 'cat' - 'dog' new_string = 'cat' * 'dog' new_string = 'cat' / 'dog' new_string = 'cat' + 3 new_string = 'cat' - 3 new_string = 'cat' / 3 ``` 이유: 문자열 끼리의 합, 문자열과 정수의 곱셈만 정의되어 있다. 예제: 이름 오류 python print(party) 오류 설명: 미리 선언된 변수만 사용할 수 있다. 예제: 인덱스 오류 python a_string = 'abcdefg' a_string[12] 오류 설명: 인덱스는 문자열의 길이보다 작은 수만 사용할 수 있다. 예제: 값 오류 python int(a_string) 오류 설명: int() 함수는 정수로만 구성된 문자열만 처리할 수 있다. 예제: 속성 오류 python print(a_string.len()) 오류 설명: 문자열 자료형에는 len() 메소드가 존재하지 않는다. 주의: len() 이라는 함수는 문자열의 길이를 확인하지만 문자열 메소드는 아니다. 이후에 다룰 리스트, 튜플 등에 대해서도 사용할 수 있는 함수이다. 오류 확인 앞서 언급한 코드들을 실행하면 오류가 발생하고 어디서 어떤 오류가 발생하였는가에 대한 정보를 파이썬 해석기가 바로 알려 준다. 예제 End of explanation """ a = 0 4/a """ Explanation: 오류를 확인하는 메시지가 처음 볼 때는 매우 생소하다. 위 오류 메시지를 간단하게 살펴보면 다음과 같다. File "&lt;ipython-input-3-a6097ed4dc2e&gt;", line 1 1번 줄에서 오류 발생 sentence = 'I am a sentence ^ 오류 발생 위치 명시 SyntaxError: EOL while scanning string literal 오류 종류 표시: 문법 오류(SyntaxError) 예제 아래 예제는 0으로 나눌 때 발생하는 오류를 나타낸다. 오류에 대한 정보를 잘 살펴보면서 어떤 내용을 담고 있는지 확인해 보아야 한다. End of explanation """ number_to_square = input("정수를 입력하세요: ") # number_to_square 변수의 자료형이 문자열(str)임에 주의하라. # 따라서 연산을 하고 싶으면 정수형(int)으로 형변환을 먼저 해야 한다. number = int(number_to_square) print("제곱의 결과는", number**2, "입니다.") number_to_square = input("정수를 입력하세요: ") # number_to_square 변수의 자료형이 문자열(str)임에 주의하라. # 따라서 연산을 하고 싶으면 정수형(int)으로 형변환을 먼저 해야 한다. number = int(number_to_square) print("제곱의 결과는", number**2, "입니다.") """ Explanation: 오류의 종류 앞서 예제들을 통해 살펴 보았듯이 다양한 종류의 오류가 발생하며, 코드가 길어지거나 복잡해지면 오류가 발생할 가능성은 점차 커진다. 오류의 종류를 파악하면 어디서 왜 오류가 발생하였는지를 보다 쉽게 파악하여 코드를 수정할 수 있게 된다. 따라서 코드의 발생원인을 바로 알아낼 수 있어야 하며 이를 위해서는 오류 메시지를 제대로 확인할 수 있어야 한다. 하지만 여기서는 언급된 예제 정도의 수준만 다루고 넘어간다. 코딩을 하다 보면 어차피 다양한 오류와 마주치게 될 텐데 그때마다 스스로 오류의 내용과 원인을 확인해 나가는 과정을 통해 보다 많은 경험을 쌓는 길 외에는 달리 방법이 없다. 예외 처리 코드에 문법 오류가 포함되어 있는 경우 아예 실행되지 않는다. 그렇지 않은 경우에는 일단 실행이 되고 중간에 오류가 발생하면 바로 멈춰버린다. 이렇게 중간에 오류가 발생할 수 있는 경우를 미리 생각하여 대비하는 과정을 예외 처리(exception handling)라고 부른다. 예를 들어, 오류가 발생하더라도 오류발생 이전까지 생성된 정보들을 저장하거나, 오류발생 이유를 좀 더 자세히 다루거나, 아니면 오류발생에 대한 보다 자세한 정보를 사용자에게 알려주기 위해 예외 처리를 사용한다. 사용방식은 다음과 같다. python try: 코드1 except: 코드2 * 먼저 코드1 부분을 실행한다. * 코드1 부분이 실행되면서 오류가 발생하지 않으면 코드2 부분은 무시하고 다음으로 넘어간다. * 코드1 부분이 실행되면서 오류가 발생하면 더이상 진행하지 않고 바로 코드2 부분을 실행한다. 예제 아래 코드는 input() 함수를 이용하여 사용자로부터 숫자를 입력받아 그 숫자의 제곱을 리턴하고자 하는 내용을 담고 있으며, 코드에는 문법적 오류가 없다. 그리고 코드를 실행하면 숫자를 입력하라는 창이 나온다. 여기에 숫자 3을 입력하면 정상적으로 작동하지만 예를 들어, 3.2를 입력하면 값 오류(value error)가 발생한다. 
End of explanation """ number_to_square = input("정수를 입력하세요: ") try: number = int(number_to_square) print("제곱의 결과는", number ** 2, "입니다.") except: print("정수를 입력해야 합니다.") """ Explanation: 3.2를 입력했을 때 오류가 발생하는 이유는 int() 함수가 정수 모양의 문자열만 처리할 수 있기 때문이다. 사실 정수들의 제곱을 계산하는 프로그램을 작성하였지만 경우에 따라 정수 이외의 값을 입력하는 경우가 발생하게 되며, 이런 경우를 대비해야 한다. 즉, 오류가 발생할 것을 미리 예상해야 하며, 어떻게 대처해야 할지 준비해야 하는데, try ... except ...문을 이용하여 예외를 처리하는 방식을 활용할 수 있다. End of explanation """ while True: try: number = int(input("정수를 입력하세요: ")) print("제곱의 결과는", number**2, "입니다.") break except: print("정수를 입력해야 합니다.") """ Explanation: 올바른 값이 들어올 때까지 입력을 요구할 수 있다. End of explanation """ number_to_square = input("정수를 입력하세요: ") try: number = int(number_to_square) a = 5/(number - 4) print("결과는", a, "입니다.") except ValueError: print("정수를 입력해야 합니다.") except ZeroDivisionError: print("4는 빼고 하세요.") """ Explanation: 오류 종류에 맞추어 다양한 대처를 하기 위해서는 오류의 종류를 명시하여 예외처리를 하면 된다. 아래 코드는 입력 갑에 따라 다른 오류가 발생하고 그에 상응하는 방식으로 예외처리를 실행한다. 값 오류(ValueError)의 경우 End of explanation """ number_to_square = input("A number please: ") try: number = int(number_to_square) a = 5/(number - 4) print("결과는", a, "입니다.") except ValueError: print("정수를 입력해야 합니다.") except ZeroDivisionError: print("4는 빼고 하세요.") """ Explanation: 0으로 나누기 오류(ZeroDivisionError)의 경우 End of explanation """ try: a = 1/0 except ValueError: print("This program stops here.") """ Explanation: 주의: 이와 같이 발생할 수 예외를 가능한 한 모두 염두하는 프로그램을 구현해야 하는 일은 매우 어려운 일이다. 앞서 보았듯이 오류의 종류를 정확히 알 필요가 발생한다. 다음 예제에서 보듯이 오류의 종류를 틀리게 명시하면 예외 처리가 제대로 작동하지 않는다. End of explanation """ def to_define(): """아주 복잡하지만 지금 당장 불필요""" raise NotImplementedError("아직 정의되어 있지 않음") print(to_define()) """ Explanation: raise 함수 강제로 오류를 발생시키고자 하는 경우에 사용한다. 예제 어떤 함수를 정확히 정의하지 않은 상태에서 다른 중요한 일을 먼저 처리하고자 할 때 아래와 같이 함수를 선언하고 넘어갈 수 있다. 그런데 아래 함수를 제대로 선언하지 않은 채로 다른 곳에서 호출하면 "아직 정의되어 있지 않음" 이란 메시지로 정보를 알려주게 된다. End of explanation """ def to_define1(): """아주 복잡하지만 지금 당장 불필요""" print(to_define1()) """ Explanation: 주의: 오류 처리를 사용하지 않으면 오류 메시지가 보이지 않을 수도 있음에 주의해야 한다. End of explanation """ def square(number): """ 정수를 인자로 입력 받아 제곱을 리턴한다. """ square_of_number = number * 2 return square_of_number """ Explanation: 코드의 안전성 문제 문법 오류 또는 실행 중에 오류가 발생하지 않는다 하더라도 코드의 안전성이 보장되지는 않는다. 코드의 안정성이라 함은 코드를 실행할 때 기대하는 결과가 산출된다는 것을 보장한다는 의미이다. 예제 아래 코드는 숫자의 제곱을 리턴하는 square() 함수를 제대로 구현하지 못한 경우를 다룬다. End of explanation """ square(3) """ Explanation: 위 함수를 아래와 같이 호출하면 오류가 전혀 발생하지 않지만, 엉뚱한 값을 리턴한다. End of explanation """ help(square) """ Explanation: 주의: help() 를 이용하여 어떤 함수가 무슨 일을 하는지 내용을 확인할 수 있다. 단, 함수를 정의할 때 함께 적힌 문서화 문자열(docstring) 내용이 확인된다. 따라서, 함수를 정의할 때 문서화 문자열에 가능한 유효한 정보를 입력해 두어야 한다. End of explanation """ number_to_square = input("100을 나눌 숫자를 입력하세요: ") number = int(number_to_square) print("100을 입력한 값으로 나눈 결과는", 100/number, "입니다.") """ Explanation: 오류에 대한 보다 자세한 정보 파이썬에서 다루는 오류에 대한 보다 자세한 정보는 아래 사이트들에 상세하게 안내되어 있다. 파이썬 기본 내장 오류 정보 문서: https://docs.python.org/3.4/library/exceptions.html 파이썬 예외처리 정보 문서: https://docs.python.org/3.4/tutorial/errors.html 연습문제 연습 아래 코드는 100을 입력한 값으로 나누는 함수이다. 다만 0을 입력할 경우 0으로 나누기 오류(ZeroDivisionError)가 발생한다. 
End of explanation """ number_to_square = input("A number to divide 100: ") try: number = float(number_to_square) print("100을 입력한 값으로 나눈 결과는", 100/number, "입니다.") except ZeroDivisionError: raise ZeroDivisionError('0이 아닌 숫자를 입력하세요.') except ValueError: raise ValueError('숫자를 입력하세요.') number_to_square = input("A number to divide 100: ") try: number = float(number_to_square) print("100을 입력한 값으로 나눈 결과는", 100/number, "입니다.") except ZeroDivisionError: raise ZeroDivisionError('0이 아닌 숫자를 입력하세요.') except ValueError: raise ValueError('숫자를 입력하세요.') """ Explanation: 아래 내용이 충족되도록 위 코드를 수정하라. 나눗셈이 부동소수점으로 계산되도록 한다. 0이 아닌 숫자가 입력될 경우 100을 그 숫자로 나눈다. 0이 입력될 경우 0이 아닌 숫자를 입력하라고 전달한다. 숫자가 아닌 값이 입력될 경우 숫자를 입력하라고 전달한다. 견본답안: End of explanation """ while True: try: a, b = input("정수 두 개를 입력하세요. 쉼표를 사용해야 합니다.\n").split(',') a, b = int(a), int(b) print("계산의 결과는", a/b, "입니다.") break except ValueError: print("정수 두 개를 쉼표로 구분해서 입력해야 합니다.\n") except ZeroDivisionError: print("둘째 수는 0이 아니어야 합니다.\n") """ Explanation: 연습 두 개의 정수 a와 b를 입력 받아 a/b를 계산하여 출력하는 코드를 작성하라. 견본답안 1: End of explanation """ while True: try: a, b = map(int, input("정수 두 개를 입력하세요. 쉼표를 사용해야 합니다.\n").split(',')) print("계산의 결과는", a/b, "입니다.") break except ValueError: print("정수 두 개를 쉼표로 구분해서 입력해야 합니다.\n") except ZeroDivisionError: print("둘째 수는 0이 아니어야 합니다.\n") """ Explanation: 견본답안 2: map 함수를 활용하여 a, b 각각에 int 함수를 자동으로 적용할 수 있다. map 함수에 대한 설명은 여기를 참조하면 된다. End of explanation """ while True: try: print("키와 몸무게를 입력하세요: ") a, b = map(float, input().split(", ")) BMI = b/(a**2) if BMI <= 18.5: print("BMI는", BMI, "입니다. 저체중입니다.") elif 18.5 < BMI <= 23: print("BMI는", BMI, "입니다. 정상 체중입니다.") elif 23 < BMI <= 25: print("BMI는", BMI, "입니다. 비만입니다.") elif 25 < BMI <= 30: print("BMI는", BMI, "입니다. 과체중입니다.") else: print("BMI는", BMI, "입니다. 고도비만입니다.") break except ValueError: print("숫자를 입력하세요.") except ZeroDivisionError: print("0이 아닌 숫자를 입력하세요.") """ Explanation: 연습 키와 몸무게를 인자로 받아 체질량지수(BMI)를 구하는 코드를 작성하라. 아래 사항들을 참고한다. $$BMI = \frac{weight}{height^2}$$ 단위: 몸무게(weight): kg 키(height): m BMI 수치에 따른 체중 분류 BMI &lt;= 18.5이면 저체중 18.5 &lt; BMI &lt;= 23이면 정상 23 &lt; BMI &lt;= 25이면 과체중 25 &lt; BMI &lt;= 30이면 비만 BMI &gt; 30이면 고도비만 견본답안: End of explanation """
XinyiGong/pymks
notebooks/intro.ipynb
mit
%matplotlib inline %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt """ Explanation: Meet PyMKS In this short introduction, we will demonstrate the functionality of PyMKS to compute 2-point statistics in order to objectively quantify microstructures, predict effective properties using homogenization and predict local properties using localization. If you would like more technical details amount any of these methods please see the theory section. End of explanation """ from pymks.datasets import make_microstructure X_1 = make_microstructure(n_samples=1, grain_size=(25, 25)) X_2 = make_microstructure(n_samples=1, grain_size=(15, 95)) X = np.concatenate((X_1, X_2)) """ Explanation: Quantify Microstructures using 2-Point Statistics Lets make two dual phase microstructures with different morphologies. End of explanation """ from pymks.tools import draw_microstructures draw_microstructures(X) """ Explanation: Throughout PyMKS X is used to represent microstructures. Now that we have made the two microstructures, lets take a look at them. End of explanation """ from pymks import PrimitiveBasis from pymks.stats import correlate prim_basis = PrimitiveBasis(n_states=2, domain=[0, 1]) X_ = prim_basis.discretize(X) X_corr = correlate(X_, periodic_axes=[0, 1]) """ Explanation: We can compute the 2-point statistics for these two periodic microstructures using the correlate function from pymks.stats. This function computes all of the autocorrelations and cross-correlation(s) for a microstructure. Before we compute the 2-point statistics, we will discretize them using the PrimitiveBasis function. End of explanation """ from pymks.tools import draw_correlations print X_corr[0].shape draw_correlations(X_corr[0]) draw_correlations(X_corr[1]) """ Explanation: Let's take a look at the two autocorrelations and the cross-correlation for these two microstructures. End of explanation """ from pymks.datasets import make_elastic_stress_random grain_size = [(47, 6), (4, 49), (14, 14)] n_samples = [200, 200, 200] X_train, y_train = make_elastic_stress_random(n_samples=n_samples, size=(51, 51), grain_size=grain_size, seed=0) """ Explanation: 2-Point statistics provide an object way to compare microstructures, and have been shown as an effective input to machine learning methods. Predict Homogenized Properties In this section of the intro, we are going to predict the effective stiffness for two phase microstructures using the MKSHomogenizationModel, but we could have chosen any other effective material property. First we need to make some microstructures and their effective stress values to fit our model. Let's create 200 random instances 3 different types of microstructures, totaling to 600 microstructures. End of explanation """ draw_microstructures(X_train[::200]) """ Explanation: Once again, X_train is our microstructures. Throughout PyMKS y is used as either the prpoerty or the field we would like to predict. In this case y_train is the effective stress values for X_train. Let's look at one of each of the three different types of microstructures. End of explanation """ from pymks import MKSHomogenizationModel prim_basis = PrimitiveBasis(n_states=2, domain=[0, 1]) homogenize_model = MKSHomogenizationModel(basis=prim_basis, correlations=[(0, 0), (1, 1), (0, 1)]) """ Explanation: The MKSHomogenizationModel uses 2-point statistics, so we need provide a discretization method for the microstructures by providing a basis function. We will also specify which correlations we want. 
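As a side sketch (a hypothetical variant, not used in the rest of this notebook), the cross-correlation could be dropped to build a model from the two autocorrelations alone:

```python
# Hypothetical variant: use only the (0, 0) and (1, 1) autocorrelations
homogenize_model_auto = MKSHomogenizationModel(basis=prim_basis,
                                               correlations=[(0, 0), (1, 1)])
```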
End of explanation """ homogenize_model.fit(X_train, y_train, periodic_axes=[0, 1]) """ Explanation: Let's fit our model with the data we created. End of explanation """ n_samples = [10, 10, 10] X_test, y_test = make_elastic_stress_random(n_samples=n_samples, size=(51, 51), grain_size=grain_size, seed=100) """ Explanation: Now let's make some new data to see how good our model is. End of explanation """ y_pred = homogenize_model.predict(X_test, periodic_axes=[0, 1]) """ Explanation: We will try and predict the effective stress of our X_test microstructures. End of explanation """ from pymks.tools import draw_components draw_components([homogenize_model.reduced_fit_data, homogenize_model.reduced_predict_data], ['Training Data', 'Testing Data']) """ Explanation: The MKSHomogenizationModel generates low dimensional representations of microstructures and regression methods to predict effective properties. Take a look at the low dimensional representations. End of explanation """ from pymks.tools import draw_goodness_of_fit fit_data = np.array([y_train, homogenize_model.predict(X_train, periodic_axes=[0, 1])]) pred_data = np.array([y_test, y_pred]) draw_goodness_of_fit(fit_data, pred_data, ['Training Data', 'Testing Data']) """ Explanation: Now let's look at a goodness of fit plot for our MKSHomogenizationModel. End of explanation """ from pymks.datasets import make_elastic_FE_strain_delta X_delta, y_delta = make_elastic_FE_strain_delta() """ Explanation: Looks good. The MKSHomogenizationModel can be used to predict effective properties and processing-structure evolutions. Predict Local Properties In this section of the intro, we are going to predict the local strain field in a microstructure using MKSLocalizationModel, but we could have predicted another local property. First we need some data, so let's make some. End of explanation """ from pymks import MKSLocalizationModel prim_basis = PrimitiveBasis(n_states=2) localize_model = MKSLocalizationModel(basis=prim_basis) """ Explanation: Once again, X_delta is our microstructures and y_delta is our local strain fields. We need to discretize the microstructure again so we will also use the same basis function. End of explanation """ localize_model.fit(X_delta, y_delta) """ Explanation: Let's use the data to fit our MKSLocalizationModel. End of explanation """ from pymks.datasets import make_elastic_FE_strain_random X_test, y_test = make_elastic_FE_strain_random() """ Explanation: Now that we have fit our model, we will create a random microstructure and compute its local strain field using finite element analysis. We will then try and reproduce the same strain field with our model. End of explanation """ from pymks.tools import draw_microstructure_strain draw_microstructure_strain(X_test[0], y_test[0]) """ Explanation: Let's look at the microstructure and its local strain field. End of explanation """ from pymks.tools import draw_strains_compare y_pred = localize_model.predict(X_test) draw_strains_compare(y_test[0], y_pred[0]) """ Explanation: Now let's pass that same microstructure to our MKSLocalizationModel and compare the predicted and computed local strain field. End of explanation """
deepchem/deepchem
examples/tutorials/Conditional_Generative_Adversarial_Networks.ipynb
mit
!pip install --pre deepchem import deepchem deepchem.__version__ """ Explanation: Conditional Generative Adversarial Network A Generative Adversarial Network (GAN) is a type of generative model. It consists of two parts called the "generator" and the "discriminator". The generator takes random values as input and transforms them into an output that (hopefully) resembles the training data. The discriminator takes a set of samples as input and tries to distinguish the real training samples from the ones created by the generator. Both of them are trained together. The discriminator tries to get better and better at telling real from false data, while the generator tries to get better and better at fooling the discriminator. A Conditional GAN (CGAN) allows additional inputs to the generator and discriminator that their output is conditioned on. For example, this might be a class label, and the GAN tries to learn how the data distribution varies between classes. Colab This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link. Setup To run DeepChem within Colab, you'll need to run the following cell of installation commands. End of explanation """ import deepchem as dc import numpy as np import tensorflow as tf n_classes = 4 class_centers = np.random.uniform(-4, 4, (n_classes, 2)) class_transforms = [] for i in range(n_classes): xscale = np.random.uniform(0.5, 2) yscale = np.random.uniform(0.5, 2) angle = np.random.uniform(0, np.pi) m = [[xscale*np.cos(angle), -yscale*np.sin(angle)], [xscale*np.sin(angle), yscale*np.cos(angle)]] class_transforms.append(m) class_transforms = np.array(class_transforms) """ Explanation: For this example, we will create a data distribution consisting of a set of ellipses in 2D, each with a random position, shape, and orientation. Each class corresponds to a different ellipse. Let's randomly generate the ellipses. For each one we select a random center position, X and Y size, and rotation angle. We then create a transformation matrix that maps the unit circle to the ellipse. End of explanation """ def generate_data(n_points): classes = np.random.randint(n_classes, size=n_points) r = np.random.random(n_points) angle = 2*np.pi*np.random.random(n_points) points = (r*np.array([np.cos(angle), np.sin(angle)])).T points = np.einsum('ijk,ik->ij', class_transforms[classes], points) points += class_centers[classes] return classes, points """ Explanation: This function generates random data from the distribution. For each point it chooses a random class, then a random position in that class' ellipse. End of explanation """ %matplotlib inline import matplotlib.pyplot as plot classes, points = generate_data(1000) plot.scatter(x=points[:,0], y=points[:,1], c=classes) """ Explanation: Let's plot a bunch of random points drawn from this distribution to see what it looks like. Points are colored based on their class label. 
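As a quick sanity check (a sketch, not part of the original tutorial), the shape and class balance of the sampled batch can be printed before plotting:

```python
# Sketch: one 2D point per sample, and a roughly even split across the 4 classes
print(points.shape)
print(np.bincount(classes))
```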
End of explanation """ from tensorflow.keras.layers import Concatenate, Dense, Input class ExampleGAN(dc.models.GAN): def get_noise_input_shape(self): return (10,) def get_data_input_shapes(self): return [(2,)] def get_conditional_input_shapes(self): return [(n_classes,)] def create_generator(self): noise_in = Input(shape=(10,)) conditional_in = Input(shape=(n_classes,)) gen_in = Concatenate()([noise_in, conditional_in]) gen_dense1 = Dense(30, activation=tf.nn.relu)(gen_in) gen_dense2 = Dense(30, activation=tf.nn.relu)(gen_dense1) generator_points = Dense(2)(gen_dense2) return tf.keras.Model(inputs=[noise_in, conditional_in], outputs=[generator_points]) def create_discriminator(self): data_in = Input(shape=(2,)) conditional_in = Input(shape=(n_classes,)) discrim_in = Concatenate()([data_in, conditional_in]) discrim_dense1 = Dense(30, activation=tf.nn.relu)(discrim_in) discrim_dense2 = Dense(30, activation=tf.nn.relu)(discrim_dense1) discrim_prob = Dense(1, activation=tf.sigmoid)(discrim_dense2) return tf.keras.Model(inputs=[data_in, conditional_in], outputs=[discrim_prob]) gan = ExampleGAN(learning_rate=1e-4) """ Explanation: Now let's create the model for our CGAN. DeepChem's GAN class makes this very easy. We just subclass it and implement a few methods. The two most important are: create_generator() constructs a model implementing the generator. The model takes as input a batch of random noise plus any condition variables (in our case, the one-hot encoded class of each sample). Its output is a synthetic sample that is supposed to resemble the training data. create_discriminator() constructs a model implementing the discriminator. The model takes as input the samples to evaluate (which might be either real training data or synthetic samples created by the generator) and the condition variables. Its output is a single number for each sample, which will be interpreted as the probability that the sample is real training data. In this case, we use very simple models. They just concatenate the inputs together and pass them through a few dense layers. Notice that the final layer of the discriminator uses a sigmoid activation. This ensures it produces an output between 0 and 1 that can be interpreted as a probability. We also need to implement a few methods that define the shapes of the various inputs. We specify that the random noise provided to the generator should consist of ten numbers for each sample; that each data sample consists of two numbers (the X and Y coordinates of a point in 2D); and that the conditional input consists of n_classes numbers for each sample (the one-hot encoded class index). End of explanation """ def iterbatches(batches): for i in range(batches): classes, points = generate_data(gan.batch_size) classes = dc.metrics.to_one_hot(classes, n_classes) yield {gan.data_inputs[0]: points, gan.conditional_inputs[0]: classes} gan.fit_gan(iterbatches(5000)) """ Explanation: Now to fit the model. We do this by calling fit_gan(). The argument is an iterator that produces batches of training data. More specifically, it needs to produce dicts that map all data inputs and conditional inputs to the values to use for them. In our case we can easily create as much random data as we need, so we define a generator that calls the generate_data() function defined above for each new batch. 
End of explanation """ classes, points = generate_data(1000) one_hot_classes = dc.metrics.to_one_hot(classes, n_classes) gen_points = gan.predict_gan_generator(conditional_inputs=[one_hot_classes]) plot.scatter(x=gen_points[:,0], y=gen_points[:,1], c=classes) """ Explanation: Have the trained model generate some data, and see how well it matches the training distribution we plotted before. End of explanation """
astarostin/MachineLearningSpecializationCoursera
course2/week3/Preprocessing_LR.ipynb
apache-2.0
import pandas as pd import numpy as np import matplotlib from matplotlib import pyplot as plt matplotlib.style.use('ggplot') %matplotlib inline """ Explanation: Предобработка данных и логистическая регрессия для задачи бинарной классификации Programming assignment В задании вам будет предложено ознакомиться с основными техниками предобработки данных, а так же применить их для обучения модели логистической регрессии. Ответ потребуется загрузить в соответствующую форму в виде 6 текстовых файлов. Для выполнения задания требуется Python версии 2.7, а также актуальные версии библиотек: - NumPy: 1.10.4 и выше - Pandas: 0.17.1 и выше - Scikit-learn: 0.17 и выше End of explanation """ data = pd.read_csv('data.csv') data.shape """ Explanation: Описание датасета Задача: по 38 признакам, связанных с заявкой на грант (область исследований учёных, информация по их академическому бэкграунду, размер гранта, область, в которой он выдаётся) предсказать, будет ли заявка принята. Датасет включает в себя информацию по 6000 заявкам на гранты, которые были поданы в университете Мельбурна в период с 2004 по 2008 год. Полную версию данных с большим количеством признаков можно найти на https://www.kaggle.com/c/unimelb. End of explanation """ X = data.drop('Grant.Status', 1) y = data['Grant.Status'] """ Explanation: Выделим из датасета целевую переменную Grant.Status и обозначим её за y Теперь X обозначает обучающую выборку, y - ответы на ней End of explanation """ data.head() """ Explanation: Теория по логистической регрессии После осознания того, какую именно задачу требуется решить на этих данных, следующим шагом при реальном анализе был бы подбор подходящего метода. В данном задании выбор метода было произведён за вас, это логистическая регрессия. Кратко напомним вам используемую модель. Логистическая регрессия предсказывает вероятности принадлежности объекта к каждому классу. Сумма ответов логистической регрессии на одном объекте для всех классов равна единице. $$ \sum_{k=1}^K \pi_{ik} = 1, \quad \pi_k \equiv P\,(y_i = k \mid x_i, \theta), $$ где: - $\pi_{ik}$ - вероятность принадлежности объекта $x_i$ из выборки $X$ к классу $k$ - $\theta$ - внутренние параметры алгоритма, которые настраиваются в процессе обучения, в случае логистической регрессии - $w, b$ Из этого свойства модели в случае бинарной классификации требуется вычислить лишь вероятность принадлежности объекта к одному из классов (вторая вычисляется из условия нормировки вероятностей). Эта вероятность вычисляется, используя логистическую функцию: $$ P\,(y_i = 1 \mid x_i, \theta) = \frac{1}{1 + \exp(-w^T x_i-b)} $$ Параметры $w$ и $b$ находятся, как решения следующей задачи оптимизации (указаны функционалы с L1 и L2 регуляризацией, с которыми вы познакомились в предыдущих заданиях): L2-regularization: $$ Q(X, y, \theta) = \frac{1}{2} w^T w + C \sum_{i=1}^l \log ( 1 + \exp(-y_i (w^T x_i + b ) ) ) \longrightarrow \min\limits_{w,b} $$ L1-regularization: $$ Q(X, y, \theta) = \sum_{d=1}^D |w_d| + C \sum_{i=1}^l \log ( 1 + \exp(-y_i (w^T x_i + b ) ) ) \longrightarrow \min\limits_{w,b} $$ $C$ - это стандартный гиперпараметр модели, который регулирует то, насколько сильно мы позволяем модели подстраиваться под данные. Предобработка данных Из свойств данной модели следует, что: - все $X$ должны быть числовыми данными (в случае наличия среди них категорий, их требуется некоторым способом преобразовать в вещественные числа) - среди $X$ не должно быть пропущенных значений (т.е. 
все пропущенные значения перед применением модели следует каким-то образом заполнить) Поэтому базовым этапом в предобработке любого датасета для логистической регрессии будет кодирование категориальных признаков, а так же удаление или интерпретация пропущенных значений (при наличии того или другого). End of explanation """ numeric_cols = ['RFCD.Percentage.1', 'RFCD.Percentage.2', 'RFCD.Percentage.3', 'RFCD.Percentage.4', 'RFCD.Percentage.5', 'SEO.Percentage.1', 'SEO.Percentage.2', 'SEO.Percentage.3', 'SEO.Percentage.4', 'SEO.Percentage.5', 'Year.of.Birth.1', 'Number.of.Successful.Grant.1', 'Number.of.Unsuccessful.Grant.1'] categorical_cols = list(set(X.columns.values.tolist()) - set(numeric_cols)) """ Explanation: Видно, что в датасете есть как числовые, так и категориальные признаки. Получим списки их названий: End of explanation """ data.dropna().shape """ Explanation: Также в нём присутствуют пропущенные значения. Очевидны решением будет исключение всех данных, у которых пропущено хотя бы одно значение. Сделаем это: End of explanation """ def calculate_means(numeric_data): means = np.zeros(numeric_data.shape[1]) for j in range(numeric_data.shape[1]): to_sum = numeric_data.iloc[:,j] indices = np.nonzero(~numeric_data.iloc[:,j].isnull())[0] correction = np.amax(to_sum[indices]) to_sum /= correction for i in indices: means[j] += to_sum[i] means[j] /= indices.size means[j] *= correction return pd.Series(means, numeric_data.columns) # place your code here X_real_zeros = X[numeric_cols].fillna(0) X_real_mean = X[numeric_cols].fillna(calculate_means(X[numeric_cols])) X_cat = X[categorical_cols].fillna('NA').astype(str) """ Explanation: Видно, что тогда мы выбросим почти все данные, и такой метод решения в данном случае не сработает. Пропущенные значения можно так же интерпретировать, для этого существует несколько способов, они различаются для категориальных и вещественных признаков. Для вещественных признаков: - заменить на 0 (данный признак давать вклад в предсказание для данного объекта не будет) - заменить на среднее (каждый пропущенный признак будет давать такой же вклад, как и среднее значение признака на датасете) Для категориальных: - интерпретировать пропущенное значение, как ещё одну категорию (данный способ является самым естественным, так как в случае категорий у нас есть уникальная возможность не потерять информацию о наличии пропущенных значений; обратите внимание, что в случае вещественных признаков данная информация неизбежно теряется) Задание 0. Обработка пропущенных значений. Заполните пропущенные вещественные значения в X нулями и средними по столбцам, назовите полученные датафреймы X_real_zeros и X_real_mean соответственно. Для подсчёта средних используйте описанную ниже функцию calculate_means, которой требуется передать на вход вешественные признаки из исходного датафрейма. Все категориальные признаки в X преобразуйте в строки, пропущенные значения требуется также преобразовать в какие-либо строки, которые не являются категориями (например, 'NA'), полученный датафрейм назовите X_cat. Для объединения выборок здесь и далее в задании рекомендуется использовать функции np.hstack(...) np.vstack(...) 
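A minimal illustration of these two helpers on small hypothetical arrays, just to show their semantics:

```python
a = np.array([[1, 2],
              [3, 4]])
b = np.array([[5],
              [6]])
np.hstack((a, b))   # joins columns side by side -> shape (2, 3)
np.vstack((a, a))   # stacks rows on top of each other -> shape (4, 2)
```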
End of explanation """ from sklearn.linear_model import LogisticRegression as LR from sklearn.feature_extraction import DictVectorizer as DV categorial_data = pd.DataFrame({'sex': ['male', 'female', 'male', 'female'], 'nationality': ['American', 'European', 'Asian', 'European']}) print('Исходные данные:\n') print(categorial_data) print('Исходные данные:\n') print(categorial_data.T.to_dict().values()) encoder = DV(sparse = False) encoded_data = encoder.fit_transform(categorial_data.T.to_dict().values()) print('\nЗакодированные данные:\n') print(encoded_data) """ Explanation: Преобразование категориальных признаков. В предыдущей ячейке мы разделили наш датасет ещё на две части: в одной присутствуют только вещественные признаки, в другой только категориальные. Это понадобится нам для раздельной последующей обработке этих данных, а так же для сравнения качества работы тех или иных методов. Для использования модели регрессии требуется преобразовать категориальные признаки в вещественные. Рассмотрим основной способ преоборазования категориальных признаков в вещественные: one-hot encoding. Его идея заключается в том, что мы преобразуем категориальный признак при помощи бинарного кода: каждой категории ставим в соответствие набор из нулей и единиц. Посмотрим, как данный метод работает на простом наборе данных. End of explanation """ encoder = DV(sparse = False) X_cat_oh = encoder.fit_transform(X_cat.T.to_dict().values()) """ Explanation: Как видно, в первые три колонки оказалась закодированна информация о стране, а во вторые две - о поле. При этом для совпадающих элементов выборки строки будут полностью совпадать. Также из примера видно, что кодирование признаков сильно увеличивает их количество, но полностью сохраняет информацию, в том числе о наличии пропущенных значений (их наличие просто становится одним из бинарных признаков в преобразованных данных). Теперь применим one-hot encoding к категориальным признакам из исходного датасета. Обратите внимание на общий для всех методов преобработки данных интерфейс. Функция encoder.fit_transform(X) позволяет вычислить необходимые параметры преобразования, впоследствии к новым данным можно уже применять функцию encoder.transform(X) Очень важно применять одинаковое преобразование как к обучающим, так и тестовым данным, потому что в противном случае вы получите непредсказуемые, и, скорее всего, плохие результаты. В частности, если вы отдельно закодируете обучающую и тестовую выборку, то получите вообще говоря разные коды для одних и тех же признаков, и ваше решение работать не будет. Также параметры многих преобразований (например, рассмотренное ниже масштабирование) нельзя вычислять одновременно на данных из обучения и теста, потому что иначе подсчитанные на тесте метрики качества будут давать смещённые оценки на качество работы алгоритма. Кодирование категориальных признаков не считает на обучающей выборке никаких параметров, поэтому его можно применять сразу к всему датасету. End of explanation """ from sklearn.cross_validation import train_test_split (X_train_real_zeros, X_test_real_zeros, y_train, y_test) = train_test_split(X_real_zeros, y, test_size=0.3, random_state=0) (X_train_real_mean, X_test_real_mean) = train_test_split(X_real_mean, test_size=0.3, random_state=0) (X_train_cat_oh, X_test_cat_oh) = train_test_split(X_cat_oh, test_size=0.3, random_state=0) """ Explanation: Для построения метрики качества по результату обучения требуется разделить исходный датасет на обучающую и тестовую выборки. 
Обращаем внимание на заданный параметр для генератора случайных чисел: random_state. Так как результаты на обучении и тесте будут зависеть от того, как именно вы разделите объекты, то предлагается использовать заранее определённое значение для получение результатов, согласованных с ответами в системе проверки заданий. End of explanation """ from sklearn.linear_model import LogisticRegression from sklearn.grid_search import GridSearchCV from sklearn.metrics import roc_auc_score def plot_scores(optimizer): scores = [[item[0]['C'], item[1], (np.sum((item[2]-item[1])**2)/(item[2].size-1))**0.5] for item in optimizer.grid_scores_] scores = np.array(scores) plt.semilogx(scores[:,0], scores[:,1]) plt.fill_between(scores[:,0], scores[:,1]-scores[:,2], scores[:,1]+scores[:,2], alpha=0.3) plt.show() def write_answer_1(auc_1, auc_2): auc = (auc_1 + auc_2)/2 with open("preprocessing_lr_answer1.txt", "w") as fout: fout.write(str(auc)) param_grid = {'C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10]} cv = 3 # place your code here train_mean = pd.DataFrame(np.hstack((X_train_real_mean.values, X_train_cat_oh))) train_zero = pd.DataFrame(np.hstack((X_train_real_zeros.values, X_train_cat_oh))) test_mean = pd.DataFrame(np.hstack((X_test_real_mean.values, X_test_cat_oh))) test_zero = pd.DataFrame(np.hstack((X_test_real_zeros.values, X_test_cat_oh))) estimator = LogisticRegression() optimizer_mean = GridSearchCV(estimator, param_grid, cv=cv) optimizer_zero = GridSearchCV(estimator, param_grid, cv=cv) optimizer_mean.fit(train_mean,y_train) optimizer_zero.fit(train_zero,y_train) plot_scores(optimizer_mean) print(optimizer_mean.best_params_) score_mean = roc_auc_score(y_test, optimizer_mean.predict_proba(test_mean)[:,1]) plot_scores(optimizer_zero) print(optimizer_zero.best_params_) score_zero = roc_auc_score(y_test, optimizer_zero.predict_proba(test_zero)[:,1]) write_answer_1(score_mean, score_zero) X_train_real_mean print score_mean, score_zero """ Explanation: Описание классов Итак, мы получили первые наборы данных, для которых выполнены оба ограничения логистической регрессии на входные данные. Обучим на них регрессию, используя имеющийся в библиотеке sklearn функционал по подбору гиперпараметров модели optimizer = GridSearchCV(estimator, param_grid) где: - estimator - обучающий алгоритм, для которого будет производиться подбор параметров - param_grid - словарь параметров, ключами которого являются строки-названия, которые передаются алгоритму estimator, а значения - набор параметров для перебора Данный класс выполняет кросс-валидацию обучающей выборки для каждого набора параметров и находит те, на которых алгоритм работает лучше всего. Этот метод позволяет настраивать гиперпараметры по обучающей выборке, избегая переобучения. Некоторые опциональные параметры вызова данного класса, которые нам понадобятся: - scoring - функционал качества, максимум которого ищется кросс валидацией, по умолчанию используется функция score() класса esimator - n_jobs - позволяет ускорить кросс-валидацию, выполняя её параллельно, число определяет количество одновременно запущенных задач - cv - количество фолдов, на которые разбивается выборка при кросс-валидации После инициализации класса GridSearchCV, процесс подбора параметров запускается следующим методом: optimizer.fit(X, y) На выходе для получения предсказаний можно пользоваться функцией optimizer.predict(X) для меток или optimizer.predict_proba(X) для вероятностей (в случае использования логистической регрессии). 
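A minimal sketch of these two calls, reusing the optimizer_mean and test_mean objects defined in the code above:

```python
predicted_labels = optimizer_mean.predict(test_mean)              # predicted class labels
predicted_probs = optimizer_mean.predict_proba(test_mean)[:, 1]   # probability of class 1
```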
Также можно напрямую получить оптимальный класс estimator и оптимальные параметры, так как они является атрибутами класса GridSearchCV: - best_estimator_ - лучший алгоритм - best_params_ - лучший набор параметров Класс логистической регрессии выглядит следующим образом: estimator = LogisticRegression(penalty) где penalty принимает либо значение 'l2', либо 'l1'. По умолчанию устанавливается значение 'l2', и везде в задании, если об этом не оговорено особо, предполагается использование логистической регрессии с L2-регуляризацией. Задание 1. Сравнение способов заполнения вещественных пропущенных значений. Составьте две обучающие выборки из вещественных и категориальных признаков: в одной вещественные признаки, где пропущенные значения заполнены нулями, в другой - средними. Рекомендуется записывать в выборки сначала вещественные, а потом категориальные признаки. Обучите на них логистическую регрессию, подбирая параметры из заданной сетки param_grid по методу кросс-валидации с числом фолдов cv=3. В качестве оптимизируемой функции используйте заданную по умолчанию. Постройте два графика оценок точности +- их стандратного отклонения в зависимости от гиперпараметра и убедитесь, что вы действительно нашли её максимум. Также обратите внимание на большую дисперсию получаемых оценок (уменьшить её можно увеличением числа фолдов cv). Получите две метрики качества AUC ROC на тестовой выборке и сравните их между собой. Какой способ заполнения пропущенных вещественных значений работает лучше? В дальнейшем для выполнения задания в качестве вещественных признаков используйте ту выборку, которая даёт лучшее качество на тесте. Передайте два значения AUC ROC (сначала для выборки, заполненной средними, потом для выборки, заполненной нулями) в функцию write_answer_1 и запустите её. Полученный файл является ответом на 1 задание. Информация для интересующихся: вообще говоря, не вполне логично оптимизировать на кросс-валидации заданный по умолчанию в классе логистической регрессии функционал accuracy, а измерять на тесте AUC ROC, но это, как и ограничение размера выборки, сделано для ускорения работы процесса кросс-валидации. End of explanation """ from pandas.tools.plotting import scatter_matrix data_numeric = pd.DataFrame(X_train_real_zeros, columns=numeric_cols) list_cols = ['Number.of.Successful.Grant.1', 'SEO.Percentage.2', 'Year.of.Birth.1'] scatter_matrix(data_numeric[list_cols], alpha=0.5, figsize=(10, 10)) plt.show() """ Explanation: Масштабирование вещественных признаков. Попробуем как-то улучшить качество классификации. Для этого посмотрим на сами данные: End of explanation """ from sklearn.preprocessing import StandardScaler # place your code here scaler = StandardScaler() X_train_real_scaled = scaler.fit_transform(X_train_real_mean) X_test_real_scaled = scaler.fit_transform(X_test_real_mean) """ Explanation: Как видно из графиков, разные признаки очень сильно отличаются друг от друга по модулю значений (обратите внимание на диапазоны значений осей x и y). В случае обычной регрессии это никак не влияет на качество обучаемой модели, т.к. у меньших по модулю признаков будут большие веса, но при использовании регуляризации, которая штрафует модель за большие веса, регрессия, как правило, начинает работать хуже. В таких случаях всегда рекомендуется делать стандартизацию (масштабирование) признаков, для того чтобы они меньше отличались друг друга по модулю, но при этом не нарушались никакие другие свойства признакового пространства. 
При этом даже если итоговое качество модели на тесте уменьшается, это повышает её интерпретабельность, потому что новые веса имеют смысл "значимости" данного признака для итоговой классификации. Стандартизация осуществляется посредством вычета из каждого признака среднего значения и нормировки на выборочное стандартное отклонение: $$ x^{scaled}{id} = \dfrac{x{id} - \mu_d}{\sigma_d}, \quad \mu_d = \frac{1}{N} \sum_{i=1}^l x_{id}, \quad \sigma_d = \sqrt{\frac{1}{N-1} \sum_{i=1}^l (x_{id} - \mu_d)^2} $$ Задание 1.5. Масштабирование вещественных признаков. По аналогии с вызовом one-hot encoder примените масштабирование вещественных признаков для обучающих и тестовых выборок X_train_real_zeros и X_test_real_zeros, используя класс StandardScaler и методы StandardScaler.fit_transform(...) StandardScaler.transform(...) Сохраните ответ в переменные X_train_real_scaled и X_test_real_scaled соответственно End of explanation """ data_numeric_scaled = pd.DataFrame(X_train_real_scaled, columns=numeric_cols) list_cols = ['Number.of.Successful.Grant.1', 'SEO.Percentage.2', 'Year.of.Birth.1'] scatter_matrix(data_numeric_scaled[list_cols], alpha=0.5, figsize=(10, 10)) plt.show() """ Explanation: Сравнение признаковых пространств. Построим такие же графики для преобразованных данных: End of explanation """ def write_answer_2(auc): with open("preprocessing_lr_answer2.txt", "w") as fout: fout.write(str(auc)) # place your code here train_mean = pd.DataFrame(np.hstack((X_train_real_scaled, X_train_cat_oh))) test_mean = pd.DataFrame(np.hstack((X_test_real_scaled, X_test_cat_oh))) estimator = LogisticRegression() optimizer_mean = GridSearchCV(estimator, param_grid, cv=cv) optimizer_mean.fit(train_mean,y_train) plot_scores(optimizer_mean) print(optimizer_mean.best_params_) score_mean = roc_auc_score(y_test, optimizer_mean.predict_proba(test_mean)[:,1]) write_answer_2(score_mean) score_mean """ Explanation: Как видно из графиков, мы не поменяли свойства признакового пространства: гистограммы распределений значений признаков, как и их scatter-plots, выглядят так же, как и до нормировки, но при этом все значения теперь находятся примерно в одном диапазоне, тем самым повышая интерпретабельность результатов, а также лучше сочетаясь с идеологией регуляризации. Задание 2. Сравнение качества классификации до и после масштабирования вещественных признаков. Обучите ещё раз регрессию и гиперпараметры на новых признаках, объединив их с закодированными категориальными. Проверьте, был ли найден оптимум accuracy по гиперпараметрам во время кроссвалидации. Получите значение ROC AUC на тестовой выборке, сравните с лучшим результатом, полученными ранее. Запишите полученный ответ в файл при помощи функции write_answer_2. 
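One note, sketched here to stay consistent with the earlier warning that transformation parameters should be estimated on the training data only: the scaler would normally be fit on the training split and then only applied (not re-fit) to the test split, for example:

```python
scaler = StandardScaler()
X_train_real_scaled = scaler.fit_transform(X_train_real_mean)  # fit on the training part only
X_test_real_scaled = scaler.transform(X_test_real_mean)        # reuse the same parameters on test
```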
End of explanation """ np.random.seed(0) """Сэмплируем данные из первой гауссианы""" data_0 = np.random.multivariate_normal([0,0], [[0.5,0],[0,0.5]], size=40) """И из второй""" data_1 = np.random.multivariate_normal([0,1], [[0.5,0],[0,0.5]], size=40) """На обучение берём 20 объектов из первого класса и 10 из второго""" example_data_train = np.vstack([data_0[:20,:], data_1[:10,:]]) example_labels_train = np.concatenate([np.zeros((20)), np.ones((10))]) """На тест - 20 из первого и 30 из второго""" example_data_test = np.vstack([data_0[20:,:], data_1[10:,:]]) example_labels_test = np.concatenate([np.zeros((20)), np.ones((30))]) """Задаём координатную сетку, на которой будем вычислять область классификации""" xx, yy = np.meshgrid(np.arange(-3, 3, 0.02), np.arange(-3, 3, 0.02)) """Обучаем регрессию без балансировки по классам""" optimizer = GridSearchCV(LogisticRegression(), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train, example_labels_train) """Строим предсказания регрессии для сетки""" Z = optimizer.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') """Считаем AUC""" auc_wo_class_weights = roc_auc_score(example_labels_test, optimizer.predict_proba(example_data_test)[:,1]) plt.title('Without class weights') plt.show() print('AUC: %f'%auc_wo_class_weights) """Для второй регрессии в LogisticRegression передаём параметр class_weight='balanced'""" optimizer = GridSearchCV(LogisticRegression(class_weight='balanced') , param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train, example_labels_train) Z = optimizer.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') auc_w_class_weights = roc_auc_score(example_labels_test, optimizer.predict_proba(example_data_test)[:,1]) plt.title('With class weights') plt.show() print('AUC: %f'%auc_w_class_weights) """ Explanation: Балансировка классов. Алгоритмы классификации могут быть очень чувствительны к несбалансированным классам. Рассмотрим пример с выборками, сэмплированными из двух гауссиан. Их мат. ожидания и матрицы ковариации заданы так, что истинная разделяющая поверхность должна проходить параллельно оси x. Поместим в обучающую выборку 20 объектов, сэмплированных из 1-й гауссианы, и 10 объектов из 2-й. После этого обучим на них линейную регрессию, и построим на графиках объекты и области классификации. End of explanation """ print(np.sum(y_train==0)) print(np.sum(y_train==1)) """ Explanation: Как видно, во втором случае классификатор находит разделяющую поверхность, которая ближе к истинной, т.е. меньше переобучается. Поэтому на сбалансированность классов в обучающей выборке всегда следует обращать внимание. 
Посмотрим, сбалансированны ли классы в нашей обучающей выборке: End of explanation """ def write_answer_3(auc_1, auc_2): auc = (auc_1 + auc_2) / 2 with open("preprocessing_lr_answer3.txt", "w") as fout: fout.write(str(auc)) estimator = LogisticRegression(class_weight='balanced') X_train = pd.DataFrame(train_mean) optimizer_mean = GridSearchCV(estimator, param_grid, cv=cv) optimizer_mean.fit(X_train,y_train) auc_1 = roc_auc_score(y_test, optimizer_mean.predict_proba(test_mean)[:,1]) print auc_1 np.random.seed(0) number_to_add = np.sum(y_train == 0) - np.sum(y_train == 1) indices_to_add = np.random.randint(np.sum(y_train == 1), size = number_to_add) #y_t = pd.DataFrame(y_train) #X_train[y_train.as_matrix() == 1,:] #indices_to_add.shape X_train_to_add = X_train.iloc[indices_to_add,:] y_train_to_add = y_train.iloc[indices_to_add] X_train = pd.concat([X_train, X_train_to_add]) y_train = pd.concat([y_train, y_train_to_add]) optimizer_mean = GridSearchCV(estimator, param_grid, cv=cv) optimizer_mean.fit(X_train,y_train) auc_2 = roc_auc_score(y_test, optimizer_mean.predict_proba(test_mean)[:,1]) print auc_2 write_answer_3(auc_1, auc_2) """ Explanation: Видно, что нет. Исправить ситуацию можно разными способами, мы рассмотрим два: - давать объектам миноритарного класса больший вес при обучении классификатора (рассмотрен в примере выше) - досэмплировать объекты миноритарного класса, пока число объектов в обоих классах не сравняется Задание 3. Балансировка классов. Обучите логистическую регрессию и гиперпараметры с балансировкой классов, используя веса (параметр class_weight='balanced' регрессии) на отмасштабированных выборках, полученных в предыдущем задании. Убедитесь, что вы нашли максимум accuracy по гиперпараметрам. Получите метрику ROC AUC на тестовой выборке. Сбалансируйте выборку, досэмплировав в неё объекты из меньшего класса. Для получения индексов объектов, которые требуется добавить в обучающую выборку, используйте следующую комбинацию вызовов функций: np.random.seed(0) indices_to_add = np.random.randint(...) X_train_to_add = X_train[y_train.as_matrix() == 1,:][indices_to_add,:] После этого добавьте эти объекты в начало или конец обучающей выборки. Дополните соответствующим образом вектор ответов. Получите метрику ROC AUC на тестовой выборке, сравните с предыдущим результатом. Внесите ответы в выходной файл при помощи функции write_asnwer_3, передав в неё сначала ROC AUC для балансировки весами, а потом балансировки выборки вручную. End of explanation """ print('AUC ROC for classifier without weighted classes', auc_wo_class_weights) print('AUC ROC for classifier with weighted classes: ', auc_w_class_weights) """ Explanation: Стратификация выборок. Рассмотрим ещё раз пример с выборками из нормальных распределений. 
Посмотрим ещё раз на качество классификаторов, получаемое на тестовых выборках: End of explanation """ """Разделим данные по классам поровну между обучающей и тестовой выборками""" example_data_train = np.vstack([data_0[:20,:], data_1[:20,:]]) example_labels_train = np.concatenate([np.zeros((20)), np.ones((20))]) example_data_test = np.vstack([data_0[20:,:], data_1[20:,:]]) example_labels_test = np.concatenate([np.zeros((20)), np.ones((20))]) """Обучим классификатор""" optimizer = GridSearchCV(LogisticRegression(class_weight='balanced'), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train, example_labels_train) Z = optimizer.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') auc_stratified = roc_auc_score(example_labels_test, optimizer.predict_proba(example_data_test)[:,1]) plt.title('With class weights') plt.show() print('AUC ROC for stratified samples: ', auc_stratified) """ Explanation: Насколько эти цифры реально отражают качество работы алгоритма, если учесть, что тестовая выборка так же несбалансирована, как обучающая? При этом мы уже знаем, что алгоритм логистический регрессии чувствителен к балансировке классов в обучающей выборке, т.е. в данном случае на тесте он будет давать заведомо заниженные результаты. Метрика классификатора на тесте имела бы гораздо больший смысл, если бы объекты были разделы в выборках поровну: по 20 из каждого класса на обучени и на тесте. Переформируем выборки и подсчитаем новые ошибки: End of explanation """ def write_answer_4(auc): with open("preprocessing_lr_answer4.txt", "w") as fout: fout.write(str(auc)) # place your code here (X_train_real_zeros, X_test_real_zeros, y_train, y_test) = train_test_split(X_real_zeros, y, test_size=0.3, stratify=y, random_state=0) (X_train_cat_oh, X_test_cat_oh) = train_test_split(X_cat_oh, test_size=0.3, stratify=y, random_state=0) X_train_real_scaled = scaler.fit_transform(X_train_real_zeros) X_test_real_scaled = scaler.fit_transform(X_test_real_zeros) train_zero = pd.DataFrame(np.hstack((X_train_real_scaled, X_train_cat_oh))) test_zero = pd.DataFrame(np.hstack((X_test_real_scaled, X_test_cat_oh))) estimator = LogisticRegression(class_weight='balanced') optimizer_zero = GridSearchCV(estimator, param_grid, cv=cv) optimizer_zero.fit(train_zero,y_train) score_zero = roc_auc_score(y_test, optimizer_zero.predict_proba(test_zero)[:,1]) write_answer_4(score_zero) """ Explanation: Как видно, после данной процедуры ответ классификатора изменился незначительно, а вот качество увеличилось. При этом, в зависимости от того, как вы разбили изначально данные на обучение и тест, после сбалансированного разделения выборок итоговая метрика на тесте может как увеличиться, так и уменьшиться, но доверять ей можно значительно больше, т.к. она построена с учётом специфики работы классификатора. Данный подход является частным случаем т.н. метода стратификации. Задание 4. Стратификация выборки. По аналогии с тем, как это было сделано в начале задания, разбейте выборки X_real_zeros и X_cat_oh на обучение и тест, передавая в функцию train_test_split(...) дополнительно параметр stratify=y Также обязательно передайте в функцию переменную random_state=0. Выполните масштабирование новых вещественных выборок, обучите классификатор и его гиперпараметры при помощи метода кросс-валидации, делая поправку на несбалансированные классы при помощи весов. 
Убедитесь в том, что нашли оптимум accuracy по гиперпараметрам. Оцените качество классификатора метрике AUC ROC на тестовой выборке. Полученный ответ передайте функции write_answer_4 End of explanation """ from sklearn.preprocessing import PolynomialFeatures """Инициализируем класс, который выполняет преобразование""" transform = PolynomialFeatures(2) """Обучаем преобразование на обучающей выборке, применяем его к тестовой""" example_data_train_poly = transform.fit_transform(example_data_train) example_data_test_poly = transform.transform(example_data_test) """Обращаем внимание на параметр fit_intercept=False""" optimizer = GridSearchCV(LogisticRegression(class_weight='balanced', fit_intercept=False), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train_poly, example_labels_train) Z = optimizer.predict(transform.transform(np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') plt.title('With class weights') plt.show() """ Explanation: Теперь вы разобрались с основными этапами предобработки данных для линейных классификаторов. Напомним основные этапы: - обработка пропущенных значений - обработка категориальных признаков - стратификация - балансировка классов - масштабирование Данные действия с данными рекомендуется проводить всякий раз, когда вы планируете использовать линейные методы. Рекомендация по выполнению многих из этих пунктов справедлива и для других методов машинного обучения. Трансформация признаков. Теперь рассмотрим способы преобразования признаков. Существует достаточно много различных способов трансформации признаков, которые позволяют при помощи линейных методов получать более сложные разделяющие поверхности. Самым базовым является полиномиальное преобразование признаков. Его идея заключается в том, что помимо самих признаков вы дополнительно включаете набор все полиномы степени $p$, которые можно из них построить. Для случая $p=2$ преобразование выглядит следующим образом: $$ \phi(x_i) = [x_{i,1}^2, ..., x_{i,D}^2, x_{i,1}x_{i,2}, ..., x_{i,D}, x_{i,D-1}, x_{i,1}, ..., x_{i,D}, 1] $$ Рассмотрим принцип работы данных признаков на данных, сэмплированных их гауссиан: End of explanation """ print(example_data_train_poly.shape) """ Explanation: Видно, что данный метод преобразования данных уже позволяет строить нелинейные разделяющие поверхности, которые могут более тонко подстраиваться под данные и находить более сложные зависимости. Число признаков в новой модели: End of explanation """ transform = PolynomialFeatures(11) example_data_train_poly = transform.fit_transform(example_data_train) example_data_test_poly = transform.transform(example_data_test) optimizer = GridSearchCV(LogisticRegression(class_weight='balanced', fit_intercept=False), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train_poly, example_labels_train) Z = optimizer.predict(transform.transform(np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') plt.title('Corrected class weights') plt.show() """ Explanation: Но при этом одновременно данный метод способствует более сильной способности модели к переобучению из-за быстрого роста числа признаком с увеличением степени $p$. 
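To make that growth explicit, a quick sketch on the same two-feature toy data:

```python
# Sketch: number of generated polynomial features for several degrees p
for p in (2, 5, 11):
    print(p, PolynomialFeatures(p).fit_transform(example_data_train).shape[1])
```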
Рассмотрим пример с $p=11$: End of explanation """ print(example_data_train_poly.shape) """ Explanation: Количество признаков в данной модели: End of explanation """ def write_answer_5(auc): with open("preprocessing_lr_answer5.txt", "w") as fout: fout.write(str(auc)) # place your code here transform = PolynomialFeatures(2) X_train_real_zeros_poly = transform.fit_transform(X_train_real_zeros) X_test_real_zeros_poly = transform.transform(X_test_real_zeros) X_train_real_scaled = scaler.fit_transform(X_train_real_zeros_poly) X_test_real_scaled = scaler.fit_transform(X_test_real_zeros_poly) train_zero = pd.DataFrame(np.hstack((X_train_real_scaled, X_train_cat_oh))) test_zero = pd.DataFrame(np.hstack((X_test_real_scaled, X_test_cat_oh))) estimator = LogisticRegression(class_weight='balanced', fit_intercept=False) optimizer_zero = GridSearchCV(estimator, param_grid, cv=cv) optimizer_zero.fit(train_zero,y_train) score_zero = roc_auc_score(y_test, optimizer_zero.predict_proba(test_zero)[:,1]) print score_zero write_answer_5(score_zero) """ Explanation: Задание 5. Трансформация вещественных признаков. Реализуйте по аналогии с примером преобразование вещественных признаков модели при помощи полиномиальных признаков степени 2 Постройте логистическую регрессию на новых данных, одновременно подобрав оптимальные гиперпараметры. Обращаем внимание, что в преобразованных признаках уже присутствует столбец, все значения которого равны 1, поэтому обучать дополнительно значение $b$ не нужно, его функцию выполняет один из весов $w$. В связи с этим во избежание линейной зависимости в датасете, в вызов класса логистической регрессии требуется передавать параметр fit_intercept=False. Для обучения используйте стратифицированные выборки с балансировкой классов при помощи весов, преобразованные признаки требуется заново отмасштабировать. Получите AUC ROC на тесте и сравните данный результат с использованием обычных признаков. Передайте полученный ответ в функцию write_answer_5. End of explanation """ def write_answer_6(features): with open("preprocessing_lr_answer6.txt", "w") as fout: fout.write(" ".join([str(num) for num in features])) # place your code here X_train_real_scaled = scaler.fit_transform(X_train_real_zeros) X_test_real_scaled = scaler.fit_transform(X_test_real_zeros) train_zero = pd.DataFrame(np.hstack((X_train_real_scaled, X_train_cat_oh))) test_zero = pd.DataFrame(np.hstack((X_test_real_scaled, X_test_cat_oh))) estimator = LogisticRegression(class_weight='balanced', penalty='l1') optimizer_zero = GridSearchCV(estimator, param_grid, cv=cv) optimizer_zero.fit(train_zero,y_train) score_zero = roc_auc_score(y_test, optimizer_zero.predict_proba(test_zero)[:,1]) print score_zero write_answer_6(score_zero) zero_features = [x for x in np.where(optimizer_zero.best_estimator_.coef_[0] == 0)[0] if x < 14] print zero_features write_answer_6(zero_features) """ Explanation: Регрессия Lasso. К логистической регрессии также можно применить L1-регуляризацию (Lasso), вместо регуляризации L2, которая будет приводить к отбору признаков. Вам предлагается применить L1-регуляцию к исходным признакам и проинтерпретировать полученные результаты (применение отбора признаков к полиномиальным так же можно успешно применять, но в нём уже будет отсутствовать компонента интерпретации, т.к. смысловое значение оригинальных признаков известно, а полиномиальных - уже может быть достаточно нетривиально). Для вызова логистической регрессии с L1-регуляризацией достаточно передать параметр penalty='l1' в инициализацию класса. Задание 6. 
Отбор признаков при помощи регрессии Lasso. Обучите регрессию Lasso на стратифицированных отмасштабированных выборках, используя балансировку классов при помощи весов. Получите ROC AUC регрессии, сравните его с предыдущими результатами. Найдите номера вещественных признаков, которые имеют нулевые веса в итоговой модели. Передайте их список функции write_answer_6. End of explanation """
harshays/papers
graph_matching/graph_matching_notes.ipynb
mit
from IPython.display import IFrame IFrame("./projection_onto_bistochastic_matrices.pdf", width=800, height=500) """ Explanation: Notes Permuation matrices and graphs $P$ obtained by permuting rows of an identity matrix. $N!$ possile permutations possible of an identity matrix. $PA$ permutes the $i^{th}$ row of A to $\pi(i^{th})$ row of $PA$. $AP$ moves the $i^{th}$ column of $A$ to $\pi(i^{th})$ column of $AP$. $PP^T = P^TP = I$ so $P^T = P^{-1}$ $Me_j$ selects the $j^{th}$ column of $M$. $e^T_iM$ selects the $i^{th}$ row of $M$. $e^T_iMe_j$ selects the $i^{th}$ row of $j^{th}$ column, which is equal to $M_{ij}$ let $A_1$ and $A_2$ be the adjacency matrices of two isomorphic graphs with permutation $\pi_A$. Edge $(i,j)$ in $A_1$ corresponds to $(\pi_A(i),\pi_A(j))$ in $A_2$, so $$(A_2){(\pi_A(i),\pi_A(j))} = e^T_iA_1e_j$$ $$(Pe_i)^TA_1(Pe_j) = e^T{i}A_1e_{j}$$ more generally, $A_2 = PA_1P^T$, which is equivalent to $A_2P=PA_1$ Projection onto Bistochastic Matrices End of explanation """ IFrame("./wip/bounding_erdos_renyi/main.pdf", width=800, height=500) """ Explanation: Matrix Concentration Inequalities End of explanation """ import numpy as np import igraph as ig import matplotlib.pyplot as plt %matplotlib inline def get_graph(n, m): num_edges = int(round(n*m)) g = ig.Graph.Erdos_Renyi(n, m=num_edges) p = ig.RainbowPalette(num_edges) g.es['color'] = [p.get(idx) for idx in xrange(num_edges)] return g def get_ones_graph(n): J = np.ones((n,n)) return ig.Graph.Adjacency(J.tolist(), mode=ig.ADJ_UNDIRECTED) def adj_mat(g): return np.matrix(g.get_adjacency().data) def get_kronecker_graph(g1, g2, graph_first=True): # setup graph = g1 if graph_first else g2 ones = g2 if graph_first else g1 p = len(g2.vs) # map colors to idx eid_color_map = {} for e, col in zip(graph.es, graph.es['color']): eid_color_map[(e.source, e.target)] = eid_color_map[(e.target, e.source)] = col # kron ak = np.kron(adj_mat(g1), adj_mat(g2)) gk = ig.Graph.Adjacency(ak.tolist(), mode=ig.ADJ_UNDIRECTED) # map kron edge to color for edge in gk.es: i, j = edge.source, edge.target if graph_first: gi, gj = (i)//p, (j)//p else: gi, gj = i % p, j % p edge['color'] = eid_color_map[(gi, gj)] return gk def plot_graph(graph, **kw2): kw = dict(bbox=(150,150), vertex_size=7, vertex_color='gray', edge_width=1) if 'color' in graph.es.attributes(): kw['edge_color'] =graph.es['color'] kw.update(kw2) return ig.plot(graph, **kw) G2 = get_graph(2,.5) G3 = get_graph(3,1) J2 = get_ones_graph(2) J3 = get_ones_graph(3) J = lambda p: get_ones_graph(p) plot_graph(G2) # G2 kron J2 gk = get_kronecker_graph(G2, J2) print (adj_mat(gk)) plot_graph(gk) # G2 kron J3 gk = get_kronecker_graph(G2, J(3)) print (adj_mat(gk)) plot_graph(gk) # J2 kron G2 plot_graph(get_kronecker_graph(J2, G2, graph_first=False)) plot_graph(G3) # G3 kron J2 G3J2 = get_kronecker_graph(G3, J(2)) plot_graph(G3J2) # J2 kron G3 J2G3 = get_kronecker_graph(J2, G3, graph_first=False) plot_graph(J2G3) """ Explanation: Visualizing Erdos Renyi Kronecker Products End of explanation """
gschivley/ERCOT_power
Raw Data/ERCOT/Hourly wind generation/Exploring hourly wind data.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np import seaborn as sns fn2009 = 'rpt.00013424.0000000000000000.20141016.182537070.ERCOT_2009_Hourly_Wind_Output.xls' fn2015 = 'rpt.00013424.0000000000000000.ERCOT_2015_Hourly_Wind_Output.xlsx' df_2009 = pd.read_excel(fn2009, index_col=0, sn='2009') df_2015 = pd.read_excel(fn2015, index_col=0, sn='2015') df_2009.head() df_2009['hour'] = df_2009.index.hour df_2009['year'] = df_2009.index.year df_2015.head() df_2015['hour'] = df_2015.index.hour df_2015['year'] = df_2015.index.year df = """ Explanation: Hourly wind generation Looks like the column headings changed from year to year. We'll have to fix this. End of explanation """ sns.factorplot('hour', '% Installed Wind Capacity', data=df_2009, aspect=1.5) plt.title('2009 Wind Capacity by Hour of Day') sns.factorplot('hour', 'Wind Output, % of Installed', data=df_2015, aspect=1.5) plt.title('2015 Wind Capacity by Hour of Day') """ Explanation: Wind capacity factor The figures below show average CF of installed wind in 2009 and 2015. Looks like they got ~5 percentage points up over that time. End of explanation """ sns.factorplot('hour', 'Wind % of ERCOT Load', data=df_2009, aspect=1.5) plt.title('2009 Wind as fraction of total load') sns.factorplot('hour', 'Wind Output, % of Load', data=df_2015, aspect=1.5) plt.title('2015 Wind as fraction of total load') """ Explanation: Wind as % of total ERCOT load The figures below show average values of wind as a percent of total ERCOT load by hour in 2009 and 2015. This is a much bigger difference - 2015 values are about double 2009 values. End of explanation """ import glob files = glob.glob('*.xls') files.extend(glob.glob('*.xlsx')) # df = pd.DataFrame() # for fn in files: # temp_df = pd.read_excel(fn, sn='numbers', index_col=0) # if temp_df.index.values[0] != pd.Timestamp: # print fn # try: # df = pd.concat[] df = pd.concat([pd.read_excel(fn, sn='numbers', index_col=0) for fn in files]) df.sort_index(inplace=True) df.head() df.tail() df = df.iloc[:-1,:] df.tail() cols = df_2009.columns[:-2] cols df_final = df.loc[:,cols] df_final.head() df_final.dtypes df_final.plot(y='Total Wind Installed, MW', use_index=True) sns.jointplot('ERCOT Load, MW', 'Total Wind Output, MW', data=df_final, kind='hex') """ Explanation: Try reading in all .xls and .xlsx files End of explanation """ import os filename = 'ERCOT wind data.csv' path = '../../../Clean Data' fullpath = os.path.join(path, filename) df_final.to_csv(fullpath) """ Explanation: Export the clean ERCOT wind and load data End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/csiro-bom/cmip6/models/sandbox-1/seaice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'csiro-bom', 'sandbox-1', 'seaice') """ Explanation: ES-DOC CMIP6 Model Properties - Seaice MIP Era: CMIP6 Institute: CSIRO-BOM Source ID: SANDBOX-1 Topic: Seaice Sub-Topics: Dynamics, Thermodynamics, Radiative Processes. Properties: 80 (63 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:56 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --&gt; Model 2. Key Properties --&gt; Variables 3. Key Properties --&gt; Seawater Properties 4. Key Properties --&gt; Resolution 5. Key Properties --&gt; Tuning Applied 6. Key Properties --&gt; Key Parameter Values 7. Key Properties --&gt; Assumptions 8. Key Properties --&gt; Conservation 9. Grid --&gt; Discretisation --&gt; Horizontal 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Seaice Categories 12. Grid --&gt; Snow On Seaice 13. Dynamics 14. Thermodynamics --&gt; Energy 15. Thermodynamics --&gt; Mass 16. Thermodynamics --&gt; Salt 17. Thermodynamics --&gt; Salt --&gt; Mass Transport 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics 19. Thermodynamics --&gt; Ice Thickness Distribution 20. Thermodynamics --&gt; Ice Floe Size Distribution 21. Thermodynamics --&gt; Melt Ponds 22. Thermodynamics --&gt; Snow Processes 23. Radiative Processes 1. Key Properties --&gt; Model Name of seaice model used. 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of sea ice model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.variables.prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea ice temperature" # "Sea ice concentration" # "Sea ice thickness" # "Sea ice volume per grid cell area" # "Sea ice u-velocity" # "Sea ice v-velocity" # "Sea ice enthalpy" # "Internal ice stress" # "Salinity" # "Snow temperature" # "Snow depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Variables List of prognostic variable in the sea ice model. 2.1. 
Prognostic Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the sea ice component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS-10" # "Constant" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Seawater Properties Properties of seawater relevant to sea ice 3.1. Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Ocean Freezing Point Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant seawater freezing point, specify this value. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Resolution Resolution of the sea ice grid 4.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning applied to sea ice model component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Target Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Simulations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 *Which simulations had tuning applied, e.g. all, not historical, only pi-control? * End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Metrics Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any observed metrics used in tuning model/parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.5. Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Which variables were changed during the tuning process? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ice strength (P*) in units of N m{-2}" # "Snow conductivity (ks) in units of W m{-1} K{-1} " # "Minimum thickness of ice created in leads (h0) in units of m" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Key Parameter Values Values of key parameters 6.1. Typical Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N What values were specificed for the following parameters if used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Additional Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.description') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Assumptions Assumptions made in the sea ice model 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General overview description of any key assumptions made in this model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. 
On Diagnostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Missing Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the sea ice component 8.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Provide a general description of conservation methodology. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.properties') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Mass" # "Salt" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Properties Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in sea ice by the numerical schemes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.4. Was Flux Correction Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does conservation involved flux correction? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. Corrected Conserved Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Ocean grid" # "Atmosphere Grid" # "Own Grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Grid --&gt; Discretisation --&gt; Horizontal Sea ice discretisation in the horizontal 9.1. 
Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Grid on which sea ice is horizontal discretised? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Structured grid" # "Unstructured grid" # "Adaptive grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Grid Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the type of sea ice grid? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite differences" # "Finite elements" # "Finite volumes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.3. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the advection scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. Thermodynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model thermodynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.5. Dynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model dynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.6. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional horizontal discretisation details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Zero-layer" # "Two-layers" # "Multi-layers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Sea ice vertical properties 10.1. Layering Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 10.2. Number Of Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using multi-layers specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional vertical grid details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Seaice Categories What method is used to represent sea ice categories ? 11.1. Has Mulitple Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Set to true if the sea ice model has multiple sea ice categories. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Number Of Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Category Limits Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify each of the category limits. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Ice Thickness Distribution Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the sea ice thickness distribution scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.other') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Other Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Grid --&gt; Snow On Seaice Snow on sea ice details 12.1. Has Snow On Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is snow on ice represented in this model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 12.2. 
Number Of Snow Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels of snow on ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Snow Fraction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how the snow fraction on sea ice is determined End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.4. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional details related to snow on ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.horizontal_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamics Sea Ice Dynamics 13.1. Horizontal Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of horizontal advection of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Transport In Thickness Space Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice transport in thickness space (i.e. in thickness categories)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Hibler 1979" # "Rothrock 1975" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Ice Strength Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which method of sea ice strength formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.redistribution') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Rafting" # "Ridging" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which processes can redistribute sea ice (including thickness)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.rheology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Free-drift" # "Mohr-Coloumb" # "Visco-plastic" # "Elastic-visco-plastic" # "Elastic-anisotropic-plastic" # "Granular" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Rheology Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Rheology, what is the ice deformation formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice latent heat (Semtner 0-layer)" # "Pure ice latent and sensible heat" # "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)" # "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Thermodynamics --&gt; Energy Processes related to energy in sea ice thermodynamics 14.1. Enthalpy Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the energy formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice" # "Saline ice" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Thermal Conductivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What type of thermal conductivity is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Conduction fluxes" # "Conduction and radiation heat fluxes" # "Conduction, radiation and latent heat transport" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.3. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of heat diffusion? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heat Reservoir" # "Thermal Fixed Salinity" # "Thermal Varying Salinity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.4. Basal Heat Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method by which basal ocean heat flux is handled? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.5. Fixed Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.6. Heat Content Of Precipitation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which the heat content of precipitation is handled. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.7. 
Precipitation Effects On Salinity Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Thermodynamics --&gt; Mass Processes related to mass in sea ice thermodynamics 15.1. New Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which new sea ice is formed in open water. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Ice Vertical Growth And Melt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs the vertical growth and melt of sea ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Floe-size dependent (Bitz et al 2001)" # "Virtual thin ice melting (for single-category)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Ice Lateral Melting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice lateral melting? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.4. Ice Surface Sublimation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs sea ice surface sublimation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.5. Frazil Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method of frazil ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16. Thermodynamics --&gt; Salt Processes related to salt in sea ice thermodynamics. 16.1. Has Multiple Sea Ice Salinities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16.2. 
Sea Ice Salinity Thermal Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does sea ice salinity impact the thermal properties of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Thermodynamics --&gt; Salt --&gt; Mass Transport Mass transport of salt 17.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the mass transport of salt calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 17.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics Salt thermodynamics 18.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the thermodynamic calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 18.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Virtual (enhancement of thermal conductivity, thin ice melting)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Thermodynamics --&gt; Ice Thickness Distribution Ice thickness distribution details. 19.1. 
Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice thickness distribution represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Parameterised" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Thermodynamics --&gt; Ice Floe Size Distribution Ice floe-size distribution details. 20.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice floe-size represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Please provide further details on any parameterisation of floe-size. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 21. Thermodynamics --&gt; Melt Ponds Characteristics of melt ponds. 21.1. Are Included Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are melt ponds included in the sea ice model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flocco and Feltham (2010)" # "Level-ice melt ponds" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.2. Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What method of melt pond formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Albedo" # "Freshwater" # "Heat" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.3. Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What do melt ponds have an impact on? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22. Thermodynamics --&gt; Snow Processes Thermodynamic processes in snow on sea ice 22.1. Has Snow Aging Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has a snow aging scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Snow Aging Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow aging scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.3. Has Snow Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has snow ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.4. Snow Ice Formation Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow ice formation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.5. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the impact of ridging on snow cover? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Single-layered heat diffusion" # "Multi-layered heat diffusion" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.6. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the heat diffusion through snow methodology in sea ice thermodynamics? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Parameterized" # "Multi-band albedo" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Processes Sea Ice Radiative Processes 23.1. Surface Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method used to handle surface albedo. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Exponential attenuation" # "Ice radiation transmission per category" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. Ice Radiation Transmission Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method by which solar radiation through sea ice is handled. End of explanation """
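"""
Explanation: Every cell above repeats the same DOC.set_id / DOC.set_value pattern. Purely as an illustrative sketch (this loop is not part of pyesdoc, and the values below are placeholders rather than a real model description), the repetition could be driven from a dictionary of property identifiers that already appear in this notebook.
End of explanation
"""
# Placeholder values only - replace with the real model description before publishing.
seaice_properties = {
    'cmip6.seaice.key_properties.model.model_name': 'TODO: sea ice model name',
    'cmip6.seaice.key_properties.resolution.name': 'TODO: resolution name',
    'cmip6.seaice.grid.seaice_categories.has_mulitple_categories': True,
}

for property_id, value in seaice_properties.items():
    DOC.set_id(property_id)
    DOC.set_value(value)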
wikistat/Exploration
TutosRudim/Cal1-Python-SVDtoACP.ipynb
gpl-3.0
# Construire la matrice de notes import pandas as pd note=[[6,6,5,5.5],[8,8,8,8],[6,7,11,9.5],[14.5,14.5,15.5,15], [14,14,12,12.5],[11,10,5.5,7],[5.5,7,14,11.5],[13,12.5,8.5,9.5], [9,9.5,12.5,12]] dat=pd.DataFrame(note,index=["jean","alai","anni","moni","didi","andr","pier","brig","evel"], columns=["Math","Phys","Fran","Angl"]) dat # Importation des fonctions from sklearn.decomposition import PCA from sklearn.preprocessing import scale import numpy as np """ Explanation: <center> <a href="http://www.insa-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo-insa.jpg" style="float:left; max-width: 120px; display: inline" alt="INSA"/></a> <a href="http://wikistat.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/wikistat.jpg" style="float:right; max-width: 250px; display: inline" alt="Wikistat"/></a> </center> <a href="https://www.python.org/"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Python_logo_and_wordmark.svg/390px-Python_logo_and_wordmark.svg.png" style="max-width: 200px; display: inline" alt="Python"/></a> pour Statistique et Science des Données Anayse en Composantes Principales avec <a href="https://www.python.org/"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Python_logo_and_wordmark.svg/390px-Python_logo_and_wordmark.svg.png" style="max-width: 150px; display: inline" alt="Python"/></a> & <a href="http://scikit-learn.org/stable/#"><img src="http://scikit-learn.org/stable/_static/scikit-learn-logo-small.png" style="max-width: 180px; display: inline" alt="Scikit-Learn"/></a> Résumé: Ce calepin introduit l'utilisation de la librairie scikit-learn pour l'exploration statistique. Ceci est illustré par des exemples de mise en oeuvre de l'(ACP sur des données "jouet" puis sur des images élémentaires de caractères et enfin sur des données économiques sous une la forme particulière d'un cube ou tableauà trois indices. 1 Introduction 1.1 Scikit-learn vs. R L'objectif de ce tutoriel est d'introduire l'utilisation de la librairie scikit-learn de Python pour l'exploration de données multidimensionnelles. Seule l'utilisation directe des fonctions d'exploration est abordée. Se pose rapidement une question: quand utiliser scikit-learn de Python plutôt que R plus complet et plus simple d'emploi? Le choix repose sur les points suivants: - Attention cette librairie manipule des objets de classe array de numpy chargés en mémoire et donc de taille limitée par la RAM de l'ordinateur; de façon analogue R charge en RAM des objets de type data.frame. - Attention toujours, scikit-learn (0.18) ne reconnaît pas (ou pas encore ?) la classe DataFrame de pandas; scikit-learn utilise la classe array de numpy. C'est un problème pour la gestion de variables qualitatives complexes. Une variable binaire est simplement remplacée par un codage $(0,1)$ mais, en présence de plusieurs modalités, traiter celles-ci comme des entiers n'a pas de sens statistique et remplacer une variable qualitative par l'ensemble des indicatrices (dummy variables $(0,1)$) de ses modalités complique l'interprétation statistique. - Les implémentations en Python de certains algorithmes dans scikit-learn sont aussi efficaces (e.g. $k$-means), voire beaucoup plus efficaces pour des données volumineuses car utilisent implicitement les capacités de parallélisation. 
- R offre beaucoup plus de possibilités pour une exploration, des recherches et comparaisons, des interprétations mais les capacités de parallélisation de Python sont nettement plus performantes. Plus précisément, l'introduction de nouvelles librairies n'est pas ou peu contraintes dans R, alors que celle de nouvelles méthodes dans scikit-learn se fait sous contrôle d'un groupe qui en contrôle la pertinence et l'efficacité. En conséquences: - Préférer R et ses libraires si la présentation graphique des résultats et leur interprétation est prioritaire. - Pour l'emploi de méthodes (analyse factorielle discriminante, canonique, positionnement multidimensionnel...) pas codées en Python. - Préférer Python et scikit-learn pour mettre au point une chaîne de traitements (pipe line) opérationnelle de l'extraction à une analyse privilégiant la prévision brute à l'interprétation et pour des données quantitatives ou rendues quantitatives ("vectorisation" de corpus de textes). 1.2 Fonctions de scikit-learn La communauté qui développe cette librairie est très active, elle évolue rapidement. Ne pas hésiter à consulter la documentation pour des compléments. Voici une sélection de ses principales fonctionnalités. - Transformations (standardisation, discrétisation binaire, regroupement de modalités, imputations rudimentaires de données manquantes) , "vectorisation" de corpus de textes (encodage, catalogue, Tf-idf), images. - Exploration: ACP, classification non supervisée (mélanges gaussiens, propagation d'affinité, ascendante hiérarchique, SOM,...). Une fonction est aojutée pour l'Analyse des Correspondances. - Modélisation et apprentissage, voir le dépôt correspondant. 1.3 ACP avec scikit-learn L'objectif est d'illustrer la mise en oeuvre de l'analyse en composantes principales. Consulter la documentation et ses nombreux exemples pour plus de détails sur l'utilisation de scikit-learn. La librairie scikit-learn a principalement été conçu en vue de l'analyse de signaux. Aussi, de nombreuses options de l'ACP ne sont pas disponibles, notamment les graphiques usuels (biplot, cercle des corrélations...). En revanche des résultats sont liés à la version probabiliste de l'ACP sous hypothèse d'une distribution gaussienne multidimensionnelle des données. Attention, l'ACP est évidemment centrée mais par réduite. L'option n'est pas prévue et les variables doivent être réduites (fonction sklearn.preprocessing.scale) avant si c'est nécessaire. L'attribut transform désigne les composantes principales, sous-entendu: transformation par réduction de la dimension; n_components fixe le nombre de composantes retenues, par défaut toutes; l'attribut components_ contient les n_components vecteurs propres mais non normalisés, c'est-à-dire de norme carrée la valeur propre associée, et donc à utiliser pour représenter les variables. D'autres versions d'analyse en composantes principales sont proposées dans Scikit-learn: kernel PCA, sparse PCA, ICA... Plusieurs jeux de données élémentaires sont utilisés donyt celui "jouet" déjà vu en R afin de bien comprendre les sorties proposées par la fonction disponible. L'autre ensemble de données est un problème classique et simplifié de reconnaissance de caractères qui est inclus dans la librairie scikit-learn. 2. ACP de données "jouet" Les données sont celles de l'exemple introduction à l'ACP: les notes en maths, français, physique et anglais de 9 lycéens virtuels. L'objectif est de contrôler les résultats en les comparant avec ceux obtenus avec R. 
C'est une façon générique de procéder à l'approche d'un nouveau logiciel ou de fonctionnalités inconnues: traiter des donées triviales dont les résultats de l'analyse sont parfaitement maîtrisés. End of explanation """ pca = PCA() pca.fit(dat).explained_variance_ pca.singular_values_ """ Explanation: 2.1 Valeurs propres et valeurs singulières de l'ACP non réduite Attention Les valeurs singulières sont celles de la décomposition de la matrice centrée par rapport aux métriques usuelles: $(\bar{X}, I_p, I_n)$ alors que le diviseur de la variance est celui d'une estimation sans biais: $(n-1)$. Contrairement à beaucoup de logiciels, l'ACP de scikit-learn n'est pas réduite. End of explanation """ pca.singular_values_/np.sqrt(8) """ Explanation: Les valeurs singulières associées à l'ACP sont celles de $(\bar{X}, I_p, \frac{1}{n-1}I_n)$ End of explanation """ (pca.singular_values_/np.sqrt(8))**2 """ Explanation: Pour retrouver les valeurs propres de l'ACP à partir des valeurs singulières de la matrice centrée: End of explanation """ pca.components_.T """ Explanation: 2.2 Vecteurs propres de l'ACP non réduite End of explanation """ pca.transform(dat) """ Explanation: 2.3 Composantes principales de l'ACP non réduite End of explanation """ # Importations import matplotlib.pyplot as plt from sklearn import datasets %matplotlib inline # les données présentes dnas la librairie digits = datasets.load_digits() # Contenu et mode d'obtention print(digits) # Dimensions digits.images.shape # Sous forme d'un cube d'images 1797 x 8x8 print(digits.images) # Sous forme d'une matrice 1797 x 64 print(digits.data) # Label réel de chaque caractère print(digits.target) """ Explanation: Q Comparer avec les résultats obtenus en R. Tous les autres résultats (contributions, cossinus carrés, corrélations variables facteurs...) et surtout les graphes (éboulis, plans factoriels...) sont à construire car aucune fonction n'est disponible comme dans FactoMineR. C'est partièlement fait dans le jeu de données suivant et complété (biplot) dans les calepins plus completes des cas d'usage. 3 Les données "Caractères" Il s'agit d'explorer les données issues de la pixellisation de tracés de caractères dont les procédés d'obtention et prétraitement sont décrits sur le site de l'UCI (Lichman, 2013). Les chiffres ont été saisies sur des tablettes à l'intérieur de cadres de résolution $500\times 500$. Des procédures de normalisation, ré-échantillonnage spatial puis de lissage ont été appliquées. Chaque caractère apparaît finalement discrétisé sous la forme d'une matrice $8\times 8$ de pixels à 16 niveaux de gris et identifié par un label. Les données sont archivées sous la forme d'une matrice ou tableau à trois indices. Elles sont également archivées après vectorisation des images sous la forme d'une matrice à $p=64$ colonnes. L'étude du même type de données, mais nettement plus complexes (MNIST): 60 000 caractères représentés par des images de 784 pixels (26 $\times$ 26) fait l'objet d'un autre calepin. 
3.1 Prise en main des données End of explanation """ images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:8]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Chiffres: %i' % label) """ Explanation: Voici un aperçu des empilements des images à décrire puis ensuite en principe à discriminer: End of explanation """ from sklearn.decomposition import PCA X=digits.data y=digits.target target_name=[0,1,2,3,4,5,6,7,8,9] # définition de la commande pca = PCA() # Estimation, calcul des composantes principales C = pca.fit(X).transform(X) # Décroissance de la variance expliquée plt.plot(pca.explained_variance_ratio_) plt.show() """ Explanation: 3.2 Analyse en composantes principales End of explanation """ plt.boxplot(C[:,0:20]) plt.show() """ Explanation: Diagramme boîte des premières composantes principales. End of explanation """ plt.scatter(C[:,0], C[:,1], c=y, label=target_name) plt.show() """ Explanation: Q Quelle dimension retenir en principe? Représentation des caractères dans le premier plan principal. La représentation des variables (pixels) et le biplot n'ont pas grand intérêt pour ces données. End of explanation """ # attention aux indentations plt.figure() for c, i, target_name in zip("rgbcmykrgb",[0,1,2,3,4,5,6,7,8,9], target_name): plt.scatter(C[y == i,0], C[y == i,1], c=c, label=target_name) plt.legend() plt.title("ACP Digits") plt.show() """ Explanation: Le même graphique avec une légende mais moins de couleurs. End of explanation """ from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(1, figsize=(8, 6)) ax = Axes3D(fig, elev=-150, azim=110) ax.scatter(C[:, 0], C[:, 1], C[:, 2], c=y, cmap=plt.cm.Paired) ax.set_title("ACP: trois premieres composantes") ax.set_xlabel("Comp1") ax.w_xaxis.set_ticklabels([]) ax.set_ylabel("Comp2") ax.w_yaxis.set_ticklabels([]) ax.set_zlabel("Comp3") ax.w_zaxis.set_ticklabels([]) plt.show() """ Explanation: Graphique en trois dimensions. End of explanation """ # Importaiton des principals librairies et # Affichage des graphiques dans le notebook %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: 4. Données "cubiques" de l'OCDE 4.1 Introduction Objectif L'objectif de cette section est l'exploration de données socio-économiques plus complexes. La principale spécificité de ces données est de se présenter sous la forme d'un cube de données ou tableau à trois entrées: le numéro de ligne, le numéro de variable et l'année d'observation de cette variable. Après une description classique, la mise en oeuvre de l'analyse en composantes principales avec python nécessite un effort particulier afin de produire les graphes adaptés à la structure particulière des données. Les données Les données sont issues de l'Observatoire de l'OCDE. Pour chaque pays membre et pour chacune des années 1975, 1977, 1979, 1981, on connaît les valeurs prises par les variables suivantes qui sont toutes des \emph{taux}~: - Taux brut de natalité, - Taux de chômage, - Pourcentage d'actifs dans le secteur primaire, - Pourcentage d'actifs dans le secteur secondaire, - produit intérieur brut (par habitant), - Formation brute de capital fixe (par habitant), - Hausse des prix, - Recettes courantes (par habitant), - Mortalité infantile, - Consommation de protéines animales (par habitant), - Consommation d'énergie (par habitant). Elles sont disponibles dans le fichier: ocdeR.dat. 
Les mêmes variables sont donc observées, sur les mêmes pays ou individus à quatre dates différentes. Plusieurs stratégies d'analyse sont possibles (tableau moyen, tableaux concaténés, meilleur compromis ou double ACP). La plus adaptée pour ces données est de considérer les observations des variables pour chacun des individus: pays $\times$ années. End of explanation """ ocde=pd.read_table("Data/ocdeR.dat",sep='\s+',index_col=0) ocde.head() """ Explanation: 4. 2 Lecture des données End of explanation """ ocde.mean() ocde["CNRJ"].hist(bins=20) plt.show() from pandas.plotting import scatter_matrix scatter_matrix(ocde, alpha=0.2, figsize=(15, 15), diagonal='kde') plt.show() """ Explanation: 4.3 Statistiques élémentaires Consulter rapidement ces résultats; Que dire à propos de la symétrie des distributions, de leur normalité, des valeurs atypiques. End of explanation """ from sklearn.decomposition import PCA from sklearn.preprocessing import scale # réduction ocdeS=scale(ocde) pca = PCA() cpOcde = pca.fit_transform(ocdeS) # Eboulis plt.plot(pca.explained_variance_ratio_) plt.show() plt.boxplot(cpOcde) plt.show() """ Explanation: Q Quel est le graphique ci-dessous? Que représentent les blocs dagonaux? Que dire des structures de corrélation? 4.3 Analyse en composantes principales Chaque pays étant observé 4 fois, la principale difficulté technique est de faire apparaître cette structure chronologique dans les graphique afin d'illustrer la dynamique économique de la période considérée. Q Justifier la nécessité de réduire. Q Pourqoi toutes les variables sont des taux? Choix de dimension End of explanation """ coord1=pca.components_[0]*np.sqrt(pca.explained_variance_[0]) coord2=pca.components_[1]*np.sqrt(pca.explained_variance_[1]) fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(1, 1, 1) for i, j, nom in zip(coord1,coord2, ocde.columns): plt.text(i, j, nom) plt.arrow(0,0,i,j,color='black') plt.axis((-1.2,1.2,-1.2,1.2)) # cercle c=plt.Circle((0,0), radius=1, color='gray', fill=False) ax.add_patch(c) plt.show() """ Explanation: Q Quel est le graphe ci-dessus. Que dire de la première composante? Quelle dimension choisir? Représentation des variables End of explanation """ plt.figure(figsize=(10,6)) for i, j, nom in zip(cpOcde[:,0], cpOcde[:,1], ocde.index): # color = int(i/4) plt.text(i, j, nom ,color="blue") plt.axis((-5,7,-4,4)) plt.show() """ Explanation: Q Interpréter chacun des deux premiers axes. Exo représenter le plan (2,3) et interpréter le 3ème axe. Représentation basique des individus End of explanation """ import matplotlib.patheffects as PathEffects comp_0 = 0 comp_1 = 1 cmap = plt.get_cmap("tab20") fig = plt.figure(figsize=(16,8)) ax = fig.add_subplot(1,1,1) for i,k in enumerate(np.arange(0,cpOcde.shape[0],4)): country =ocde.index[k] xs = cpOcde[k:k+4,comp_0] ys = cpOcde[k:k+4, comp_1] ax.plot(xs,ys, color=cmap(i), marker=".", markersize=15) txt = ax.text(xs[-4], ys[-4], country, horizontalalignment="left", verticalalignment="top", color=cmap(i), fontweight="bold", fontsize=15) # Add black line around text #txt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='black')]) ax.set_xlabel("PC%d" %comp_0, fontsize=20) ax.set_ylabel("PC%d" %comp_1, fontsize=20) plt.tight_layout() plt.show() """ Explanation: Représentation adaptée à ces données La structure particulière des données nécessite un graphique adapté. 
This is in fact the main objective of good data exploration: finding the graphical representation that reveals the whole structure of the data in a single view.
End of explanation
"""
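To complement the dimension-choice questions above, here is a minimal sketch (assuming the `pca` object fitted on the scaled table `ocdeS` is still in scope) that prints the cumulative share of variance explained by the principal components; a common rule of thumb is to keep enough components to reach about 90% of the variance, or to look for the elbow in the scree plot.
import numpy as np

# Cumulative share of variance explained by the principal components
# (assumes the `pca` object fitted on `ocdeS` above is still in scope).
cum_var = np.cumsum(pca.explained_variance_ratio_)
for k, v in enumerate(cum_var, start=1):
    print("first %d components: %.1f%% of the variance" % (k, 100 * v))
# Smallest number of components reaching 90% of the variance
print("components needed for 90%%: %d" % (np.argmax(cum_var >= 0.90) + 1))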
Jackporter415/phys202-2015-work
assignments/assignment06/DisplayEx01.ipynb
mit
from IPython.display import Image
from IPython.display import HTML
from IPython.display import IFrame
assert True # leave this to grade the import statements
"""
Explanation: Display Exercise 1
Imports
Put any imports needed to display rich output in the following cell:
End of explanation
"""
Image(url = 'http://newsroom.unl.edu/releases/downloadables/photo/20090923solenoid.jpg', embed = True, width = 600, height = 600)
assert True # leave this to grade the image display
"""
Explanation: Basic rich display
Find a Physics-related image on the internet and display it in this notebook using the Image object. Load it using the url argument to Image (don't upload the image to this server). Make sure to set the embed flag so the image is embedded in the notebook data. Set the width and height to 600px.
End of explanation
"""
%%html
<table>
<tr>
<th>Name</th>
<th>Symbol</th>
<th>Antiparticle</th>
<th>Charge (e)</th>
<th>Mass (MeV/$c^2$)</th>
</tr>
<tr>
<td> up </td>
<td> u </td>
<td> $\bar{u}$ </td>
<td> +$\frac{2}{3}$ </td>
<td> 1.5-3.3 </td>
</tr>
<tr>
<td> down </td>
<td> d </td>
<td> $\bar{d}$ </td>
<td> -$\frac{1}{3}$ </td>
<td> 3.5-6.0 </td>
</tr>
<tr>
<td> charm </td>
<td> c </td>
<td> $\bar{c}$ </td>
<td> +$\frac{2}{3}$ </td>
<td> 1,160-1,340 </td>
</tr>
<tr>
<td> strange </td>
<td> s </td>
<td> $\bar{s}$ </td>
<td> -$\frac{1}{3}$ </td>
<td> 70-130 </td>
</tr>
<tr>
<td> top </td>
<td> t </td>
<td> $\bar{t}$ </td>
<td> +$\frac{2}{3}$ </td>
<td> 169,100-173,300 </td>
</tr>
<tr>
<td> bottom </td>
<td> b </td>
<td> $\bar{b}$ </td>
<td> -$\frac{1}{3}$ </td>
<td> 4,130-4,370 </td>
</tr>
</table>
assert True # leave this here to grade the quark table
"""
Explanation: Use the HTML object to display HTML in the notebook that reproduces the table of Quarks on this page. This will require you to learn how to create HTML tables and then pass that to the HTML object for display. Don't worry about styling and formatting the table, but you should use LaTeX where appropriate.
End of explanation
"""
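IFrame is imported above but not exercised; as an illustration (the URL below is only an example, not part of the graded answer), it can embed a live web page in the notebook:
# Example only: embed a web page in the notebook (width and height are in pixels).
IFrame('https://en.wikipedia.org/wiki/Quark', width=800, height=400)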
fujii-team/Henbun
notebooks/Expert_GPR.ipynb
apache-2.0
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import Henbun as hb
"""
Explanation: Regression Demo
This notebook briefly describes how to carry out variational inference with Henbun.
Keisuke Fujii, 21st Nov. 2016
We show
+ an expert model with a Gaussian process prior that is much more flexible than simple Gaussian process regression
Import libraries
End of explanation
"""
X = np.linspace(0,6,150).reshape(-1,1)
Y = np.sin(0.1*X*X*X) + np.random.randn(*X.shape)*0.1
plt.figure(figsize=(4,3))
plt.plot(X,Y,'o')
"""
Explanation: Toy data
End of explanation
"""
# Any model should inherit hb.model.Model
class ExpertGPR(hb.model.Model):
    def setUp(self):
        """ Set up parameters and Data for this model.
        Model.setUp is immediately called after Model.__init__()
        """
        # Data should be stored in the hb.param.Data class.
        self.X = hb.param.Data(X)
        self.Y = hb.param.Data(Y)
        # Variational parameters.
        # We assume the posteriors of f_s, f_l, r are independent.
        self.q_s = hb.variationals.Gaussian(shape=X.shape, q_shape='fullrank')
        self.q_l = hb.variationals.Gaussian(shape=X.shape, q_shape='fullrank')
        self.q_r = hb.variationals.Gaussian(shape=X.shape, q_shape='fullrank')
        # Kernel objects for GPR.
        self.kern_s = hb.gp.kernels.UnitRBF(np.ones(1)*0.2)
        self.kern_l = hb.gp.kernels.UnitRBF(np.ones(1)*1)
        self.kern_r = hb.gp.kernels.UnitRBF(np.ones(1)*1)
        # Since our kernel does not contain the variance term, we multiply by hand.
        # The variance parameter should be positive.
        # It is possible to constrain k_var to stay in positive space by setting
        # the transform option.
        self.k_var = hb.param.Variable(shape=[1], transform=hb.transforms.positive)
        self.k_var_r = hb.param.Variable(shape=[1], transform=hb.transforms.positive)
        # likelihood variance
        self.var = hb.param.Variable(shape=[1], transform=hb.transforms.positive)

    @hb.model.AutoOptimize()
    def ELBO(self):
        """ We calculate the ELBO that should be maximized in this method. """
        # f_s, f_l, f_r are the latent functions.
        # Here, we assign them as members of this class,
        # which makes it easy to draw the result later.
        self.f_s = tf.matmul(self.kern_s.Cholesky(self.X), self.q_s)
        self.f_l = tf.matmul(self.kern_l.Cholesky(self.X), self.q_l)
        self.f_r = tf.matmul(self.kern_r.Cholesky(self.X), self.q_r) * tf.sqrt(self.k_var_r)
        fraction = tf.sigmoid(self.f_r)
        self.f = (fraction * self.f_s + (1-fraction) * self.f_l)*self.k_var
        # The Kullback-Leibler divergence can be accessed by the self.KL() method.
        return tf.reduce_sum(hb.densities.gaussian(self.Y, self.f, self.var))\
                - self.KL()

    @hb.model.AutoOptimize()
    def ELBO_single(self):
        """ We carry out a usual GPR as an initial estimate. """
        # f_s is the latent function of the single-GP initial model.
        f_s = tf.matmul(self.kern_s.Cholesky(self.X), self.q_s)*self.k_var
        # The Kullback-Leibler divergence can be accessed by the self.KL() method.
        return tf.reduce_sum(hb.densities.gaussian(self.Y, f_s, self.var))\
                - self.KL()

model = ExpertGPR()
"""
Explanation: Construct a Henbun model
Here, we demonstrate the expert model with Henbun.
We assume three latent functions, one with a shorter lengthscale ($f_s(x)$) and another with a longer lengthscale ($f_l(x)$).
The last one, $r(x)$, has the largest lengthscale and represents the fraction of $f_s(x)$ and $f_l(x)$ contributed at each position, i.e.
$$ f(x) = \frac{1}{1+e^{-r(x)}} f_s(x) + \frac{1}{1+e^{r(x)}} f_l(x), $$
where $1/(1+e^{-r(x)})$ is the sigmoid weight computed as fraction = tf.sigmoid(self.f_r) in the ELBO method above.
End of explanation
"""
# We can change the configuration from the settings module
custom_config = hb.settings.get_settings()
custom_config.numerics.jitter_level = 3.0e-4
"""
Explanation: Train the Henbun model
Numeric issue
We adopt float64 computation, although float32 computation is the default.
The float type can be changed through the settings method.
Henbun adopts the config structure used in GPflow. See https://github.com/GPflow/GPflow/blob/master/doc/source/notebooks/settings.ipynb for the details.
End of explanation
"""
# During compilation, we use the temp_settings context manager
with hb.settings.temp_settings(custom_config):
    model.ELBO_single().compile(tf.train.AdamOptimizer(0.01))
    model.ELBO_single().optimize(maxiter=3000)

model.kern_s.lengthscales.value
"""
Explanation: Initial optimization
We make an initial estimate by fitting the data with a single Gaussian process.
End of explanation
"""
model.q_l.q_mu = model.q_s.q_mu.value
model.q_l.q_sqrt = model.q_s.q_sqrt.value
model.kern_l.lengthscales = model.kern_s.lengthscales.value + 0.2

# During compilation, we use the temp_settings context manager
with hb.settings.temp_settings(custom_config):
    # First, we need to compile the model
    model.ELBO().compile(tf.train.AdamOptimizer(0.001))
    # To evaluate this method with the current parameters, the run() method can be used.
    model.ELBO().run()
"""
Explanation: Adopt the initial estimates.
We copy the estimates obtained above into model.q_l and model.kern_l.
End of explanation
"""
from IPython import display

plt.figure(figsize=(10,3))
logf = []
for i in range(1000):
    try:
        # run 10 iterations
        model.ELBO().optimize(maxiter=10)
        obj = model.ELBO().run()
        logf.append(obj)
        # display
        if (i % 10) == 0:
            plt.clf()
            plt.subplot(1,2,1)
            plt.plot(logf, '-ko', markersize=3, linewidth=1)
            plt.ylabel('ELBO')
            plt.xlabel('iteration')
            ymin = np.percentile(logf, 10)
            ymax = np.max(logf) + (np.max(logf)-ymin)*0.1
            plt.ylim(ymin, ymax)
            # --- plot of the current fit ---
            plt.subplot(1,2,2)
            plt.scatter(X, Y, facecolors='none', edgecolors='b', label='data')
            for s in range(30):
                plt.plot(X, model.run(model.f), 'k', alpha=0.2)
            plt.ylim(-2,2)
            display.display(plt.gcf())
            display.clear_output(wait=True)
    except KeyboardInterrupt:
        break
"""
Explanation: Training
End of explanation
"""
plt.figure(figsize=(5,3))
plt.scatter(X, Y, facecolors='none', edgecolors='b', label='data')
for s in range(30):
    plt.plot(X, model.run(model.f), 'k', alpha=0.2)

plt.figure(figsize=(5,3))
for s in range(50):
    plt.plot(X, model.run(model.f_r), 'k', alpha=0.2)
plt.title('$f_r$')

plt.figure(figsize=(10,3))
for s in range(50):
    plt.subplot(1,2,1)
    plt.plot(X, model.run(model.f_s), 'k', alpha=0.2)
    plt.subplot(1,2,2)
    plt.plot(X, model.run(model.f_l), 'k', alpha=0.2)
plt.subplot(1,2,1)
plt.title('$f_s$')
plt.subplot(1,2,2)
plt.title('$f_l$')
"""
Explanation: Draw samples from the posterior
End of explanation
"""
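The mixture weight itself can also be inspected. The sketch below (reusing `model`, `X` and `model.f_r` from the cells above) pushes posterior samples of $f_r$ through the sigmoid; values close to 1 mark regions explained by the short-lengthscale GP and values close to 0 mark regions explained by the long-lengthscale GP.
# Posterior samples of the expert weight sigma(f_r) = 1 / (1 + exp(-f_r)).
plt.figure(figsize=(5,3))
for s in range(50):
    fraction_sample = 1.0 / (1.0 + np.exp(-model.run(model.f_r)))
    plt.plot(X, fraction_sample, 'k', alpha=0.2)
plt.ylim(0, 1)
plt.title(r'mixture weight $\sigma(f_r)$')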
jsjol/GaussianProcessRegressionForDiffusionMRI
notebooks/batch_run_SPARC.ipynb
bsd-3-clause
%load_ext autoreload %autoreload 2 import os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) import seaborn as sns import pickle import json import numpy as np import matplotlib.pyplot as plt import GPy import dipy.reconst.dti as dti from diGP.preprocessing_pipelines import get_SPARC_train_and_test from diGP.dataManipulations import DataHandler from diGP.model import GaussianProcessModel, get_default_kernel, get_default_independent_kernel from diGP.evaluation import get_SPARC_metrics %matplotlib inline with open('../config.json', 'r') as json_file: conf = json.load(json_file) data_paths = conf['SPARC']['data_paths'] q_test_path = conf['SPARC']['q_test_path'] source = 'gradient_20' gtab, data, voxelSize = get_SPARC_train_and_test(data_paths[source], data_paths['goldstandard'], q_test_path) from dipy.core.gradients import gradient_table bval_threshold = 1500 cutoff = gtab['train'].bvals < bval_threshold gtab['cutoff'] = gradient_table(bvals=gtab['train'].bvals[cutoff], bvecs=gtab['train'].bvecs[cutoff]) S0_tenmodel = dti.TensorModel(gtab['cutoff'], return_S0_hat=True) S0_tenfit = S0_tenmodel.fit(data['train'][:, :, None, cutoff]) b0_DTI = np.squeeze(S0_tenfit.S0_hat) b0 = np.squeeze(data['train'][:, :, gtab['train'].b0s_mask]) data['train'] /= b0_DTI[:, :, None] data['test'] /= b0_DTI[:, :, None] """ Explanation: Batch run on SPARC End of explanation """ mean = ['', 'DTI', 'MAPL'] n_max = [0, 2, 4, 6, 8] """ Explanation: Specify the various configurations to run. End of explanation """ tenmodel = dti.TensorModel(gtab['train']) tenfit = tenmodel.fit(data['train']) fitted = {'DTI': tenfit.predict(gtab['train'])} pred = {'DTI': tenfit.predict(gtab['test'])} """ Explanation: Make predictions and save results Fit regular DTI model End of explanation """ fitted['MAPL'] = np.load(os.path.join(data_paths[source], 'map_mri_train.npy')) pred['MAPL'] = np.load(os.path.join(data_paths[source], 'map_mri_test.npy')) """ Explanation: Load precomputed MAPL results End of explanation """ fitted[''] = np.zeros_like(fitted['DTI']) pred[''] = np.zeros_like(pred['DTI']) # Vary the maximum order of the Legendre polynomials, without spatial correlations for base_model in mean: for n in n_max: if base_model == '': name = 'GP_n{}_indep'.format(n) else: name = "{} + GP_n{}_indep".format(base_model, n) print('\nRunning {}'.format(name)) kernel = get_default_independent_kernel(spatial_dims=2, n_max=n) gp_model = GaussianProcessModel(gtab['train'], spatial_dims=2, kernel=kernel) gp_fit = gp_model.fit(data['train'], mean=fitted[base_model], voxel_size=voxelSize[0:2]) pred[name] = gp_fit.predict(gtab['test'], mean=pred[base_model], spatial_shape=data['test'].shape[0:2], voxel_size=voxelSize[0:2]) # Vary the maximum order of the Legendre polynomials, with spatial correlations for base_model in mean: for n in n_max: if base_model == '': name = 'GP_n{}'.format(n) else: name = "{} + GP_n{}".format(base_model, n) print('\nRunning {}'.format(name)) kernel = get_default_kernel(spatial_dims=2, n_max=n) gp_model = GaussianProcessModel(gtab['train'], spatial_dims=2, kernel=kernel) try: gp_fit = gp_model.fit(data['train'], mean=fitted[base_model], voxel_size=voxelSize[0:2]) except: continue pred[name] = gp_fit.predict(gtab['test'], mean=pred[base_model], spatial_shape=data['test'].shape[0:2], voxel_size=voxelSize[0:2]) with open(os.path.join(data_paths[source], 'batch_run_prediction_results.p'), 'wb') as fp: pickle.dump(pred, fp) """ Explanation: 
fitted['MAPL'] = np.load(os.path.join(data_paths[source], 'map_mri_train_iso.npy')) pred['MAPL'] = np.load(os.path.join(data_paths[source], 'map_mri_test_iso.npy')) Specify dummy model for the case with no mean function. End of explanation """ with open(os.path.join(data_paths[source], 'batch_run_prediction_results.p'), 'rb') as fp: pred = pickle.load(fp) NMSE_low = {} NMSE_high = {} for key in sorted(pred.keys()): this_NMSE_low, this_NMSE_high, _ = get_SPARC_metrics(gtab['test'], data['test'], pred[key], verbose=False) NMSE_low[key] = this_NMSE_low NMSE_high[key] = this_NMSE_high GP_low = np.array([NMSE_low['GP_n0'], NMSE_low['GP_n2'], NMSE_low['GP_n4'], NMSE_low['GP_n6'], NMSE_low['GP_n8']]) GP_high = np.array([NMSE_high['GP_n0'], NMSE_high['GP_n2'], NMSE_high['GP_n4'], NMSE_high['GP_n6'], NMSE_high['GP_n8']]) GP_indep_low = np.array([NMSE_low['GP_n0_indep'], NMSE_low['GP_n2_indep'], NMSE_low['GP_n4_indep'], NMSE_low['GP_n6_indep'], NMSE_low['GP_n8_indep']]) GP_indep_high = np.array([NMSE_high['GP_n0_indep'], NMSE_high['GP_n2_indep'], NMSE_high['GP_n4_indep'], NMSE_high['GP_n6_indep'], NMSE_high['GP_n8_indep']]) DTI_GP_low = np.array([NMSE_low['DTI + GP_n0'], NMSE_low['DTI + GP_n2'], NMSE_low['DTI + GP_n4'], NMSE_low['DTI + GP_n6'], NMSE_low['DTI + GP_n8']]) DTI_GP_high = np.array([NMSE_high['DTI + GP_n0'], NMSE_high['DTI + GP_n2'], NMSE_high['DTI + GP_n4'], NMSE_high['DTI + GP_n6'], NMSE_high['DTI + GP_n8']]) DTI_GP_indep_low = np.array([NMSE_low['DTI + GP_n0_indep'], NMSE_low['DTI + GP_n2_indep'], NMSE_low['DTI + GP_n4_indep'], NMSE_low['DTI + GP_n6_indep'], NMSE_low['DTI + GP_n8_indep']]) DTI_GP_indep_high = np.array([NMSE_high['DTI + GP_n0_indep'], NMSE_high['DTI + GP_n2_indep'], NMSE_high['DTI + GP_n4_indep'], NMSE_high['DTI + GP_n6_indep'], NMSE_high['DTI + GP_n8_indep']]) MAPL_GP_low = np.array([NMSE_low['MAPL + GP_n0'], NMSE_low['MAPL + GP_n2'], NMSE_low['MAPL + GP_n4'], NMSE_low['MAPL + GP_n6'], NMSE_low['MAPL + GP_n8']]) MAPL_GP_high = np.array([NMSE_high['MAPL + GP_n0'], NMSE_high['MAPL + GP_n2'], NMSE_high['MAPL + GP_n4'], NMSE_high['MAPL + GP_n6'], NMSE_high['MAPL + GP_n8']]) MAPL_GP_indep_low = np.array([NMSE_low['MAPL + GP_n0_indep'], NMSE_low['MAPL + GP_n2_indep'], NMSE_low['MAPL + GP_n4_indep'], NMSE_low['MAPL + GP_n6_indep'], NMSE_low['MAPL + GP_n8_indep']]) MAPL_GP_indep_high = np.array([NMSE_high['MAPL + GP_n0_indep'], NMSE_high['MAPL + GP_n2_indep'], NMSE_high['MAPL + GP_n4_indep'], NMSE_high['MAPL + GP_n6_indep'], NMSE_high['MAPL + GP_n8_indep']]) sns.set_style('white') fig = plt.figure() ax = plt.subplot(111) plt.plot(n_max, GP_low, 'b-', n_max, GP_indep_low, 'b--', 0, NMSE_low['DTI'], 'go', n_max, DTI_GP_low, 'g-', n_max, DTI_GP_indep_low, 'g--', 0, NMSE_low['MAPL'], 'ro', n_max, MAPL_GP_low, 'r-', n_max, MAPL_GP_indep_low, 'r--') plt.axis([-0.5, 8.5, 0, 0.25]) plt.xticks(n_max) plt.xlabel('Angular order') plt.ylabel('NMSE') # Shrink current axis by 20% to match size of the next figure box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig('NMSE_low.png', transparent=True) fig = plt.figure() ax = plt.subplot(111) plt.plot(n_max, GP_high, 'b-', label='GP') plt.plot(n_max, GP_indep_high, 'b--', label='GP (independent voxels)') plt.plot(0, NMSE_high['DTI'], 'go', label='DTI') plt.plot(n_max, DTI_GP_high, 'g-', label='DTI + GP') plt.plot(n_max, DTI_GP_indep_high, 'g--', label='DTI + GP (independent voxels)') plt.plot(0, NMSE_high['MAPL'], 'ro', 
label='MAPL') plt.plot(n_max, MAPL_GP_high, 'r-', label='MAPL + GP') plt.plot(n_max, MAPL_GP_indep_high, 'r--', label='MAPL + GP (independent voxels)') plt.axis([-0.5, 8.5, 0, 0.25]) plt.xticks(n_max) plt.xlabel('Angular order') plt.ylabel('NMSE') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) # Shrink current axis by 20% box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # Put a legend to the right of the current axis lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)); plt.savefig('NMSE_high.png', transparent=True, bbox_extra_artists=(lgd,), bbox_inches='tight') sns.set_style('whitegrid') fig, ax = plt.subplots(1, 2, figsize=(7, 4)) ax[0].plot(n_max, GP_low, 'b-', n_max, GP_indep_low, 'b--', 0, NMSE_low['DTI'], 'go', n_max, DTI_GP_low, 'g-', n_max, DTI_GP_indep_low, 'g--', 0, NMSE_low['MAPL'], 'ro', n_max, MAPL_GP_low, 'r-', n_max, MAPL_GP_indep_low, 'r--') ax[1].plot(n_max, GP_high, 'b-', label='GP') ax[1].plot(n_max, GP_indep_high, 'b--', label='GP (independent voxels)') ax[1].plot(0, NMSE_high['DTI'], 'go', label='DTI') ax[1].plot(n_max, DTI_GP_high, 'g-', label='DTI + GP') ax[1].plot(n_max, DTI_GP_indep_high, 'g--', label='DTI + GP (independent voxels)') ax[1].plot(0, NMSE_high['MAPL'], 'ro', label='MAPL') ax[1].plot(n_max, MAPL_GP_high, 'r-', label='MAPL + GP') ax[1].plot(n_max, MAPL_GP_indep_high, 'r--', label='MAPL + GP (independent voxels)') for a in ax: a.axis([-0.5, 8.5, 0, 0.25]) a.set_xticks(n_max) a.set_xlabel('Angular order') a.set_ylabel('NMSE') a.spines['right'].set_visible(False) a.spines['top'].set_visible(False) box = a.get_position() a.set_position([box.x0, box.y0, box.width, box.height]) lgd = ax[1].legend(loc='upper center', bbox_to_anchor=(-0.2, -0.2), ncol=4) fig.tight_layout(w_pad=5) plt.savefig('NMSE.png', transparent=True, bbox_extra_artists=(lgd,), bbox_inches='tight') """ Explanation: Load and plot End of explanation """
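The same metrics can also be collected into a single table, which is often easier to scan than the figures. This is a minimal sketch assuming the `NMSE_low` and `NMSE_high` dictionaries built above are still in scope.
import pandas as pd

# Gather the NMSE metrics into one DataFrame, sorted by the high-b-value error.
nmse_table = pd.DataFrame({'NMSE_low': pd.Series(NMSE_low),
                           'NMSE_high': pd.Series(NMSE_high)})
print(nmse_table.sort_values('NMSE_high'))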
navierula/Subreddit-Analysis-on-Eating-Disorders
Prediction.ipynb
mit
import pandas as pd anorexiaSubreddits = pd.read_csv("data/subreddits_anorexia.csv", encoding='ISO-8859-1') obesitySubreddits = pd.read_csv("data/subreddits_obesity.csv", encoding='ISO-8859-1') bothSubreddits = pd.read_csv("data/subreddits_both.csv", encoding='ISO-8859-1') """ Explanation: Load datasets into Pandas. End of explanation """ import hashlib anorexia_authors = anorexiaSubreddits.drop_duplicates(subset="author")['author'].apply(lambda a : hashlib.md5(a.encode()).hexdigest()).to_frame() obesity_authors = obesitySubreddits.drop_duplicates(subset="author")['author'].apply(lambda a : hashlib.md5(a.encode()).hexdigest()).to_frame() both_authors = bothSubreddits.drop_duplicates(subset="author")['author'].apply(lambda a : hashlib.md5(a.encode()).hexdigest()).to_frame() from tqdm import tqdm csv_filename = '../../data_full_preprocessed.csv' chunksize = 10000 count = 0 obesity_author_data_frames = [] anorexia_author_data_frames = [] neither_author_data_frames = [] anorexia_record_count = 0 obesity_record_count = 0 neither_record_count = 0 for chunk in tqdm(pd.read_csv(csv_filename, chunksize=chunksize)): chunk['author'] = chunk['author'].apply(lambda a : hashlib.md5(a.encode()).hexdigest()) anorexia_df = anorexia_authors.join(chunk.set_index('author'), on='author', how='inner', lsuffix='_left', rsuffix='_right') if anorexia_record_count < 10000 and not anorexia_df.empty: anorexia_author_data_frames.append(anorexia_df) anorexia_record_count += len(anorexia_df) obesity_df = obesity_authors.join(chunk.set_index('author'), on='author', how='inner', lsuffix='_left', rsuffix='_right') if obesity_record_count < 10000 and not obesity_df.empty: obesity_author_data_frames.append(obesity_df) obesity_record_count += len(obesity_df) # Use an outer join to get comments from users who have not posted about anorexia/obesity. neither_df = chunk.join(both_authors, on='author', how='outer', lsuffix='_left', rsuffix='_right') neither_df = neither_df[neither_df['author_right'].isnull()] if neither_record_count < 10000 and not neither_df.empty: neither_author_data_frames.append(neither_df) neither_record_count += len(neither_df) count += 1 if anorexia_record_count > 10000 and obesity_record_count > 10000 and neither_record_count > 10000: break print('Total # chunks processed: {}.'.format(count)) pd.concat(anorexia_author_data_frames).to_csv('data/anorexia_author_data.csv', index=False) pd.concat(obesity_author_data_frames).to_csv('data/obesity_author_data.csv', index=False) pd.concat(neither_author_data_frames).to_csv('data/neither_author_data.csv', index=False) """ Explanation: Extract authors for each class (use hashes instead of usernames to protect privacy). End of explanation """ import pandas as pd anorexia_author_data = pd.read_csv('data/anorexia_author_data.csv', encoding='ISO-8859-1') obesity_author_data = pd.read_csv('data/obesity_author_data.csv', encoding='ISO-8859-1') neither_author_data = pd.read_csv('data/neither_author_data.csv', encoding='ISO-8859-1') anorexia_author_data.insert(len(anorexia_author_data.columns), 'category', 'anorexia') obesity_author_data.insert(len(obesity_author_data.columns), 'category', 'obesity') neither_author_data.insert(len(neither_author_data.columns), 'category', 'neither') # Each dataset has ~10K rows so split into training and test sets of 5000 rows each. 
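# Note: head()/tail() below give a deterministic split that follows the order in which
# records were appended chunk by chunk above. A shuffled split would also be possible,
# for example with scikit-learn (hypothetical alternative, not used here):
#   from sklearn.model_selection import train_test_split
#   train_part, test_part = train_test_split(anorexia_author_data, test_size=5000, random_state=0)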
anorexia_author_data_train = anorexia_author_data.head(5000)
# use tail() so the test split does not overlap the training split
anorexia_author_data_test = anorexia_author_data.tail(5000)
obesity_author_data_train = obesity_author_data.head(5000)
obesity_author_data_test = obesity_author_data.tail(5000)
neither_author_data_train = neither_author_data.head(5000)
neither_author_data_test = neither_author_data.tail(5000)

train_data = pd.concat([anorexia_author_data_train, obesity_author_data_train, neither_author_data_train])
test_data = pd.concat([anorexia_author_data_test, obesity_author_data_test, neither_author_data_test])
"""
Explanation: Sample the data.
End of explanation
"""
from pprint import pprint
from time import time
import logging

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')

pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])

parameters = {
    'vect__max_df': (0.5, 0.75, 1.0),
    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
}

if __name__ == "__main__":
    # multiprocessing requires the fork to happen in a __main__ protected
    # block

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)

    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    t0 = time()
    grid_search.fit(train_data['body'].values.tolist(), train_data['category'].values.tolist())
    print("done in %0.3fs" % (time() - t0))
    print()

    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters_a = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters_a[param_name]))

    y_true, y_pred = test_data['category'].values.tolist(), grid_search.predict(test_data['body'].values.tolist())
    print(classification_report(y_true, y_pred))
"""
Explanation: Feature extraction/Model selection pipeline
Based heavily on:
* http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html
* http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_digits.html
End of explanation
"""
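As a quick follow-up (assuming `y_true` and `y_pred` from the cell above are still in scope), a confusion matrix shows which of the three classes are confused with each other, which the aggregate report does not reveal directly.
from sklearn.metrics import confusion_matrix

labels = ['anorexia', 'obesity', 'neither']
# Rows are the true classes, columns are the predicted classes.
print(labels)
print(confusion_matrix(y_true, y_pred, labels=labels))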
vbsteja/code
Python/probabilistic/stat101.ipynb
apache-2.0
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# np.seed is not a NumPy function; np.random.seed() is what actually seeds the RNG.
np.random.seed(33)
"""
Explanation: <a href="https://colab.research.google.com/github/vbsteja/code/blob/master/Python/probabilistic/stat101.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
End of explanation
"""
a = np.random.randint(100,1000,10)
a_zscores = stats.zscore(a)
plt.figure(1,figsize=(9,3))
plt.subplot(2,1,1)
plt.plot(a)
plt.subplot(2,1,2)
plt.plot(a_zscores)
plt.show()
"""
Explanation: Z Score significance
End of explanation
"""
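A minimal sketch of what z-score significance usually means in practice, assuming the `a_zscores` array from the cell above is in scope: each z-score is converted to a two-sided p-value under a standard normal reference.
# Two-sided p-values for each z-score under a standard normal reference distribution.
p_values = 2 * stats.norm.sf(np.abs(a_zscores))
for z, p in zip(a_zscores, p_values):
    print("z = %+.2f -> p = %.3f" % (z, p))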